xref: /openbmc/qemu/hw/ide/core.c (revision a00cfed0ed9c366a383fe39c2d283362ebaaefd3)
1  /*
2   * QEMU IDE disk and CD/DVD-ROM Emulator
3   *
4   * Copyright (c) 2003 Fabrice Bellard
5   * Copyright (c) 2006 Openedhand Ltd.
6   *
7   * Permission is hereby granted, free of charge, to any person obtaining a copy
8   * of this software and associated documentation files (the "Software"), to deal
9   * in the Software without restriction, including without limitation the rights
10   * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11   * copies of the Software, and to permit persons to whom the Software is
12   * furnished to do so, subject to the following conditions:
13   *
14   * The above copyright notice and this permission notice shall be included in
15   * all copies or substantial portions of the Software.
16   *
17   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20   * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21   * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22   * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23   * THE SOFTWARE.
24   */
25  
26  #include "qemu/osdep.h"
27  #include "hw/isa/isa.h"
28  #include "migration/vmstate.h"
29  #include "qemu/error-report.h"
30  #include "qemu/main-loop.h"
31  #include "qemu/timer.h"
32  #include "sysemu/sysemu.h"
33  #include "sysemu/blockdev.h"
34  #include "sysemu/dma.h"
35  #include "hw/block/block.h"
36  #include "sysemu/block-backend.h"
37  #include "qapi/error.h"
38  #include "qemu/cutils.h"
39  #include "sysemu/replay.h"
40  #include "sysemu/runstate.h"
41  #include "hw/ide/internal.h"
42  #include "trace.h"
43  
44  /* These values were based on a Seagate ST3500418AS but have been modified
45     to make more sense in QEMU */
46  static const int smart_attributes[][12] = {
47      /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
48      /* raw read error rate*/
49      { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
50      /* spin up */
51      { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
52      /* start stop count */
53      { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
54      /* remapped sectors */
55      { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
56      /* power on hours */
57      { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
58      /* power cycle count */
59      { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
60      /* airflow-temperature-celsius */
61      { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
62  };
63  
64  const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
65      [IDE_DMA_READ] = "DMA READ",
66      [IDE_DMA_WRITE] = "DMA WRITE",
67      [IDE_DMA_TRIM] = "DMA TRIM",
68      [IDE_DMA_ATAPI] = "DMA ATAPI"
69  };
70  
71  static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
72  {
73      if ((unsigned)enval < IDE_DMA__COUNT) {
74          return IDE_DMA_CMD_lookup[enval];
75      }
76      return "DMA UNKNOWN CMD";
77  }
78  
79  static void ide_dummy_transfer_stop(IDEState *s);
80  
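/*
 * Fill an ATA IDENTIFY string field: copy src into str, padding with spaces
 * up to len, and swap the two bytes of each 16-bit word (str[i ^ 1]) so the
 * characters come out in the order the spec expects once the buffer is read
 * as little-endian 16-bit words.
 */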
81  static void padstr(char *str, const char *src, int len)
82  {
83      int i, v;
84      for(i = 0; i < len; i++) {
85          if (*src)
86              v = *src++;
87          else
88              v = ' ';
89          str[i^1] = v;
90      }
91  }
92  
93  static void put_le16(uint16_t *p, unsigned int v)
94  {
95      *p = cpu_to_le16(v);
96  }
97  
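/*
 * Refresh the capacity words of the IDENTIFY data: words 60-61 hold the
 * LBA28 sector count, words 100-103 the LBA48 sector count.
 */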
98  static void ide_identify_size(IDEState *s)
99  {
100      uint16_t *p = (uint16_t *)s->identify_data;
101      put_le16(p + 60, s->nb_sectors);
102      put_le16(p + 61, s->nb_sectors >> 16);
103      put_le16(p + 100, s->nb_sectors);
104      put_le16(p + 101, s->nb_sectors >> 16);
105      put_le16(p + 102, s->nb_sectors >> 32);
106      put_le16(p + 103, s->nb_sectors >> 48);
107  }
108  
109  static void ide_identify(IDEState *s)
110  {
111      uint16_t *p;
112      unsigned int oldsize;
113      IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
114  
115      p = (uint16_t *)s->identify_data;
116      if (s->identify_set) {
117          goto fill_buffer;
118      }
119      memset(p, 0, sizeof(s->identify_data));
120  
121      put_le16(p + 0, 0x0040);
122      put_le16(p + 1, s->cylinders);
123      put_le16(p + 3, s->heads);
124      put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
125      put_le16(p + 5, 512); /* XXX: retired, remove ? */
126      put_le16(p + 6, s->sectors);
127      padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
128      put_le16(p + 20, 3); /* XXX: retired, remove ? */
129      put_le16(p + 21, 512); /* cache size in sectors */
130      put_le16(p + 22, 4); /* ecc bytes */
131      padstr((char *)(p + 23), s->version, 8); /* firmware version */
132      padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
133  #if MAX_MULT_SECTORS > 1
134      put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
135  #endif
136      put_le16(p + 48, 1); /* dword I/O */
137      put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
138      put_le16(p + 51, 0x200); /* PIO transfer cycle */
139      put_le16(p + 52, 0x200); /* DMA transfer cycle */
140      put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
141      put_le16(p + 54, s->cylinders);
142      put_le16(p + 55, s->heads);
143      put_le16(p + 56, s->sectors);
144      oldsize = s->cylinders * s->heads * s->sectors;
145      put_le16(p + 57, oldsize);
146      put_le16(p + 58, oldsize >> 16);
147      if (s->mult_sectors)
148          put_le16(p + 59, 0x100 | s->mult_sectors);
149      /* *(p + 60) := nb_sectors       -- see ide_identify_size */
150      /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
151      put_le16(p + 62, 0x07); /* single word dma0-2 supported */
152      put_le16(p + 63, 0x07); /* mdma0-2 supported */
153      put_le16(p + 64, 0x03); /* pio3-4 supported */
154      put_le16(p + 65, 120);
155      put_le16(p + 66, 120);
156      put_le16(p + 67, 120);
157      put_le16(p + 68, 120);
158      if (dev && dev->conf.discard_granularity) {
159          put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
160      }
161  
162      if (s->ncq_queues) {
163          put_le16(p + 75, s->ncq_queues - 1);
164          /* NCQ supported */
165          put_le16(p + 76, (1 << 8));
166      }
167  
168      put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
169      put_le16(p + 81, 0x16); /* conforms to ata5 */
170      /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
171      put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
172      /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
173      put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
174      /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
175      if (s->wwn) {
176          put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
177      } else {
178          put_le16(p + 84, (1 << 14) | 0);
179      }
180      /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
181      if (blk_enable_write_cache(s->blk)) {
182          put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
183      } else {
184          put_le16(p + 85, (1 << 14) | 1);
185      }
186      /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
187      put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
188      /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
189      if (s->wwn) {
190          put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
191      } else {
192          put_le16(p + 87, (1 << 14) | 0);
193      }
194      put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
195      put_le16(p + 93, 1 | (1 << 14) | 0x2000);
196      /* *(p + 100) := nb_sectors       -- see ide_identify_size */
197      /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
198      /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
199      /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
200  
201      if (dev && dev->conf.physical_block_size)
202          put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
203      if (s->wwn) {
204          /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
205          put_le16(p + 108, s->wwn >> 48);
206          put_le16(p + 109, s->wwn >> 32);
207          put_le16(p + 110, s->wwn >> 16);
208          put_le16(p + 111, s->wwn);
209      }
210      if (dev && dev->conf.discard_granularity) {
211          put_le16(p + 169, 1); /* TRIM support */
212      }
213      if (dev) {
214          put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
215      }
216  
217      ide_identify_size(s);
218      s->identify_set = 1;
219  
220  fill_buffer:
221      memcpy(s->io_buffer, p, sizeof(s->identify_data));
222  }
223  
224  static void ide_atapi_identify(IDEState *s)
225  {
226      uint16_t *p;
227  
228      p = (uint16_t *)s->identify_data;
229      if (s->identify_set) {
230          goto fill_buffer;
231      }
232      memset(p, 0, sizeof(s->identify_data));
233  
234      /* Removable CDROM, 50us response, 12 byte packets */
235      put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
236      padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
237      put_le16(p + 20, 3); /* buffer type */
238      put_le16(p + 21, 512); /* cache size in sectors */
239      put_le16(p + 22, 4); /* ecc bytes */
240      padstr((char *)(p + 23), s->version, 8); /* firmware version */
241      padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
242      put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
243  #ifdef USE_DMA_CDROM
244      put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
245      put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
246      put_le16(p + 62, 7);  /* single word dma0-2 supported */
247      put_le16(p + 63, 7);  /* mdma0-2 supported */
248  #else
249      put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
250      put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
251      put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
252  #endif
253      put_le16(p + 64, 3); /* pio3-4 supported */
254      put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
255      put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
256      put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
257      put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
258  
259      put_le16(p + 71, 30); /* in ns */
260      put_le16(p + 72, 30); /* in ns */
261  
262      if (s->ncq_queues) {
263          put_le16(p + 75, s->ncq_queues - 1);
264          /* NCQ supported */
265          put_le16(p + 76, (1 << 8));
266      }
267  
268      put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
269      if (s->wwn) {
270          put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
271          put_le16(p + 87, (1 << 8)); /* WWN enabled */
272      }
273  
274  #ifdef USE_DMA_CDROM
275      put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
276  #endif
277  
278      if (s->wwn) {
279          /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
280          put_le16(p + 108, s->wwn >> 48);
281          put_le16(p + 109, s->wwn >> 32);
282          put_le16(p + 110, s->wwn >> 16);
283          put_le16(p + 111, s->wwn);
284      }
285  
286      s->identify_set = 1;
287  
288  fill_buffer:
289      memcpy(s->io_buffer, p, sizeof(s->identify_data));
290  }
291  
292  static void ide_cfata_identify_size(IDEState *s)
293  {
294      uint16_t *p = (uint16_t *)s->identify_data;
295      put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
296      put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
297      put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
298      put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
299  }
300  
301  static void ide_cfata_identify(IDEState *s)
302  {
303      uint16_t *p;
304      uint32_t cur_sec;
305  
306      p = (uint16_t *)s->identify_data;
307      if (s->identify_set) {
308          goto fill_buffer;
309      }
310      memset(p, 0, sizeof(s->identify_data));
311  
312      cur_sec = s->cylinders * s->heads * s->sectors;
313  
314      put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
315      put_le16(p + 1, s->cylinders);		/* Default cylinders */
316      put_le16(p + 3, s->heads);			/* Default heads */
317      put_le16(p + 6, s->sectors);		/* Default sectors per track */
318      /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
319      /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
320      padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
321      put_le16(p + 22, 0x0004);			/* ECC bytes */
322      padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
323      padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
324  #if MAX_MULT_SECTORS > 1
325      put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
326  #else
327      put_le16(p + 47, 0x0000);
328  #endif
329      put_le16(p + 49, 0x0f00);			/* Capabilities */
330      put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
331      put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
332      put_le16(p + 53, 0x0003);			/* Translation params valid */
333      put_le16(p + 54, s->cylinders);		/* Current cylinders */
334      put_le16(p + 55, s->heads);			/* Current heads */
335      put_le16(p + 56, s->sectors);		/* Current sectors */
336      put_le16(p + 57, cur_sec);			/* Current capacity */
337      put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
338      if (s->mult_sectors)			/* Multiple sector setting */
339          put_le16(p + 59, 0x100 | s->mult_sectors);
340      /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
341      /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
342      put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
343      put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
344      put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
345      put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
346      put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
347      put_le16(p + 82, 0x400c);			/* Command Set supported */
348      put_le16(p + 83, 0x7068);			/* Command Set supported */
349      put_le16(p + 84, 0x4000);			/* Features supported */
350      put_le16(p + 85, 0x000c);			/* Command Set enabled */
351      put_le16(p + 86, 0x7044);			/* Command Set enabled */
352      put_le16(p + 87, 0x4000);			/* Features enabled */
353      put_le16(p + 91, 0x4060);			/* Current APM level */
354      put_le16(p + 129, 0x0002);			/* Current features option */
355      put_le16(p + 130, 0x0005);			/* Reassigned sectors */
356      put_le16(p + 131, 0x0001);			/* Initial power mode */
357      put_le16(p + 132, 0x0000);			/* User signature */
358      put_le16(p + 160, 0x8100);			/* Power requirement */
359      put_le16(p + 161, 0x8001);			/* CF command set */
360  
361      ide_cfata_identify_size(s);
362      s->identify_set = 1;
363  
364  fill_buffer:
365      memcpy(s->io_buffer, p, sizeof(s->identify_data));
366  }
367  
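/*
 * Place the ATA/ATAPI signature in the task-file registers: lcyl/hcyl of
 * 0x14/0xeb identifies a packet (ATAPI) device, 0x00/0x00 an ATA device,
 * and 0xff/0xff no device at all.
 */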
368  static void ide_set_signature(IDEState *s)
369  {
370      s->select &= ~(ATA_DEV_HS); /* clear head */
371      /* put signature */
372      s->nsector = 1;
373      s->sector = 1;
374      if (s->drive_kind == IDE_CD) {
375          s->lcyl = 0x14;
376          s->hcyl = 0xeb;
377      } else if (s->blk) {
378          s->lcyl = 0;
379          s->hcyl = 0;
380      } else {
381          s->lcyl = 0xff;
382          s->hcyl = 0xff;
383      }
384  }
385  
386  static bool ide_sect_range_ok(IDEState *s,
387                                uint64_t sector, uint64_t nb_sectors)
388  {
389      uint64_t total_sectors;
390  
391      blk_get_geometry(s->blk, &total_sectors);
392      if (sector > total_sectors || nb_sectors > total_sectors - sector) {
393          return false;
394      }
395      return true;
396  }
397  
398  typedef struct TrimAIOCB {
399      BlockAIOCB common;
400      IDEState *s;
401      QEMUBH *bh;
402      int ret;
403      QEMUIOVector *qiov;
404      BlockAIOCB *aiocb;
405      int i, j;
406  } TrimAIOCB;
407  
408  static void trim_aio_cancel(BlockAIOCB *acb)
409  {
410      TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
411  
412      /* Exit the loop so ide_issue_trim_cb will not continue */
413      iocb->j = iocb->qiov->niov - 1;
414      iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
415  
416      iocb->ret = -ECANCELED;
417  
418      if (iocb->aiocb) {
419          blk_aio_cancel_async(iocb->aiocb);
420          iocb->aiocb = NULL;
421      }
422  }
423  
424  static const AIOCBInfo trim_aiocb_info = {
425      .aiocb_size         = sizeof(TrimAIOCB),
426      .cancel_async       = trim_aio_cancel,
427  };
428  
429  static void ide_trim_bh_cb(void *opaque)
430  {
431      TrimAIOCB *iocb = opaque;
432  
433      iocb->common.cb(iocb->common.opaque, iocb->ret);
434  
435      qemu_bh_delete(iocb->bh);
436      iocb->bh = NULL;
437      qemu_aio_unref(iocb);
438  }
439  
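/*
 * Completion callback for one discard range. Walks the DSM TRIM payload
 * (one 8-byte entry per range: a 48-bit LBA plus a 16-bit sector count),
 * submits one blk_aio_pdiscard() per non-empty entry and re-enters itself
 * as that request's callback until the payload is exhausted.
 */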
440  static void ide_issue_trim_cb(void *opaque, int ret)
441  {
442      TrimAIOCB *iocb = opaque;
443      IDEState *s = iocb->s;
444  
445      if (iocb->i >= 0) {
446          if (ret >= 0) {
447              block_acct_done(blk_get_stats(s->blk), &s->acct);
448          } else {
449              block_acct_failed(blk_get_stats(s->blk), &s->acct);
450          }
451      }
452  
453      if (ret >= 0) {
454          while (iocb->j < iocb->qiov->niov) {
455              int j = iocb->j;
456              while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
457                  int i = iocb->i;
458                  uint64_t *buffer = iocb->qiov->iov[j].iov_base;
459  
460                  /* 6-byte LBA + 2-byte range per entry */
461                  uint64_t entry = le64_to_cpu(buffer[i]);
462                  uint64_t sector = entry & 0x0000ffffffffffffULL;
463                  uint16_t count = entry >> 48;
464  
465                  if (count == 0) {
466                      continue;
467                  }
468  
469                  if (!ide_sect_range_ok(s, sector, count)) {
470                      block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
471                      iocb->ret = -EINVAL;
472                      goto done;
473                  }
474  
475                  block_acct_start(blk_get_stats(s->blk), &s->acct,
476                                   count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);
477  
478                  /* Got an entry! Submit and exit.  */
479                  iocb->aiocb = blk_aio_pdiscard(s->blk,
480                                                 sector << BDRV_SECTOR_BITS,
481                                                 count << BDRV_SECTOR_BITS,
482                                                 ide_issue_trim_cb, opaque);
483                  return;
484              }
485  
486              iocb->j++;
487              iocb->i = -1;
488          }
489      } else {
490          iocb->ret = ret;
491      }
492  
493  done:
494      iocb->aiocb = NULL;
495      if (iocb->bh) {
496          replay_bh_schedule_event(iocb->bh);
497      }
498  }
499  
500  BlockAIOCB *ide_issue_trim(
501          int64_t offset, QEMUIOVector *qiov,
502          BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
503  {
504      IDEState *s = opaque;
505      TrimAIOCB *iocb;
506  
507      iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
508      iocb->s = s;
509      iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
510      iocb->ret = 0;
511      iocb->qiov = qiov;
512      iocb->i = -1;
513      iocb->j = 0;
514      ide_issue_trim_cb(iocb, 0);
515      return &iocb->common;
516  }
517  
518  void ide_abort_command(IDEState *s)
519  {
520      ide_transfer_stop(s);
521      s->status = READY_STAT | ERR_STAT;
522      s->error = ABRT_ERR;
523  }
524  
525  static void ide_set_retry(IDEState *s)
526  {
527      s->bus->retry_unit = s->unit;
528      s->bus->retry_sector_num = ide_get_sector(s);
529      s->bus->retry_nsector = s->nsector;
530  }
531  
532  static void ide_clear_retry(IDEState *s)
533  {
534      s->bus->retry_unit = -1;
535      s->bus->retry_sector_num = 0;
536      s->bus->retry_nsector = 0;
537  }
538  
539  /* prepare a PIO data transfer and record what to do when it completes */
540  bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
541                                    EndTransferFunc *end_transfer_func)
542  {
543      s->data_ptr = buf;
544      s->data_end = buf + size;
545      ide_set_retry(s);
546      if (!(s->status & ERR_STAT)) {
547          s->status |= DRQ_STAT;
548      }
549      if (!s->bus->dma->ops->pio_transfer) {
550          s->end_transfer_func = end_transfer_func;
551          return false;
552      }
553      s->bus->dma->ops->pio_transfer(s->bus->dma);
554      return true;
555  }
556  
557  void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
558                          EndTransferFunc *end_transfer_func)
559  {
560      if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
561          end_transfer_func(s);
562      }
563  }
564  
565  static void ide_cmd_done(IDEState *s)
566  {
567      if (s->bus->dma->ops->cmd_done) {
568          s->bus->dma->ops->cmd_done(s->bus->dma);
569      }
570  }
571  
572  static void ide_transfer_halt(IDEState *s)
573  {
574      s->end_transfer_func = ide_transfer_stop;
575      s->data_ptr = s->io_buffer;
576      s->data_end = s->io_buffer;
577      s->status &= ~DRQ_STAT;
578  }
579  
580  void ide_transfer_stop(IDEState *s)
581  {
582      ide_transfer_halt(s);
583      ide_cmd_done(s);
584  }
585  
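/* Decode the current LBA48, LBA28 or CHS address from the task-file registers. */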
586  int64_t ide_get_sector(IDEState *s)
587  {
588      int64_t sector_num;
589      if (s->select & (ATA_DEV_LBA)) {
590          if (s->lba48) {
591              sector_num = ((int64_t)s->hob_hcyl << 40) |
592                  ((int64_t) s->hob_lcyl << 32) |
593                  ((int64_t) s->hob_sector << 24) |
594                  ((int64_t) s->hcyl << 16) |
595                  ((int64_t) s->lcyl << 8) | s->sector;
596          } else {
597              /* LBA28 */
598              sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
599                  (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
600          }
601      } else {
602          /* CHS */
603          sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
604              (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
605      }
606  
607      return sector_num;
608  }
609  
610  void ide_set_sector(IDEState *s, int64_t sector_num)
611  {
612      unsigned int cyl, r;
613      if (s->select & (ATA_DEV_LBA)) {
614          if (s->lba48) {
615              s->sector = sector_num;
616              s->lcyl = sector_num >> 8;
617              s->hcyl = sector_num >> 16;
618              s->hob_sector = sector_num >> 24;
619              s->hob_lcyl = sector_num >> 32;
620              s->hob_hcyl = sector_num >> 40;
621          } else {
622              /* LBA28 */
623              s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
624                  ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
625              s->hcyl = (sector_num >> 16);
626              s->lcyl = (sector_num >> 8);
627              s->sector = (sector_num);
628          }
629      } else {
630          /* CHS */
631          cyl = sector_num / (s->heads * s->sectors);
632          r = sector_num % (s->heads * s->sectors);
633          s->hcyl = cyl >> 8;
634          s->lcyl = cyl;
635          s->select = (s->select & ~(ATA_DEV_HS)) |
636              ((r / s->sectors) & (ATA_DEV_HS));
637          s->sector = (r % s->sectors) + 1;
638      }
639  }
640  
641  static void ide_rw_error(IDEState *s) {
642      ide_abort_command(s);
643      ide_set_irq(s->bus);
644  }
645  
646  static void ide_buffered_readv_cb(void *opaque, int ret)
647  {
648      IDEBufferedRequest *req = opaque;
649      if (!req->orphaned) {
650          if (!ret) {
651              assert(req->qiov.size == req->original_qiov->size);
652              qemu_iovec_from_buf(req->original_qiov, 0,
653                                  req->qiov.local_iov.iov_base,
654                                  req->original_qiov->size);
655          }
656          req->original_cb(req->original_opaque, ret);
657      }
658      QLIST_REMOVE(req, list);
659      qemu_vfree(qemu_iovec_buf(&req->qiov));
660      g_free(req);
661  }
662  
663  #define MAX_BUFFERED_REQS 16
664  
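/*
 * Reads issued through this helper go through a bounce buffer instead of
 * directly into the caller's iovec, so that ide_cancel_dma_sync() can report
 * cancellation immediately and mark the request as orphaned; the data is only
 * copied back to original_qiov if the request has not been orphaned by the
 * time it completes.
 */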
665  BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
666                                 QEMUIOVector *iov, int nb_sectors,
667                                 BlockCompletionFunc *cb, void *opaque)
668  {
669      BlockAIOCB *aioreq;
670      IDEBufferedRequest *req;
671      int c = 0;
672  
673      QLIST_FOREACH(req, &s->buffered_requests, list) {
674          c++;
675      }
676      if (c > MAX_BUFFERED_REQS) {
677          return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
678      }
679  
680      req = g_new0(IDEBufferedRequest, 1);
681      req->original_qiov = iov;
682      req->original_cb = cb;
683      req->original_opaque = opaque;
684      qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
685                          iov->size);
686  
687      aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
688                              &req->qiov, 0, ide_buffered_readv_cb, req);
689  
690      QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
691      return aioreq;
692  }
693  
694  /**
695   * Cancel all pending DMA requests.
696   * Any buffered DMA requests are instantly canceled,
697   * but any pending unbuffered DMA requests must be waited on.
698   */
699  void ide_cancel_dma_sync(IDEState *s)
700  {
701      IDEBufferedRequest *req;
702  
703      /* First invoke the callbacks of all buffered requests
704       * and flag those requests as orphaned. Ideally there are
705       * no unbuffered requests (scatter-gather DMA requests or
706       * write requests) pending and we can avoid draining. */
707      QLIST_FOREACH(req, &s->buffered_requests, list) {
708          if (!req->orphaned) {
709              trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
710              req->original_cb(req->original_opaque, -ECANCELED);
711          }
712          req->orphaned = true;
713      }
714  
715      /*
716       * We can't cancel scatter-gather DMA in the middle of the
717       * operation or a partial (not full) DMA transfer would reach
718       * the storage, so we wait for completion instead (we behave
719       * as if the DMA had already completed by the time the guest
720       * tried to cancel it by writing bmdma_cmd_writeb with
721       * BM_CMD_START cleared).
722       *
723       * In the future we'll be able to safely cancel the I/O if the
724       * whole DMA operation is submitted to disk with a single
725       * aio operation with preadv/pwritev.
726       */
727      if (s->bus->dma->aiocb) {
728          trace_ide_cancel_dma_sync_remaining();
729          blk_drain(s->blk);
730          assert(s->bus->dma->aiocb == NULL);
731      }
732  }
733  
734  static void ide_sector_read(IDEState *s);
735  
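/*
 * Completion callback for PIO sector reads: exposes up to req_nb_sectors of
 * data to the guest through the data port and re-arms ide_sector_read() for
 * the remaining sectors.
 */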
736  static void ide_sector_read_cb(void *opaque, int ret)
737  {
738      IDEState *s = opaque;
739      int n;
740  
741      s->pio_aiocb = NULL;
742      s->status &= ~BUSY_STAT;
743  
744      if (ret != 0) {
745          if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
746                                  IDE_RETRY_READ)) {
747              return;
748          }
749      }
750  
751      block_acct_done(blk_get_stats(s->blk), &s->acct);
752  
753      n = s->nsector;
754      if (n > s->req_nb_sectors) {
755          n = s->req_nb_sectors;
756      }
757  
758      ide_set_sector(s, ide_get_sector(s) + n);
759      s->nsector -= n;
760      /* Allow the guest to read the io_buffer */
761      ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
762      ide_set_irq(s->bus);
763  }
764  
765  static void ide_sector_read(IDEState *s)
766  {
767      int64_t sector_num;
768      int n;
769  
770      s->status = READY_STAT | SEEK_STAT;
771      s->error = 0; /* not needed by IDE spec, but needed by Windows */
772      sector_num = ide_get_sector(s);
773      n = s->nsector;
774  
775      if (n == 0) {
776          ide_transfer_stop(s);
777          return;
778      }
779  
780      s->status |= BUSY_STAT;
781  
782      if (n > s->req_nb_sectors) {
783          n = s->req_nb_sectors;
784      }
785  
786      trace_ide_sector_read(sector_num, n);
787  
788      if (!ide_sect_range_ok(s, sector_num, n)) {
789          ide_rw_error(s);
790          block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
791          return;
792      }
793  
794      qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
795  
796      block_acct_start(blk_get_stats(s->blk), &s->acct,
797                       n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
798      s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
799                                        ide_sector_read_cb, s);
800  }
801  
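/*
 * Account tx_bytes of the current transfer against the DMA engine's buffer
 * (e.g. the BMDMA PRD table) and release the scatter/gather list.
 */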
802  void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
803  {
804      if (s->bus->dma->ops->commit_buf) {
805          s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
806      }
807      s->io_buffer_offset += tx_bytes;
808      qemu_sglist_destroy(&s->sg);
809  }
810  
811  void ide_set_inactive(IDEState *s, bool more)
812  {
813      s->bus->dma->aiocb = NULL;
814      ide_clear_retry(s);
815      if (s->bus->dma->ops->set_inactive) {
816          s->bus->dma->ops->set_inactive(s->bus->dma, more);
817      }
818      ide_cmd_done(s);
819  }
820  
821  void ide_dma_error(IDEState *s)
822  {
823      dma_buf_commit(s, 0);
824      ide_abort_command(s);
825      ide_set_inactive(s, false);
826      ide_set_irq(s->bus);
827  }
828  
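/*
 * Apply the drive's configured rerror/werror policy to a failed request.
 * Returns 0 if the error is ignored (the caller should complete the request
 * as if it had succeeded), non-zero if the request has been failed or will
 * be retried once the VM is resumed.
 */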
829  int ide_handle_rw_error(IDEState *s, int error, int op)
830  {
831      bool is_read = (op & IDE_RETRY_READ) != 0;
832      BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
833  
834      if (action == BLOCK_ERROR_ACTION_STOP) {
835          assert(s->bus->retry_unit == s->unit);
836          s->bus->error_status = op;
837      } else if (action == BLOCK_ERROR_ACTION_REPORT) {
838          block_acct_failed(blk_get_stats(s->blk), &s->acct);
839          if (IS_IDE_RETRY_DMA(op)) {
840              ide_dma_error(s);
841          } else if (IS_IDE_RETRY_ATAPI(op)) {
842              ide_atapi_io_error(s, -error);
843          } else {
844              ide_rw_error(s);
845          }
846      }
847      blk_error_action(s->blk, action, is_read, error);
848      return action != BLOCK_ERROR_ACTION_IGNORE;
849  }
850  
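/*
 * Completion callback for legacy (non-NCQ) DMA transfers: commits the bytes
 * just transferred, advances the current sector, then either maps the next
 * chunk described by the PRD table and issues the next block-layer request,
 * or finishes the command once nsector reaches zero.
 */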
851  static void ide_dma_cb(void *opaque, int ret)
852  {
853      IDEState *s = opaque;
854      int n;
855      int64_t sector_num;
856      uint64_t offset;
857      bool stay_active = false;
858      int32_t prep_size = 0;
859  
860      if (ret == -EINVAL) {
861          ide_dma_error(s);
862          return;
863      }
864  
865      if (ret < 0) {
866          if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
867              s->bus->dma->aiocb = NULL;
868              dma_buf_commit(s, 0);
869              return;
870          }
871      }
872  
873      if (s->io_buffer_size > s->nsector * 512) {
874          /*
875           * The PRDs were longer than needed for this request.
876           * The Active bit must remain set after the request completes.
877           */
878          n = s->nsector;
879          stay_active = true;
880      } else {
881          n = s->io_buffer_size >> 9;
882      }
883  
884      sector_num = ide_get_sector(s);
885      if (n > 0) {
886          assert(n * 512 == s->sg.size);
887          dma_buf_commit(s, s->sg.size);
888          sector_num += n;
889          ide_set_sector(s, sector_num);
890          s->nsector -= n;
891      }
892  
893      /* end of transfer ? */
894      if (s->nsector == 0) {
895          s->status = READY_STAT | SEEK_STAT;
896          ide_set_irq(s->bus);
897          goto eot;
898      }
899  
900      /* launch next transfer */
901      n = s->nsector;
902      s->io_buffer_index = 0;
903      s->io_buffer_size = n * 512;
904      prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
905      /* prepare_buf() must succeed and respect the limit */
906      assert(prep_size >= 0 && prep_size <= n * 512);
907  
908      /*
909       * Now prep_size stores the number of bytes in the sglist, and
910       * s->io_buffer_size stores the number of bytes described by the PRDs.
911       */
912  
913      if (prep_size < n * 512) {
914          /*
915           * The PRDs are too short for this request. Error condition!
916           * Reset the Active bit and don't raise the interrupt.
917           */
918          s->status = READY_STAT | SEEK_STAT;
919          dma_buf_commit(s, 0);
920          goto eot;
921      }
922  
923      trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
924  
925      if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
926          !ide_sect_range_ok(s, sector_num, n)) {
927          ide_dma_error(s);
928          block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
929          return;
930      }
931  
932      offset = sector_num << BDRV_SECTOR_BITS;
933      switch (s->dma_cmd) {
934      case IDE_DMA_READ:
935          s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
936                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
937          break;
938      case IDE_DMA_WRITE:
939          s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
940                                             BDRV_SECTOR_SIZE, ide_dma_cb, s);
941          break;
942      case IDE_DMA_TRIM:
943          s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
944                                          &s->sg, offset, BDRV_SECTOR_SIZE,
945                                          ide_issue_trim, s, ide_dma_cb, s,
946                                          DMA_DIRECTION_TO_DEVICE);
947          break;
948      default:
949          abort();
950      }
951      return;
952  
953  eot:
954      if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
955          block_acct_done(blk_get_stats(s->blk), &s->acct);
956      }
957      ide_set_inactive(s, stay_active);
958  }
959  
960  static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
961  {
962      s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
963      s->io_buffer_size = 0;
964      s->dma_cmd = dma_cmd;
965  
966      switch (dma_cmd) {
967      case IDE_DMA_READ:
968          block_acct_start(blk_get_stats(s->blk), &s->acct,
969                           s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
970          break;
971      case IDE_DMA_WRITE:
972          block_acct_start(blk_get_stats(s->blk), &s->acct,
973                           s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
974          break;
975      default:
976          break;
977      }
978  
979      ide_start_dma(s, ide_dma_cb);
980  }
981  
982  void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
983  {
984      s->io_buffer_index = 0;
985      ide_set_retry(s);
986      if (s->bus->dma->ops->start_dma) {
987          s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
988      }
989  }
990  
991  static void ide_sector_write(IDEState *s);
992  
993  static void ide_sector_write_timer_cb(void *opaque)
994  {
995      IDEState *s = opaque;
996      ide_set_irq(s->bus);
997  }
998  
999  static void ide_sector_write_cb(void *opaque, int ret)
1000  {
1001      IDEState *s = opaque;
1002      int n;
1003  
1004      s->pio_aiocb = NULL;
1005      s->status &= ~BUSY_STAT;
1006  
1007      if (ret != 0) {
1008          if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
1009              return;
1010          }
1011      }
1012  
1013      block_acct_done(blk_get_stats(s->blk), &s->acct);
1014  
1015      n = s->nsector;
1016      if (n > s->req_nb_sectors) {
1017          n = s->req_nb_sectors;
1018      }
1019      s->nsector -= n;
1020  
1021      ide_set_sector(s, ide_get_sector(s) + n);
1022      if (s->nsector == 0) {
1023          /* no more sectors to write */
1024          ide_transfer_stop(s);
1025      } else {
1026          int n1 = s->nsector;
1027          if (n1 > s->req_nb_sectors) {
1028              n1 = s->req_nb_sectors;
1029          }
1030          ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1031                             ide_sector_write);
1032      }
1033  
1034      if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1035          /* It seems there is a bug in the Windows 2000 installer HDD
1036             IDE driver which fills the disk with empty logs when the
1037             IDE write IRQ comes too early. This hack tries to correct
1038             that at the expense of slower write performance. Use this
1039             option _only_ to install Windows 2000. You must disable it
1040             for normal use. */
1041          timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1042                    (NANOSECONDS_PER_SECOND / 1000));
1043      } else {
1044          ide_set_irq(s->bus);
1045      }
1046  }
1047  
1048  static void ide_sector_write(IDEState *s)
1049  {
1050      int64_t sector_num;
1051      int n;
1052  
1053      s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1054      sector_num = ide_get_sector(s);
1055  
1056      n = s->nsector;
1057      if (n > s->req_nb_sectors) {
1058          n = s->req_nb_sectors;
1059      }
1060  
1061      trace_ide_sector_write(sector_num, n);
1062  
1063      if (!ide_sect_range_ok(s, sector_num, n)) {
1064          ide_rw_error(s);
1065          block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1066          return;
1067      }
1068  
1069      qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
1070  
1071      block_acct_start(blk_get_stats(s->blk), &s->acct,
1072                       n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1073      s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1074                                     &s->qiov, 0, ide_sector_write_cb, s);
1075  }
1076  
1077  static void ide_flush_cb(void *opaque, int ret)
1078  {
1079      IDEState *s = opaque;
1080  
1081      s->pio_aiocb = NULL;
1082  
1083      if (ret < 0) {
1084          /* XXX: What sector number to set here? */
1085          if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1086              return;
1087          }
1088      }
1089  
1090      if (s->blk) {
1091          block_acct_done(blk_get_stats(s->blk), &s->acct);
1092      }
1093      s->status = READY_STAT | SEEK_STAT;
1094      ide_cmd_done(s);
1095      ide_set_irq(s->bus);
1096  }
1097  
1098  static void ide_flush_cache(IDEState *s)
1099  {
1100      if (s->blk == NULL) {
1101          ide_flush_cb(s, 0);
1102          return;
1103      }
1104  
1105      s->status |= BUSY_STAT;
1106      ide_set_retry(s);
1107      block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1108      s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1109  }
1110  
1111  static void ide_cfata_metadata_inquiry(IDEState *s)
1112  {
1113      uint16_t *p;
1114      uint32_t spd;
1115  
1116      p = (uint16_t *) s->io_buffer;
1117      memset(p, 0, 0x200);
1118      spd = ((s->mdata_size - 1) >> 9) + 1;
1119  
1120      put_le16(p + 0, 0x0001);			/* Data format revision */
1121      put_le16(p + 1, 0x0000);			/* Media property: silicon */
1122      put_le16(p + 2, s->media_changed);		/* Media status */
1123      put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1124      put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1125      put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1126      put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1127  }
1128  
1129  static void ide_cfata_metadata_read(IDEState *s)
1130  {
1131      uint16_t *p;
1132  
1133      if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1134          s->status = ERR_STAT;
1135          s->error = ABRT_ERR;
1136          return;
1137      }
1138  
1139      p = (uint16_t *) s->io_buffer;
1140      memset(p, 0, 0x200);
1141  
1142      put_le16(p + 0, s->media_changed);		/* Media status */
1143      memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1144                      MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1145                                      s->nsector << 9), 0x200 - 2));
1146  }
1147  
1148  static void ide_cfata_metadata_write(IDEState *s)
1149  {
1150      if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1151          s->status = ERR_STAT;
1152          s->error = ABRT_ERR;
1153          return;
1154      }
1155  
1156      s->media_changed = 0;
1157  
1158      memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1159                      s->io_buffer + 2,
1160                      MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1161                                      s->nsector << 9), 0x200 - 2));
1162  }
1163  
1164  /* called when the inserted state of the media has changed */
1165  static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1166  {
1167      IDEState *s = opaque;
1168      uint64_t nb_sectors;
1169  
1170      s->tray_open = !load;
1171      blk_get_geometry(s->blk, &nb_sectors);
1172      s->nb_sectors = nb_sectors;
1173  
1174      /*
1175       * First indicate to the guest that a CD has been removed.  That's
1176       * done on the next command the guest sends us.
1177       *
1178       * Then we set UNIT_ATTENTION, by which the guest will
1179       * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1180       */
1181      s->cdrom_changed = 1;
1182      s->events.new_media = true;
1183      s->events.eject_request = false;
1184      ide_set_irq(s->bus);
1185  }
1186  
1187  static void ide_cd_eject_request_cb(void *opaque, bool force)
1188  {
1189      IDEState *s = opaque;
1190  
1191      s->events.eject_request = true;
1192      if (force) {
1193          s->tray_locked = false;
1194      }
1195      ide_set_irq(s->bus);
1196  }
1197  
1198  static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1199  {
1200      s->lba48 = lba48;
1201  
1202      /* Handle the 'magic' 0 nsector count conversion here. To avoid
1203       * fiddling with the rest of the read logic, we just store the
1204       * full sector count in ->nsector and ignore ->hob_nsector from now on
1205       */
1206      if (!s->lba48) {
1207          if (!s->nsector)
1208              s->nsector = 256;
1209      } else {
1210          if (!s->nsector && !s->hob_nsector)
1211              s->nsector = 65536;
1212          else {
1213              int lo = s->nsector;
1214              int hi = s->hob_nsector;
1215  
1216              s->nsector = (hi << 8) | lo;
1217          }
1218      }
1219  }
1220  
1221  static void ide_clear_hob(IDEBus *bus)
1222  {
1223      /* any write clears HOB high bit of device control register */
1224      bus->cmd &= ~(IDE_CTRL_HOB);
1225  }
1226  
1227  /* IOport [W]rite [R]egisters */
1228  enum ATA_IOPORT_WR {
1229      ATA_IOPORT_WR_DATA = 0,
1230      ATA_IOPORT_WR_FEATURES = 1,
1231      ATA_IOPORT_WR_SECTOR_COUNT = 2,
1232      ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1233      ATA_IOPORT_WR_CYLINDER_LOW = 4,
1234      ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1235      ATA_IOPORT_WR_DEVICE_HEAD = 6,
1236      ATA_IOPORT_WR_COMMAND = 7,
1237      ATA_IOPORT_WR_NUM_REGISTERS,
1238  };
1239  
1240  const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1241      [ATA_IOPORT_WR_DATA] = "Data",
1242      [ATA_IOPORT_WR_FEATURES] = "Features",
1243      [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1244      [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1245      [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1246      [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1247      [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1248      [ATA_IOPORT_WR_COMMAND] = "Command"
1249  };
1250  
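/*
 * Handle a write to one of the eight command-block registers. Except for the
 * command register, the value is latched by both drives on the bus, and the
 * previous contents are preserved in the HOB ("high order byte") copies used
 * by LBA48 commands.
 */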
1251  void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1252  {
1253      IDEBus *bus = opaque;
1254      IDEState *s = idebus_active_if(bus);
1255      int reg_num = addr & 7;
1256  
1257      trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1258  
1259      /* ignore writes to command block while busy with previous command */
1260      if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1261          return;
1262      }
1263  
1264      /* NOTE: Device0 and Device1 both receive incoming register writes.
1265       * (They're on the same bus! They have to!) */
1266  
1267      switch (reg_num) {
1268      case 0:
1269          break;
1270      case ATA_IOPORT_WR_FEATURES:
1271          ide_clear_hob(bus);
1272          bus->ifs[0].hob_feature = bus->ifs[0].feature;
1273          bus->ifs[1].hob_feature = bus->ifs[1].feature;
1274          bus->ifs[0].feature = val;
1275          bus->ifs[1].feature = val;
1276          break;
1277      case ATA_IOPORT_WR_SECTOR_COUNT:
1278          ide_clear_hob(bus);
1279          bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1280          bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1281          bus->ifs[0].nsector = val;
1282          bus->ifs[1].nsector = val;
1283          break;
1284      case ATA_IOPORT_WR_SECTOR_NUMBER:
1285          ide_clear_hob(bus);
1286          bus->ifs[0].hob_sector = bus->ifs[0].sector;
1287          bus->ifs[1].hob_sector = bus->ifs[1].sector;
1288          bus->ifs[0].sector = val;
1289          bus->ifs[1].sector = val;
1290          break;
1291      case ATA_IOPORT_WR_CYLINDER_LOW:
1292          ide_clear_hob(bus);
1293          bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1294          bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1295          bus->ifs[0].lcyl = val;
1296          bus->ifs[1].lcyl = val;
1297          break;
1298      case ATA_IOPORT_WR_CYLINDER_HIGH:
1299          ide_clear_hob(bus);
1300          bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1301          bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1302          bus->ifs[0].hcyl = val;
1303          bus->ifs[1].hcyl = val;
1304          break;
1305      case ATA_IOPORT_WR_DEVICE_HEAD:
1306          ide_clear_hob(bus);
1307          bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1308          bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1309          /* select drive */
1310          bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1311          break;
1312      default:
1313      case ATA_IOPORT_WR_COMMAND:
1314          ide_clear_hob(bus);
1315          qemu_irq_lower(bus->irq);
1316          ide_exec_cmd(bus, val);
1317          break;
1318      }
1319  }
1320  
1321  static void ide_reset(IDEState *s)
1322  {
1323      trace_ide_reset(s);
1324  
1325      if (s->pio_aiocb) {
1326          blk_aio_cancel(s->pio_aiocb);
1327          s->pio_aiocb = NULL;
1328      }
1329  
1330      if (s->drive_kind == IDE_CFATA)
1331          s->mult_sectors = 0;
1332      else
1333          s->mult_sectors = MAX_MULT_SECTORS;
1334      /* ide regs */
1335      s->feature = 0;
1336      s->error = 0;
1337      s->nsector = 0;
1338      s->sector = 0;
1339      s->lcyl = 0;
1340      s->hcyl = 0;
1341  
1342      /* lba48 */
1343      s->hob_feature = 0;
1344      s->hob_sector = 0;
1345      s->hob_nsector = 0;
1346      s->hob_lcyl = 0;
1347      s->hob_hcyl = 0;
1348  
1349      s->select = (ATA_DEV_ALWAYS_ON);
1350      s->status = READY_STAT | SEEK_STAT;
1351  
1352      s->lba48 = 0;
1353  
1354      /* ATAPI specific */
1355      s->sense_key = 0;
1356      s->asc = 0;
1357      s->cdrom_changed = 0;
1358      s->packet_transfer_size = 0;
1359      s->elementary_transfer_size = 0;
1360      s->io_buffer_index = 0;
1361      s->cd_sector_size = 0;
1362      s->atapi_dma = 0;
1363      s->tray_locked = 0;
1364      s->tray_open = 0;
1365      /* ATA DMA state */
1366      s->io_buffer_size = 0;
1367      s->req_nb_sectors = 0;
1368  
1369      ide_set_signature(s);
1370      /* init the transfer handler so that 0xffff is returned on data
1371         accesses */
1372      s->end_transfer_func = ide_dummy_transfer_stop;
1373      ide_dummy_transfer_stop(s);
1374      s->media_changed = 0;
1375  }
1376  
1377  static bool cmd_nop(IDEState *s, uint8_t cmd)
1378  {
1379      return true;
1380  }
1381  
1382  static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1383  {
1384      /* Halt PIO (in the DRQ phase), then DMA */
1385      ide_transfer_halt(s);
1386      ide_cancel_dma_sync(s);
1387  
1388      /* Reset any PIO commands, reset signature, etc */
1389      ide_reset(s);
1390  
1391      /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1392       * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1393      s->status = 0x00;
1394  
1395      /* Do not overwrite status register */
1396      return false;
1397  }
1398  
1399  static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1400  {
1401      switch (s->feature) {
1402      case DSM_TRIM:
1403          if (s->blk) {
1404              ide_sector_start_dma(s, IDE_DMA_TRIM);
1405              return false;
1406          }
1407          break;
1408      }
1409  
1410      ide_abort_command(s);
1411      return true;
1412  }
1413  
1414  static bool cmd_identify(IDEState *s, uint8_t cmd)
1415  {
1416      if (s->blk && s->drive_kind != IDE_CD) {
1417          if (s->drive_kind != IDE_CFATA) {
1418              ide_identify(s);
1419          } else {
1420              ide_cfata_identify(s);
1421          }
1422          s->status = READY_STAT | SEEK_STAT;
1423          ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1424          ide_set_irq(s->bus);
1425          return false;
1426      } else {
1427          if (s->drive_kind == IDE_CD) {
1428              ide_set_signature(s);
1429          }
1430          ide_abort_command(s);
1431      }
1432  
1433      return true;
1434  }
1435  
1436  static bool cmd_verify(IDEState *s, uint8_t cmd)
1437  {
1438      bool lba48 = (cmd == WIN_VERIFY_EXT);
1439  
1440      /* do sector number check ? */
1441      ide_cmd_lba48_transform(s, lba48);
1442  
1443      return true;
1444  }
1445  
1446  static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1447  {
1448      if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1449          /* Disable Read and Write Multiple */
1450          s->mult_sectors = 0;
1451      } else if ((s->nsector & 0xff) != 0 &&
1452          ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1453           (s->nsector & (s->nsector - 1)) != 0)) {
1454          ide_abort_command(s);
1455      } else {
1456          s->mult_sectors = s->nsector & 0xff;
1457      }
1458  
1459      return true;
1460  }
1461  
1462  static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1463  {
1464      bool lba48 = (cmd == WIN_MULTREAD_EXT);
1465  
1466      if (!s->blk || !s->mult_sectors) {
1467          ide_abort_command(s);
1468          return true;
1469      }
1470  
1471      ide_cmd_lba48_transform(s, lba48);
1472      s->req_nb_sectors = s->mult_sectors;
1473      ide_sector_read(s);
1474      return false;
1475  }
1476  
1477  static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1478  {
1479      bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1480      int n;
1481  
1482      if (!s->blk || !s->mult_sectors) {
1483          ide_abort_command(s);
1484          return true;
1485      }
1486  
1487      ide_cmd_lba48_transform(s, lba48);
1488  
1489      s->req_nb_sectors = s->mult_sectors;
1490      n = MIN(s->nsector, s->req_nb_sectors);
1491  
1492      s->status = SEEK_STAT | READY_STAT;
1493      ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1494  
1495      s->media_changed = 1;
1496  
1497      return false;
1498  }
1499  
1500  static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1501  {
1502      bool lba48 = (cmd == WIN_READ_EXT);
1503  
1504      if (s->drive_kind == IDE_CD) {
1505          ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1506          ide_abort_command(s);
1507          return true;
1508      }
1509  
1510      if (!s->blk) {
1511          ide_abort_command(s);
1512          return true;
1513      }
1514  
1515      ide_cmd_lba48_transform(s, lba48);
1516      s->req_nb_sectors = 1;
1517      ide_sector_read(s);
1518  
1519      return false;
1520  }
1521  
1522  static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1523  {
1524      bool lba48 = (cmd == WIN_WRITE_EXT);
1525  
1526      if (!s->blk) {
1527          ide_abort_command(s);
1528          return true;
1529      }
1530  
1531      ide_cmd_lba48_transform(s, lba48);
1532  
1533      s->req_nb_sectors = 1;
1534      s->status = SEEK_STAT | READY_STAT;
1535      ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1536  
1537      s->media_changed = 1;
1538  
1539      return false;
1540  }
1541  
1542  static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1543  {
1544      bool lba48 = (cmd == WIN_READDMA_EXT);
1545  
1546      if (!s->blk) {
1547          ide_abort_command(s);
1548          return true;
1549      }
1550  
1551      ide_cmd_lba48_transform(s, lba48);
1552      ide_sector_start_dma(s, IDE_DMA_READ);
1553  
1554      return false;
1555  }
1556  
1557  static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1558  {
1559      bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1560  
1561      if (!s->blk) {
1562          ide_abort_command(s);
1563          return true;
1564      }
1565  
1566      ide_cmd_lba48_transform(s, lba48);
1567      ide_sector_start_dma(s, IDE_DMA_WRITE);
1568  
1569      s->media_changed = 1;
1570  
1571      return false;
1572  }
1573  
1574  static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1575  {
1576      ide_flush_cache(s);
1577      return false;
1578  }
1579  
1580  static bool cmd_seek(IDEState *s, uint8_t cmd)
1581  {
1582      /* XXX: Check that seek is within bounds */
1583      return true;
1584  }
1585  
1586  static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1587  {
1588      bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1589  
1590      /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1591      if (s->nb_sectors == 0) {
1592          ide_abort_command(s);
1593          return true;
1594      }
1595  
1596      ide_cmd_lba48_transform(s, lba48);
1597      ide_set_sector(s, s->nb_sectors - 1);
1598  
1599      return true;
1600  }
1601  
1602  static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1603  {
1604      s->nsector = 0xff; /* device active or idle */
1605      return true;
1606  }
1607  
1608  static bool cmd_set_features(IDEState *s, uint8_t cmd)
1609  {
1610      uint16_t *identify_data;
1611  
1612      if (!s->blk) {
1613          ide_abort_command(s);
1614          return true;
1615      }
1616  
1617      /* XXX: valid for CDROM ? */
1618      switch (s->feature) {
1619      case 0x02: /* write cache enable */
1620          blk_set_enable_write_cache(s->blk, true);
1621          identify_data = (uint16_t *)s->identify_data;
1622          put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1623          return true;
1624      case 0x82: /* write cache disable */
1625          blk_set_enable_write_cache(s->blk, false);
1626          identify_data = (uint16_t *)s->identify_data;
1627          put_le16(identify_data + 85, (1 << 14) | 1);
1628          ide_flush_cache(s);
1629          return false;
1630      case 0xcc: /* reverting to power-on defaults enable */
1631      case 0x66: /* reverting to power-on defaults disable */
1632      case 0xaa: /* read look-ahead enable */
1633      case 0x55: /* read look-ahead disable */
1634      case 0x05: /* set advanced power management mode */
1635      case 0x85: /* disable advanced power management mode */
1636      case 0x69: /* NOP */
1637      case 0x67: /* NOP */
1638      case 0x96: /* NOP */
1639      case 0x9a: /* NOP */
1640      case 0x42: /* enable Automatic Acoustic Mode */
1641      case 0xc2: /* disable Automatic Acoustic Mode */
1642          return true;
1643      case 0x03: /* set transfer mode */
1644          {
1645              uint8_t val = s->nsector & 0x07;
1646              identify_data = (uint16_t *)s->identify_data;
1647  
1648              switch (s->nsector >> 3) {
1649              case 0x00: /* pio default */
1650              case 0x01: /* pio mode */
1651                  put_le16(identify_data + 62, 0x07);
1652                  put_le16(identify_data + 63, 0x07);
1653                  put_le16(identify_data + 88, 0x3f);
1654                  break;
1655              case 0x02: /* single word dma mode */
1656                  put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1657                  put_le16(identify_data + 63, 0x07);
1658                  put_le16(identify_data + 88, 0x3f);
1659                  break;
1660              case 0x04: /* mdma mode */
1661                  put_le16(identify_data + 62, 0x07);
1662                  put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1663                  put_le16(identify_data + 88, 0x3f);
1664                  break;
1665              case 0x08: /* udma mode */
1666                  put_le16(identify_data + 62, 0x07);
1667                  put_le16(identify_data + 63, 0x07);
1668                  put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1669                  break;
1670              default:
1671                  goto abort_cmd;
1672              }
1673              return true;
1674          }
1675      }
1676  
1677  abort_cmd:
1678      ide_abort_command(s);
1679      return true;
1680  }
1681  
1682  
1683  /*** ATAPI commands ***/
1684  
1685  static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1686  {
1687      ide_atapi_identify(s);
1688      s->status = READY_STAT | SEEK_STAT;
1689      ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1690      ide_set_irq(s->bus);
1691      return false;
1692  }
1693  
1694  static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1695  {
1696      ide_set_signature(s);
1697  
1698      if (s->drive_kind == IDE_CD) {
1699          s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1700                          * devices to return a clear status register
1701                          * with READY_STAT *not* set. */
1702          s->error = 0x01;
1703      } else {
1704          s->status = READY_STAT | SEEK_STAT;
1705          /* The bits of the error register are not as usual for this command!
1706           * They are part of the regular output (this is why ERR_STAT isn't set):
1707           * Device 0 passed, Device 1 passed or not present. */
1708          s->error = 0x01;
1709          ide_set_irq(s->bus);
1710      }
1711  
1712      return false;
1713  }
1714  
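      /*
       * PACKET: feature bit 0 requests DMA for the data phase of the
       * following ATAPI command, feature bit 1 (overlapped operation) is
       * not emulated and aborts the command.  The command packet itself is
       * always received by PIO into io_buffer and handed to ide_atapi_cmd.
       */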
1715  static bool cmd_packet(IDEState *s, uint8_t cmd)
1716  {
1717      /* overlapping commands not supported */
1718      if (s->feature & 0x02) {
1719          ide_abort_command(s);
1720          return true;
1721      }
1722  
1723      s->status = READY_STAT | SEEK_STAT;
1724      s->atapi_dma = s->feature & 1;
1725      if (s->atapi_dma) {
1726          s->dma_cmd = IDE_DMA_ATAPI;
1727      }
1728      s->nsector = 1;
1729      ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1730                         ide_atapi_cmd);
1731      return false;
1732  }
1733  
1734  
1735  /*** CF-ATA commands ***/
1736  
1737  static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1738  {
1739      s->error = 0x09;    /* miscellaneous error */
1740      s->status = READY_STAT | SEEK_STAT;
1741      ide_set_irq(s->bus);
1742  
1743      return false;
1744  }
1745  
1746  static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1747  {
1748      /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1749       * required for Windows 8 to work with AHCI */
1750  
1751      if (cmd == CFA_WEAR_LEVEL) {
1752          s->nsector = 0;
1753      }
1754  
1755      if (cmd == CFA_ERASE_SECTORS) {
1756          s->media_changed = 1;
1757      }
1758  
1759      return true;
1760  }
1761  
1762  static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1763  {
1764      s->status = READY_STAT | SEEK_STAT;
1765  
1766      memset(s->io_buffer, 0, 0x200);
1767      s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1768      s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1769      s->io_buffer[0x02] = s->select;                 /* Head */
1770      s->io_buffer[0x03] = s->sector;                 /* Sector */
1771      s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1772      s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1773      s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1774      s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1775      s->io_buffer[0x18] = 0x00;                      /* Hot count */
1776      s->io_buffer[0x19] = 0x00;                      /* Hot count */
1777      s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1778  
1779      ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1780      ide_set_irq(s->bus);
1781  
1782      return false;
1783  }
1784  
1785  static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1786  {
1787      switch (s->feature) {
1788      case 0x02:  /* Inquiry Metadata Storage */
1789          ide_cfata_metadata_inquiry(s);
1790          break;
1791      case 0x03:  /* Read Metadata Storage */
1792          ide_cfata_metadata_read(s);
1793          break;
1794      case 0x04:  /* Write Metadata Storage */
1795          ide_cfata_metadata_write(s);
1796          break;
1797      default:
1798          ide_abort_command(s);
1799          return true;
1800      }
1801  
1802      ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1803      s->status = 0x00; /* NOTE: READY is _not_ set */
1804      ide_set_irq(s->bus);
1805  
1806      return false;
1807  }
1808  
1809  static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1810  {
1811      switch (s->feature) {
1812      case 0x01:  /* sense temperature in device */
1813          s->nsector = 0x50;      /* +20 C */
1814          break;
1815      default:
1816          ide_abort_command(s);
1817          return true;
1818      }
1819  
1820      return true;
1821  }
1822  
1823  
1824  /*** SMART commands ***/
1825  
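      /*
       * SMART dispatch: the command is only honoured with the usual key of
       * lcyl = 0x4f / hcyl = 0xc2 in the task file and, except for
       * SMART_ENABLE, only while SMART is enabled.  Each 512-byte data page
       * built below carries a checksum in its last byte: the two's
       * complement of the sum of bytes 0..510, so the sector sums to zero
       * modulo 256.
       */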
1826  static bool cmd_smart(IDEState *s, uint8_t cmd)
1827  {
1828      int n;
1829  
1830      if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1831          goto abort_cmd;
1832      }
1833  
1834      if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1835          goto abort_cmd;
1836      }
1837  
1838      switch (s->feature) {
1839      case SMART_DISABLE:
1840          s->smart_enabled = 0;
1841          return true;
1842  
1843      case SMART_ENABLE:
1844          s->smart_enabled = 1;
1845          return true;
1846  
1847      case SMART_ATTR_AUTOSAVE:
1848          switch (s->sector) {
1849          case 0x00:
1850              s->smart_autosave = 0;
1851              break;
1852          case 0xf1:
1853              s->smart_autosave = 1;
1854              break;
1855          default:
1856              goto abort_cmd;
1857          }
1858          return true;
1859  
1860      case SMART_STATUS:
1861          if (!s->smart_errors) {
1862              s->hcyl = 0xc2;
1863              s->lcyl = 0x4f;
1864          } else {
1865              s->hcyl = 0x2c;
1866              s->lcyl = 0xf4;
1867          }
1868          return true;
1869  
1870      case SMART_READ_THRESH:
1871          memset(s->io_buffer, 0, 0x200);
1872          s->io_buffer[0] = 0x01; /* smart struct version */
1873  
1874          for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1875              s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1876              s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1877          }
1878  
1879          /* checksum */
1880          for (n = 0; n < 511; n++) {
1881              s->io_buffer[511] += s->io_buffer[n];
1882          }
1883          s->io_buffer[511] = 0x100 - s->io_buffer[511];
1884  
1885          s->status = READY_STAT | SEEK_STAT;
1886          ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1887          ide_set_irq(s->bus);
1888          return false;
1889  
1890      case SMART_READ_DATA:
1891          memset(s->io_buffer, 0, 0x200);
1892          s->io_buffer[0] = 0x01; /* smart struct version */
1893  
1894          for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1895              int i;
1896              for (i = 0; i < 11; i++) {
1897                  s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1898              }
1899          }
1900  
1901          s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1902          if (s->smart_selftest_count == 0) {
1903              s->io_buffer[363] = 0;
1904          } else {
1905              s->io_buffer[363] =
1906                  s->smart_selftest_data[3 +
1907                             (s->smart_selftest_count - 1) *
1908                             24];
1909          }
1910          s->io_buffer[364] = 0x20;
1911          s->io_buffer[365] = 0x01;
1912          /* offline data collection capability: execute + self-test */
1913          s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1914          s->io_buffer[368] = 0x03; /* smart capability (1) */
1915          s->io_buffer[369] = 0x00; /* smart capability (2) */
1916          s->io_buffer[370] = 0x01; /* error logging supported */
1917          s->io_buffer[372] = 0x02; /* minutes for poll short test */
1918          s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1919          s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1920  
1921          for (n = 0; n < 511; n++) {
1922              s->io_buffer[511] += s->io_buffer[n];
1923          }
1924          s->io_buffer[511] = 0x100 - s->io_buffer[511];
1925  
1926          s->status = READY_STAT | SEEK_STAT;
1927          ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1928          ide_set_irq(s->bus);
1929          return false;
1930  
1931      case SMART_READ_LOG:
1932          switch (s->sector) {
1933          case 0x01: /* summary smart error log */
1934              memset(s->io_buffer, 0, 0x200);
1935              s->io_buffer[0] = 0x01;
1936              s->io_buffer[1] = 0x00; /* no error entries */
1937              s->io_buffer[452] = s->smart_errors & 0xff;
1938              s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1939  
1940              for (n = 0; n < 511; n++) {
1941                  s->io_buffer[511] += s->io_buffer[n];
1942              }
1943              s->io_buffer[511] = 0x100 - s->io_buffer[511];
1944              break;
1945          case 0x06: /* smart self test log */
1946              memset(s->io_buffer, 0, 0x200);
1947              s->io_buffer[0] = 0x01;
1948              if (s->smart_selftest_count == 0) {
1949                  s->io_buffer[508] = 0;
1950              } else {
1951                  s->io_buffer[508] = s->smart_selftest_count;
1952                  for (n = 2; n < 506; n++)  {
1953                      s->io_buffer[n] = s->smart_selftest_data[n];
1954                  }
1955              }
1956  
1957              for (n = 0; n < 511; n++) {
1958                  s->io_buffer[511] += s->io_buffer[n];
1959              }
1960              s->io_buffer[511] = 0x100 - s->io_buffer[511];
1961              break;
1962          default:
1963              goto abort_cmd;
1964          }
1965          s->status = READY_STAT | SEEK_STAT;
1966          ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1967          ide_set_irq(s->bus);
1968          return false;
1969  
1970      case SMART_EXECUTE_OFFLINE:
1971          switch (s->sector) {
1972          case 0: /* off-line routine */
1973          case 1: /* short self test */
1974          case 2: /* extended self test */
1975              s->smart_selftest_count++;
1976              if (s->smart_selftest_count > 21) {
1977                  s->smart_selftest_count = 1;
1978              }
1979              n = 2 + (s->smart_selftest_count - 1) * 24;
1980              s->smart_selftest_data[n] = s->sector;
1981              s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1982              s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1983              s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1984              break;
1985          default:
1986              goto abort_cmd;
1987          }
1988          return true;
1989      }
1990  
1991  abort_cmd:
1992      ide_abort_command(s);
1993      return true;
1994  }
1995  
1996  #define HD_OK (1u << IDE_HD)
1997  #define CD_OK (1u << IDE_CD)
1998  #define CFA_OK (1u << IDE_CFATA)
1999  #define HD_CFA_OK (HD_OK | CFA_OK)
2000  #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2001  
2002  /* Set the Disk Seek Completed status bit during completion */
2003  #define SET_DSC (1u << 8)
2004  
2005  /* See ACS-2 T13/2015-D Table B.2 Command codes */
2006  static const struct {
2007      /* Returns true if the completion code should be run */
2008      bool (*handler)(IDEState *s, uint8_t cmd);
2009      int flags;
2010  } ide_cmd_table[0x100] = {
2011      /* NOP not implemented, mandatory for CD */
2012      [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
2013      [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
2014      [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
2015      [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
2016      [WIN_READ]                    = { cmd_read_pio, ALL_OK },
2017      [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
2018      [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2019      [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2020      [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2021      [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2022      [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2023      [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2024      [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2025      [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2026      [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2027      [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2028      [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2029      [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2030      [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2031      [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2032      [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2033      [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2034      [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2035      [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2036      [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2037      [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2038      [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2039      [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2040      [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2041      [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2042      [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2043      [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2044      [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2045      [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2046      [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2047      [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2048      [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2049      [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2050      [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2051      [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2052      [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2053      [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2054      [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2055      [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2056      [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2057      [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2058      [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2059      [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2060      [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2061      [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2062      [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2063      [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2064      [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2065      [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2066      [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2067      [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2068  };
2069  
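      /*
       * A command is permitted when the bit for the drive kind (IDE_HD,
       * IDE_CD or IDE_CFATA) is set in its table entry.  For example, an
       * ATAPI CD accepts WIN_READ (ALL_OK) but not WIN_WRITE (HD_CFA_OK);
       * the latter is aborted by ide_exec_cmd() before any handler runs.
       */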
2070  static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2071  {
2072      return cmd < ARRAY_SIZE(ide_cmd_table)
2073          && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2074  }
2075  
2076  void ide_exec_cmd(IDEBus *bus, uint32_t val)
2077  {
2078      IDEState *s;
2079      bool complete;
2080  
2081      s = idebus_active_if(bus);
2082      trace_ide_exec_cmd(bus, s, val);
2083  
2084      /* ignore commands to a non-existent slave */
2085      if (s != bus->ifs && !s->blk) {
2086          return;
2087      }
2088  
2089      /* Only RESET is allowed while BSY and/or DRQ are set,
2090       * and only to ATAPI devices. */
2091      if (s->status & (BUSY_STAT|DRQ_STAT)) {
2092          if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2093              return;
2094          }
2095      }
2096  
2097      if (!ide_cmd_permitted(s, val)) {
2098          ide_abort_command(s);
2099          ide_set_irq(s->bus);
2100          return;
2101      }
2102  
2103      s->status = READY_STAT | BUSY_STAT;
2104      s->error = 0;
2105      s->io_buffer_offset = 0;
2106  
2107      complete = ide_cmd_table[val].handler(s, val);
2108      if (complete) {
2109          s->status &= ~BUSY_STAT;
2110          assert(!!s->error == !!(s->status & ERR_STAT));
2111  
2112          if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2113              s->status |= SEEK_STAT;
2114          }
2115  
2116          ide_cmd_done(s);
2117          ide_set_irq(s->bus);
2118      }
2119  }
2120  
2121  /* IOport [R]ead [R]egisters */
2122  enum ATA_IOPORT_RR {
2123      ATA_IOPORT_RR_DATA = 0,
2124      ATA_IOPORT_RR_ERROR = 1,
2125      ATA_IOPORT_RR_SECTOR_COUNT = 2,
2126      ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2127      ATA_IOPORT_RR_CYLINDER_LOW = 4,
2128      ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2129      ATA_IOPORT_RR_DEVICE_HEAD = 6,
2130      ATA_IOPORT_RR_STATUS = 7,
2131      ATA_IOPORT_RR_NUM_REGISTERS,
2132  };
2133  
2134  const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2135      [ATA_IOPORT_RR_DATA] = "Data",
2136      [ATA_IOPORT_RR_ERROR] = "Error",
2137      [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2138      [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2139      [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2140      [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2141      [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2142      [ATA_IOPORT_RR_STATUS] = "Status"
2143  };
2144  
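      /*
       * Task file reads: with the HOB bit set in the device control
       * register, the Error, Sector Count, LBA and Cylinder registers
       * return the previously latched high-order bytes (s->hob_*) used by
       * LBA48 commands instead of the current values.  Reading the Status
       * register also lowers the pending IRQ.
       */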
2145  uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2146  {
2147      IDEBus *bus = opaque;
2148      IDEState *s = idebus_active_if(bus);
2149      uint32_t reg_num;
2150      int ret, hob;
2151  
2152      reg_num = addr & 7;
2153      hob = bus->cmd & (IDE_CTRL_HOB);
2154      switch (reg_num) {
2155      case ATA_IOPORT_RR_DATA:
2156          ret = 0xff;
2157          break;
2158      case ATA_IOPORT_RR_ERROR:
2159          if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2160              (s != bus->ifs && !s->blk)) {
2161              ret = 0;
2162          } else if (!hob) {
2163              ret = s->error;
2164          } else {
2165              ret = s->hob_feature;
2166          }
2167          break;
2168      case ATA_IOPORT_RR_SECTOR_COUNT:
2169          if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2170              ret = 0;
2171          } else if (!hob) {
2172              ret = s->nsector & 0xff;
2173          } else {
2174              ret = s->hob_nsector;
2175          }
2176          break;
2177      case ATA_IOPORT_RR_SECTOR_NUMBER:
2178          if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2179              ret = 0;
2180          } else if (!hob) {
2181              ret = s->sector;
2182          } else {
2183              ret = s->hob_sector;
2184          }
2185          break;
2186      case ATA_IOPORT_RR_CYLINDER_LOW:
2187          if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2188              ret = 0;
2189          } else if (!hob) {
2190              ret = s->lcyl;
2191          } else {
2192              ret = s->hob_lcyl;
2193          }
2194          break;
2195      case ATA_IOPORT_RR_CYLINDER_HIGH:
2196          if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2197              ret = 0;
2198          } else if (!hob) {
2199              ret = s->hcyl;
2200          } else {
2201              ret = s->hob_hcyl;
2202          }
2203          break;
2204      case ATA_IOPORT_RR_DEVICE_HEAD:
2205          if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2206              ret = 0;
2207          } else {
2208              ret = s->select;
2209          }
2210          break;
2211      default:
2212      case ATA_IOPORT_RR_STATUS:
2213          if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2214              (s != bus->ifs && !s->blk)) {
2215              ret = 0;
2216          } else {
2217              ret = s->status;
2218          }
2219          qemu_irq_lower(bus->irq);
2220          break;
2221      }
2222  
2223      trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2224      return ret;
2225  }
2226  
2227  uint32_t ide_status_read(void *opaque, uint32_t addr)
2228  {
2229      IDEBus *bus = opaque;
2230      IDEState *s = idebus_active_if(bus);
2231      int ret;
2232  
2233      if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2234          (s != bus->ifs && !s->blk)) {
2235          ret = 0;
2236      } else {
2237          ret = s->status;
2238      }
2239  
2240      trace_ide_status_read(addr, ret, bus, s);
2241      return ret;
2242  }
2243  
2244  static void ide_perform_srst(IDEState *s)
2245  {
2246      s->status |= BUSY_STAT;
2247  
2248      /* Halt PIO (via register state); PIO BH remains scheduled. */
2249      ide_transfer_halt(s);
2250  
2251      /* Cancel DMA -- may drain block device and invoke callbacks */
2252      ide_cancel_dma_sync(s);
2253  
2254      /* Cancel PIO callback, reset registers/signature, etc */
2255      ide_reset(s);
2256  
2257      /* perform diagnostic */
2258      cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
2259  }
2260  
2261  static void ide_bus_perform_srst(void *opaque)
2262  {
2263      IDEBus *bus = opaque;
2264      IDEState *s;
2265      int i;
2266  
2267      for (i = 0; i < 2; i++) {
2268          s = &bus->ifs[i];
2269          ide_perform_srst(s);
2270      }
2271  
2272      bus->cmd &= ~IDE_CTRL_RESET;
2273  }
2274  
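      /*
       * Device control register: a 0 -> 1 transition of the SRST bit marks
       * both drives busy immediately and defers the actual reset to a
       * bottom half (ide_bus_perform_srst), because cancelling in-flight
       * DMA in ide_perform_srst() may drain the block device and invoke
       * completion callbacks.
       */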
2275  void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2276  {
2277      IDEBus *bus = opaque;
2278      IDEState *s;
2279      int i;
2280  
2281      trace_ide_ctrl_write(addr, val, bus);
2282  
2283      /* Device0 and Device1 each have their own control register,
2284       * but QEMU models it as just one register in the controller. */
2285      if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2286          for (i = 0; i < 2; i++) {
2287              s = &bus->ifs[i];
2288              s->status |= BUSY_STAT;
2289          }
2290          replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2291                                           ide_bus_perform_srst, bus);
2292      }
2293  
2294      bus->cmd = val;
2295  }
2296  
2297  /*
2298   * Returns true if the running PIO transfer is a PIO out (i.e. data is
2299   * transferred from the device to the guest), false if it's a PIO in
2300   */
2301  static bool ide_is_pio_out(IDEState *s)
2302  {
2303      if (s->end_transfer_func == ide_sector_write ||
2304          s->end_transfer_func == ide_atapi_cmd) {
2305          return false;
2306      } else if (s->end_transfer_func == ide_sector_read ||
2307                 s->end_transfer_func == ide_transfer_stop ||
2308                 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2309                 s->end_transfer_func == ide_dummy_transfer_stop) {
2310          return true;
2311      }
2312  
2313      abort();
2314  }
2315  
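      /*
       * PIO data port accessors: each 16/32-bit access copies to or from
       * s->data_ptr and advances it; once data_ptr reaches data_end, DRQ is
       * cleared and s->end_transfer_func() decides what happens next (next
       * sector, command completion, ATAPI reply, ...).  Accesses without
       * DRQ set, or in the wrong direction for the current transfer, are
       * ignored (reads return 0).
       */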
2316  void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2317  {
2318      IDEBus *bus = opaque;
2319      IDEState *s = idebus_active_if(bus);
2320      uint8_t *p;
2321  
2322      trace_ide_data_writew(addr, val, bus, s);
2323  
2324      /* PIO data access allowed only when DRQ bit is set. The result of a write
2325       * during PIO out is indeterminate, just ignore it. */
2326      if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2327          return;
2328      }
2329  
2330      p = s->data_ptr;
2331      if (p + 2 > s->data_end) {
2332          return;
2333      }
2334  
2335      *(uint16_t *)p = le16_to_cpu(val);
2336      p += 2;
2337      s->data_ptr = p;
2338      if (p >= s->data_end) {
2339          s->status &= ~DRQ_STAT;
2340          s->end_transfer_func(s);
2341      }
2342  }
2343  
2344  uint32_t ide_data_readw(void *opaque, uint32_t addr)
2345  {
2346      IDEBus *bus = opaque;
2347      IDEState *s = idebus_active_if(bus);
2348      uint8_t *p;
2349      int ret;
2350  
2351      /* PIO data access allowed only when DRQ bit is set. The result of a read
2352       * during PIO in is indeterminate, return 0 and don't move forward. */
2353      if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2354          return 0;
2355      }
2356  
2357      p = s->data_ptr;
2358      if (p + 2 > s->data_end) {
2359          return 0;
2360      }
2361  
2362      ret = cpu_to_le16(*(uint16_t *)p);
2363      p += 2;
2364      s->data_ptr = p;
2365      if (p >= s->data_end) {
2366          s->status &= ~DRQ_STAT;
2367          s->end_transfer_func(s);
2368      }
2369  
2370      trace_ide_data_readw(addr, ret, bus, s);
2371      return ret;
2372  }
2373  
2374  void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2375  {
2376      IDEBus *bus = opaque;
2377      IDEState *s = idebus_active_if(bus);
2378      uint8_t *p;
2379  
2380      trace_ide_data_writel(addr, val, bus, s);
2381  
2382      /* PIO data access allowed only when DRQ bit is set. The result of a write
2383       * during PIO out is indeterminate, just ignore it. */
2384      if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2385          return;
2386      }
2387  
2388      p = s->data_ptr;
2389      if (p + 4 > s->data_end) {
2390          return;
2391      }
2392  
2393      *(uint32_t *)p = le32_to_cpu(val);
2394      p += 4;
2395      s->data_ptr = p;
2396      if (p >= s->data_end) {
2397          s->status &= ~DRQ_STAT;
2398          s->end_transfer_func(s);
2399      }
2400  }
2401  
2402  uint32_t ide_data_readl(void *opaque, uint32_t addr)
2403  {
2404      IDEBus *bus = opaque;
2405      IDEState *s = idebus_active_if(bus);
2406      uint8_t *p;
2407      int ret;
2408  
2409      /* PIO data access allowed only when DRQ bit is set. The result of a read
2410       * during PIO in is indeterminate, return 0 and don't move forward. */
2411      if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2412          ret = 0;
2413          goto out;
2414      }
2415  
2416      p = s->data_ptr;
2417      if (p + 4 > s->data_end) {
2418          return 0;
2419      }
2420  
2421      ret = cpu_to_le32(*(uint32_t *)p);
2422      p += 4;
2423      s->data_ptr = p;
2424      if (p >= s->data_end) {
2425          s->status &= ~DRQ_STAT;
2426          s->end_transfer_func(s);
2427      }
2428  
2429  out:
2430      trace_ide_data_readl(addr, ret, bus, s);
2431      return ret;
2432  }
2433  
2434  static void ide_dummy_transfer_stop(IDEState *s)
2435  {
2436      s->data_ptr = s->io_buffer;
2437      s->data_end = s->io_buffer;
2438      s->io_buffer[0] = 0xff;
2439      s->io_buffer[1] = 0xff;
2440      s->io_buffer[2] = 0xff;
2441      s->io_buffer[3] = 0xff;
2442  }
2443  
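      /*
       * Full bus reset: reset both interfaces, drop any latched HOB state,
       * cancel a pending DMA request and finally reset the DMA provider
       * (e.g. BMDMA or AHCI glue) through its ops->reset hook.
       */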
2444  void ide_bus_reset(IDEBus *bus)
2445  {
2446      bus->unit = 0;
2447      bus->cmd = 0;
2448      ide_reset(&bus->ifs[0]);
2449      ide_reset(&bus->ifs[1]);
2450      ide_clear_hob(bus);
2451  
2452      /* pending async DMA */
2453      if (bus->dma->aiocb) {
2454          trace_ide_bus_reset_aio();
2455          blk_aio_cancel(bus->dma->aiocb);
2456          bus->dma->aiocb = NULL;
2457      }
2458  
2459      /* reset dma provider too */
2460      if (bus->dma->ops->reset) {
2461          bus->dma->ops->reset(bus->dma);
2462      }
2463  }
2464  
2465  static bool ide_cd_is_tray_open(void *opaque)
2466  {
2467      return ((IDEState *)opaque)->tray_open;
2468  }
2469  
2470  static bool ide_cd_is_medium_locked(void *opaque)
2471  {
2472      return ((IDEState *)opaque)->tray_locked;
2473  }
2474  
2475  static void ide_resize_cb(void *opaque)
2476  {
2477      IDEState *s = opaque;
2478      uint64_t nb_sectors;
2479  
2480      if (!s->identify_set) {
2481          return;
2482      }
2483  
2484      blk_get_geometry(s->blk, &nb_sectors);
2485      s->nb_sectors = nb_sectors;
2486  
2487      /* Update the identify data buffer. */
2488      if (s->drive_kind == IDE_CFATA) {
2489          ide_cfata_identify_size(s);
2490      } else {
2491          /* IDE_CD uses a different set of callbacks entirely. */
2492          assert(s->drive_kind != IDE_CD);
2493          ide_identify_size(s);
2494      }
2495  }
2496  
2497  static const BlockDevOps ide_cd_block_ops = {
2498      .change_media_cb = ide_cd_change_cb,
2499      .eject_request_cb = ide_cd_eject_request_cb,
2500      .is_tray_open = ide_cd_is_tray_open,
2501      .is_medium_locked = ide_cd_is_medium_locked,
2502  };
2503  
2504  static const BlockDevOps ide_hd_block_ops = {
2505      .resize_cb = ide_resize_cb,
2506  };
2507  
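      /*
       * One-time drive setup: record geometry and identity strings on the
       * IDEState, enable the (non-persistent) SMART state and wire up the
       * BlockBackend callbacks -- media change/tray handling for CD-ROMs,
       * resize notification for hard disks.  Non-CD drives must have
       * writable, inserted media or this fails with an Error.
       */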
2508  int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2509                     const char *version, const char *serial, const char *model,
2510                     uint64_t wwn,
2511                     uint32_t cylinders, uint32_t heads, uint32_t secs,
2512                     int chs_trans, Error **errp)
2513  {
2514      uint64_t nb_sectors;
2515  
2516      s->blk = blk;
2517      s->drive_kind = kind;
2518  
2519      blk_get_geometry(blk, &nb_sectors);
2520      s->cylinders = cylinders;
2521      s->heads = heads;
2522      s->sectors = secs;
2523      s->chs_trans = chs_trans;
2524      s->nb_sectors = nb_sectors;
2525      s->wwn = wwn;
2526      /* The SMART values should be preserved across power cycles
2527         but they aren't.  */
2528      s->smart_enabled = 1;
2529      s->smart_autosave = 1;
2530      s->smart_errors = 0;
2531      s->smart_selftest_count = 0;
2532      if (kind == IDE_CD) {
2533          blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2534          blk_set_guest_block_size(blk, 2048);
2535      } else {
2536          if (!blk_is_inserted(s->blk)) {
2537              error_setg(errp, "Device needs media, but drive is empty");
2538              return -1;
2539          }
2540          if (!blk_is_writable(blk)) {
2541              error_setg(errp, "Can't use a read-only drive");
2542              return -1;
2543          }
2544          blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2545      }
2546      if (serial) {
2547          pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2548      } else {
2549          snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2550                   "QM%05d", s->drive_serial);
2551      }
2552      if (model) {
2553          pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2554      } else {
2555          switch (kind) {
2556          case IDE_CD:
2557              strcpy(s->drive_model_str, "QEMU DVD-ROM");
2558              break;
2559          case IDE_CFATA:
2560              strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2561              break;
2562          default:
2563              strcpy(s->drive_model_str, "QEMU HARDDISK");
2564              break;
2565          }
2566      }
2567  
2568      if (version) {
2569          pstrcpy(s->version, sizeof(s->version), version);
2570      } else {
2571          pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2572      }
2573  
2574      ide_reset(s);
2575      blk_iostatus_enable(blk);
2576      return 0;
2577  }
2578  
2579  static void ide_init1(IDEBus *bus, int unit)
2580  {
2581      static int drive_serial = 1;
2582      IDEState *s = &bus->ifs[unit];
2583  
2584      s->bus = bus;
2585      s->unit = unit;
2586      s->drive_serial = drive_serial++;
2587      /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2588      s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2589      s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2590      memset(s->io_buffer, 0, s->io_buffer_total_len);
2591  
2592      s->smart_selftest_data = blk_blockalign(s->blk, 512);
2593      memset(s->smart_selftest_data, 0, 512);
2594  
2595      s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2596                                             ide_sector_write_timer_cb, s);
2597  }
2598  
2599  static int ide_nop_int(const IDEDMA *dma, bool is_write)
2600  {
2601      return 0;
2602  }
2603  
2604  static void ide_nop(const IDEDMA *dma)
2605  {
2606  }
2607  
2608  static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
2609  {
2610      return 0;
2611  }
2612  
2613  static const IDEDMAOps ide_dma_nop_ops = {
2614      .prepare_buf    = ide_nop_int32,
2615      .restart_dma    = ide_nop,
2616      .rw_buf         = ide_nop_int,
2617  };
2618  
2619  static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2620  {
2621      s->unit = s->bus->retry_unit;
2622      ide_set_sector(s, s->bus->retry_sector_num);
2623      s->nsector = s->bus->retry_nsector;
2624      s->bus->dma->ops->restart_dma(s->bus->dma);
2625      s->io_buffer_size = 0;
2626      s->dma_cmd = dma_cmd;
2627      ide_start_dma(s, ide_dma_cb);
2628  }
2629  
2630  static void ide_restart_bh(void *opaque)
2631  {
2632      IDEBus *bus = opaque;
2633      IDEState *s;
2634      bool is_read;
2635      int error_status;
2636  
2637      qemu_bh_delete(bus->bh);
2638      bus->bh = NULL;
2639  
2640      error_status = bus->error_status;
2641      if (bus->error_status == 0) {
2642          return;
2643      }
2644  
2645      s = idebus_active_if(bus);
2646      is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2647  
2648      /* The error status must be cleared before resubmitting the request: The
2649       * request may fail again, and this case can only be distinguished if the
2650       * called function can set a new error status. */
2651      bus->error_status = 0;
2652  
2653      /* The HBA has generically asked to be kicked on retry */
2654      if (error_status & IDE_RETRY_HBA) {
2655          if (s->bus->dma->ops->restart) {
2656              s->bus->dma->ops->restart(s->bus->dma);
2657          }
2658      } else if (IS_IDE_RETRY_DMA(error_status)) {
2659          if (error_status & IDE_RETRY_TRIM) {
2660              ide_restart_dma(s, IDE_DMA_TRIM);
2661          } else {
2662              ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2663          }
2664      } else if (IS_IDE_RETRY_PIO(error_status)) {
2665          if (is_read) {
2666              ide_sector_read(s);
2667          } else {
2668              ide_sector_write(s);
2669          }
2670      } else if (error_status & IDE_RETRY_FLUSH) {
2671          ide_flush_cache(s);
2672      } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2673          assert(s->end_transfer_func == ide_atapi_cmd);
2674          ide_atapi_dma_restart(s);
2675      } else {
2676          abort();
2677      }
2678  }
2679  
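      /*
       * VM change state handler: when the VM starts running again (e.g.
       * after a werror/rerror=stop pause or an incoming migration), a
       * bottom half inspects bus->error_status and resubmits the
       * interrupted DMA, PIO, flush or ATAPI request via ide_restart_bh()
       * above.
       */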
2680  static void ide_restart_cb(void *opaque, int running, RunState state)
2681  {
2682      IDEBus *bus = opaque;
2683  
2684      if (!running)
2685          return;
2686  
2687      if (!bus->bh) {
2688          bus->bh = qemu_bh_new(ide_restart_bh, bus);
2689          qemu_bh_schedule(bus->bh);
2690      }
2691  }
2692  
2693  void ide_register_restart_cb(IDEBus *bus)
2694  {
2695      if (bus->dma->ops->restart_dma) {
2696          bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2697      }
2698  }
2699  
2700  static IDEDMA ide_dma_nop = {
2701      .ops = &ide_dma_nop_ops,
2702      .aiocb = NULL,
2703  };
2704  
2705  void ide_init2(IDEBus *bus, qemu_irq irq)
2706  {
2707      int i;
2708  
2709      for(i = 0; i < 2; i++) {
2710          ide_init1(bus, i);
2711          ide_reset(&bus->ifs[i]);
2712      }
2713      bus->irq = irq;
2714      bus->dma = &ide_dma_nop;
2715  }
2716  
2717  void ide_exit(IDEState *s)
2718  {
2719      timer_free(s->sector_write_timer);
2720      qemu_vfree(s->smart_selftest_data);
2721      qemu_vfree(s->io_buffer);
2722  }
2723  
2724  static bool is_identify_set(void *opaque, int version_id)
2725  {
2726      IDEState *s = opaque;
2727  
2728      return s->identify_set != 0;
2729  }
2730  
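      /*
       * For migration the end_transfer_func pointer cannot be sent as-is;
       * it is encoded as an index into this table in the pio_state
       * pre_save hook and resolved back in post_load.  An out-of-range
       * index fails the load; an unknown pointer falls back to
       * ide_transfer_stop (index 2).
       */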
2731  static EndTransferFunc* transfer_end_table[] = {
2732          ide_sector_read,
2733          ide_sector_write,
2734          ide_transfer_stop,
2735          ide_atapi_cmd_reply_end,
2736          ide_atapi_cmd,
2737          ide_dummy_transfer_stop,
2738  };
2739  
2740  static int transfer_end_table_idx(EndTransferFunc *fn)
2741  {
2742      int i;
2743  
2744      for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2745          if (transfer_end_table[i] == fn)
2746              return i;
2747  
2748      return -1;
2749  }
2750  
2751  static int ide_drive_post_load(void *opaque, int version_id)
2752  {
2753      IDEState *s = opaque;
2754  
2755      if (s->blk && s->identify_set) {
2756          blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2757      }
2758      return 0;
2759  }
2760  
2761  static int ide_drive_pio_post_load(void *opaque, int version_id)
2762  {
2763      IDEState *s = opaque;
2764  
2765      if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2766          return -EINVAL;
2767      }
2768      s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2769      s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2770      s->data_end = s->data_ptr + s->cur_io_buffer_len;
2771      s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2772  
2773      return 0;
2774  }
2775  
2776  static int ide_drive_pio_pre_save(void *opaque)
2777  {
2778      IDEState *s = opaque;
2779      int idx;
2780  
2781      s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2782      s->cur_io_buffer_len = s->data_end - s->data_ptr;
2783  
2784      idx = transfer_end_table_idx(s->end_transfer_func);
2785      if (idx == -1) {
2786          fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2787                          __func__);
2788          s->end_transfer_fn_idx = 2;
2789      } else {
2790          s->end_transfer_fn_idx = idx;
2791      }
2792  
2793      return 0;
2794  }
2795  
2796  static bool ide_drive_pio_state_needed(void *opaque)
2797  {
2798      IDEState *s = opaque;
2799  
2800      return ((s->status & DRQ_STAT) != 0)
2801          || (s->bus->error_status & IDE_RETRY_PIO);
2802  }
2803  
2804  static bool ide_tray_state_needed(void *opaque)
2805  {
2806      IDEState *s = opaque;
2807  
2808      return s->tray_open || s->tray_locked;
2809  }
2810  
2811  static bool ide_atapi_gesn_needed(void *opaque)
2812  {
2813      IDEState *s = opaque;
2814  
2815      return s->events.new_media || s->events.eject_request;
2816  }
2817  
2818  static bool ide_error_needed(void *opaque)
2819  {
2820      IDEBus *bus = opaque;
2821  
2822      return (bus->error_status != 0);
2823  }
2824  
2825  /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2826  static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2827      .name = "ide_drive/atapi/gesn_state",
2828      .version_id = 1,
2829      .minimum_version_id = 1,
2830      .needed = ide_atapi_gesn_needed,
2831      .fields = (VMStateField[]) {
2832          VMSTATE_BOOL(events.new_media, IDEState),
2833          VMSTATE_BOOL(events.eject_request, IDEState),
2834          VMSTATE_END_OF_LIST()
2835      }
2836  };
2837  
2838  static const VMStateDescription vmstate_ide_tray_state = {
2839      .name = "ide_drive/tray_state",
2840      .version_id = 1,
2841      .minimum_version_id = 1,
2842      .needed = ide_tray_state_needed,
2843      .fields = (VMStateField[]) {
2844          VMSTATE_BOOL(tray_open, IDEState),
2845          VMSTATE_BOOL(tray_locked, IDEState),
2846          VMSTATE_END_OF_LIST()
2847      }
2848  };
2849  
2850  static const VMStateDescription vmstate_ide_drive_pio_state = {
2851      .name = "ide_drive/pio_state",
2852      .version_id = 1,
2853      .minimum_version_id = 1,
2854      .pre_save = ide_drive_pio_pre_save,
2855      .post_load = ide_drive_pio_post_load,
2856      .needed = ide_drive_pio_state_needed,
2857      .fields = (VMStateField[]) {
2858          VMSTATE_INT32(req_nb_sectors, IDEState),
2859          VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2860                               vmstate_info_uint8, uint8_t),
2861          VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2862          VMSTATE_INT32(cur_io_buffer_len, IDEState),
2863          VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2864          VMSTATE_INT32(elementary_transfer_size, IDEState),
2865          VMSTATE_INT32(packet_transfer_size, IDEState),
2866          VMSTATE_END_OF_LIST()
2867      }
2868  };
2869  
2870  const VMStateDescription vmstate_ide_drive = {
2871      .name = "ide_drive",
2872      .version_id = 3,
2873      .minimum_version_id = 0,
2874      .post_load = ide_drive_post_load,
2875      .fields = (VMStateField[]) {
2876          VMSTATE_INT32(mult_sectors, IDEState),
2877          VMSTATE_INT32(identify_set, IDEState),
2878          VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2879          VMSTATE_UINT8(feature, IDEState),
2880          VMSTATE_UINT8(error, IDEState),
2881          VMSTATE_UINT32(nsector, IDEState),
2882          VMSTATE_UINT8(sector, IDEState),
2883          VMSTATE_UINT8(lcyl, IDEState),
2884          VMSTATE_UINT8(hcyl, IDEState),
2885          VMSTATE_UINT8(hob_feature, IDEState),
2886          VMSTATE_UINT8(hob_sector, IDEState),
2887          VMSTATE_UINT8(hob_nsector, IDEState),
2888          VMSTATE_UINT8(hob_lcyl, IDEState),
2889          VMSTATE_UINT8(hob_hcyl, IDEState),
2890          VMSTATE_UINT8(select, IDEState),
2891          VMSTATE_UINT8(status, IDEState),
2892          VMSTATE_UINT8(lba48, IDEState),
2893          VMSTATE_UINT8(sense_key, IDEState),
2894          VMSTATE_UINT8(asc, IDEState),
2895          VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2896          VMSTATE_END_OF_LIST()
2897      },
2898      .subsections = (const VMStateDescription*[]) {
2899          &vmstate_ide_drive_pio_state,
2900          &vmstate_ide_tray_state,
2901          &vmstate_ide_atapi_gesn_state,
2902          NULL
2903      }
2904  };
2905  
2906  static const VMStateDescription vmstate_ide_error_status = {
2907      .name = "ide_bus/error",
2908      .version_id = 2,
2909      .minimum_version_id = 1,
2910      .needed = ide_error_needed,
2911      .fields = (VMStateField[]) {
2912          VMSTATE_INT32(error_status, IDEBus),
2913          VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2914          VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2915          VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2916          VMSTATE_END_OF_LIST()
2917      }
2918  };
2919  
2920  const VMStateDescription vmstate_ide_bus = {
2921      .name = "ide_bus",
2922      .version_id = 1,
2923      .minimum_version_id = 1,
2924      .fields = (VMStateField[]) {
2925          VMSTATE_UINT8(cmd, IDEBus),
2926          VMSTATE_UINT8(unit, IDEBus),
2927          VMSTATE_END_OF_LIST()
2928      },
2929      .subsections = (const VMStateDescription*[]) {
2930          &vmstate_ide_error_status,
2931          NULL
2932      }
2933  };
2934  
2935  void ide_drive_get(DriveInfo **hd, int n)
2936  {
2937      int i;
2938  
2939      for (i = 0; i < n; i++) {
2940          hd[i] = drive_get_by_index(IF_IDE, i);
2941      }
2942  }
2943