/*
 * QEMU IDE disk and CD/DVD-ROM Emulator
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/isa/isa.h>
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "hw/block/block.h"
#include "sysemu/block-backend.h"

#include <hw/ide/internal.h>

/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};

static int ide_handle_rw_error(IDEState *s, int error, int op);
static void ide_dummy_transfer_stop(IDEState *s);

static void padstr(char *str, const char *src, int len)
{
    int i, v;
    for(i = 0; i < len; i++) {
        if (*src)
            v = *src++;
        else
            v = ' ';
        /* ATA string fields store the two bytes of each 16-bit word swapped */
        str[i^1] = v;
    }
}

static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}

static void ide_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    put_le16(p + 60, s->nb_sectors);
    put_le16(p + 61, s->nb_sectors >> 16);
    put_le16(p + 100, s->nb_sectors);
    put_le16(p + 101, s->nb_sectors >> 16);
    put_le16(p + 102, s->nb_sectors >> 32);
    put_le16(p + 103, s->nb_sectors >> 48);
}

static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
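    /*
     * Note: the 512-byte IDENTIFY DEVICE response is built once as 256
     * little-endian 16-bit words, cached in identify_data, and only
     * copied into the PIO buffer on later invocations (see fill_buffer).
     */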

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040);
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
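    /*
     * Illustrative example: a drive of 134217728 sectors (64 GiB) ends up
     * with 0x0000/0x0800 in words 60/61 and 0x0000/0x0800/0x0000/0x0000 in
     * words 100-103, filled in by ide_identify_size() below.
     */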

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }

    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}

static void ide_atapi_identify(IDEState *s)
{
    uint16_t *p;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
#ifdef USE_DMA_CDROM
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7); /* single word dma0-2 supported */
    put_le16(p + 63, 7); /* mdma0-2 supported */
#else
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
#endif
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */

    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    if (s->wwn) {
        put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
        put_le16(p + 87, (1 << 8)); /* WWN enabled */
    }

#ifdef USE_DMA_CDROM
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif

    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }

    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}

static void ide_cfata_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
    put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
    put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
    put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
}

static void ide_cfata_identify(IDEState *s)
{
    uint16_t *p;
    uint32_t cur_sec;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    cur_sec = s->cylinders * s->heads * s->sectors;

    put_le16(p + 0, 0x848a);            /* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);      /* Default cylinders */
    put_le16(p + 3, s->heads);          /* Default heads */
    put_le16(p + 6, s->sectors);        /* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);           /* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);          /* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40); /* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#else
    put_le16(p + 47, 0x0000);
#endif
    put_le16(p + 49, 0x0f00);           /* Capabilities */
    put_le16(p + 51, 0x0002);           /* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);           /* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);           /* Translation params valid */
    put_le16(p + 54, s->cylinders);     /* Current cylinders */
    put_le16(p + 55, s->heads);         /* Current heads */
    put_le16(p + 56, s->sectors);       /* Current sectors */
    put_le16(p + 57, cur_sec);          /* Current capacity */
    put_le16(p + 58, cur_sec >> 16);    /* Current capacity */
    if (s->mult_sectors)                /* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);           /* Multiword DMA capability */
    put_le16(p + 64, 0x0001);           /* Flow Control PIO support */
    put_le16(p + 65, 0x0096);           /* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);           /* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);           /* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);           /* Command Set supported */
    put_le16(p + 83, 0x7068);           /* Command Set supported */
    put_le16(p + 84, 0x4000);           /* Features supported */
    put_le16(p + 85, 0x000c);           /* Command Set enabled */
    put_le16(p + 86, 0x7044);           /* Command Set enabled */
    put_le16(p + 87, 0x4000);           /* Features enabled */
    put_le16(p + 91, 0x4060);           /* Current APM level */
    put_le16(p + 129, 0x0002);          /* Current features option */
    put_le16(p + 130, 0x0005);          /* Reassigned sectors */
    put_le16(p + 131, 0x0001);          /* Initial power mode */
    put_le16(p + 132, 0x0000);          /* User signature */
    put_le16(p + 160, 0x8100);          /* Power requirement */
    put_le16(p + 161, 0x8001);          /* CF command set */

    ide_cfata_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}

static void ide_set_signature(IDEState *s)
{
    s->select &= 0xf0; /* clear head */
    /* put signature */
    s->nsector = 1;
    s->sector = 1;
    if (s->drive_kind == IDE_CD) {
        s->lcyl = 0x14;
        s->hcyl = 0xeb;
    } else if (s->blk) {
        s->lcyl = 0;
        s->hcyl = 0;
    } else {
        s->lcyl = 0xff;
        s->hcyl = 0xff;
    }
}

typedef struct TrimAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    QEMUBH *bh;
    int ret;
    QEMUIOVector *qiov;
    BlockAIOCB *aiocb;
    int i, j;
} TrimAIOCB;

static void trim_aio_cancel(BlockAIOCB *acb)
{
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);

    /* Exit the loop so ide_issue_trim_cb will not continue */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}

static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size   = sizeof(TrimAIOCB),
    .cancel_async = trim_aio_cancel,
};

static void ide_trim_bh_cb(void *opaque)
{
    TrimAIOCB *iocb = opaque;

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;
    qemu_aio_unref(iocb);
}

static void ide_issue_trim_cb(void *opaque, int ret)
{
    TrimAIOCB *iocb = opaque;
    if (ret >= 0) {
        while (iocb->j < iocb->qiov->niov) {
            int j = iocb->j;
            while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
                int i = iocb->i;
                uint64_t *buffer = iocb->qiov->iov[j].iov_base;

                /* 6-byte LBA + 2-byte range per entry */
                uint64_t entry = le64_to_cpu(buffer[i]);
                uint64_t sector = entry & 0x0000ffffffffffffULL;
                uint16_t count = entry >> 48;

                if (count == 0) {
                    continue;
                }

                /* Got an entry! Submit and exit. */
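                /*
                 * For example, an entry of 0x0008000000001000 decodes to
                 * sector 0x1000 and a count of 8 sectors, which is what
                 * gets passed to the discard request below.
                 */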
                iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
                                              ide_issue_trim_cb, opaque);
                return;
            }

            iocb->j++;
            iocb->i = -1;
        }
    } else {
        iocb->ret = ret;
    }

    iocb->aiocb = NULL;
    if (iocb->bh) {
        qemu_bh_schedule(iocb->bh);
    }
}

BlockAIOCB *ide_issue_trim(BlockBackend *blk,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    TrimAIOCB *iocb;

    iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
    iocb->blk = blk;
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
    iocb->ret = 0;
    iocb->qiov = qiov;
    iocb->i = -1;
    iocb->j = 0;
    ide_issue_trim_cb(iocb, 0);
    return &iocb->common;
}

static inline void ide_abort_command(IDEState *s)
{
    ide_transfer_stop(s);
    s->status = READY_STAT | ERR_STAT;
    s->error = ABRT_ERR;
}

/* prepare data transfer and tell what to do after */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
                        EndTransferFunc *end_transfer_func)
{
    s->end_transfer_func = end_transfer_func;
    s->data_ptr = buf;
    s->data_end = buf + size;
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    }
    if (s->bus->dma->ops->start_transfer) {
        s->bus->dma->ops->start_transfer(s->bus->dma);
    }
}

static void ide_cmd_done(IDEState *s)
{
    if (s->bus->dma->ops->cmd_done) {
        s->bus->dma->ops->cmd_done(s->bus->dma);
    }
}

void ide_transfer_stop(IDEState *s)
{
    s->end_transfer_func = ide_transfer_stop;
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->status &= ~DRQ_STAT;
    ide_cmd_done(s);
}

int64_t ide_get_sector(IDEState *s)
{
    int64_t sector_num;
    if (s->select & 0x40) {
        /* lba */
        if (!s->lba48) {
            sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
                (s->lcyl << 8) | s->sector;
        } else {
            sector_num = ((int64_t)s->hob_hcyl << 40) |
                ((int64_t) s->hob_lcyl << 32) |
                ((int64_t) s->hob_sector << 24) |
                ((int64_t) s->hcyl << 16) |
                ((int64_t) s->lcyl << 8) | s->sector;
        }
    } else {
        /* CHS: ((cylinder * heads) + head) * sectors_per_track + (sector - 1) */
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & 0x0f) * s->sectors + (s->sector - 1);
    }
    return sector_num;
}

void ide_set_sector(IDEState *s, int64_t sector_num)
{
    unsigned int cyl, r;
    if (s->select & 0x40) {
        if (!s->lba48) {
            s->select = (s->select & 0xf0) | (sector_num >> 24);
            s->hcyl = (sector_num >> 16);
            s->lcyl = (sector_num >> 8);
            s->sector = (sector_num);
        } else {
            s->sector = sector_num;
            s->lcyl = sector_num >> 8;
            s->hcyl = sector_num >> 16;
            s->hob_sector = sector_num >> 24;
            s->hob_lcyl = sector_num >> 32;
            s->hob_hcyl = sector_num >> 40;
        }
    } else {
        cyl = sector_num / (s->heads * s->sectors);
        r = sector_num % (s->heads * s->sectors);
        s->hcyl = cyl >> 8;
        s->lcyl = cyl;
        s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
        s->sector = (r % s->sectors) + 1;
    }
}

static void ide_rw_error(IDEState *s) {
    ide_abort_command(s);
    ide_set_irq(s->bus);
}

static bool ide_sect_range_ok(IDEState *s,
                              uint64_t sector, uint64_t nb_sectors)
{
    uint64_t total_sectors;

    blk_get_geometry(s->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

static void ide_sector_read(IDEState *s);
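/*
 * PIO sector read path: ide_sector_read() issues an asynchronous read of at
 * most s->req_nb_sectors sectors into io_buffer; ide_sector_read_cb() then
 * hands the buffer to the guest via ide_transfer_start(), re-arming
 * ide_sector_read() as the end-transfer function so the remaining sectors
 * are fetched in further chunks until s->nsector reaches zero.
 */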

static void ide_sector_read_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret == -ECANCELED) {
        return;
    }
    block_acct_done(blk_get_stats(s->blk), &s->acct);
    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                IDE_RETRY_READ)) {
            return;
        }
    }

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    ide_set_sector(s, ide_get_sector(s) + n);
    s->nsector -= n;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    s->io_buffer_offset += 512 * n;
    ide_set_irq(s->bus);
}

static void ide_sector_read(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    n = s->nsector;

    if (n == 0) {
        ide_transfer_stop(s);
        return;
    }

    s->status |= BUSY_STAT;

    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

#if defined(DEBUG_IDE)
    printf("sector=%" PRId64 "\n", sector_num);
#endif

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        return;
    }

    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    s->pio_aiocb = blk_aio_readv(s->blk, sector_num, &s->qiov, n,
                                 ide_sector_read_cb, s);
}

static void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
{
    if (s->bus->dma->ops->commit_buf) {
        s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
    }
    qemu_sglist_destroy(&s->sg);
}

void ide_set_inactive(IDEState *s, bool more)
{
    s->bus->dma->aiocb = NULL;
    s->bus->retry_unit = -1;
    s->bus->retry_sector_num = 0;
    s->bus->retry_nsector = 0;
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);
    }
    ide_cmd_done(s);
}

void ide_dma_error(IDEState *s)
{
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
    ide_set_irq(s->bus);
}

static int ide_handle_rw_error(IDEState *s, int error, int op)
{
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (op & IDE_RETRY_DMA) {
            ide_dma_error(s);
        } else {
            ide_rw_error(s);
        }
    }
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

static void ide_dma_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;
    int64_t sector_num;
    bool stay_active = false;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret < 0) {
        int op = IDE_RETRY_DMA;

        if (s->dma_cmd == IDE_DMA_READ)
            op |= IDE_RETRY_READ;
        else if (s->dma_cmd == IDE_DMA_TRIM)
            op |= IDE_RETRY_TRIM;

        if (ide_handle_rw_error(s, -ret, op)) {
            return;
        }
    }

    n = s->io_buffer_size >> 9;
    if (n > s->nsector) {
        /* The PRDs were longer than needed for this request. Shorten them so
         * we don't get a negative remainder. The Active bit must remain set
         * after the request completes. */
        n = s->nsector;
        stay_active = true;
    }

    sector_num = ide_get_sector(s);
    if (n > 0) {
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        goto eot;
    }

    /* launch next transfer */
    n = s->nsector;
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
        /* The PRDs were too short. Reset the Active bit, but don't raise an
         * interrupt. */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
        goto eot;
    }

#ifdef DEBUG_AIO
    printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, dma_cmd=%d\n",
           sector_num, n, s->dma_cmd);
#endif

    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        ide_dma_error(s);
        return;
    }

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
                                          ide_dma_cb, s);
        break;
    case IDE_DMA_WRITE:
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
                                           ide_dma_cb, s);
        break;
    case IDE_DMA_TRIM:
        s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
                                        ide_issue_trim, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
        break;
    }
    return;

eot:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    ide_set_inactive(s, stay_active);
}

static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;

    switch (dma_cmd) {
    case IDE_DMA_READ:
        block_acct_start(blk_get_stats(s->blk), &s->acct,
                         s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        block_acct_start(blk_get_stats(s->blk), &s->acct,
                         s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
        break;
    default:
        break;
    }

    ide_start_dma(s, ide_dma_cb);
}

void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
{
    s->io_buffer_index = 0;
    s->bus->retry_unit = s->unit;
    s->bus->retry_sector_num = ide_get_sector(s);
    s->bus->retry_nsector = s->nsector;
    if (s->bus->dma->ops->start_dma) {
        s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
    }
}

static void ide_sector_write(IDEState *s);

static void ide_sector_write_timer_cb(void *opaque)
{
    IDEState *s = opaque;
    ide_set_irq(s->bus);
}

static void ide_sector_write_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    if (ret == -ECANCELED) {
        return;
    }
    block_acct_done(blk_get_stats(s->blk), &s->acct);

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
            return;
        }
    }

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }
    s->nsector -= n;
    s->io_buffer_offset += 512 * n;

    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    } else {
        int n1 = s->nsector;
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        }
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
                           ide_sector_write);
    }

    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performance. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
    } else {
        ide_set_irq(s->bus);
    }
}

static void ide_sector_write(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
    sector_num = ide_get_sector(s);
#if defined(DEBUG_IDE)
    printf("sector=%" PRId64 "\n", sector_num);
#endif
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        return;
    }

    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
                                  ide_sector_write_cb, s);
}

static void ide_flush_cb(void *opaque, int ret)
{
    IDEState *s = opaque;

    s->pio_aiocb = NULL;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret < 0) {
        /* XXX: What sector number to set here? */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
            return;
        }
    }

    if (s->blk) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_cmd_done(s);
    ide_set_irq(s->bus);
}

static void ide_flush_cache(IDEState *s)
{
    if (s->blk == NULL) {
        ide_flush_cb(s, 0);
        return;
    }

    s->status |= BUSY_STAT;
    block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
    s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
}

static void ide_cfata_metadata_inquiry(IDEState *s)
{
    uint16_t *p;
    uint32_t spd;

    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);
    spd = ((s->mdata_size - 1) >> 9) + 1;

    put_le16(p + 0, 0x0001);                 /* Data format revision */
    put_le16(p + 1, 0x0000);                 /* Media property: silicon */
    put_le16(p + 2, s->media_changed);       /* Media status */
    put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
    put_le16(p + 4, s->mdata_size >> 16);    /* Capacity in bytes (high) */
    put_le16(p + 5, spd & 0xffff);           /* Sectors per device (low) */
    put_le16(p + 6, spd >> 16);              /* Sectors per device (high) */
}

static void ide_cfata_metadata_read(IDEState *s)
{
    uint16_t *p;

    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);

    put_le16(p + 0, s->media_changed);       /* Media status */
    memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}

static void ide_cfata_metadata_write(IDEState *s)
{
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    s->media_changed = 0;

    memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           s->io_buffer + 2,
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}

/* called when the inserted state of the media has changed */
static void ide_cd_change_cb(void *opaque, bool load)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    s->tray_open = !load;
    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /*
     * First indicate to the guest that a CD has been removed.  That's
     * done on the next command the guest sends us.
     *
     * Then we set UNIT_ATTENTION, by which the guest will
     * detect a new CD in the drive.  See ide_atapi_cmd() for details.
     */
    s->cdrom_changed = 1;
    s->events.new_media = true;
    s->events.eject_request = false;
    ide_set_irq(s->bus);
}

static void ide_cd_eject_request_cb(void *opaque, bool force)
{
    IDEState *s = opaque;

    s->events.eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
    ide_set_irq(s->bus);
}

static void ide_cmd_lba48_transform(IDEState *s, int lba48)
{
    s->lba48 = lba48;

    /* handle the 'magic' 0 nsector count conversion here. to avoid
     * fiddling with the rest of the read logic, we just store the
     * full sector count in ->nsector and ignore ->hob_nsector from now
     */
    if (!s->lba48) {
        if (!s->nsector)
            s->nsector = 256;
    } else {
        if (!s->nsector && !s->hob_nsector)
            s->nsector = 65536;
        else {
            int lo = s->nsector;
            int hi = s->hob_nsector;

            s->nsector = (hi << 8) | lo;
        }
    }
}

static void ide_clear_hob(IDEBus *bus)
{
    /* any write clears HOB high bit of device control register */
    bus->ifs[0].select &= ~(1 << 7);
    bus->ifs[1].select &= ~(1 << 7);
}

void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;

#ifdef DEBUG_IDE
    printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
#endif

    addr &= 7;

    /* ignore writes to command block while busy with previous command */
    if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
        return;

    switch(addr) {
    case 0:
        break;
    case 1:
        ide_clear_hob(bus);
        /* NOTE: data is written to the two drives */
        bus->ifs[0].hob_feature = bus->ifs[0].feature;
        bus->ifs[1].hob_feature = bus->ifs[1].feature;
        bus->ifs[0].feature = val;
        bus->ifs[1].feature = val;
        break;
    case 2:
        ide_clear_hob(bus);
        bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
        bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
        bus->ifs[0].nsector = val;
        bus->ifs[1].nsector = val;
        break;
    case 3:
        ide_clear_hob(bus);
        bus->ifs[0].hob_sector = bus->ifs[0].sector;
        bus->ifs[1].hob_sector = bus->ifs[1].sector;
        bus->ifs[0].sector = val;
        bus->ifs[1].sector = val;
        break;
    case 4:
        ide_clear_hob(bus);
        bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
        bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
        bus->ifs[0].lcyl = val;
        bus->ifs[1].lcyl = val;
        break;
    case 5:
        ide_clear_hob(bus);
        bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
        bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
        bus->ifs[0].hcyl = val;
        bus->ifs[1].hcyl = val;
        break;
    case 6:
        /* FIXME: HOB readback uses bit 7 */
        bus->ifs[0].select = (val & ~0x10) | 0xa0;
        bus->ifs[1].select = (val | 0x10) | 0xa0;
        /* select drive */
        bus->unit = (val >> 4) & 1;
        break;
    default:
    case 7:
        /* command */
        ide_exec_cmd(bus, val);
        break;
    }
}

static bool cmd_nop(IDEState *s, uint8_t cmd)
{
    return true;
}

static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case DSM_TRIM:
        if (s->blk) {
            ide_sector_start_dma(s, IDE_DMA_TRIM);
            return false;
        }
        break;
    }

    ide_abort_command(s);
    return true;
}

static bool cmd_identify(IDEState *s, uint8_t cmd)
{
    if (s->blk && s->drive_kind != IDE_CD) {
        if (s->drive_kind != IDE_CFATA) {
            ide_identify(s);
        } else {
            ide_cfata_identify(s);
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;
    } else {
        if (s->drive_kind == IDE_CD) {
            ide_set_signature(s);
        }
        ide_abort_command(s);
    }

    return true;
}

static bool cmd_verify(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_VERIFY_EXT);

    /* do sector number check ? */
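    /*
     * No medium access is performed for VERIFY here; the emulated drive
     * only applies the LBA48 sector-count transform and reports success.
     */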
    ide_cmd_lba48_transform(s, lba48);

    return true;
}

static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
{
    if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
        /* Disable Read and Write Multiple */
        s->mult_sectors = 0;
    } else if ((s->nsector & 0xff) != 0 &&
        ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
         (s->nsector & (s->nsector - 1)) != 0)) {
        ide_abort_command(s);
    } else {
        s->mult_sectors = s->nsector & 0xff;
    }

    return true;
}

static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_MULTREAD_EXT);

    if (!s->blk || !s->mult_sectors) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = s->mult_sectors;
    ide_sector_read(s);
    return false;
}

static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_MULTWRITE_EXT);
    int n;

    if (!s->blk || !s->mult_sectors) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);

    s->req_nb_sectors = s->mult_sectors;
    n = MIN(s->nsector, s->req_nb_sectors);

    s->status = SEEK_STAT | READY_STAT;
    ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);

    s->media_changed = 1;

    return false;
}

static bool cmd_read_pio(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_READ_EXT);

    if (s->drive_kind == IDE_CD) {
        ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
        ide_abort_command(s);
        return true;
    }

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = 1;
    ide_sector_read(s);

    return false;
}

static bool cmd_write_pio(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_WRITE_EXT);

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);

    s->req_nb_sectors = 1;
    s->status = SEEK_STAT | READY_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);

    s->media_changed = 1;

    return false;
}

static bool cmd_read_dma(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_READDMA_EXT);

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    ide_sector_start_dma(s, IDE_DMA_READ);

    return false;
}

static bool cmd_write_dma(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_WRITEDMA_EXT);

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    ide_sector_start_dma(s, IDE_DMA_WRITE);

    s->media_changed = 1;

    return false;
}

static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
{
    ide_flush_cache(s);
    return false;
}

static bool cmd_seek(IDEState *s, uint8_t cmd)
{
    /* XXX: Check that seek is within bounds */
    return true;
}

static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);

    /* Refuse if no sectors are addressable (e.g. medium not inserted) */
    if (s->nb_sectors == 0) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    ide_set_sector(s, s->nb_sectors - 1);

    return true;
}

static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
{
    s->nsector = 0xff; /* device active or idle */
    return true;
}

static bool cmd_set_features(IDEState *s, uint8_t cmd)
{
    uint16_t *identify_data;

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    /* XXX: valid for CDROM ? */
    switch (s->feature) {
    case 0x02: /* write cache enable */
        blk_set_enable_write_cache(s->blk, true);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
        return true;
    case 0x82: /* write cache disable */
        blk_set_enable_write_cache(s->blk, false);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | 1);
        ide_flush_cache(s);
        return false;
    case 0xcc: /* reverting to power-on defaults enable */
    case 0x66: /* reverting to power-on defaults disable */
    case 0xaa: /* read look-ahead enable */
    case 0x55: /* read look-ahead disable */
    case 0x05: /* set advanced power management mode */
    case 0x85: /* disable advanced power management mode */
    case 0x69: /* NOP */
    case 0x67: /* NOP */
    case 0x96: /* NOP */
    case 0x9a: /* NOP */
    case 0x42: /* enable Automatic Acoustic Mode */
    case 0xc2: /* disable Automatic Acoustic Mode */
        return true;
    case 0x03: /* set transfer mode */
        {
            uint8_t val = s->nsector & 0x07;
            identify_data = (uint16_t *)s->identify_data;

            switch (s->nsector >> 3) {
            case 0x00: /* pio default */
            case 0x01: /* pio mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x02: /* single word dma mode */
                put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x04: /* mdma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x08: /* udma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
                break;
            default:
                goto abort_cmd;
            }
            return true;
        }
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}


/*** ATAPI commands ***/

static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
{
    ide_atapi_identify(s);
    s->status = READY_STAT | SEEK_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
    ide_set_irq(s->bus);
    return false;
}

static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
{
    ide_set_signature(s);

    if (s->drive_kind == IDE_CD) {
        s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
                        * devices to return a clear status register
                        * with READY_STAT *not* set. */
        s->error = 0x01;
    } else {
        s->status = READY_STAT | SEEK_STAT;
        /* The bits of the error register are not as usual for this command!
         * They are part of the regular output (this is why ERR_STAT isn't set)
         * Device 0 passed, Device 1 passed or not present. */
        s->error = 0x01;
        ide_set_irq(s->bus);
    }

    return false;
}

static bool cmd_device_reset(IDEState *s, uint8_t cmd)
{
    ide_set_signature(s);
    s->status = 0x00; /* NOTE: READY is _not_ set */
    s->error = 0x01;

    return false;
}

static bool cmd_packet(IDEState *s, uint8_t cmd)
{
    /* overlapping commands not supported */
    if (s->feature & 0x02) {
        ide_abort_command(s);
        return true;
    }

    s->status = READY_STAT | SEEK_STAT;
    s->atapi_dma = s->feature & 1;
    s->nsector = 1;
    ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
                       ide_atapi_cmd);
    return false;
}


/*** CF-ATA commands ***/

static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
{
    s->error = 0x09;    /* miscellaneous error */
    s->status = READY_STAT | SEEK_STAT;
    ide_set_irq(s->bus);

    return false;
}

static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
{
    /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
     * required for Windows 8 to work with AHCI */

    if (cmd == CFA_WEAR_LEVEL) {
        s->nsector = 0;
    }

    if (cmd == CFA_ERASE_SECTORS) {
        s->media_changed = 1;
    }

    return true;
}

static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
{
    s->status = READY_STAT | SEEK_STAT;

    memset(s->io_buffer, 0, 0x200);
    s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
    s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
    s->io_buffer[0x02] = s->select;                 /* Head */
    s->io_buffer[0x03] = s->sector;                 /* Sector */
    s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
    s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
    s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
    s->io_buffer[0x13] = 0x00;                      /* Erase flag */
    s->io_buffer[0x18] = 0x00;                      /* Hot count */
    s->io_buffer[0x19] = 0x00;                      /* Hot count */
    s->io_buffer[0x1a] = 0x01;                      /* Hot count */

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    ide_set_irq(s->bus);

    return false;
}

static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case 0x02: /* Inquiry Metadata Storage */
        ide_cfata_metadata_inquiry(s);
        break;
    case 0x03: /* Read Metadata Storage */
        ide_cfata_metadata_read(s);
        break;
    case 0x04: /* Write Metadata Storage */
        ide_cfata_metadata_write(s);
        break;
    default:
        ide_abort_command(s);
        return true;
    }

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    s->status = 0x00; /* NOTE: READY is _not_ set */
    ide_set_irq(s->bus);

    return false;
}

static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case 0x01:  /* sense temperature in device */
        s->nsector = 0x50;      /* +20 C */
        break;
    default:
        ide_abort_command(s);
        return true;
    }

    return true;
}


/*** SMART commands ***/

static bool cmd_smart(IDEState *s, uint8_t cmd)
{
    int n;

    if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
        goto abort_cmd;
    }

    if (!s->smart_enabled && s->feature != SMART_ENABLE) {
        goto abort_cmd;
    }

    switch (s->feature) {
    case SMART_DISABLE:
        s->smart_enabled = 0;
        return true;

    case SMART_ENABLE:
        s->smart_enabled = 1;
        return true;

    case SMART_ATTR_AUTOSAVE:
        switch (s->sector) {
        case 0x00:
            s->smart_autosave = 0;
            break;
        case 0xf1:
            s->smart_autosave = 1;
            break;
        default:
            goto abort_cmd;
        }
        return true;

    case SMART_STATUS:
        if (!s->smart_errors) {
            s->hcyl = 0xc2;
            s->lcyl = 0x4f;
        } else {
            s->hcyl = 0x2c;
            s->lcyl = 0xf4;
        }
        return true;

    case SMART_READ_THRESH:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
            s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
        }

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_DATA:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            int i;
            for (i = 0; i < 11; i++) {
                s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
            }
        }

        s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
        if (s->smart_selftest_count == 0) {
            s->io_buffer[363] = 0;
        } else {
            s->io_buffer[363] =
                s->smart_selftest_data[3 +
                           (s->smart_selftest_count - 1) *
                           24];
        }
        s->io_buffer[364] = 0x20;
        s->io_buffer[365] = 0x01;
        /* offline data collection capacity: execute + self-test*/
        s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
        s->io_buffer[368] = 0x03; /* smart capability (1) */
        s->io_buffer[369] = 0x00; /* smart capability (2) */
        s->io_buffer[370] = 0x01; /* error logging supported */
        s->io_buffer[372] = 0x02; /* minutes for poll short test */
        s->io_buffer[373] = 0x36; /* minutes for poll ext test */
        s->io_buffer[374] = 0x01; /* minutes for poll conveyance */

        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_LOG:
        switch (s->sector) {
        case 0x01: /* summary smart error log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            s->io_buffer[1] = 0x00; /* no error entries */
            s->io_buffer[452] = s->smart_errors & 0xff;
            s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        case 0x06: /* smart self test log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            if (s->smart_selftest_count == 0) {
                s->io_buffer[508] = 0;
            } else {
                s->io_buffer[508] = s->smart_selftest_count;
                for (n = 2; n < 506; n++) {
                    s->io_buffer[n] = s->smart_selftest_data[n];
                }
            }

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        default:
            goto abort_cmd;
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_EXECUTE_OFFLINE:
        switch (s->sector) {
        case 0: /* off-line routine */
        case 1: /* short self test */
        case 2: /* extended self test */
            s->smart_selftest_count++;
            if (s->smart_selftest_count > 21) {
                s->smart_selftest_count = 1;
            }
            n = 2 + (s->smart_selftest_count - 1) * 24;
            s->smart_selftest_data[n] = s->sector;
            s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
            s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
            s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
            break;
        default:
            goto abort_cmd;
        }
        return true;
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}

#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)

/* See ACS-2 T13/2015-D Table B.2 Command codes */
static const struct {
    /* Returns true if the completion code should be run */
    bool (*handler)(IDEState *s, uint8_t cmd);
    int flags;
} ide_cmd_table[0x100] = {
    /* NOP not implemented, mandatory for CD */
    [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
    [WIN_DSM]                     = { cmd_data_set_management, ALL_OK },
    [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
    [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
    [WIN_READ]                    = { cmd_read_pio, ALL_OK },
    [WIN_READ_ONCE]               = { cmd_read_pio, ALL_OK },
    [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
    [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
    [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
    [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
    [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
    [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
    [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
    [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
    [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
    [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
    [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
    [WIN_STANDBYNOW2]             = { cmd_nop, ALL_OK },
    [WIN_IDLEIMMEDIATE2]          = { cmd_nop, ALL_OK },
    [WIN_STANDBY2]                = { cmd_nop, ALL_OK },
    [WIN_SETIDLE2]                = { cmd_nop, ALL_OK },
    [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, ALL_OK | SET_DSC },
    [WIN_SLEEPNOW2]               = { cmd_nop, ALL_OK },
    [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
    [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
    [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
    [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
    [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
    [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
    [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
    [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
    [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
    [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
    [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
    [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
    [WIN_STANDBYNOW1]             = { cmd_nop, ALL_OK },
    [WIN_IDLEIMMEDIATE]           = { cmd_nop, ALL_OK },
    [WIN_STANDBY]                 = { cmd_nop, ALL_OK },
    [WIN_SETIDLE1]                = { cmd_nop, ALL_OK },
    [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, ALL_OK | SET_DSC },
    [WIN_SLEEPNOW1]               = { cmd_nop, ALL_OK },
    [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
    [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
    [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
    [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
    [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
    [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
    [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, ALL_OK | SET_DSC },
};

static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
{
    return cmd < ARRAY_SIZE(ide_cmd_table)
        && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
}

void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
    IDEState *s;
    bool complete;

#if defined(DEBUG_IDE)
    printf("ide: CMD=%02x\n", val);
#endif
    s = idebus_active_if(bus);
    /* ignore commands to a non-existent slave */
    if (s != bus->ifs && !s->blk) {
        return;
    }

    /* Only DEVICE RESET is allowed while BSY and/or DRQ are set */
    if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
        return;

    if (!ide_cmd_permitted(s, val)) {
        ide_abort_command(s);
        ide_set_irq(s->bus);
        return;
    }

    s->status = READY_STAT | BUSY_STAT;
    s->error = 0;
    s->io_buffer_offset = 0;

    complete = ide_cmd_table[val].handler(s, val);
    if (complete) {
        s->status &= ~BUSY_STAT;
        assert(!!s->error == !!(s->status & ERR_STAT));

        if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
            s->status |= SEEK_STAT;
        }

        ide_cmd_done(s);
        ide_set_irq(s->bus);
    }
}

uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint32_t addr;
    int ret, hob;

    addr = addr1 & 7;
    /* FIXME: HOB readback uses bit 7, but it's always set right now */
    //hob = s->select & (1 << 7);
    hob = 0;
    switch(addr) {
    case 0:
        ret = 0xff;
        break;
    case 1:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else if (!hob) {
            ret = s->error;
        } else {
            ret = s->hob_feature;
        }
        break;
    case 2:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->nsector & 0xff;
        } else {
            ret = s->hob_nsector;
        }
        break;
    case 3:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->sector;
        } else {
            ret = s->hob_sector;
        }
        break;
    case 4:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->lcyl;
        } else {
            ret = s->hob_lcyl;
        }
        break;
    case 5:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->hcyl;
        } else {
            ret = s->hob_hcyl;
        }
        break;
    case 6:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else {
            ret = s->select;
        }
        break;
    default:
    case 7:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else {
            ret = s->status;
        }
        qemu_irq_lower(bus->irq);
        break;
    }
#ifdef DEBUG_IDE
    printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
#endif
    return ret;
}

uint32_t ide_status_read(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    int ret;

    if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
        (s != bus->ifs && !s->blk)) {
        ret = 0;
    } else {
        ret = s->status;
    }
#ifdef DEBUG_IDE
    printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
#endif
    return ret;
}

void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s;
    int i;

#ifdef DEBUG_IDE
    printf("ide: write control addr=0x%x val=%02x\n", addr, val);
#endif
    /* common for both drives */
    if (!(bus->cmd & IDE_CMD_RESET) &&
        (val & IDE_CMD_RESET)) {
        /* reset low to high */
        for(i = 0;i < 2; i++) {
            s = &bus->ifs[i];
            s->status = BUSY_STAT | SEEK_STAT;
            s->error = 0x01;
        }
    } else if ((bus->cmd & IDE_CMD_RESET) &&
               !(val & IDE_CMD_RESET)) {
        /* high to low */
        for(i = 0;i < 2; i++) {
            s = &bus->ifs[i];
            if (s->drive_kind == IDE_CD)
                s->status = 0x00; /* NOTE: READY is _not_ set */
            else
                s->status = READY_STAT | SEEK_STAT;
            ide_set_signature(s);
        }
    }

    bus->cmd = val;
}

/*
 * Returns true if the running PIO transfer is a PIO out (i.e. data is
 * transferred from the device to the guest), false if it's a PIO in
 */
static bool ide_is_pio_out(IDEState *s)
{
    if (s->end_transfer_func == ide_sector_write ||
        s->end_transfer_func == ide_atapi_cmd) {
        return false;
    } else if (s->end_transfer_func == ide_sector_read ||
               s->end_transfer_func == ide_transfer_stop ||
               s->end_transfer_func == ide_atapi_cmd_reply_end ||
               s->end_transfer_func == ide_dummy_transfer_stop) {
        return true;
    }

    abort();
}

void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;

    /* PIO data access allowed only when DRQ bit is set. The result of a write
     * during PIO out is indeterminate, just ignore it. */
    if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
        return;
    }

    p = s->data_ptr;
    if (p + 2 > s->data_end) {
        return;
    }

    *(uint16_t *)p = le16_to_cpu(val);
    p += 2;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
}

uint32_t ide_data_readw(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
uint32_t ide_data_readw(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
     * during PIO in is indeterminate, return 0 and don't move forward. */
    if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
        return 0;
    }

    p = s->data_ptr;
    if (p + 2 > s->data_end) {
        return 0;
    }

    ret = cpu_to_le16(*(uint16_t *)p);
    p += 2;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
    return ret;
}

void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;

    /* PIO data access allowed only when DRQ bit is set. The result of a write
     * during PIO out is indeterminate, just ignore it. */
    if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
        return;
    }

    p = s->data_ptr;
    if (p + 4 > s->data_end) {
        return;
    }

    *(uint32_t *)p = le32_to_cpu(val);
    p += 4;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
}

uint32_t ide_data_readl(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
     * during PIO in is indeterminate, return 0 and don't move forward. */
    if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
        return 0;
    }

    p = s->data_ptr;
    if (p + 4 > s->data_end) {
        return 0;
    }

    ret = cpu_to_le32(*(uint32_t *)p);
    p += 4;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
    return ret;
}

static void ide_dummy_transfer_stop(IDEState *s)
{
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->io_buffer[0] = 0xff;
    s->io_buffer[1] = 0xff;
    s->io_buffer[2] = 0xff;
    s->io_buffer[3] = 0xff;
}

static void ide_reset(IDEState *s)
{
#ifdef DEBUG_IDE
    printf("ide: reset\n");
#endif

    if (s->pio_aiocb) {
        blk_aio_cancel(s->pio_aiocb);
        s->pio_aiocb = NULL;
    }

    if (s->drive_kind == IDE_CFATA)
        s->mult_sectors = 0;
    else
        s->mult_sectors = MAX_MULT_SECTORS;
    /* ide regs */
    s->feature = 0;
    s->error = 0;
    s->nsector = 0;
    s->sector = 0;
    s->lcyl = 0;
    s->hcyl = 0;

    /* lba48 */
    s->hob_feature = 0;
    s->hob_sector = 0;
    s->hob_nsector = 0;
    s->hob_lcyl = 0;
    s->hob_hcyl = 0;

    s->select = 0xa0;
    s->status = READY_STAT | SEEK_STAT;

    s->lba48 = 0;

    /* ATAPI specific */
    s->sense_key = 0;
    s->asc = 0;
    s->cdrom_changed = 0;
    s->packet_transfer_size = 0;
    s->elementary_transfer_size = 0;
    s->io_buffer_index = 0;
    s->cd_sector_size = 0;
    s->atapi_dma = 0;
    s->tray_locked = 0;
    s->tray_open = 0;
    /* ATA DMA state */
    s->io_buffer_size = 0;
    s->req_nb_sectors = 0;

    ide_set_signature(s);
    /* init the transfer handler so that 0xffff is returned on data
       accesses */
    s->end_transfer_func = ide_dummy_transfer_stop;
    ide_dummy_transfer_stop(s);
    s->media_changed = 0;
}
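
/*
 * Clarifying note (added, not from the original sources): ide_reset() above
 * only resets the per-drive software state (taskfile registers, ATAPI sense
 * data, the dummy PIO transfer handler).  ide_bus_reset() below resets both
 * drives and, in addition, cancels any in-flight DMA request and asks the
 * DMA provider (e.g. a BMDMA controller) to reset itself.
 */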
void ide_bus_reset(IDEBus *bus)
{
    bus->unit = 0;
    bus->cmd = 0;
    ide_reset(&bus->ifs[0]);
    ide_reset(&bus->ifs[1]);
    ide_clear_hob(bus);

    /* pending async DMA */
    if (bus->dma->aiocb) {
#ifdef DEBUG_AIO
        printf("aio_cancel\n");
#endif
        blk_aio_cancel(bus->dma->aiocb);
        bus->dma->aiocb = NULL;
    }

    /* reset dma provider too */
    if (bus->dma->ops->reset) {
        bus->dma->ops->reset(bus->dma);
    }
}

static bool ide_cd_is_tray_open(void *opaque)
{
    return ((IDEState *)opaque)->tray_open;
}

static bool ide_cd_is_medium_locked(void *opaque)
{
    return ((IDEState *)opaque)->tray_locked;
}

static void ide_resize_cb(void *opaque)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    if (!s->identify_set) {
        return;
    }

    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /* Update the identify data buffer. */
    if (s->drive_kind == IDE_CFATA) {
        ide_cfata_identify_size(s);
    } else {
        /* IDE_CD uses a different set of callbacks entirely. */
        assert(s->drive_kind != IDE_CD);
        ide_identify_size(s);
    }
}

static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};

static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};
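
/*
 * Example (illustrative only, not taken from a specific caller): a device
 * model such as ide-hd would typically set up its BlockBackend and then
 * call ide_init_drive(), checking the return value:
 *
 *     if (ide_init_drive(s, blk, IDE_HD, NULL, serial, NULL, wwn,
 *                        cyls, heads, secs, chs_trans) < 0) {
 *         return -1;   // error already reported via error_report()
 *     }
 *
 * The argument values shown are placeholders; the real callers live in the
 * per-device qdev code.
 */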
int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
                   const char *version, const char *serial, const char *model,
                   uint64_t wwn,
                   uint32_t cylinders, uint32_t heads, uint32_t secs,
                   int chs_trans)
{
    uint64_t nb_sectors;

    s->blk = blk;
    s->drive_kind = kind;

    blk_get_geometry(blk, &nb_sectors);
    s->cylinders = cylinders;
    s->heads = heads;
    s->sectors = secs;
    s->chs_trans = chs_trans;
    s->nb_sectors = nb_sectors;
    s->wwn = wwn;
    /* The SMART values should be preserved across power cycles
       but they aren't. */
    s->smart_enabled = 1;
    s->smart_autosave = 1;
    s->smart_errors = 0;
    s->smart_selftest_count = 0;
    if (kind == IDE_CD) {
        blk_set_dev_ops(blk, &ide_cd_block_ops, s);
        blk_set_guest_block_size(blk, 2048);
    } else {
        if (!blk_is_inserted(s->blk)) {
            error_report("Device needs media, but drive is empty");
            return -1;
        }
        if (blk_is_read_only(blk)) {
            error_report("Can't use a read-only drive");
            return -1;
        }
        blk_set_dev_ops(blk, &ide_hd_block_ops, s);
    }
    if (serial) {
        pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
    } else {
        snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
                 "QM%05d", s->drive_serial);
    }
    if (model) {
        pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
    } else {
        switch (kind) {
        case IDE_CD:
            strcpy(s->drive_model_str, "QEMU DVD-ROM");
            break;
        case IDE_CFATA:
            strcpy(s->drive_model_str, "QEMU MICRODRIVE");
            break;
        default:
            strcpy(s->drive_model_str, "QEMU HARDDISK");
            break;
        }
    }

    if (version) {
        pstrcpy(s->version, sizeof(s->version), version);
    } else {
        pstrcpy(s->version, sizeof(s->version), qemu_get_version());
    }

    ide_reset(s);
    blk_iostatus_enable(blk);
    return 0;
}

static void ide_init1(IDEBus *bus, int unit)
{
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         ide_sector_write_timer_cb, s);
}

static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static void ide_nop(IDEDMA *dma)
{
}

static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
{
    return 0;
}

static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf = ide_nop_int32,
    .restart_dma = ide_nop,
    .rw_buf = ide_nop_int,
};

static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->unit = s->bus->retry_unit;
    ide_set_sector(s, s->bus->retry_sector_num);
    s->nsector = s->bus->retry_nsector;
    s->bus->dma->ops->restart_dma(s->bus->dma);
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    ide_start_dma(s, ide_dma_cb);
}
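
/*
 * Clarifying note (added, not from the original sources): when a request is
 * left pending, typically because the configured error policy stopped the
 * VM, or because migration interrupted an in-flight transfer,
 * bus->error_status records what was being retried.  The bottom half below
 * inspects those IDE_RETRY_* bits and re-issues the request through the
 * matching path (DMA, PIO sector read/write, flush, or an ATAPI DMA restart
 * inferred from the end_transfer_func).
 */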
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        return;
    }

    s = idebus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    /* The HBA has generically asked to be kicked on retry */
    if (error_status & IDE_RETRY_HBA) {
        if (s->bus->dma->ops->restart) {
            s->bus->dma->ops->restart(s->bus->dma);
        }
    }

    if (error_status & IDE_RETRY_DMA) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (error_status & IDE_RETRY_PIO) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else {
        /*
         * We've not got any bits to tell us about ATAPI - but
         * we do have the end_transfer_func that tells us what
         * we're trying to do.
         */
        if (s->end_transfer_func == ide_atapi_cmd) {
            ide_atapi_dma_restart(s);
        }
    }
}

static void ide_restart_cb(void *opaque, int running, RunState state)
{
    IDEBus *bus = opaque;

    if (!running)
        return;

    if (!bus->bh) {
        bus->bh = qemu_bh_new(ide_restart_bh, bus);
        qemu_bh_schedule(bus->bh);
    }
}

void ide_register_restart_cb(IDEBus *bus)
{
    if (bus->dma->ops->restart_dma) {
        qemu_add_vm_change_state_handler(ide_restart_cb, bus);
    }
}

static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};

void ide_init2(IDEBus *bus, qemu_irq irq)
{
    int i;

    for (i = 0; i < 2; i++) {
        ide_init1(bus, i);
        ide_reset(&bus->ifs[i]);
    }
    bus->irq = irq;
    bus->dma = &ide_dma_nop;
}

static const MemoryRegionPortio ide_portio_list[] = {
    { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
    { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
    { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
    PORTIO_END_OF_LIST(),
};

static const MemoryRegionPortio ide_portio2_list[] = {
    { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
    PORTIO_END_OF_LIST(),
};
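
/*
 * Note (added for clarity): the two port lists above describe a classic
 * legacy-mode channel: eight byte-wide taskfile registers at iobase (with
 * 16- and 32-bit access to the data port at offset 0) and the alternate
 * status / device control register at iobase2.  For the traditional primary
 * channel these would be 0x1f0 and 0x3f6, but the actual values are whatever
 * the caller passes to ide_init_ioport().
 */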
void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
{
    /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
       bridge has been set up properly to always register with ISA. */
    isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");

    if (iobase2) {
        isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
    }
}

static bool is_identify_set(void *opaque, int version_id)
{
    IDEState *s = opaque;

    return s->identify_set != 0;
}

static EndTransferFunc* transfer_end_table[] = {
    ide_sector_read,
    ide_sector_write,
    ide_transfer_stop,
    ide_atapi_cmd_reply_end,
    ide_atapi_cmd,
    ide_dummy_transfer_stop,
};

static int transfer_end_table_idx(EndTransferFunc *fn)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
        if (transfer_end_table[i] == fn)
            return i;

    return -1;
}

static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}

static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}

static void ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                __func__);
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }
}

static bool ide_drive_pio_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return ((s->status & DRQ_STAT) != 0)
        || (s->bus->error_status & IDE_RETRY_PIO);
}

static bool ide_tray_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->tray_open || s->tray_locked;
}

static bool ide_atapi_gesn_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->events.new_media || s->events.eject_request;
}

static bool ide_error_needed(void *opaque)
{
    IDEBus *bus = opaque;

    return (bus->error_status != 0);
}

/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
static const VMStateDescription vmstate_ide_atapi_gesn_state = {
    .name = "ide_drive/atapi/gesn_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_atapi_gesn_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(events.new_media, IDEState),
        VMSTATE_BOOL(events.eject_request, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
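
/*
 * Note (added for clarity): the VMStateDescriptions above and below are
 * migration subsections of vmstate_ide_drive.  Each one is only put on the
 * wire when its .needed callback returns true (e.g. a PIO transfer is in
 * flight, the tray is open or locked, or a GESN event is pending), so
 * migration to an older QEMU keeps working whenever the extra state is in
 * its default, reconstructible form.
 */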
"ide_drive/pio_state", 2617 .version_id = 1, 2618 .minimum_version_id = 1, 2619 .pre_save = ide_drive_pio_pre_save, 2620 .post_load = ide_drive_pio_post_load, 2621 .needed = ide_drive_pio_state_needed, 2622 .fields = (VMStateField[]) { 2623 VMSTATE_INT32(req_nb_sectors, IDEState), 2624 VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, 2625 vmstate_info_uint8, uint8_t), 2626 VMSTATE_INT32(cur_io_buffer_offset, IDEState), 2627 VMSTATE_INT32(cur_io_buffer_len, IDEState), 2628 VMSTATE_UINT8(end_transfer_fn_idx, IDEState), 2629 VMSTATE_INT32(elementary_transfer_size, IDEState), 2630 VMSTATE_INT32(packet_transfer_size, IDEState), 2631 VMSTATE_END_OF_LIST() 2632 } 2633 }; 2634 2635 const VMStateDescription vmstate_ide_drive = { 2636 .name = "ide_drive", 2637 .version_id = 3, 2638 .minimum_version_id = 0, 2639 .post_load = ide_drive_post_load, 2640 .fields = (VMStateField[]) { 2641 VMSTATE_INT32(mult_sectors, IDEState), 2642 VMSTATE_INT32(identify_set, IDEState), 2643 VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set), 2644 VMSTATE_UINT8(feature, IDEState), 2645 VMSTATE_UINT8(error, IDEState), 2646 VMSTATE_UINT32(nsector, IDEState), 2647 VMSTATE_UINT8(sector, IDEState), 2648 VMSTATE_UINT8(lcyl, IDEState), 2649 VMSTATE_UINT8(hcyl, IDEState), 2650 VMSTATE_UINT8(hob_feature, IDEState), 2651 VMSTATE_UINT8(hob_sector, IDEState), 2652 VMSTATE_UINT8(hob_nsector, IDEState), 2653 VMSTATE_UINT8(hob_lcyl, IDEState), 2654 VMSTATE_UINT8(hob_hcyl, IDEState), 2655 VMSTATE_UINT8(select, IDEState), 2656 VMSTATE_UINT8(status, IDEState), 2657 VMSTATE_UINT8(lba48, IDEState), 2658 VMSTATE_UINT8(sense_key, IDEState), 2659 VMSTATE_UINT8(asc, IDEState), 2660 VMSTATE_UINT8_V(cdrom_changed, IDEState, 3), 2661 VMSTATE_END_OF_LIST() 2662 }, 2663 .subsections = (const VMStateDescription*[]) { 2664 &vmstate_ide_drive_pio_state, 2665 &vmstate_ide_tray_state, 2666 &vmstate_ide_atapi_gesn_state, 2667 NULL 2668 } 2669 }; 2670 2671 static const VMStateDescription vmstate_ide_error_status = { 2672 .name ="ide_bus/error", 2673 .version_id = 2, 2674 .minimum_version_id = 1, 2675 .needed = ide_error_needed, 2676 .fields = (VMStateField[]) { 2677 VMSTATE_INT32(error_status, IDEBus), 2678 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2), 2679 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2), 2680 VMSTATE_UINT8_V(retry_unit, IDEBus, 2), 2681 VMSTATE_END_OF_LIST() 2682 } 2683 }; 2684 2685 const VMStateDescription vmstate_ide_bus = { 2686 .name = "ide_bus", 2687 .version_id = 1, 2688 .minimum_version_id = 1, 2689 .fields = (VMStateField[]) { 2690 VMSTATE_UINT8(cmd, IDEBus), 2691 VMSTATE_UINT8(unit, IDEBus), 2692 VMSTATE_END_OF_LIST() 2693 }, 2694 .subsections = (const VMStateDescription*[]) { 2695 &vmstate_ide_error_status, 2696 NULL 2697 } 2698 }; 2699 2700 void ide_drive_get(DriveInfo **hd, int n) 2701 { 2702 int i; 2703 int highest_bus = drive_get_max_bus(IF_IDE) + 1; 2704 int max_devs = drive_get_max_devs(IF_IDE); 2705 int n_buses = max_devs ? (n / max_devs) : n; 2706 2707 /* 2708 * Note: The number of actual buses available is not known. 2709 * We compute this based on the size of the DriveInfo* array, n. 2710 * If it is less than max_devs * <num_real_buses>, 2711 * We will stop looking for drives prematurely instead of overfilling 2712 * the array. 2713 */ 2714 2715 if (highest_bus > n_buses) { 2716 error_report("Too many IDE buses defined (%d > %d)", 2717 highest_bus, n_buses); 2718 exit(1); 2719 } 2720 2721 for (i = 0; i < n; i++) { 2722 hd[i] = drive_get_by_index(IF_IDE, i); 2723 } 2724 } 2725