1c6fd2807SJeff Garzik /* 2c6fd2807SJeff Garzik * libata-core.c - helper library for ATA 3c6fd2807SJeff Garzik * 4c6fd2807SJeff Garzik * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5c6fd2807SJeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org 6c6fd2807SJeff Garzik * on emails. 7c6fd2807SJeff Garzik * 8c6fd2807SJeff Garzik * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9c6fd2807SJeff Garzik * Copyright 2003-2004 Jeff Garzik 10c6fd2807SJeff Garzik * 11c6fd2807SJeff Garzik * 12c6fd2807SJeff Garzik * This program is free software; you can redistribute it and/or modify 13c6fd2807SJeff Garzik * it under the terms of the GNU General Public License as published by 14c6fd2807SJeff Garzik * the Free Software Foundation; either version 2, or (at your option) 15c6fd2807SJeff Garzik * any later version. 16c6fd2807SJeff Garzik * 17c6fd2807SJeff Garzik * This program is distributed in the hope that it will be useful, 18c6fd2807SJeff Garzik * but WITHOUT ANY WARRANTY; without even the implied warranty of 19c6fd2807SJeff Garzik * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20c6fd2807SJeff Garzik * GNU General Public License for more details. 21c6fd2807SJeff Garzik * 22c6fd2807SJeff Garzik * You should have received a copy of the GNU General Public License 23c6fd2807SJeff Garzik * along with this program; see the file COPYING. If not, write to 24c6fd2807SJeff Garzik * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
25c6fd2807SJeff Garzik * 26c6fd2807SJeff Garzik * 27c6fd2807SJeff Garzik * libata documentation is available via 'make {ps|pdf}docs', 28c6fd2807SJeff Garzik * as Documentation/DocBook/libata.* 29c6fd2807SJeff Garzik * 30c6fd2807SJeff Garzik * Hardware documentation available from http://www.t13.org/ and 31c6fd2807SJeff Garzik * http://www.sata-io.org/ 32c6fd2807SJeff Garzik * 33c6fd2807SJeff Garzik */ 34c6fd2807SJeff Garzik 35c6fd2807SJeff Garzik #include <linux/kernel.h> 36c6fd2807SJeff Garzik #include <linux/module.h> 37c6fd2807SJeff Garzik #include <linux/pci.h> 38c6fd2807SJeff Garzik #include <linux/init.h> 39c6fd2807SJeff Garzik #include <linux/list.h> 40c6fd2807SJeff Garzik #include <linux/mm.h> 41c6fd2807SJeff Garzik #include <linux/highmem.h> 42c6fd2807SJeff Garzik #include <linux/spinlock.h> 43c6fd2807SJeff Garzik #include <linux/blkdev.h> 44c6fd2807SJeff Garzik #include <linux/delay.h> 45c6fd2807SJeff Garzik #include <linux/timer.h> 46c6fd2807SJeff Garzik #include <linux/interrupt.h> 47c6fd2807SJeff Garzik #include <linux/completion.h> 48c6fd2807SJeff Garzik #include <linux/suspend.h> 49c6fd2807SJeff Garzik #include <linux/workqueue.h> 50c6fd2807SJeff Garzik #include <linux/jiffies.h> 51c6fd2807SJeff Garzik #include <linux/scatterlist.h> 52c6fd2807SJeff Garzik #include <scsi/scsi.h> 53c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h> 54c6fd2807SJeff Garzik #include <scsi/scsi_host.h> 55c6fd2807SJeff Garzik #include <linux/libata.h> 56c6fd2807SJeff Garzik #include <asm/io.h> 57c6fd2807SJeff Garzik #include <asm/semaphore.h> 58c6fd2807SJeff Garzik #include <asm/byteorder.h> 59c6fd2807SJeff Garzik 60c6fd2807SJeff Garzik #include "libata.h" 61c6fd2807SJeff Garzik 628bc3fc47SJeff Garzik #define DRV_VERSION "2.21" /* must be exactly four chars */ 63fda0efc5SJeff Garzik 64fda0efc5SJeff Garzik 65c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */ 66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[] = { 5, 
100, 2000 }; 67c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 68c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 69c6fd2807SJeff Garzik 70c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev, 71c6fd2807SJeff Garzik u16 heads, u16 sectors); 72c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 739f45cbd3SKristen Carlson Accardi static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable); 74c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev); 7575683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 76c6fd2807SJeff Garzik 77f3187195STejun Heo unsigned int ata_print_id = 1; 78c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq; 79c6fd2807SJeff Garzik 80c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq; 81c6fd2807SJeff Garzik 82c6fd2807SJeff Garzik int atapi_enabled = 1; 83c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444); 84c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 85c6fd2807SJeff Garzik 86c6fd2807SJeff Garzik int atapi_dmadir = 0; 87c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444); 88c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 89c6fd2807SJeff Garzik 90baf4fdfaSMark Lord int atapi_passthru16 = 1; 91baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444); 92baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); 93baf4fdfaSMark Lord 94c6fd2807SJeff Garzik int libata_fua = 0; 95c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444); 96c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 97c6fd2807SJeff Garzik 981e999736SAlan Cox static int ata_ignore_hpa = 0; 991e999736SAlan Cox 
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 1001e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 1011e999736SAlan Cox 102c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; 103c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444); 104c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); 105c6fd2807SJeff Garzik 106d7d0dad6SJeff Garzik int libata_noacpi = 1; 107d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444); 10811ef697bSKristen Carlson Accardi MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set"); 10911ef697bSKristen Carlson Accardi 110c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik"); 111c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices"); 112c6fd2807SJeff Garzik MODULE_LICENSE("GPL"); 113c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION); 114c6fd2807SJeff Garzik 115c6fd2807SJeff Garzik 116c6fd2807SJeff Garzik /** 117c6fd2807SJeff Garzik * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 118c6fd2807SJeff Garzik * @tf: Taskfile to convert 119c6fd2807SJeff Garzik * @pmp: Port multiplier port 1209977126cSTejun Heo * @is_cmd: This FIS is for command 1219977126cSTejun Heo * @fis: Buffer into which data will output 122c6fd2807SJeff Garzik * 123c6fd2807SJeff Garzik * Converts a standard ATA taskfile to a Serial ATA 124c6fd2807SJeff Garzik * FIS structure (Register - Host to Device). 125c6fd2807SJeff Garzik * 126c6fd2807SJeff Garzik * LOCKING: 127c6fd2807SJeff Garzik * Inherited from caller. 
128c6fd2807SJeff Garzik */ 1299977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 130c6fd2807SJeff Garzik { 131c6fd2807SJeff Garzik fis[0] = 0x27; /* Register - Host to Device FIS */ 1329977126cSTejun Heo fis[1] = pmp & 0xf; /* Port multiplier number*/ 1339977126cSTejun Heo if (is_cmd) 1349977126cSTejun Heo fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 1359977126cSTejun Heo 136c6fd2807SJeff Garzik fis[2] = tf->command; 137c6fd2807SJeff Garzik fis[3] = tf->feature; 138c6fd2807SJeff Garzik 139c6fd2807SJeff Garzik fis[4] = tf->lbal; 140c6fd2807SJeff Garzik fis[5] = tf->lbam; 141c6fd2807SJeff Garzik fis[6] = tf->lbah; 142c6fd2807SJeff Garzik fis[7] = tf->device; 143c6fd2807SJeff Garzik 144c6fd2807SJeff Garzik fis[8] = tf->hob_lbal; 145c6fd2807SJeff Garzik fis[9] = tf->hob_lbam; 146c6fd2807SJeff Garzik fis[10] = tf->hob_lbah; 147c6fd2807SJeff Garzik fis[11] = tf->hob_feature; 148c6fd2807SJeff Garzik 149c6fd2807SJeff Garzik fis[12] = tf->nsect; 150c6fd2807SJeff Garzik fis[13] = tf->hob_nsect; 151c6fd2807SJeff Garzik fis[14] = 0; 152c6fd2807SJeff Garzik fis[15] = tf->ctl; 153c6fd2807SJeff Garzik 154c6fd2807SJeff Garzik fis[16] = 0; 155c6fd2807SJeff Garzik fis[17] = 0; 156c6fd2807SJeff Garzik fis[18] = 0; 157c6fd2807SJeff Garzik fis[19] = 0; 158c6fd2807SJeff Garzik } 159c6fd2807SJeff Garzik 160c6fd2807SJeff Garzik /** 161c6fd2807SJeff Garzik * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 162c6fd2807SJeff Garzik * @fis: Buffer from which data will be input 163c6fd2807SJeff Garzik * @tf: Taskfile to output 164c6fd2807SJeff Garzik * 165c6fd2807SJeff Garzik * Converts a serial ATA FIS structure to a standard ATA taskfile. 166c6fd2807SJeff Garzik * 167c6fd2807SJeff Garzik * LOCKING: 168c6fd2807SJeff Garzik * Inherited from caller. 
169c6fd2807SJeff Garzik */ 170c6fd2807SJeff Garzik 171c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 172c6fd2807SJeff Garzik { 173c6fd2807SJeff Garzik tf->command = fis[2]; /* status */ 174c6fd2807SJeff Garzik tf->feature = fis[3]; /* error */ 175c6fd2807SJeff Garzik 176c6fd2807SJeff Garzik tf->lbal = fis[4]; 177c6fd2807SJeff Garzik tf->lbam = fis[5]; 178c6fd2807SJeff Garzik tf->lbah = fis[6]; 179c6fd2807SJeff Garzik tf->device = fis[7]; 180c6fd2807SJeff Garzik 181c6fd2807SJeff Garzik tf->hob_lbal = fis[8]; 182c6fd2807SJeff Garzik tf->hob_lbam = fis[9]; 183c6fd2807SJeff Garzik tf->hob_lbah = fis[10]; 184c6fd2807SJeff Garzik 185c6fd2807SJeff Garzik tf->nsect = fis[12]; 186c6fd2807SJeff Garzik tf->hob_nsect = fis[13]; 187c6fd2807SJeff Garzik } 188c6fd2807SJeff Garzik 189c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = { 190c6fd2807SJeff Garzik /* pio multi */ 191c6fd2807SJeff Garzik ATA_CMD_READ_MULTI, 192c6fd2807SJeff Garzik ATA_CMD_WRITE_MULTI, 193c6fd2807SJeff Garzik ATA_CMD_READ_MULTI_EXT, 194c6fd2807SJeff Garzik ATA_CMD_WRITE_MULTI_EXT, 195c6fd2807SJeff Garzik 0, 196c6fd2807SJeff Garzik 0, 197c6fd2807SJeff Garzik 0, 198c6fd2807SJeff Garzik ATA_CMD_WRITE_MULTI_FUA_EXT, 199c6fd2807SJeff Garzik /* pio */ 200c6fd2807SJeff Garzik ATA_CMD_PIO_READ, 201c6fd2807SJeff Garzik ATA_CMD_PIO_WRITE, 202c6fd2807SJeff Garzik ATA_CMD_PIO_READ_EXT, 203c6fd2807SJeff Garzik ATA_CMD_PIO_WRITE_EXT, 204c6fd2807SJeff Garzik 0, 205c6fd2807SJeff Garzik 0, 206c6fd2807SJeff Garzik 0, 207c6fd2807SJeff Garzik 0, 208c6fd2807SJeff Garzik /* dma */ 209c6fd2807SJeff Garzik ATA_CMD_READ, 210c6fd2807SJeff Garzik ATA_CMD_WRITE, 211c6fd2807SJeff Garzik ATA_CMD_READ_EXT, 212c6fd2807SJeff Garzik ATA_CMD_WRITE_EXT, 213c6fd2807SJeff Garzik 0, 214c6fd2807SJeff Garzik 0, 215c6fd2807SJeff Garzik 0, 216c6fd2807SJeff Garzik ATA_CMD_WRITE_FUA_EXT 217c6fd2807SJeff Garzik }; 218c6fd2807SJeff Garzik 219c6fd2807SJeff Garzik /** 220c6fd2807SJeff Garzik * ata_rwcmd_protocol - 
set taskfile r/w commands and protocol 221bd056d7eSTejun Heo * @tf: command to examine and configure 222bd056d7eSTejun Heo * @dev: device tf belongs to 223c6fd2807SJeff Garzik * 224c6fd2807SJeff Garzik * Examine the device configuration and tf->flags to calculate 225c6fd2807SJeff Garzik * the proper read/write commands and protocol to use. 226c6fd2807SJeff Garzik * 227c6fd2807SJeff Garzik * LOCKING: 228c6fd2807SJeff Garzik * caller. 229c6fd2807SJeff Garzik */ 230bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 231c6fd2807SJeff Garzik { 232c6fd2807SJeff Garzik u8 cmd; 233c6fd2807SJeff Garzik 234c6fd2807SJeff Garzik int index, fua, lba48, write; 235c6fd2807SJeff Garzik 236c6fd2807SJeff Garzik fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 237c6fd2807SJeff Garzik lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 238c6fd2807SJeff Garzik write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 239c6fd2807SJeff Garzik 240c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_PIO) { 241c6fd2807SJeff Garzik tf->protocol = ATA_PROT_PIO; 242c6fd2807SJeff Garzik index = dev->multi_count ? 0 : 8; 2439af5c9c9STejun Heo } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 244c6fd2807SJeff Garzik /* Unable to use DMA due to host limitation */ 245c6fd2807SJeff Garzik tf->protocol = ATA_PROT_PIO; 246c6fd2807SJeff Garzik index = dev->multi_count ? 
0 : 8; 247c6fd2807SJeff Garzik } else { 248c6fd2807SJeff Garzik tf->protocol = ATA_PROT_DMA; 249c6fd2807SJeff Garzik index = 16; 250c6fd2807SJeff Garzik } 251c6fd2807SJeff Garzik 252c6fd2807SJeff Garzik cmd = ata_rw_cmds[index + fua + lba48 + write]; 253c6fd2807SJeff Garzik if (cmd) { 254c6fd2807SJeff Garzik tf->command = cmd; 255c6fd2807SJeff Garzik return 0; 256c6fd2807SJeff Garzik } 257c6fd2807SJeff Garzik return -1; 258c6fd2807SJeff Garzik } 259c6fd2807SJeff Garzik 260c6fd2807SJeff Garzik /** 26135b649feSTejun Heo * ata_tf_read_block - Read block address from ATA taskfile 26235b649feSTejun Heo * @tf: ATA taskfile of interest 26335b649feSTejun Heo * @dev: ATA device @tf belongs to 26435b649feSTejun Heo * 26535b649feSTejun Heo * LOCKING: 26635b649feSTejun Heo * None. 26735b649feSTejun Heo * 26835b649feSTejun Heo * Read block address from @tf. This function can handle all 26935b649feSTejun Heo * three address formats - LBA, LBA48 and CHS. tf->protocol and 27035b649feSTejun Heo * flags select the address format to use. 27135b649feSTejun Heo * 27235b649feSTejun Heo * RETURNS: 27335b649feSTejun Heo * Block address read from @tf. 
27435b649feSTejun Heo */ 27535b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 27635b649feSTejun Heo { 27735b649feSTejun Heo u64 block = 0; 27835b649feSTejun Heo 27935b649feSTejun Heo if (tf->flags & ATA_TFLAG_LBA) { 28035b649feSTejun Heo if (tf->flags & ATA_TFLAG_LBA48) { 28135b649feSTejun Heo block |= (u64)tf->hob_lbah << 40; 28235b649feSTejun Heo block |= (u64)tf->hob_lbam << 32; 28335b649feSTejun Heo block |= tf->hob_lbal << 24; 28435b649feSTejun Heo } else 28535b649feSTejun Heo block |= (tf->device & 0xf) << 24; 28635b649feSTejun Heo 28735b649feSTejun Heo block |= tf->lbah << 16; 28835b649feSTejun Heo block |= tf->lbam << 8; 28935b649feSTejun Heo block |= tf->lbal; 29035b649feSTejun Heo } else { 29135b649feSTejun Heo u32 cyl, head, sect; 29235b649feSTejun Heo 29335b649feSTejun Heo cyl = tf->lbam | (tf->lbah << 8); 29435b649feSTejun Heo head = tf->device & 0xf; 29535b649feSTejun Heo sect = tf->lbal; 29635b649feSTejun Heo 29735b649feSTejun Heo block = (cyl * dev->heads + head) * dev->sectors + sect; 29835b649feSTejun Heo } 29935b649feSTejun Heo 30035b649feSTejun Heo return block; 30135b649feSTejun Heo } 30235b649feSTejun Heo 30335b649feSTejun Heo /** 304bd056d7eSTejun Heo * ata_build_rw_tf - Build ATA taskfile for given read/write request 305bd056d7eSTejun Heo * @tf: Target ATA taskfile 306bd056d7eSTejun Heo * @dev: ATA device @tf belongs to 307bd056d7eSTejun Heo * @block: Block address 308bd056d7eSTejun Heo * @n_block: Number of blocks 309bd056d7eSTejun Heo * @tf_flags: RW/FUA etc... 310bd056d7eSTejun Heo * @tag: tag 311bd056d7eSTejun Heo * 312bd056d7eSTejun Heo * LOCKING: 313bd056d7eSTejun Heo * None. 314bd056d7eSTejun Heo * 315bd056d7eSTejun Heo * Build ATA taskfile @tf for read/write request described by 316bd056d7eSTejun Heo * @block, @n_block, @tf_flags and @tag on @dev. 
317bd056d7eSTejun Heo * 318bd056d7eSTejun Heo * RETURNS: 319bd056d7eSTejun Heo * 320bd056d7eSTejun Heo * 0 on success, -ERANGE if the request is too large for @dev, 321bd056d7eSTejun Heo * -EINVAL if the request is invalid. 322bd056d7eSTejun Heo */ 323bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 324bd056d7eSTejun Heo u64 block, u32 n_block, unsigned int tf_flags, 325bd056d7eSTejun Heo unsigned int tag) 326bd056d7eSTejun Heo { 327bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 328bd056d7eSTejun Heo tf->flags |= tf_flags; 329bd056d7eSTejun Heo 3306d1245bfSTejun Heo if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 331bd056d7eSTejun Heo /* yay, NCQ */ 332bd056d7eSTejun Heo if (!lba_48_ok(block, n_block)) 333bd056d7eSTejun Heo return -ERANGE; 334bd056d7eSTejun Heo 335bd056d7eSTejun Heo tf->protocol = ATA_PROT_NCQ; 336bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 337bd056d7eSTejun Heo 338bd056d7eSTejun Heo if (tf->flags & ATA_TFLAG_WRITE) 339bd056d7eSTejun Heo tf->command = ATA_CMD_FPDMA_WRITE; 340bd056d7eSTejun Heo else 341bd056d7eSTejun Heo tf->command = ATA_CMD_FPDMA_READ; 342bd056d7eSTejun Heo 343bd056d7eSTejun Heo tf->nsect = tag << 3; 344bd056d7eSTejun Heo tf->hob_feature = (n_block >> 8) & 0xff; 345bd056d7eSTejun Heo tf->feature = n_block & 0xff; 346bd056d7eSTejun Heo 347bd056d7eSTejun Heo tf->hob_lbah = (block >> 40) & 0xff; 348bd056d7eSTejun Heo tf->hob_lbam = (block >> 32) & 0xff; 349bd056d7eSTejun Heo tf->hob_lbal = (block >> 24) & 0xff; 350bd056d7eSTejun Heo tf->lbah = (block >> 16) & 0xff; 351bd056d7eSTejun Heo tf->lbam = (block >> 8) & 0xff; 352bd056d7eSTejun Heo tf->lbal = block & 0xff; 353bd056d7eSTejun Heo 354bd056d7eSTejun Heo tf->device = 1 << 6; 355bd056d7eSTejun Heo if (tf->flags & ATA_TFLAG_FUA) 356bd056d7eSTejun Heo tf->device |= 1 << 7; 357bd056d7eSTejun Heo } else if (dev->flags & ATA_DFLAG_LBA) { 358bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_LBA; 
359bd056d7eSTejun Heo 360bd056d7eSTejun Heo if (lba_28_ok(block, n_block)) { 361bd056d7eSTejun Heo /* use LBA28 */ 362bd056d7eSTejun Heo tf->device |= (block >> 24) & 0xf; 363bd056d7eSTejun Heo } else if (lba_48_ok(block, n_block)) { 364bd056d7eSTejun Heo if (!(dev->flags & ATA_DFLAG_LBA48)) 365bd056d7eSTejun Heo return -ERANGE; 366bd056d7eSTejun Heo 367bd056d7eSTejun Heo /* use LBA48 */ 368bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_LBA48; 369bd056d7eSTejun Heo 370bd056d7eSTejun Heo tf->hob_nsect = (n_block >> 8) & 0xff; 371bd056d7eSTejun Heo 372bd056d7eSTejun Heo tf->hob_lbah = (block >> 40) & 0xff; 373bd056d7eSTejun Heo tf->hob_lbam = (block >> 32) & 0xff; 374bd056d7eSTejun Heo tf->hob_lbal = (block >> 24) & 0xff; 375bd056d7eSTejun Heo } else 376bd056d7eSTejun Heo /* request too large even for LBA48 */ 377bd056d7eSTejun Heo return -ERANGE; 378bd056d7eSTejun Heo 379bd056d7eSTejun Heo if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 380bd056d7eSTejun Heo return -EINVAL; 381bd056d7eSTejun Heo 382bd056d7eSTejun Heo tf->nsect = n_block & 0xff; 383bd056d7eSTejun Heo 384bd056d7eSTejun Heo tf->lbah = (block >> 16) & 0xff; 385bd056d7eSTejun Heo tf->lbam = (block >> 8) & 0xff; 386bd056d7eSTejun Heo tf->lbal = block & 0xff; 387bd056d7eSTejun Heo 388bd056d7eSTejun Heo tf->device |= ATA_LBA; 389bd056d7eSTejun Heo } else { 390bd056d7eSTejun Heo /* CHS */ 391bd056d7eSTejun Heo u32 sect, head, cyl, track; 392bd056d7eSTejun Heo 393bd056d7eSTejun Heo /* The request -may- be too large for CHS addressing. 
*/ 394bd056d7eSTejun Heo if (!lba_28_ok(block, n_block)) 395bd056d7eSTejun Heo return -ERANGE; 396bd056d7eSTejun Heo 397bd056d7eSTejun Heo if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 398bd056d7eSTejun Heo return -EINVAL; 399bd056d7eSTejun Heo 400bd056d7eSTejun Heo /* Convert LBA to CHS */ 401bd056d7eSTejun Heo track = (u32)block / dev->sectors; 402bd056d7eSTejun Heo cyl = track / dev->heads; 403bd056d7eSTejun Heo head = track % dev->heads; 404bd056d7eSTejun Heo sect = (u32)block % dev->sectors + 1; 405bd056d7eSTejun Heo 406bd056d7eSTejun Heo DPRINTK("block %u track %u cyl %u head %u sect %u\n", 407bd056d7eSTejun Heo (u32)block, track, cyl, head, sect); 408bd056d7eSTejun Heo 409bd056d7eSTejun Heo /* Check whether the converted CHS can fit. 410bd056d7eSTejun Heo Cylinder: 0-65535 411bd056d7eSTejun Heo Head: 0-15 412bd056d7eSTejun Heo Sector: 1-255*/ 413bd056d7eSTejun Heo if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 414bd056d7eSTejun Heo return -ERANGE; 415bd056d7eSTejun Heo 416bd056d7eSTejun Heo tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 417bd056d7eSTejun Heo tf->lbal = sect; 418bd056d7eSTejun Heo tf->lbam = cyl; 419bd056d7eSTejun Heo tf->lbah = cyl >> 8; 420bd056d7eSTejun Heo tf->device |= head; 421bd056d7eSTejun Heo } 422bd056d7eSTejun Heo 423bd056d7eSTejun Heo return 0; 424bd056d7eSTejun Heo } 425bd056d7eSTejun Heo 426bd056d7eSTejun Heo /** 427c6fd2807SJeff Garzik * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 428c6fd2807SJeff Garzik * @pio_mask: pio_mask 429c6fd2807SJeff Garzik * @mwdma_mask: mwdma_mask 430c6fd2807SJeff Garzik * @udma_mask: udma_mask 431c6fd2807SJeff Garzik * 432c6fd2807SJeff Garzik * Pack @pio_mask, @mwdma_mask and @udma_mask into a single 433c6fd2807SJeff Garzik * unsigned int xfer_mask. 434c6fd2807SJeff Garzik * 435c6fd2807SJeff Garzik * LOCKING: 436c6fd2807SJeff Garzik * None. 437c6fd2807SJeff Garzik * 438c6fd2807SJeff Garzik * RETURNS: 439c6fd2807SJeff Garzik * Packed xfer_mask. 
440c6fd2807SJeff Garzik */ 441c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask, 442c6fd2807SJeff Garzik unsigned int mwdma_mask, 443c6fd2807SJeff Garzik unsigned int udma_mask) 444c6fd2807SJeff Garzik { 445c6fd2807SJeff Garzik return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | 446c6fd2807SJeff Garzik ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | 447c6fd2807SJeff Garzik ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); 448c6fd2807SJeff Garzik } 449c6fd2807SJeff Garzik 450c6fd2807SJeff Garzik /** 451c6fd2807SJeff Garzik * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks 452c6fd2807SJeff Garzik * @xfer_mask: xfer_mask to unpack 453c6fd2807SJeff Garzik * @pio_mask: resulting pio_mask 454c6fd2807SJeff Garzik * @mwdma_mask: resulting mwdma_mask 455c6fd2807SJeff Garzik * @udma_mask: resulting udma_mask 456c6fd2807SJeff Garzik * 457c6fd2807SJeff Garzik * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. 458c6fd2807SJeff Garzik * Any NULL distination masks will be ignored. 
459c6fd2807SJeff Garzik */ 460c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask, 461c6fd2807SJeff Garzik unsigned int *pio_mask, 462c6fd2807SJeff Garzik unsigned int *mwdma_mask, 463c6fd2807SJeff Garzik unsigned int *udma_mask) 464c6fd2807SJeff Garzik { 465c6fd2807SJeff Garzik if (pio_mask) 466c6fd2807SJeff Garzik *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; 467c6fd2807SJeff Garzik if (mwdma_mask) 468c6fd2807SJeff Garzik *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; 469c6fd2807SJeff Garzik if (udma_mask) 470c6fd2807SJeff Garzik *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; 471c6fd2807SJeff Garzik } 472c6fd2807SJeff Garzik 473c6fd2807SJeff Garzik static const struct ata_xfer_ent { 474c6fd2807SJeff Garzik int shift, bits; 475c6fd2807SJeff Garzik u8 base; 476c6fd2807SJeff Garzik } ata_xfer_tbl[] = { 477c6fd2807SJeff Garzik { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 }, 478c6fd2807SJeff Garzik { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 }, 479c6fd2807SJeff Garzik { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 }, 480c6fd2807SJeff Garzik { -1, }, 481c6fd2807SJeff Garzik }; 482c6fd2807SJeff Garzik 483c6fd2807SJeff Garzik /** 484c6fd2807SJeff Garzik * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask 485c6fd2807SJeff Garzik * @xfer_mask: xfer_mask of interest 486c6fd2807SJeff Garzik * 487c6fd2807SJeff Garzik * Return matching XFER_* value for @xfer_mask. Only the highest 488c6fd2807SJeff Garzik * bit of @xfer_mask is considered. 489c6fd2807SJeff Garzik * 490c6fd2807SJeff Garzik * LOCKING: 491c6fd2807SJeff Garzik * None. 492c6fd2807SJeff Garzik * 493c6fd2807SJeff Garzik * RETURNS: 494c6fd2807SJeff Garzik * Matching XFER_* value, 0 if no match found. 
495c6fd2807SJeff Garzik */ 496c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask) 497c6fd2807SJeff Garzik { 498c6fd2807SJeff Garzik int highbit = fls(xfer_mask) - 1; 499c6fd2807SJeff Garzik const struct ata_xfer_ent *ent; 500c6fd2807SJeff Garzik 501c6fd2807SJeff Garzik for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 502c6fd2807SJeff Garzik if (highbit >= ent->shift && highbit < ent->shift + ent->bits) 503c6fd2807SJeff Garzik return ent->base + highbit - ent->shift; 504c6fd2807SJeff Garzik return 0; 505c6fd2807SJeff Garzik } 506c6fd2807SJeff Garzik 507c6fd2807SJeff Garzik /** 508c6fd2807SJeff Garzik * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* 509c6fd2807SJeff Garzik * @xfer_mode: XFER_* of interest 510c6fd2807SJeff Garzik * 511c6fd2807SJeff Garzik * Return matching xfer_mask for @xfer_mode. 512c6fd2807SJeff Garzik * 513c6fd2807SJeff Garzik * LOCKING: 514c6fd2807SJeff Garzik * None. 515c6fd2807SJeff Garzik * 516c6fd2807SJeff Garzik * RETURNS: 517c6fd2807SJeff Garzik * Matching xfer_mask, 0 if no match found. 518c6fd2807SJeff Garzik */ 519c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode) 520c6fd2807SJeff Garzik { 521c6fd2807SJeff Garzik const struct ata_xfer_ent *ent; 522c6fd2807SJeff Garzik 523c6fd2807SJeff Garzik for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 524c6fd2807SJeff Garzik if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 525c6fd2807SJeff Garzik return 1 << (ent->shift + xfer_mode - ent->base); 526c6fd2807SJeff Garzik return 0; 527c6fd2807SJeff Garzik } 528c6fd2807SJeff Garzik 529c6fd2807SJeff Garzik /** 530c6fd2807SJeff Garzik * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* 531c6fd2807SJeff Garzik * @xfer_mode: XFER_* of interest 532c6fd2807SJeff Garzik * 533c6fd2807SJeff Garzik * Return matching xfer_shift for @xfer_mode. 534c6fd2807SJeff Garzik * 535c6fd2807SJeff Garzik * LOCKING: 536c6fd2807SJeff Garzik * None. 
537c6fd2807SJeff Garzik * 538c6fd2807SJeff Garzik * RETURNS: 539c6fd2807SJeff Garzik * Matching xfer_shift, -1 if no match found. 540c6fd2807SJeff Garzik */ 541c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode) 542c6fd2807SJeff Garzik { 543c6fd2807SJeff Garzik const struct ata_xfer_ent *ent; 544c6fd2807SJeff Garzik 545c6fd2807SJeff Garzik for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 546c6fd2807SJeff Garzik if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 547c6fd2807SJeff Garzik return ent->shift; 548c6fd2807SJeff Garzik return -1; 549c6fd2807SJeff Garzik } 550c6fd2807SJeff Garzik 551c6fd2807SJeff Garzik /** 552c6fd2807SJeff Garzik * ata_mode_string - convert xfer_mask to string 553c6fd2807SJeff Garzik * @xfer_mask: mask of bits supported; only highest bit counts. 554c6fd2807SJeff Garzik * 555c6fd2807SJeff Garzik * Determine string which represents the highest speed 556c6fd2807SJeff Garzik * (highest bit in @modemask). 557c6fd2807SJeff Garzik * 558c6fd2807SJeff Garzik * LOCKING: 559c6fd2807SJeff Garzik * None. 560c6fd2807SJeff Garzik * 561c6fd2807SJeff Garzik * RETURNS: 562c6fd2807SJeff Garzik * Constant C string representing highest speed listed in 563c6fd2807SJeff Garzik * @mode_mask, or the constant C string "<n/a>". 
564c6fd2807SJeff Garzik */ 565c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask) 566c6fd2807SJeff Garzik { 567c6fd2807SJeff Garzik static const char * const xfer_mode_str[] = { 568c6fd2807SJeff Garzik "PIO0", 569c6fd2807SJeff Garzik "PIO1", 570c6fd2807SJeff Garzik "PIO2", 571c6fd2807SJeff Garzik "PIO3", 572c6fd2807SJeff Garzik "PIO4", 573b352e57dSAlan Cox "PIO5", 574b352e57dSAlan Cox "PIO6", 575c6fd2807SJeff Garzik "MWDMA0", 576c6fd2807SJeff Garzik "MWDMA1", 577c6fd2807SJeff Garzik "MWDMA2", 578b352e57dSAlan Cox "MWDMA3", 579b352e57dSAlan Cox "MWDMA4", 580c6fd2807SJeff Garzik "UDMA/16", 581c6fd2807SJeff Garzik "UDMA/25", 582c6fd2807SJeff Garzik "UDMA/33", 583c6fd2807SJeff Garzik "UDMA/44", 584c6fd2807SJeff Garzik "UDMA/66", 585c6fd2807SJeff Garzik "UDMA/100", 586c6fd2807SJeff Garzik "UDMA/133", 587c6fd2807SJeff Garzik "UDMA7", 588c6fd2807SJeff Garzik }; 589c6fd2807SJeff Garzik int highbit; 590c6fd2807SJeff Garzik 591c6fd2807SJeff Garzik highbit = fls(xfer_mask) - 1; 592c6fd2807SJeff Garzik if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) 593c6fd2807SJeff Garzik return xfer_mode_str[highbit]; 594c6fd2807SJeff Garzik return "<n/a>"; 595c6fd2807SJeff Garzik } 596c6fd2807SJeff Garzik 597c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd) 598c6fd2807SJeff Garzik { 599c6fd2807SJeff Garzik static const char * const spd_str[] = { 600c6fd2807SJeff Garzik "1.5 Gbps", 601c6fd2807SJeff Garzik "3.0 Gbps", 602c6fd2807SJeff Garzik }; 603c6fd2807SJeff Garzik 604c6fd2807SJeff Garzik if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) 605c6fd2807SJeff Garzik return "<unknown>"; 606c6fd2807SJeff Garzik return spd_str[spd - 1]; 607c6fd2807SJeff Garzik } 608c6fd2807SJeff Garzik 609c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev) 610c6fd2807SJeff Garzik { 61109d7f9b0STejun Heo if (ata_dev_enabled(dev)) { 6129af5c9c9STejun Heo if (ata_msg_drv(dev->link->ap)) 613c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 
"disabled\n"); 6144ae72a1eSTejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | 6154ae72a1eSTejun Heo ATA_DNXFER_QUIET); 616c6fd2807SJeff Garzik dev->class++; 617c6fd2807SJeff Garzik } 618c6fd2807SJeff Garzik } 619c6fd2807SJeff Garzik 620c6fd2807SJeff Garzik /** 621c6fd2807SJeff Garzik * ata_devchk - PATA device presence detection 622c6fd2807SJeff Garzik * @ap: ATA channel to examine 623c6fd2807SJeff Garzik * @device: Device to examine (starting at zero) 624c6fd2807SJeff Garzik * 6250d5ff566STejun Heo * This technique was originally described in 6260d5ff566STejun Heo * Hale Landis's ATADRVR (www.ata-atapi.com), and 6270d5ff566STejun Heo * later found its way into the ATA/ATAPI spec. 6280d5ff566STejun Heo * 6290d5ff566STejun Heo * Write a pattern to the ATA shadow registers, 6300d5ff566STejun Heo * and if a device is present, it will respond by 6310d5ff566STejun Heo * correctly storing and echoing back the 6320d5ff566STejun Heo * ATA shadow register contents. 633c6fd2807SJeff Garzik * 634c6fd2807SJeff Garzik * LOCKING: 635c6fd2807SJeff Garzik * caller. 
636c6fd2807SJeff Garzik */ 637c6fd2807SJeff Garzik 6380d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) 639c6fd2807SJeff Garzik { 6400d5ff566STejun Heo struct ata_ioports *ioaddr = &ap->ioaddr; 6410d5ff566STejun Heo u8 nsect, lbal; 6420d5ff566STejun Heo 6430d5ff566STejun Heo ap->ops->dev_select(ap, device); 6440d5ff566STejun Heo 6450d5ff566STejun Heo iowrite8(0x55, ioaddr->nsect_addr); 6460d5ff566STejun Heo iowrite8(0xaa, ioaddr->lbal_addr); 6470d5ff566STejun Heo 6480d5ff566STejun Heo iowrite8(0xaa, ioaddr->nsect_addr); 6490d5ff566STejun Heo iowrite8(0x55, ioaddr->lbal_addr); 6500d5ff566STejun Heo 6510d5ff566STejun Heo iowrite8(0x55, ioaddr->nsect_addr); 6520d5ff566STejun Heo iowrite8(0xaa, ioaddr->lbal_addr); 6530d5ff566STejun Heo 6540d5ff566STejun Heo nsect = ioread8(ioaddr->nsect_addr); 6550d5ff566STejun Heo lbal = ioread8(ioaddr->lbal_addr); 6560d5ff566STejun Heo 6570d5ff566STejun Heo if ((nsect == 0x55) && (lbal == 0xaa)) 6580d5ff566STejun Heo return 1; /* we found a device */ 6590d5ff566STejun Heo 6600d5ff566STejun Heo return 0; /* nothing found */ 661c6fd2807SJeff Garzik } 662c6fd2807SJeff Garzik 663c6fd2807SJeff Garzik /** 664c6fd2807SJeff Garzik * ata_dev_classify - determine device type based on ATA-spec signature 665c6fd2807SJeff Garzik * @tf: ATA taskfile register set for device to be identified 666c6fd2807SJeff Garzik * 667c6fd2807SJeff Garzik * Determine from taskfile register contents whether a device is 668c6fd2807SJeff Garzik * ATA or ATAPI, as per "Signature and persistence" section 669c6fd2807SJeff Garzik * of ATA/PI spec (volume 1, sect 5.14). 670c6fd2807SJeff Garzik * 671c6fd2807SJeff Garzik * LOCKING: 672c6fd2807SJeff Garzik * None. 673c6fd2807SJeff Garzik * 674c6fd2807SJeff Garzik * RETURNS: 675c6fd2807SJeff Garzik * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN 676c6fd2807SJeff Garzik * the event of failure. 
677c6fd2807SJeff Garzik */ 678c6fd2807SJeff Garzik 679c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf) 680c6fd2807SJeff Garzik { 681c6fd2807SJeff Garzik /* Apple's open source Darwin code hints that some devices only 682c6fd2807SJeff Garzik * put a proper signature into the LBA mid/high registers, 683c6fd2807SJeff Garzik * So, we only check those. It's sufficient for uniqueness. 684c6fd2807SJeff Garzik */ 685c6fd2807SJeff Garzik 686c6fd2807SJeff Garzik if (((tf->lbam == 0) && (tf->lbah == 0)) || 687c6fd2807SJeff Garzik ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { 688c6fd2807SJeff Garzik DPRINTK("found ATA device by sig\n"); 689c6fd2807SJeff Garzik return ATA_DEV_ATA; 690c6fd2807SJeff Garzik } 691c6fd2807SJeff Garzik 692c6fd2807SJeff Garzik if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || 693c6fd2807SJeff Garzik ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { 694c6fd2807SJeff Garzik DPRINTK("found ATAPI device by sig\n"); 695c6fd2807SJeff Garzik return ATA_DEV_ATAPI; 696c6fd2807SJeff Garzik } 697c6fd2807SJeff Garzik 698c6fd2807SJeff Garzik DPRINTK("unknown device\n"); 699c6fd2807SJeff Garzik return ATA_DEV_UNKNOWN; 700c6fd2807SJeff Garzik } 701c6fd2807SJeff Garzik 702c6fd2807SJeff Garzik /** 703c6fd2807SJeff Garzik * ata_dev_try_classify - Parse returned ATA device signature 7043f19859eSTejun Heo * @dev: ATA device to classify (starting at zero) 7053f19859eSTejun Heo * @present: device seems present 706c6fd2807SJeff Garzik * @r_err: Value of error register on completion 707c6fd2807SJeff Garzik * 708c6fd2807SJeff Garzik * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 709c6fd2807SJeff Garzik * an ATA/ATAPI-defined set of values is placed in the ATA 710c6fd2807SJeff Garzik * shadow registers, indicating the results of device detection 711c6fd2807SJeff Garzik * and diagnostics. 
712c6fd2807SJeff Garzik * 713c6fd2807SJeff Garzik * Select the ATA device, and read the values from the ATA shadow 714c6fd2807SJeff Garzik * registers. Then parse according to the Error register value, 715c6fd2807SJeff Garzik * and the spec-defined values examined by ata_dev_classify(). 716c6fd2807SJeff Garzik * 717c6fd2807SJeff Garzik * LOCKING: 718c6fd2807SJeff Garzik * caller. 719c6fd2807SJeff Garzik * 720c6fd2807SJeff Garzik * RETURNS: 721c6fd2807SJeff Garzik * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. 722c6fd2807SJeff Garzik */ 7233f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present, 7243f19859eSTejun Heo u8 *r_err) 725c6fd2807SJeff Garzik { 7263f19859eSTejun Heo struct ata_port *ap = dev->link->ap; 727c6fd2807SJeff Garzik struct ata_taskfile tf; 728c6fd2807SJeff Garzik unsigned int class; 729c6fd2807SJeff Garzik u8 err; 730c6fd2807SJeff Garzik 7313f19859eSTejun Heo ap->ops->dev_select(ap, dev->devno); 732c6fd2807SJeff Garzik 733c6fd2807SJeff Garzik memset(&tf, 0, sizeof(tf)); 734c6fd2807SJeff Garzik 735c6fd2807SJeff Garzik ap->ops->tf_read(ap, &tf); 736c6fd2807SJeff Garzik err = tf.feature; 737c6fd2807SJeff Garzik if (r_err) 738c6fd2807SJeff Garzik *r_err = err; 739c6fd2807SJeff Garzik 74093590859SAlan Cox /* see if device passed diags: if master then continue and warn later */ 7413f19859eSTejun Heo if (err == 0 && dev->devno == 0) 74293590859SAlan Cox /* diagnostic fail : do nothing _YET_ */ 7433f19859eSTejun Heo dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; 74493590859SAlan Cox else if (err == 1) 745c6fd2807SJeff Garzik /* do nothing */ ; 7463f19859eSTejun Heo else if ((dev->devno == 0) && (err == 0x81)) 747c6fd2807SJeff Garzik /* do nothing */ ; 748c6fd2807SJeff Garzik else 749c6fd2807SJeff Garzik return ATA_DEV_NONE; 750c6fd2807SJeff Garzik 751c6fd2807SJeff Garzik /* determine if device is ATA or ATAPI */ 752c6fd2807SJeff Garzik class = ata_dev_classify(&tf); 753c6fd2807SJeff Garzik 754d7fbee05STejun Heo if 
(class == ATA_DEV_UNKNOWN) { 755d7fbee05STejun Heo /* If the device failed diagnostic, it's likely to 756d7fbee05STejun Heo * have reported incorrect device signature too. 757d7fbee05STejun Heo * Assume ATA device if the device seems present but 758d7fbee05STejun Heo * device signature is invalid with diagnostic 759d7fbee05STejun Heo * failure. 760d7fbee05STejun Heo */ 761d7fbee05STejun Heo if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) 762d7fbee05STejun Heo class = ATA_DEV_ATA; 763d7fbee05STejun Heo else 764d7fbee05STejun Heo class = ATA_DEV_NONE; 765d7fbee05STejun Heo } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 766d7fbee05STejun Heo class = ATA_DEV_NONE; 767d7fbee05STejun Heo 768c6fd2807SJeff Garzik return class; 769c6fd2807SJeff Garzik } 770c6fd2807SJeff Garzik 771c6fd2807SJeff Garzik /** 772c6fd2807SJeff Garzik * ata_id_string - Convert IDENTIFY DEVICE page into string 773c6fd2807SJeff Garzik * @id: IDENTIFY DEVICE results we will examine 774c6fd2807SJeff Garzik * @s: string into which data is output 775c6fd2807SJeff Garzik * @ofs: offset into identify device page 776c6fd2807SJeff Garzik * @len: length of string to return. must be an even number. 777c6fd2807SJeff Garzik * 778c6fd2807SJeff Garzik * The strings in the IDENTIFY DEVICE page are broken up into 779c6fd2807SJeff Garzik * 16-bit chunks. Run through the string, and output each 780c6fd2807SJeff Garzik * 8-bit chunk linearly, regardless of platform. 781c6fd2807SJeff Garzik * 782c6fd2807SJeff Garzik * LOCKING: 783c6fd2807SJeff Garzik * caller. 
784c6fd2807SJeff Garzik */ 785c6fd2807SJeff Garzik 786c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s, 787c6fd2807SJeff Garzik unsigned int ofs, unsigned int len) 788c6fd2807SJeff Garzik { 789c6fd2807SJeff Garzik unsigned int c; 790c6fd2807SJeff Garzik 791c6fd2807SJeff Garzik while (len > 0) { 792c6fd2807SJeff Garzik c = id[ofs] >> 8; 793c6fd2807SJeff Garzik *s = c; 794c6fd2807SJeff Garzik s++; 795c6fd2807SJeff Garzik 796c6fd2807SJeff Garzik c = id[ofs] & 0xff; 797c6fd2807SJeff Garzik *s = c; 798c6fd2807SJeff Garzik s++; 799c6fd2807SJeff Garzik 800c6fd2807SJeff Garzik ofs++; 801c6fd2807SJeff Garzik len -= 2; 802c6fd2807SJeff Garzik } 803c6fd2807SJeff Garzik } 804c6fd2807SJeff Garzik 805c6fd2807SJeff Garzik /** 806c6fd2807SJeff Garzik * ata_id_c_string - Convert IDENTIFY DEVICE page into C string 807c6fd2807SJeff Garzik * @id: IDENTIFY DEVICE results we will examine 808c6fd2807SJeff Garzik * @s: string into which data is output 809c6fd2807SJeff Garzik * @ofs: offset into identify device page 810c6fd2807SJeff Garzik * @len: length of string to return. must be an odd number. 811c6fd2807SJeff Garzik * 812c6fd2807SJeff Garzik * This function is identical to ata_id_string except that it 813c6fd2807SJeff Garzik * trims trailing spaces and terminates the resulting string with 814c6fd2807SJeff Garzik * null. @len must be actual maximum length (even number) + 1. 815c6fd2807SJeff Garzik * 816c6fd2807SJeff Garzik * LOCKING: 817c6fd2807SJeff Garzik * caller. 
818c6fd2807SJeff Garzik */ 819c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s, 820c6fd2807SJeff Garzik unsigned int ofs, unsigned int len) 821c6fd2807SJeff Garzik { 822c6fd2807SJeff Garzik unsigned char *p; 823c6fd2807SJeff Garzik 824c6fd2807SJeff Garzik WARN_ON(!(len & 1)); 825c6fd2807SJeff Garzik 826c6fd2807SJeff Garzik ata_id_string(id, s, ofs, len - 1); 827c6fd2807SJeff Garzik 828c6fd2807SJeff Garzik p = s + strnlen(s, len - 1); 829c6fd2807SJeff Garzik while (p > s && p[-1] == ' ') 830c6fd2807SJeff Garzik p--; 831c6fd2807SJeff Garzik *p = '\0'; 832c6fd2807SJeff Garzik } 833c6fd2807SJeff Garzik 834db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id) 835db6f8759STejun Heo { 836db6f8759STejun Heo if (ata_id_has_lba(id)) { 837db6f8759STejun Heo if (ata_id_has_lba48(id)) 838db6f8759STejun Heo return ata_id_u64(id, 100); 839db6f8759STejun Heo else 840db6f8759STejun Heo return ata_id_u32(id, 60); 841db6f8759STejun Heo } else { 842db6f8759STejun Heo if (ata_id_current_chs_valid(id)) 843db6f8759STejun Heo return ata_id_u32(id, 57); 844db6f8759STejun Heo else 845db6f8759STejun Heo return id[1] * id[3] * id[6]; 846db6f8759STejun Heo } 847db6f8759STejun Heo } 848db6f8759STejun Heo 8491e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf) 8501e999736SAlan Cox { 8511e999736SAlan Cox u64 sectors = 0; 8521e999736SAlan Cox 8531e999736SAlan Cox sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 8541e999736SAlan Cox sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 8551e999736SAlan Cox sectors |= (tf->hob_lbal & 0xff) << 24; 8561e999736SAlan Cox sectors |= (tf->lbah & 0xff) << 16; 8571e999736SAlan Cox sectors |= (tf->lbam & 0xff) << 8; 8581e999736SAlan Cox sectors |= (tf->lbal & 0xff); 8591e999736SAlan Cox 8601e999736SAlan Cox return ++sectors; 8611e999736SAlan Cox } 8621e999736SAlan Cox 8631e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf) 8641e999736SAlan Cox { 8651e999736SAlan Cox u64 sectors = 0; 8661e999736SAlan Cox 
8671e999736SAlan Cox sectors |= (tf->device & 0x0f) << 24; 8681e999736SAlan Cox sectors |= (tf->lbah & 0xff) << 16; 8691e999736SAlan Cox sectors |= (tf->lbam & 0xff) << 8; 8701e999736SAlan Cox sectors |= (tf->lbal & 0xff); 8711e999736SAlan Cox 8721e999736SAlan Cox return ++sectors; 8731e999736SAlan Cox } 8741e999736SAlan Cox 8751e999736SAlan Cox /** 876c728a914STejun Heo * ata_read_native_max_address - Read native max address 877c728a914STejun Heo * @dev: target device 878c728a914STejun Heo * @max_sectors: out parameter for the result native max address 8791e999736SAlan Cox * 880c728a914STejun Heo * Perform an LBA48 or LBA28 native size query upon the device in 881c728a914STejun Heo * question. 882c728a914STejun Heo * 883c728a914STejun Heo * RETURNS: 884c728a914STejun Heo * 0 on success, -EACCES if command is aborted by the drive. 885c728a914STejun Heo * -EIO on other errors. 8861e999736SAlan Cox */ 887c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) 8881e999736SAlan Cox { 889c728a914STejun Heo unsigned int err_mask; 8901e999736SAlan Cox struct ata_taskfile tf; 891c728a914STejun Heo int lba48 = ata_id_has_lba48(dev->id); 8921e999736SAlan Cox 8931e999736SAlan Cox ata_tf_init(dev, &tf); 8941e999736SAlan Cox 895c728a914STejun Heo /* always clear all address registers */ 8961e999736SAlan Cox tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 897c728a914STejun Heo 898c728a914STejun Heo if (lba48) { 899c728a914STejun Heo tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; 900c728a914STejun Heo tf.flags |= ATA_TFLAG_LBA48; 901c728a914STejun Heo } else 902c728a914STejun Heo tf.command = ATA_CMD_READ_NATIVE_MAX; 903c728a914STejun Heo 9041e999736SAlan Cox tf.protocol |= ATA_PROT_NODATA; 905c728a914STejun Heo tf.device |= ATA_LBA; 9061e999736SAlan Cox 907c728a914STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 908c728a914STejun Heo if (err_mask) { 909c728a914STejun Heo ata_dev_printk(dev, KERN_WARNING, "failed 
to read native " 910c728a914STejun Heo "max address (err_mask=0x%x)\n", err_mask); 911c728a914STejun Heo if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 912c728a914STejun Heo return -EACCES; 913c728a914STejun Heo return -EIO; 914c728a914STejun Heo } 915c728a914STejun Heo 916c728a914STejun Heo if (lba48) 917c728a914STejun Heo *max_sectors = ata_tf_to_lba48(&tf); 918c728a914STejun Heo else 919c728a914STejun Heo *max_sectors = ata_tf_to_lba(&tf); 920c728a914STejun Heo 9211e999736SAlan Cox return 0; 9221e999736SAlan Cox } 9231e999736SAlan Cox 9241e999736SAlan Cox /** 925c728a914STejun Heo * ata_set_max_sectors - Set max sectors 926c728a914STejun Heo * @dev: target device 9276b38d1d1SRandy Dunlap * @new_sectors: new max sectors value to set for the device 9281e999736SAlan Cox * 929c728a914STejun Heo * Set max sectors of @dev to @new_sectors. 930c728a914STejun Heo * 931c728a914STejun Heo * RETURNS: 932c728a914STejun Heo * 0 on success, -EACCES if command is aborted or denied (due to 933c728a914STejun Heo * previous non-volatile SET_MAX) by the drive. -EIO on other 934c728a914STejun Heo * errors. 
9351e999736SAlan Cox */ 93605027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) 9371e999736SAlan Cox { 938c728a914STejun Heo unsigned int err_mask; 9391e999736SAlan Cox struct ata_taskfile tf; 940c728a914STejun Heo int lba48 = ata_id_has_lba48(dev->id); 9411e999736SAlan Cox 9421e999736SAlan Cox new_sectors--; 9431e999736SAlan Cox 9441e999736SAlan Cox ata_tf_init(dev, &tf); 9451e999736SAlan Cox 946c728a914STejun Heo tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 9471e999736SAlan Cox 948c728a914STejun Heo if (lba48) { 949c728a914STejun Heo tf.command = ATA_CMD_SET_MAX_EXT; 950c728a914STejun Heo tf.flags |= ATA_TFLAG_LBA48; 9511e999736SAlan Cox 9521e999736SAlan Cox tf.hob_lbal = (new_sectors >> 24) & 0xff; 9531e999736SAlan Cox tf.hob_lbam = (new_sectors >> 32) & 0xff; 9541e999736SAlan Cox tf.hob_lbah = (new_sectors >> 40) & 0xff; 955c728a914STejun Heo } else 9561e999736SAlan Cox tf.command = ATA_CMD_SET_MAX; 957c728a914STejun Heo 9581e999736SAlan Cox tf.protocol |= ATA_PROT_NODATA; 959c728a914STejun Heo tf.device |= ATA_LBA; 9601e999736SAlan Cox 9611e999736SAlan Cox tf.lbal = (new_sectors >> 0) & 0xff; 9621e999736SAlan Cox tf.lbam = (new_sectors >> 8) & 0xff; 9631e999736SAlan Cox tf.lbah = (new_sectors >> 16) & 0xff; 9641e999736SAlan Cox 965c728a914STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 966c728a914STejun Heo if (err_mask) { 967c728a914STejun Heo ata_dev_printk(dev, KERN_WARNING, "failed to set " 968c728a914STejun Heo "max address (err_mask=0x%x)\n", err_mask); 969c728a914STejun Heo if (err_mask == AC_ERR_DEV && 970c728a914STejun Heo (tf.feature & (ATA_ABORTED | ATA_IDNF))) 971c728a914STejun Heo return -EACCES; 972c728a914STejun Heo return -EIO; 973c728a914STejun Heo } 974c728a914STejun Heo 9751e999736SAlan Cox return 0; 9761e999736SAlan Cox } 9771e999736SAlan Cox 9781e999736SAlan Cox /** 9791e999736SAlan Cox * ata_hpa_resize - Resize a device with an HPA set 9801e999736SAlan Cox * @dev: 
Device to resize 9811e999736SAlan Cox * 9821e999736SAlan Cox * Read the size of an LBA28 or LBA48 disk with HPA features and resize 9831e999736SAlan Cox * it if required to the full size of the media. The caller must check 9841e999736SAlan Cox * the drive has the HPA feature set enabled. 98505027adcSTejun Heo * 98605027adcSTejun Heo * RETURNS: 98705027adcSTejun Heo * 0 on success, -errno on failure. 9881e999736SAlan Cox */ 98905027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev) 9901e999736SAlan Cox { 99105027adcSTejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 99205027adcSTejun Heo int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 99305027adcSTejun Heo u64 sectors = ata_id_n_sectors(dev->id); 99405027adcSTejun Heo u64 native_sectors; 995c728a914STejun Heo int rc; 9961e999736SAlan Cox 99705027adcSTejun Heo /* do we need to do it? */ 99805027adcSTejun Heo if (dev->class != ATA_DEV_ATA || 99905027adcSTejun Heo !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || 100005027adcSTejun Heo (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) 1001c728a914STejun Heo return 0; 10021e999736SAlan Cox 100305027adcSTejun Heo /* read native max address */ 100405027adcSTejun Heo rc = ata_read_native_max_address(dev, &native_sectors); 100505027adcSTejun Heo if (rc) { 100605027adcSTejun Heo /* If HPA isn't going to be unlocked, skip HPA 100705027adcSTejun Heo * resizing from the next try. 
100805027adcSTejun Heo */ 100905027adcSTejun Heo if (!ata_ignore_hpa) { 101005027adcSTejun Heo ata_dev_printk(dev, KERN_WARNING, "HPA support seems " 101105027adcSTejun Heo "broken, will skip HPA handling\n"); 101205027adcSTejun Heo dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 101305027adcSTejun Heo 101405027adcSTejun Heo /* we can continue if device aborted the command */ 101505027adcSTejun Heo if (rc == -EACCES) 101605027adcSTejun Heo rc = 0; 101705027adcSTejun Heo } 101805027adcSTejun Heo 101905027adcSTejun Heo return rc; 102005027adcSTejun Heo } 102105027adcSTejun Heo 102205027adcSTejun Heo /* nothing to do? */ 102305027adcSTejun Heo if (native_sectors <= sectors || !ata_ignore_hpa) { 102405027adcSTejun Heo if (!print_info || native_sectors == sectors) 102505027adcSTejun Heo return 0; 102605027adcSTejun Heo 102705027adcSTejun Heo if (native_sectors > sectors) 10281e999736SAlan Cox ata_dev_printk(dev, KERN_INFO, 102905027adcSTejun Heo "HPA detected: current %llu, native %llu\n", 103005027adcSTejun Heo (unsigned long long)sectors, 103105027adcSTejun Heo (unsigned long long)native_sectors); 103205027adcSTejun Heo else if (native_sectors < sectors) 103305027adcSTejun Heo ata_dev_printk(dev, KERN_WARNING, 103405027adcSTejun Heo "native sectors (%llu) is smaller than " 103505027adcSTejun Heo "sectors (%llu)\n", 103605027adcSTejun Heo (unsigned long long)native_sectors, 103705027adcSTejun Heo (unsigned long long)sectors); 103805027adcSTejun Heo return 0; 10391e999736SAlan Cox } 104037301a55STejun Heo 104105027adcSTejun Heo /* let's unlock HPA */ 104205027adcSTejun Heo rc = ata_set_max_sectors(dev, native_sectors); 104305027adcSTejun Heo if (rc == -EACCES) { 104405027adcSTejun Heo /* if device aborted the command, skip HPA resizing */ 104505027adcSTejun Heo ata_dev_printk(dev, KERN_WARNING, "device aborted resize " 104605027adcSTejun Heo "(%llu -> %llu), skipping HPA handling\n", 104705027adcSTejun Heo (unsigned long long)sectors, 104805027adcSTejun Heo (unsigned long 
long)native_sectors); 104905027adcSTejun Heo dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 105005027adcSTejun Heo return 0; 105105027adcSTejun Heo } else if (rc) 105205027adcSTejun Heo return rc; 105305027adcSTejun Heo 105405027adcSTejun Heo /* re-read IDENTIFY data */ 105505027adcSTejun Heo rc = ata_dev_reread_id(dev, 0); 105605027adcSTejun Heo if (rc) { 105705027adcSTejun Heo ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY " 105805027adcSTejun Heo "data after HPA resizing\n"); 105905027adcSTejun Heo return rc; 106005027adcSTejun Heo } 106105027adcSTejun Heo 106205027adcSTejun Heo if (print_info) { 106305027adcSTejun Heo u64 new_sectors = ata_id_n_sectors(dev->id); 106405027adcSTejun Heo ata_dev_printk(dev, KERN_INFO, 106505027adcSTejun Heo "HPA unlocked: %llu -> %llu, native %llu\n", 106605027adcSTejun Heo (unsigned long long)sectors, 106705027adcSTejun Heo (unsigned long long)new_sectors, 106805027adcSTejun Heo (unsigned long long)native_sectors); 106905027adcSTejun Heo } 107005027adcSTejun Heo 107105027adcSTejun Heo return 0; 10721e999736SAlan Cox } 10731e999736SAlan Cox 1074c6fd2807SJeff Garzik /** 107510305f0fSAlan * ata_id_to_dma_mode - Identify DMA mode from id block 107610305f0fSAlan * @dev: device to identify 1077cc261267SRandy Dunlap * @unknown: mode to assume if we cannot tell 107810305f0fSAlan * 107910305f0fSAlan * Set up the timing values for the device based upon the identify 108010305f0fSAlan * reported values for the DMA mode. This function is used by drivers 108110305f0fSAlan * which rely upon firmware configured modes, but wish to report the 108210305f0fSAlan * mode correctly when possible. 108310305f0fSAlan * 108410305f0fSAlan * In addition we emit similarly formatted messages to the default 108510305f0fSAlan * ata_dev_set_mode handler, in order to provide consistency of 108610305f0fSAlan * presentation. 
108710305f0fSAlan */ 108810305f0fSAlan 108910305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown) 109010305f0fSAlan { 109110305f0fSAlan unsigned int mask; 109210305f0fSAlan u8 mode; 109310305f0fSAlan 109410305f0fSAlan /* Pack the DMA modes */ 109510305f0fSAlan mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA; 109610305f0fSAlan if (dev->id[53] & 0x04) 109710305f0fSAlan mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA; 109810305f0fSAlan 109910305f0fSAlan /* Select the mode in use */ 110010305f0fSAlan mode = ata_xfer_mask2mode(mask); 110110305f0fSAlan 110210305f0fSAlan if (mode != 0) { 110310305f0fSAlan ata_dev_printk(dev, KERN_INFO, "configured for %s\n", 110410305f0fSAlan ata_mode_string(mask)); 110510305f0fSAlan } else { 110610305f0fSAlan /* SWDMA perhaps ? */ 110710305f0fSAlan mode = unknown; 110810305f0fSAlan ata_dev_printk(dev, KERN_INFO, "configured for DMA\n"); 110910305f0fSAlan } 111010305f0fSAlan 111110305f0fSAlan /* Configure the device reporting */ 111210305f0fSAlan dev->xfer_mode = mode; 111310305f0fSAlan dev->xfer_shift = ata_xfer_mode2shift(mode); 111410305f0fSAlan } 111510305f0fSAlan 111610305f0fSAlan /** 1117c6fd2807SJeff Garzik * ata_noop_dev_select - Select device 0/1 on ATA bus 1118c6fd2807SJeff Garzik * @ap: ATA channel to manipulate 1119c6fd2807SJeff Garzik * @device: ATA device (numbered from zero) to select 1120c6fd2807SJeff Garzik * 1121c6fd2807SJeff Garzik * This function performs no actual function. 1122c6fd2807SJeff Garzik * 1123c6fd2807SJeff Garzik * May be used as the dev_select() entry in ata_port_operations. 1124c6fd2807SJeff Garzik * 1125c6fd2807SJeff Garzik * LOCKING: 1126c6fd2807SJeff Garzik * caller. 
1127c6fd2807SJeff Garzik */ 1128c6fd2807SJeff Garzik void ata_noop_dev_select (struct ata_port *ap, unsigned int device) 1129c6fd2807SJeff Garzik { 1130c6fd2807SJeff Garzik } 1131c6fd2807SJeff Garzik 1132c6fd2807SJeff Garzik 1133c6fd2807SJeff Garzik /** 1134c6fd2807SJeff Garzik * ata_std_dev_select - Select device 0/1 on ATA bus 1135c6fd2807SJeff Garzik * @ap: ATA channel to manipulate 1136c6fd2807SJeff Garzik * @device: ATA device (numbered from zero) to select 1137c6fd2807SJeff Garzik * 1138c6fd2807SJeff Garzik * Use the method defined in the ATA specification to 1139c6fd2807SJeff Garzik * make either device 0, or device 1, active on the 1140c6fd2807SJeff Garzik * ATA channel. Works with both PIO and MMIO. 1141c6fd2807SJeff Garzik * 1142c6fd2807SJeff Garzik * May be used as the dev_select() entry in ata_port_operations. 1143c6fd2807SJeff Garzik * 1144c6fd2807SJeff Garzik * LOCKING: 1145c6fd2807SJeff Garzik * caller. 1146c6fd2807SJeff Garzik */ 1147c6fd2807SJeff Garzik 1148c6fd2807SJeff Garzik void ata_std_dev_select (struct ata_port *ap, unsigned int device) 1149c6fd2807SJeff Garzik { 1150c6fd2807SJeff Garzik u8 tmp; 1151c6fd2807SJeff Garzik 1152c6fd2807SJeff Garzik if (device == 0) 1153c6fd2807SJeff Garzik tmp = ATA_DEVICE_OBS; 1154c6fd2807SJeff Garzik else 1155c6fd2807SJeff Garzik tmp = ATA_DEVICE_OBS | ATA_DEV1; 1156c6fd2807SJeff Garzik 11570d5ff566STejun Heo iowrite8(tmp, ap->ioaddr.device_addr); 1158c6fd2807SJeff Garzik ata_pause(ap); /* needed; also flushes, for mmio */ 1159c6fd2807SJeff Garzik } 1160c6fd2807SJeff Garzik 1161c6fd2807SJeff Garzik /** 1162c6fd2807SJeff Garzik * ata_dev_select - Select device 0/1 on ATA bus 1163c6fd2807SJeff Garzik * @ap: ATA channel to manipulate 1164c6fd2807SJeff Garzik * @device: ATA device (numbered from zero) to select 1165c6fd2807SJeff Garzik * @wait: non-zero to wait for Status register BSY bit to clear 1166c6fd2807SJeff Garzik * @can_sleep: non-zero if context allows sleeping 1167c6fd2807SJeff Garzik * 
1168c6fd2807SJeff Garzik * Use the method defined in the ATA specification to 1169c6fd2807SJeff Garzik * make either device 0, or device 1, active on the 1170c6fd2807SJeff Garzik * ATA channel. 1171c6fd2807SJeff Garzik * 1172c6fd2807SJeff Garzik * This is a high-level version of ata_std_dev_select(), 1173c6fd2807SJeff Garzik * which additionally provides the services of inserting 1174c6fd2807SJeff Garzik * the proper pauses and status polling, where needed. 1175c6fd2807SJeff Garzik * 1176c6fd2807SJeff Garzik * LOCKING: 1177c6fd2807SJeff Garzik * caller. 1178c6fd2807SJeff Garzik */ 1179c6fd2807SJeff Garzik 1180c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device, 1181c6fd2807SJeff Garzik unsigned int wait, unsigned int can_sleep) 1182c6fd2807SJeff Garzik { 1183c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 118444877b4eSTejun Heo ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " 118544877b4eSTejun Heo "device %u, wait %u\n", device, wait); 1186c6fd2807SJeff Garzik 1187c6fd2807SJeff Garzik if (wait) 1188c6fd2807SJeff Garzik ata_wait_idle(ap); 1189c6fd2807SJeff Garzik 1190c6fd2807SJeff Garzik ap->ops->dev_select(ap, device); 1191c6fd2807SJeff Garzik 1192c6fd2807SJeff Garzik if (wait) { 11939af5c9c9STejun Heo if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) 1194c6fd2807SJeff Garzik msleep(150); 1195c6fd2807SJeff Garzik ata_wait_idle(ap); 1196c6fd2807SJeff Garzik } 1197c6fd2807SJeff Garzik } 1198c6fd2807SJeff Garzik 1199c6fd2807SJeff Garzik /** 1200c6fd2807SJeff Garzik * ata_dump_id - IDENTIFY DEVICE info debugging output 1201c6fd2807SJeff Garzik * @id: IDENTIFY DEVICE page to dump 1202c6fd2807SJeff Garzik * 1203c6fd2807SJeff Garzik * Dump selected 16-bit words from the given IDENTIFY DEVICE 1204c6fd2807SJeff Garzik * page. 1205c6fd2807SJeff Garzik * 1206c6fd2807SJeff Garzik * LOCKING: 1207c6fd2807SJeff Garzik * caller. 
1208c6fd2807SJeff Garzik */ 1209c6fd2807SJeff Garzik 1210c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id) 1211c6fd2807SJeff Garzik { 1212c6fd2807SJeff Garzik DPRINTK("49==0x%04x " 1213c6fd2807SJeff Garzik "53==0x%04x " 1214c6fd2807SJeff Garzik "63==0x%04x " 1215c6fd2807SJeff Garzik "64==0x%04x " 1216c6fd2807SJeff Garzik "75==0x%04x \n", 1217c6fd2807SJeff Garzik id[49], 1218c6fd2807SJeff Garzik id[53], 1219c6fd2807SJeff Garzik id[63], 1220c6fd2807SJeff Garzik id[64], 1221c6fd2807SJeff Garzik id[75]); 1222c6fd2807SJeff Garzik DPRINTK("80==0x%04x " 1223c6fd2807SJeff Garzik "81==0x%04x " 1224c6fd2807SJeff Garzik "82==0x%04x " 1225c6fd2807SJeff Garzik "83==0x%04x " 1226c6fd2807SJeff Garzik "84==0x%04x \n", 1227c6fd2807SJeff Garzik id[80], 1228c6fd2807SJeff Garzik id[81], 1229c6fd2807SJeff Garzik id[82], 1230c6fd2807SJeff Garzik id[83], 1231c6fd2807SJeff Garzik id[84]); 1232c6fd2807SJeff Garzik DPRINTK("88==0x%04x " 1233c6fd2807SJeff Garzik "93==0x%04x\n", 1234c6fd2807SJeff Garzik id[88], 1235c6fd2807SJeff Garzik id[93]); 1236c6fd2807SJeff Garzik } 1237c6fd2807SJeff Garzik 1238c6fd2807SJeff Garzik /** 1239c6fd2807SJeff Garzik * ata_id_xfermask - Compute xfermask from the given IDENTIFY data 1240c6fd2807SJeff Garzik * @id: IDENTIFY data to compute xfer mask from 1241c6fd2807SJeff Garzik * 1242c6fd2807SJeff Garzik * Compute the xfermask for this device. This is not as trivial 1243c6fd2807SJeff Garzik * as it seems if we must consider early devices correctly. 1244c6fd2807SJeff Garzik * 1245c6fd2807SJeff Garzik * FIXME: pre IDE drive timing (do we care ?). 1246c6fd2807SJeff Garzik * 1247c6fd2807SJeff Garzik * LOCKING: 1248c6fd2807SJeff Garzik * None. 
1249c6fd2807SJeff Garzik * 1250c6fd2807SJeff Garzik * RETURNS: 1251c6fd2807SJeff Garzik * Computed xfermask 1252c6fd2807SJeff Garzik */ 1253c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id) 1254c6fd2807SJeff Garzik { 1255c6fd2807SJeff Garzik unsigned int pio_mask, mwdma_mask, udma_mask; 1256c6fd2807SJeff Garzik 1257c6fd2807SJeff Garzik /* Usual case. Word 53 indicates word 64 is valid */ 1258c6fd2807SJeff Garzik if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { 1259c6fd2807SJeff Garzik pio_mask = id[ATA_ID_PIO_MODES] & 0x03; 1260c6fd2807SJeff Garzik pio_mask <<= 3; 1261c6fd2807SJeff Garzik pio_mask |= 0x7; 1262c6fd2807SJeff Garzik } else { 1263c6fd2807SJeff Garzik /* If word 64 isn't valid then Word 51 high byte holds 1264c6fd2807SJeff Garzik * the PIO timing number for the maximum. Turn it into 1265c6fd2807SJeff Garzik * a mask. 1266c6fd2807SJeff Garzik */ 12677a0f1c8aSLennert Buytenhek u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 126846767aebSAlan Cox if (mode < 5) /* Valid PIO range */ 126946767aebSAlan Cox pio_mask = (2 << mode) - 1; 127046767aebSAlan Cox else 127146767aebSAlan Cox pio_mask = 1; 1272c6fd2807SJeff Garzik 1273c6fd2807SJeff Garzik /* But wait.. there's more. Design your standards by 1274c6fd2807SJeff Garzik * committee and you too can get a free iordy field to 1275c6fd2807SJeff Garzik * process. However its the speeds not the modes that 1276c6fd2807SJeff Garzik * are supported... 
Note drivers using the timing API 1277c6fd2807SJeff Garzik * will get this right anyway 1278c6fd2807SJeff Garzik */ 1279c6fd2807SJeff Garzik } 1280c6fd2807SJeff Garzik 1281c6fd2807SJeff Garzik mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; 1282c6fd2807SJeff Garzik 1283b352e57dSAlan Cox if (ata_id_is_cfa(id)) { 1284b352e57dSAlan Cox /* 1285b352e57dSAlan Cox * Process compact flash extended modes 1286b352e57dSAlan Cox */ 1287b352e57dSAlan Cox int pio = id[163] & 0x7; 1288b352e57dSAlan Cox int dma = (id[163] >> 3) & 7; 1289b352e57dSAlan Cox 1290b352e57dSAlan Cox if (pio) 1291b352e57dSAlan Cox pio_mask |= (1 << 5); 1292b352e57dSAlan Cox if (pio > 1) 1293b352e57dSAlan Cox pio_mask |= (1 << 6); 1294b352e57dSAlan Cox if (dma) 1295b352e57dSAlan Cox mwdma_mask |= (1 << 3); 1296b352e57dSAlan Cox if (dma > 1) 1297b352e57dSAlan Cox mwdma_mask |= (1 << 4); 1298b352e57dSAlan Cox } 1299b352e57dSAlan Cox 1300c6fd2807SJeff Garzik udma_mask = 0; 1301c6fd2807SJeff Garzik if (id[ATA_ID_FIELD_VALID] & (1 << 2)) 1302c6fd2807SJeff Garzik udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; 1303c6fd2807SJeff Garzik 1304c6fd2807SJeff Garzik return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 1305c6fd2807SJeff Garzik } 1306c6fd2807SJeff Garzik 1307c6fd2807SJeff Garzik /** 1308c6fd2807SJeff Garzik * ata_port_queue_task - Queue port_task 1309c6fd2807SJeff Garzik * @ap: The ata_port to queue port_task for 1310c6fd2807SJeff Garzik * @fn: workqueue function to be scheduled 131165f27f38SDavid Howells * @data: data for @fn to use 1312c6fd2807SJeff Garzik * @delay: delay time for workqueue function 1313c6fd2807SJeff Garzik * 1314c6fd2807SJeff Garzik * Schedule @fn(@data) for execution after @delay jiffies using 1315c6fd2807SJeff Garzik * port_task. There is one port_task per port and it's the 1316c6fd2807SJeff Garzik * user(low level driver)'s responsibility to make sure that only 1317c6fd2807SJeff Garzik * one task is active at any given time. 
1318c6fd2807SJeff Garzik * 1319c6fd2807SJeff Garzik * libata core layer takes care of synchronization between 1320c6fd2807SJeff Garzik * port_task and EH. ata_port_queue_task() may be ignored for EH 1321c6fd2807SJeff Garzik * synchronization. 1322c6fd2807SJeff Garzik * 1323c6fd2807SJeff Garzik * LOCKING: 1324c6fd2807SJeff Garzik * Inherited from caller. 1325c6fd2807SJeff Garzik */ 132665f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, 1327c6fd2807SJeff Garzik unsigned long delay) 1328c6fd2807SJeff Garzik { 132965f27f38SDavid Howells PREPARE_DELAYED_WORK(&ap->port_task, fn); 133065f27f38SDavid Howells ap->port_task_data = data; 1331c6fd2807SJeff Garzik 133245a66c1cSOleg Nesterov /* may fail if ata_port_flush_task() in progress */ 133345a66c1cSOleg Nesterov queue_delayed_work(ata_wq, &ap->port_task, delay); 1334c6fd2807SJeff Garzik } 1335c6fd2807SJeff Garzik 1336c6fd2807SJeff Garzik /** 1337c6fd2807SJeff Garzik * ata_port_flush_task - Flush port_task 1338c6fd2807SJeff Garzik * @ap: The ata_port to flush port_task for 1339c6fd2807SJeff Garzik * 1340c6fd2807SJeff Garzik * After this function completes, port_task is guranteed not to 1341c6fd2807SJeff Garzik * be running or scheduled. 
1342c6fd2807SJeff Garzik * 1343c6fd2807SJeff Garzik * LOCKING: 1344c6fd2807SJeff Garzik * Kernel thread context (may sleep) 1345c6fd2807SJeff Garzik */ 1346c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap) 1347c6fd2807SJeff Garzik { 1348c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1349c6fd2807SJeff Garzik 135045a66c1cSOleg Nesterov cancel_rearming_delayed_work(&ap->port_task); 1351c6fd2807SJeff Garzik 1352c6fd2807SJeff Garzik if (ata_msg_ctl(ap)) 1353c6fd2807SJeff Garzik ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); 1354c6fd2807SJeff Garzik } 1355c6fd2807SJeff Garzik 13567102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1357c6fd2807SJeff Garzik { 1358c6fd2807SJeff Garzik struct completion *waiting = qc->private_data; 1359c6fd2807SJeff Garzik 1360c6fd2807SJeff Garzik complete(waiting); 1361c6fd2807SJeff Garzik } 1362c6fd2807SJeff Garzik 1363c6fd2807SJeff Garzik /** 13642432697bSTejun Heo * ata_exec_internal_sg - execute libata internal command 1365c6fd2807SJeff Garzik * @dev: Device to which the command is sent 1366c6fd2807SJeff Garzik * @tf: Taskfile registers for the command and the result 1367c6fd2807SJeff Garzik * @cdb: CDB for packet command 1368c6fd2807SJeff Garzik * @dma_dir: Data tranfer direction of the command 13692432697bSTejun Heo * @sg: sg list for the data buffer of the command 13702432697bSTejun Heo * @n_elem: Number of sg entries 1371c6fd2807SJeff Garzik * 1372c6fd2807SJeff Garzik * Executes libata internal command with timeout. @tf contains 1373c6fd2807SJeff Garzik * command on entry and result on return. Timeout and error 1374c6fd2807SJeff Garzik * conditions are reported via return value. No recovery action 1375c6fd2807SJeff Garzik * is taken after a command times out. It's caller's duty to 1376c6fd2807SJeff Garzik * clean up after timeout. 1377c6fd2807SJeff Garzik * 1378c6fd2807SJeff Garzik * LOCKING: 1379c6fd2807SJeff Garzik * None. Should be called with kernel context, might sleep. 
1380c6fd2807SJeff Garzik * 1381c6fd2807SJeff Garzik * RETURNS: 1382c6fd2807SJeff Garzik * Zero on success, AC_ERR_* mask on failure 1383c6fd2807SJeff Garzik */ 13842432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev, 1385c6fd2807SJeff Garzik struct ata_taskfile *tf, const u8 *cdb, 13862432697bSTejun Heo int dma_dir, struct scatterlist *sg, 13872432697bSTejun Heo unsigned int n_elem) 1388c6fd2807SJeff Garzik { 13899af5c9c9STejun Heo struct ata_link *link = dev->link; 13909af5c9c9STejun Heo struct ata_port *ap = link->ap; 1391c6fd2807SJeff Garzik u8 command = tf->command; 1392c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1393c6fd2807SJeff Garzik unsigned int tag, preempted_tag; 1394c6fd2807SJeff Garzik u32 preempted_sactive, preempted_qc_active; 1395c6fd2807SJeff Garzik DECLARE_COMPLETION_ONSTACK(wait); 1396c6fd2807SJeff Garzik unsigned long flags; 1397c6fd2807SJeff Garzik unsigned int err_mask; 1398c6fd2807SJeff Garzik int rc; 1399c6fd2807SJeff Garzik 1400c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1401c6fd2807SJeff Garzik 1402c6fd2807SJeff Garzik /* no internal command while frozen */ 1403c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) { 1404c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1405c6fd2807SJeff Garzik return AC_ERR_SYSTEM; 1406c6fd2807SJeff Garzik } 1407c6fd2807SJeff Garzik 1408c6fd2807SJeff Garzik /* initialize internal qc */ 1409c6fd2807SJeff Garzik 1410c6fd2807SJeff Garzik /* XXX: Tag 0 is used for drivers with legacy EH as some 1411c6fd2807SJeff Garzik * drivers choke if any other tag is given. This breaks 1412c6fd2807SJeff Garzik * ata_tag_internal() test for those drivers. Don't use new 1413c6fd2807SJeff Garzik * EH stuff without converting to it. 
1414c6fd2807SJeff Garzik */ 1415c6fd2807SJeff Garzik if (ap->ops->error_handler) 1416c6fd2807SJeff Garzik tag = ATA_TAG_INTERNAL; 1417c6fd2807SJeff Garzik else 1418c6fd2807SJeff Garzik tag = 0; 1419c6fd2807SJeff Garzik 1420c6fd2807SJeff Garzik if (test_and_set_bit(tag, &ap->qc_allocated)) 1421c6fd2807SJeff Garzik BUG(); 1422c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1423c6fd2807SJeff Garzik 1424c6fd2807SJeff Garzik qc->tag = tag; 1425c6fd2807SJeff Garzik qc->scsicmd = NULL; 1426c6fd2807SJeff Garzik qc->ap = ap; 1427c6fd2807SJeff Garzik qc->dev = dev; 1428c6fd2807SJeff Garzik ata_qc_reinit(qc); 1429c6fd2807SJeff Garzik 14309af5c9c9STejun Heo preempted_tag = link->active_tag; 14319af5c9c9STejun Heo preempted_sactive = link->sactive; 1432c6fd2807SJeff Garzik preempted_qc_active = ap->qc_active; 14339af5c9c9STejun Heo link->active_tag = ATA_TAG_POISON; 14349af5c9c9STejun Heo link->sactive = 0; 1435c6fd2807SJeff Garzik ap->qc_active = 0; 1436c6fd2807SJeff Garzik 1437c6fd2807SJeff Garzik /* prepare & issue qc */ 1438c6fd2807SJeff Garzik qc->tf = *tf; 1439c6fd2807SJeff Garzik if (cdb) 1440c6fd2807SJeff Garzik memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1441c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_RESULT_TF; 1442c6fd2807SJeff Garzik qc->dma_dir = dma_dir; 1443c6fd2807SJeff Garzik if (dma_dir != DMA_NONE) { 14442432697bSTejun Heo unsigned int i, buflen = 0; 14452432697bSTejun Heo 14462432697bSTejun Heo for (i = 0; i < n_elem; i++) 14472432697bSTejun Heo buflen += sg[i].length; 14482432697bSTejun Heo 14492432697bSTejun Heo ata_sg_init(qc, sg, n_elem); 145049c80429SBrian King qc->nbytes = buflen; 1451c6fd2807SJeff Garzik } 1452c6fd2807SJeff Garzik 1453c6fd2807SJeff Garzik qc->private_data = &wait; 1454c6fd2807SJeff Garzik qc->complete_fn = ata_qc_complete_internal; 1455c6fd2807SJeff Garzik 1456c6fd2807SJeff Garzik ata_qc_issue(qc); 1457c6fd2807SJeff Garzik 1458c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1459c6fd2807SJeff Garzik 1460c6fd2807SJeff 
Garzik rc = wait_for_completion_timeout(&wait, ata_probe_timeout); 1461c6fd2807SJeff Garzik 1462c6fd2807SJeff Garzik ata_port_flush_task(ap); 1463c6fd2807SJeff Garzik 1464c6fd2807SJeff Garzik if (!rc) { 1465c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1466c6fd2807SJeff Garzik 1467c6fd2807SJeff Garzik /* We're racing with irq here. If we lose, the 1468c6fd2807SJeff Garzik * following test prevents us from completing the qc 1469c6fd2807SJeff Garzik * twice. If we win, the port is frozen and will be 1470c6fd2807SJeff Garzik * cleaned up by ->post_internal_cmd(). 1471c6fd2807SJeff Garzik */ 1472c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_ACTIVE) { 1473c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 1474c6fd2807SJeff Garzik 1475c6fd2807SJeff Garzik if (ap->ops->error_handler) 1476c6fd2807SJeff Garzik ata_port_freeze(ap); 1477c6fd2807SJeff Garzik else 1478c6fd2807SJeff Garzik ata_qc_complete(qc); 1479c6fd2807SJeff Garzik 1480c6fd2807SJeff Garzik if (ata_msg_warn(ap)) 1481c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1482c6fd2807SJeff Garzik "qc timeout (cmd 0x%x)\n", command); 1483c6fd2807SJeff Garzik } 1484c6fd2807SJeff Garzik 1485c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1486c6fd2807SJeff Garzik } 1487c6fd2807SJeff Garzik 1488c6fd2807SJeff Garzik /* do post_internal_cmd */ 1489c6fd2807SJeff Garzik if (ap->ops->post_internal_cmd) 1490c6fd2807SJeff Garzik ap->ops->post_internal_cmd(qc); 1491c6fd2807SJeff Garzik 1492a51d644aSTejun Heo /* perform minimal error analysis */ 1493a51d644aSTejun Heo if (qc->flags & ATA_QCFLAG_FAILED) { 1494a51d644aSTejun Heo if (qc->result_tf.command & (ATA_ERR | ATA_DF)) 1495a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1496a51d644aSTejun Heo 1497a51d644aSTejun Heo if (!qc->err_mask) 1498c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_OTHER; 1499a51d644aSTejun Heo 1500a51d644aSTejun Heo if (qc->err_mask & ~AC_ERR_OTHER) 1501a51d644aSTejun Heo qc->err_mask &= ~AC_ERR_OTHER; 1502c6fd2807SJeff Garzik 
} 1503c6fd2807SJeff Garzik 1504c6fd2807SJeff Garzik /* finish up */ 1505c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1506c6fd2807SJeff Garzik 1507c6fd2807SJeff Garzik *tf = qc->result_tf; 1508c6fd2807SJeff Garzik err_mask = qc->err_mask; 1509c6fd2807SJeff Garzik 1510c6fd2807SJeff Garzik ata_qc_free(qc); 15119af5c9c9STejun Heo link->active_tag = preempted_tag; 15129af5c9c9STejun Heo link->sactive = preempted_sactive; 1513c6fd2807SJeff Garzik ap->qc_active = preempted_qc_active; 1514c6fd2807SJeff Garzik 1515c6fd2807SJeff Garzik /* XXX - Some LLDDs (sata_mv) disable port on command failure. 1516c6fd2807SJeff Garzik * Until those drivers are fixed, we detect the condition 1517c6fd2807SJeff Garzik * here, fail the command with AC_ERR_SYSTEM and reenable the 1518c6fd2807SJeff Garzik * port. 1519c6fd2807SJeff Garzik * 1520c6fd2807SJeff Garzik * Note that this doesn't change any behavior as internal 1521c6fd2807SJeff Garzik * command failure results in disabling the device in the 1522c6fd2807SJeff Garzik * higher layer for LLDDs without new reset/EH callbacks. 1523c6fd2807SJeff Garzik * 1524c6fd2807SJeff Garzik * Kill the following code as soon as those drivers are fixed. 
1525c6fd2807SJeff Garzik */ 1526c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_DISABLED) { 1527c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1528c6fd2807SJeff Garzik ata_port_probe(ap); 1529c6fd2807SJeff Garzik } 1530c6fd2807SJeff Garzik 1531c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1532c6fd2807SJeff Garzik 1533c6fd2807SJeff Garzik return err_mask; 1534c6fd2807SJeff Garzik } 1535c6fd2807SJeff Garzik 1536c6fd2807SJeff Garzik /** 153733480a0eSTejun Heo * ata_exec_internal - execute libata internal command 15382432697bSTejun Heo * @dev: Device to which the command is sent 15392432697bSTejun Heo * @tf: Taskfile registers for the command and the result 15402432697bSTejun Heo * @cdb: CDB for packet command 15412432697bSTejun Heo * @dma_dir: Data tranfer direction of the command 15422432697bSTejun Heo * @buf: Data buffer of the command 15432432697bSTejun Heo * @buflen: Length of data buffer 15442432697bSTejun Heo * 15452432697bSTejun Heo * Wrapper around ata_exec_internal_sg() which takes simple 15462432697bSTejun Heo * buffer instead of sg list. 15472432697bSTejun Heo * 15482432697bSTejun Heo * LOCKING: 15492432697bSTejun Heo * None. Should be called with kernel context, might sleep. 
15502432697bSTejun Heo * 15512432697bSTejun Heo * RETURNS: 15522432697bSTejun Heo * Zero on success, AC_ERR_* mask on failure 15532432697bSTejun Heo */ 15542432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev, 15552432697bSTejun Heo struct ata_taskfile *tf, const u8 *cdb, 15562432697bSTejun Heo int dma_dir, void *buf, unsigned int buflen) 15572432697bSTejun Heo { 155833480a0eSTejun Heo struct scatterlist *psg = NULL, sg; 155933480a0eSTejun Heo unsigned int n_elem = 0; 15602432697bSTejun Heo 156133480a0eSTejun Heo if (dma_dir != DMA_NONE) { 156233480a0eSTejun Heo WARN_ON(!buf); 15632432697bSTejun Heo sg_init_one(&sg, buf, buflen); 156433480a0eSTejun Heo psg = &sg; 156533480a0eSTejun Heo n_elem++; 156633480a0eSTejun Heo } 15672432697bSTejun Heo 156833480a0eSTejun Heo return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem); 15692432697bSTejun Heo } 15702432697bSTejun Heo 15712432697bSTejun Heo /** 1572c6fd2807SJeff Garzik * ata_do_simple_cmd - execute simple internal command 1573c6fd2807SJeff Garzik * @dev: Device to which the command is sent 1574c6fd2807SJeff Garzik * @cmd: Opcode to execute 1575c6fd2807SJeff Garzik * 1576c6fd2807SJeff Garzik * Execute a 'simple' command, that only consists of the opcode 1577c6fd2807SJeff Garzik * 'cmd' itself, without filling any other registers 1578c6fd2807SJeff Garzik * 1579c6fd2807SJeff Garzik * LOCKING: 1580c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1581c6fd2807SJeff Garzik * 1582c6fd2807SJeff Garzik * RETURNS: 1583c6fd2807SJeff Garzik * Zero on success, AC_ERR_* mask on failure 1584c6fd2807SJeff Garzik */ 1585c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) 1586c6fd2807SJeff Garzik { 1587c6fd2807SJeff Garzik struct ata_taskfile tf; 1588c6fd2807SJeff Garzik 1589c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1590c6fd2807SJeff Garzik 1591c6fd2807SJeff Garzik tf.command = cmd; 1592c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_DEVICE; 1593c6fd2807SJeff Garzik tf.protocol = ATA_PROT_NODATA; 1594c6fd2807SJeff Garzik 1595c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 1596c6fd2807SJeff Garzik } 1597c6fd2807SJeff Garzik 1598c6fd2807SJeff Garzik /** 1599c6fd2807SJeff Garzik * ata_pio_need_iordy - check if iordy needed 1600c6fd2807SJeff Garzik * @adev: ATA device 1601c6fd2807SJeff Garzik * 1602c6fd2807SJeff Garzik * Check if the current speed of the device requires IORDY. Used 1603c6fd2807SJeff Garzik * by various controllers for chip configuration. 1604c6fd2807SJeff Garzik */ 1605c6fd2807SJeff Garzik 1606c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev) 1607c6fd2807SJeff Garzik { 1608432729f0SAlan Cox /* Controller doesn't support IORDY. 
Probably a pointless check 1609432729f0SAlan Cox as the caller should know this */ 16109af5c9c9STejun Heo if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 1611c6fd2807SJeff Garzik return 0; 1612432729f0SAlan Cox /* PIO3 and higher it is mandatory */ 1613432729f0SAlan Cox if (adev->pio_mode > XFER_PIO_2) 1614c6fd2807SJeff Garzik return 1; 1615432729f0SAlan Cox /* We turn it on when possible */ 1616432729f0SAlan Cox if (ata_id_has_iordy(adev->id)) 1617432729f0SAlan Cox return 1; 1618432729f0SAlan Cox return 0; 1619432729f0SAlan Cox } 1620c6fd2807SJeff Garzik 1621432729f0SAlan Cox /** 1622432729f0SAlan Cox * ata_pio_mask_no_iordy - Return the non IORDY mask 1623432729f0SAlan Cox * @adev: ATA device 1624432729f0SAlan Cox * 1625432729f0SAlan Cox * Compute the highest mode possible if we are not using iordy. Return 1626432729f0SAlan Cox * -1 if no iordy mode is available. 1627432729f0SAlan Cox */ 1628432729f0SAlan Cox 1629432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) 1630432729f0SAlan Cox { 1631c6fd2807SJeff Garzik /* If we have no drive specific rule, then PIO 2 is non IORDY */ 1632c6fd2807SJeff Garzik if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ 1633432729f0SAlan Cox u16 pio = adev->id[ATA_ID_EIDE_PIO]; 1634c6fd2807SJeff Garzik /* Is the speed faster than the drive allows non IORDY ? */ 1635c6fd2807SJeff Garzik if (pio) { 1636c6fd2807SJeff Garzik /* This is cycle times not frequency - watch the logic! 
*/ 1637c6fd2807SJeff Garzik if (pio > 240) /* PIO2 is 240nS per cycle */ 1638432729f0SAlan Cox return 3 << ATA_SHIFT_PIO; 1639432729f0SAlan Cox return 7 << ATA_SHIFT_PIO; 1640c6fd2807SJeff Garzik } 1641c6fd2807SJeff Garzik } 1642432729f0SAlan Cox return 3 << ATA_SHIFT_PIO; 1643c6fd2807SJeff Garzik } 1644c6fd2807SJeff Garzik 1645c6fd2807SJeff Garzik /** 1646c6fd2807SJeff Garzik * ata_dev_read_id - Read ID data from the specified device 1647c6fd2807SJeff Garzik * @dev: target device 1648c6fd2807SJeff Garzik * @p_class: pointer to class of the target device (may be changed) 1649bff04647STejun Heo * @flags: ATA_READID_* flags 1650c6fd2807SJeff Garzik * @id: buffer to read IDENTIFY data into 1651c6fd2807SJeff Garzik * 1652c6fd2807SJeff Garzik * Read ID data from the specified device. ATA_CMD_ID_ATA is 1653c6fd2807SJeff Garzik * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1654c6fd2807SJeff Garzik * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 1655c6fd2807SJeff Garzik * for pre-ATA4 drives. 1656c6fd2807SJeff Garzik * 165750a99018SAlan Cox * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 165850a99018SAlan Cox * now we abort if we hit that case. 165950a99018SAlan Cox * 1660c6fd2807SJeff Garzik * LOCKING: 1661c6fd2807SJeff Garzik * Kernel thread context (may sleep) 1662c6fd2807SJeff Garzik * 1663c6fd2807SJeff Garzik * RETURNS: 1664c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
1665c6fd2807SJeff Garzik */ 1666c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1667bff04647STejun Heo unsigned int flags, u16 *id) 1668c6fd2807SJeff Garzik { 16699af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1670c6fd2807SJeff Garzik unsigned int class = *p_class; 1671c6fd2807SJeff Garzik struct ata_taskfile tf; 1672c6fd2807SJeff Garzik unsigned int err_mask = 0; 1673c6fd2807SJeff Garzik const char *reason; 167454936f8bSTejun Heo int may_fallback = 1, tried_spinup = 0; 1675c6fd2807SJeff Garzik int rc; 1676c6fd2807SJeff Garzik 1677c6fd2807SJeff Garzik if (ata_msg_ctl(ap)) 167844877b4eSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); 1679c6fd2807SJeff Garzik 1680c6fd2807SJeff Garzik ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 1681c6fd2807SJeff Garzik retry: 1682c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1683c6fd2807SJeff Garzik 1684c6fd2807SJeff Garzik switch (class) { 1685c6fd2807SJeff Garzik case ATA_DEV_ATA: 1686c6fd2807SJeff Garzik tf.command = ATA_CMD_ID_ATA; 1687c6fd2807SJeff Garzik break; 1688c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1689c6fd2807SJeff Garzik tf.command = ATA_CMD_ID_ATAPI; 1690c6fd2807SJeff Garzik break; 1691c6fd2807SJeff Garzik default: 1692c6fd2807SJeff Garzik rc = -ENODEV; 1693c6fd2807SJeff Garzik reason = "unsupported class"; 1694c6fd2807SJeff Garzik goto err_out; 1695c6fd2807SJeff Garzik } 1696c6fd2807SJeff Garzik 1697c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 169881afe893STejun Heo 169981afe893STejun Heo /* Some devices choke if TF registers contain garbage. Make 170081afe893STejun Heo * sure those are properly initialized. 170181afe893STejun Heo */ 170281afe893STejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 170381afe893STejun Heo 170481afe893STejun Heo /* Device presence detection is unreliable on some 170581afe893STejun Heo * controllers. Always poll IDENTIFY if available. 
170681afe893STejun Heo */ 170781afe893STejun Heo tf.flags |= ATA_TFLAG_POLLING; 1708c6fd2807SJeff Garzik 1709c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1710c6fd2807SJeff Garzik id, sizeof(id[0]) * ATA_ID_WORDS); 1711c6fd2807SJeff Garzik if (err_mask) { 1712800b3996STejun Heo if (err_mask & AC_ERR_NODEV_HINT) { 171355a8e2c8STejun Heo DPRINTK("ata%u.%d: NODEV after polling detection\n", 171444877b4eSTejun Heo ap->print_id, dev->devno); 171555a8e2c8STejun Heo return -ENOENT; 171655a8e2c8STejun Heo } 171755a8e2c8STejun Heo 171854936f8bSTejun Heo /* Device or controller might have reported the wrong 171954936f8bSTejun Heo * device class. Give a shot at the other IDENTIFY if 172054936f8bSTejun Heo * the current one is aborted by the device. 172154936f8bSTejun Heo */ 172254936f8bSTejun Heo if (may_fallback && 172354936f8bSTejun Heo (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 172454936f8bSTejun Heo may_fallback = 0; 172554936f8bSTejun Heo 172654936f8bSTejun Heo if (class == ATA_DEV_ATA) 172754936f8bSTejun Heo class = ATA_DEV_ATAPI; 172854936f8bSTejun Heo else 172954936f8bSTejun Heo class = ATA_DEV_ATA; 173054936f8bSTejun Heo goto retry; 173154936f8bSTejun Heo } 173254936f8bSTejun Heo 1733c6fd2807SJeff Garzik rc = -EIO; 1734c6fd2807SJeff Garzik reason = "I/O error"; 1735c6fd2807SJeff Garzik goto err_out; 1736c6fd2807SJeff Garzik } 1737c6fd2807SJeff Garzik 173854936f8bSTejun Heo /* Falling back doesn't make sense if ID data was read 173954936f8bSTejun Heo * successfully at least once. 
174054936f8bSTejun Heo */ 174154936f8bSTejun Heo may_fallback = 0; 174254936f8bSTejun Heo 1743c6fd2807SJeff Garzik swap_buf_le16(id, ATA_ID_WORDS); 1744c6fd2807SJeff Garzik 1745c6fd2807SJeff Garzik /* sanity check */ 1746c6fd2807SJeff Garzik rc = -EINVAL; 17476070068bSAlan Cox reason = "device reports invalid type"; 17484a3381feSJeff Garzik 17494a3381feSJeff Garzik if (class == ATA_DEV_ATA) { 17504a3381feSJeff Garzik if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 17514a3381feSJeff Garzik goto err_out; 17524a3381feSJeff Garzik } else { 17534a3381feSJeff Garzik if (ata_id_is_ata(id)) 1754c6fd2807SJeff Garzik goto err_out; 1755c6fd2807SJeff Garzik } 1756c6fd2807SJeff Garzik 1757169439c2SMark Lord if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 1758169439c2SMark Lord tried_spinup = 1; 1759169439c2SMark Lord /* 1760169439c2SMark Lord * Drive powered-up in standby mode, and requires a specific 1761169439c2SMark Lord * SET_FEATURES spin-up subcommand before it will accept 1762169439c2SMark Lord * anything other than the original IDENTIFY command. 1763169439c2SMark Lord */ 1764169439c2SMark Lord ata_tf_init(dev, &tf); 1765169439c2SMark Lord tf.command = ATA_CMD_SET_FEATURES; 1766169439c2SMark Lord tf.feature = SETFEATURES_SPINUP; 1767169439c2SMark Lord tf.protocol = ATA_PROT_NODATA; 1768169439c2SMark Lord tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1769169439c2SMark Lord err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 1770fb0582f9SRyan Power if (err_mask && id[2] != 0x738c) { 1771169439c2SMark Lord rc = -EIO; 1772169439c2SMark Lord reason = "SPINUP failed"; 1773169439c2SMark Lord goto err_out; 1774169439c2SMark Lord } 1775169439c2SMark Lord /* 1776169439c2SMark Lord * If the drive initially returned incomplete IDENTIFY info, 1777169439c2SMark Lord * we now must reissue the IDENTIFY command. 
1778169439c2SMark Lord */ 1779169439c2SMark Lord if (id[2] == 0x37c8) 1780169439c2SMark Lord goto retry; 1781169439c2SMark Lord } 1782169439c2SMark Lord 1783bff04647STejun Heo if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { 1784c6fd2807SJeff Garzik /* 1785c6fd2807SJeff Garzik * The exact sequence expected by certain pre-ATA4 drives is: 1786c6fd2807SJeff Garzik * SRST RESET 178750a99018SAlan Cox * IDENTIFY (optional in early ATA) 178850a99018SAlan Cox * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 1789c6fd2807SJeff Garzik * anything else.. 1790c6fd2807SJeff Garzik * Some drives were very specific about that exact sequence. 179150a99018SAlan Cox * 179250a99018SAlan Cox * Note that ATA4 says lba is mandatory so the second check 179350a99018SAlan Cox * shoud never trigger. 1794c6fd2807SJeff Garzik */ 1795c6fd2807SJeff Garzik if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 1796c6fd2807SJeff Garzik err_mask = ata_dev_init_params(dev, id[3], id[6]); 1797c6fd2807SJeff Garzik if (err_mask) { 1798c6fd2807SJeff Garzik rc = -EIO; 1799c6fd2807SJeff Garzik reason = "INIT_DEV_PARAMS failed"; 1800c6fd2807SJeff Garzik goto err_out; 1801c6fd2807SJeff Garzik } 1802c6fd2807SJeff Garzik 1803c6fd2807SJeff Garzik /* current CHS translation info (id[53-58]) might be 1804c6fd2807SJeff Garzik * changed. reread the identify device info. 
1805c6fd2807SJeff Garzik */ 1806bff04647STejun Heo flags &= ~ATA_READID_POSTRESET; 1807c6fd2807SJeff Garzik goto retry; 1808c6fd2807SJeff Garzik } 1809c6fd2807SJeff Garzik } 1810c6fd2807SJeff Garzik 1811c6fd2807SJeff Garzik *p_class = class; 1812c6fd2807SJeff Garzik 1813c6fd2807SJeff Garzik return 0; 1814c6fd2807SJeff Garzik 1815c6fd2807SJeff Garzik err_out: 1816c6fd2807SJeff Garzik if (ata_msg_warn(ap)) 1817c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " 1818c6fd2807SJeff Garzik "(%s, err_mask=0x%x)\n", reason, err_mask); 1819c6fd2807SJeff Garzik return rc; 1820c6fd2807SJeff Garzik } 1821c6fd2807SJeff Garzik 1822c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev) 1823c6fd2807SJeff Garzik { 18249af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 18259af5c9c9STejun Heo return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 1826c6fd2807SJeff Garzik } 1827c6fd2807SJeff Garzik 1828c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev, 1829c6fd2807SJeff Garzik char *desc, size_t desc_sz) 1830c6fd2807SJeff Garzik { 18319af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1832c6fd2807SJeff Garzik int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 1833c6fd2807SJeff Garzik 1834c6fd2807SJeff Garzik if (!ata_id_has_ncq(dev->id)) { 1835c6fd2807SJeff Garzik desc[0] = '\0'; 1836c6fd2807SJeff Garzik return; 1837c6fd2807SJeff Garzik } 183875683fe7STejun Heo if (dev->horkage & ATA_HORKAGE_NONCQ) { 18396919a0a6SAlan Cox snprintf(desc, desc_sz, "NCQ (not used)"); 18406919a0a6SAlan Cox return; 18416919a0a6SAlan Cox } 1842c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_NCQ) { 1843cca3974eSJeff Garzik hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 1844c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_NCQ; 1845c6fd2807SJeff Garzik } 1846c6fd2807SJeff Garzik 1847c6fd2807SJeff Garzik if (hdepth >= ddepth) 1848c6fd2807SJeff Garzik snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth); 
1849c6fd2807SJeff Garzik else 1850c6fd2807SJeff Garzik snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); 1851c6fd2807SJeff Garzik } 1852c6fd2807SJeff Garzik 1853c6fd2807SJeff Garzik /** 1854c6fd2807SJeff Garzik * ata_dev_configure - Configure the specified ATA/ATAPI device 1855c6fd2807SJeff Garzik * @dev: Target device to configure 1856c6fd2807SJeff Garzik * 1857c6fd2807SJeff Garzik * Configure @dev according to @dev->id. Generic and low-level 1858c6fd2807SJeff Garzik * driver specific fixups are also applied. 1859c6fd2807SJeff Garzik * 1860c6fd2807SJeff Garzik * LOCKING: 1861c6fd2807SJeff Garzik * Kernel thread context (may sleep) 1862c6fd2807SJeff Garzik * 1863c6fd2807SJeff Garzik * RETURNS: 1864c6fd2807SJeff Garzik * 0 on success, -errno otherwise 1865c6fd2807SJeff Garzik */ 1866efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev) 1867c6fd2807SJeff Garzik { 18689af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 18699af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 18706746544cSTejun Heo int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 1871c6fd2807SJeff Garzik const u16 *id = dev->id; 1872c6fd2807SJeff Garzik unsigned int xfer_mask; 1873b352e57dSAlan Cox char revbuf[7]; /* XYZ-99\0 */ 18743f64f565SEric D. Mudama char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 18753f64f565SEric D. 
Mudama char modelbuf[ATA_ID_PROD_LEN+1]; 1876c6fd2807SJeff Garzik int rc; 1877c6fd2807SJeff Garzik 1878c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 187944877b4eSTejun Heo ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", 188044877b4eSTejun Heo __FUNCTION__); 1881c6fd2807SJeff Garzik return 0; 1882c6fd2807SJeff Garzik } 1883c6fd2807SJeff Garzik 1884c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 188544877b4eSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); 1886c6fd2807SJeff Garzik 188775683fe7STejun Heo /* set horkage */ 188875683fe7STejun Heo dev->horkage |= ata_dev_blacklisted(dev); 188975683fe7STejun Heo 18906746544cSTejun Heo /* let ACPI work its magic */ 18916746544cSTejun Heo rc = ata_acpi_on_devcfg(dev); 18926746544cSTejun Heo if (rc) 18936746544cSTejun Heo return rc; 189408573a86SKristen Carlson Accardi 189505027adcSTejun Heo /* massage HPA, do it early as it might change IDENTIFY data */ 189605027adcSTejun Heo rc = ata_hpa_resize(dev); 189705027adcSTejun Heo if (rc) 189805027adcSTejun Heo return rc; 189905027adcSTejun Heo 1900c6fd2807SJeff Garzik /* print device capabilities */ 1901c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 1902c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_DEBUG, 1903c6fd2807SJeff Garzik "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 1904c6fd2807SJeff Garzik "85:%04x 86:%04x 87:%04x 88:%04x\n", 1905c6fd2807SJeff Garzik __FUNCTION__, 1906c6fd2807SJeff Garzik id[49], id[82], id[83], id[84], 1907c6fd2807SJeff Garzik id[85], id[86], id[87], id[88]); 1908c6fd2807SJeff Garzik 1909c6fd2807SJeff Garzik /* initialize to-be-configured parameters */ 1910c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_CFG_MASK; 1911c6fd2807SJeff Garzik dev->max_sectors = 0; 1912c6fd2807SJeff Garzik dev->cdb_len = 0; 1913c6fd2807SJeff Garzik dev->n_sectors = 0; 1914c6fd2807SJeff Garzik dev->cylinders = 0; 1915c6fd2807SJeff Garzik dev->heads = 0; 1916c6fd2807SJeff Garzik dev->sectors = 0; 1917c6fd2807SJeff Garzik 
1918c6fd2807SJeff Garzik /* 1919c6fd2807SJeff Garzik * common ATA, ATAPI feature tests 1920c6fd2807SJeff Garzik */ 1921c6fd2807SJeff Garzik 1922c6fd2807SJeff Garzik /* find max transfer mode; for printk only */ 1923c6fd2807SJeff Garzik xfer_mask = ata_id_xfermask(id); 1924c6fd2807SJeff Garzik 1925c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 1926c6fd2807SJeff Garzik ata_dump_id(id); 1927c6fd2807SJeff Garzik 1928ef143d57SAlbert Lee /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 1929ef143d57SAlbert Lee ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 1930ef143d57SAlbert Lee sizeof(fwrevbuf)); 1931ef143d57SAlbert Lee 1932ef143d57SAlbert Lee ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 1933ef143d57SAlbert Lee sizeof(modelbuf)); 1934ef143d57SAlbert Lee 1935c6fd2807SJeff Garzik /* ATA-specific feature tests */ 1936c6fd2807SJeff Garzik if (dev->class == ATA_DEV_ATA) { 1937b352e57dSAlan Cox if (ata_id_is_cfa(id)) { 1938b352e57dSAlan Cox if (id[162] & 1) /* CPRM may make this media unusable */ 193944877b4eSTejun Heo ata_dev_printk(dev, KERN_WARNING, 194044877b4eSTejun Heo "supports DRM functions and may " 194144877b4eSTejun Heo "not be fully accessable.\n"); 1942b352e57dSAlan Cox snprintf(revbuf, 7, "CFA"); 1943b352e57dSAlan Cox } 1944b352e57dSAlan Cox else 1945b352e57dSAlan Cox snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 1946b352e57dSAlan Cox 1947c6fd2807SJeff Garzik dev->n_sectors = ata_id_n_sectors(id); 1948c6fd2807SJeff Garzik 19493f64f565SEric D. Mudama if (dev->id[59] & 0x100) 19503f64f565SEric D. Mudama dev->multi_count = dev->id[59] & 0xff; 19513f64f565SEric D. 
Mudama 1952c6fd2807SJeff Garzik if (ata_id_has_lba(id)) { 1953c6fd2807SJeff Garzik const char *lba_desc; 1954c6fd2807SJeff Garzik char ncq_desc[20]; 1955c6fd2807SJeff Garzik 1956c6fd2807SJeff Garzik lba_desc = "LBA"; 1957c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_LBA; 1958c6fd2807SJeff Garzik if (ata_id_has_lba48(id)) { 1959c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_LBA48; 1960c6fd2807SJeff Garzik lba_desc = "LBA48"; 19616fc49adbSTejun Heo 19626fc49adbSTejun Heo if (dev->n_sectors >= (1UL << 28) && 19636fc49adbSTejun Heo ata_id_has_flush_ext(id)) 19646fc49adbSTejun Heo dev->flags |= ATA_DFLAG_FLUSH_EXT; 1965c6fd2807SJeff Garzik } 1966c6fd2807SJeff Garzik 1967c6fd2807SJeff Garzik /* config NCQ */ 1968c6fd2807SJeff Garzik ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 1969c6fd2807SJeff Garzik 1970c6fd2807SJeff Garzik /* print device info to dmesg */ 19713f64f565SEric D. Mudama if (ata_msg_drv(ap) && print_info) { 19723f64f565SEric D. Mudama ata_dev_printk(dev, KERN_INFO, 19733f64f565SEric D. Mudama "%s: %s, %s, max %s\n", 19743f64f565SEric D. Mudama revbuf, modelbuf, fwrevbuf, 19753f64f565SEric D. Mudama ata_mode_string(xfer_mask)); 19763f64f565SEric D. Mudama ata_dev_printk(dev, KERN_INFO, 19773f64f565SEric D. Mudama "%Lu sectors, multi %u: %s %s\n", 1978c6fd2807SJeff Garzik (unsigned long long)dev->n_sectors, 19793f64f565SEric D. Mudama dev->multi_count, lba_desc, ncq_desc); 19803f64f565SEric D. Mudama } 1981c6fd2807SJeff Garzik } else { 1982c6fd2807SJeff Garzik /* CHS */ 1983c6fd2807SJeff Garzik 1984c6fd2807SJeff Garzik /* Default translation */ 1985c6fd2807SJeff Garzik dev->cylinders = id[1]; 1986c6fd2807SJeff Garzik dev->heads = id[3]; 1987c6fd2807SJeff Garzik dev->sectors = id[6]; 1988c6fd2807SJeff Garzik 1989c6fd2807SJeff Garzik if (ata_id_current_chs_valid(id)) { 1990c6fd2807SJeff Garzik /* Current CHS translation is valid. 
*/ 1991c6fd2807SJeff Garzik dev->cylinders = id[54]; 1992c6fd2807SJeff Garzik dev->heads = id[55]; 1993c6fd2807SJeff Garzik dev->sectors = id[56]; 1994c6fd2807SJeff Garzik } 1995c6fd2807SJeff Garzik 1996c6fd2807SJeff Garzik /* print device info to dmesg */ 19973f64f565SEric D. Mudama if (ata_msg_drv(ap) && print_info) { 1998c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, 19993f64f565SEric D. Mudama "%s: %s, %s, max %s\n", 20003f64f565SEric D. Mudama revbuf, modelbuf, fwrevbuf, 20013f64f565SEric D. Mudama ata_mode_string(xfer_mask)); 20023f64f565SEric D. Mudama ata_dev_printk(dev, KERN_INFO, 20033f64f565SEric D. Mudama "%Lu sectors, multi %u, CHS %u/%u/%u\n", 20043f64f565SEric D. Mudama (unsigned long long)dev->n_sectors, 20053f64f565SEric D. Mudama dev->multi_count, dev->cylinders, 20063f64f565SEric D. Mudama dev->heads, dev->sectors); 20073f64f565SEric D. Mudama } 2008c6fd2807SJeff Garzik } 2009c6fd2807SJeff Garzik 2010c6fd2807SJeff Garzik dev->cdb_len = 16; 2011c6fd2807SJeff Garzik } 2012c6fd2807SJeff Garzik 2013c6fd2807SJeff Garzik /* ATAPI-specific feature tests */ 2014c6fd2807SJeff Garzik else if (dev->class == ATA_DEV_ATAPI) { 2015c6fd2807SJeff Garzik char *cdb_intr_string = ""; 2016c6fd2807SJeff Garzik 2017c6fd2807SJeff Garzik rc = atapi_cdb_len(id); 2018c6fd2807SJeff Garzik if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2019c6fd2807SJeff Garzik if (ata_msg_warn(ap)) 2020c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 2021c6fd2807SJeff Garzik "unsupported CDB len\n"); 2022c6fd2807SJeff Garzik rc = -EINVAL; 2023c6fd2807SJeff Garzik goto err_out_nosup; 2024c6fd2807SJeff Garzik } 2025c6fd2807SJeff Garzik dev->cdb_len = (unsigned int) rc; 2026c6fd2807SJeff Garzik 20279f45cbd3SKristen Carlson Accardi /* 20289f45cbd3SKristen Carlson Accardi * check to see if this ATAPI device supports 20299f45cbd3SKristen Carlson Accardi * Asynchronous Notification 20309f45cbd3SKristen Carlson Accardi */ 20319f45cbd3SKristen Carlson Accardi if ((ap->flags & ATA_FLAG_AN) && 
ata_id_has_AN(id)) { 20329f45cbd3SKristen Carlson Accardi int err; 20339f45cbd3SKristen Carlson Accardi /* issue SET feature command to turn this on */ 20349f45cbd3SKristen Carlson Accardi err = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE); 20359f45cbd3SKristen Carlson Accardi if (err) 20369f45cbd3SKristen Carlson Accardi ata_dev_printk(dev, KERN_ERR, 20379f45cbd3SKristen Carlson Accardi "unable to set AN, err %x\n", 20389f45cbd3SKristen Carlson Accardi err); 20399f45cbd3SKristen Carlson Accardi else 20409f45cbd3SKristen Carlson Accardi dev->flags |= ATA_DFLAG_AN; 20419f45cbd3SKristen Carlson Accardi } 20429f45cbd3SKristen Carlson Accardi 2043c6fd2807SJeff Garzik if (ata_id_cdb_intr(dev->id)) { 2044c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_CDB_INTR; 2045c6fd2807SJeff Garzik cdb_intr_string = ", CDB intr"; 2046c6fd2807SJeff Garzik } 2047c6fd2807SJeff Garzik 2048c6fd2807SJeff Garzik /* print device info to dmesg */ 2049c6fd2807SJeff Garzik if (ata_msg_drv(ap) && print_info) 2050ef143d57SAlbert Lee ata_dev_printk(dev, KERN_INFO, 2051ef143d57SAlbert Lee "ATAPI: %s, %s, max %s%s\n", 2052ef143d57SAlbert Lee modelbuf, fwrevbuf, 2053c6fd2807SJeff Garzik ata_mode_string(xfer_mask), 2054c6fd2807SJeff Garzik cdb_intr_string); 2055c6fd2807SJeff Garzik } 2056c6fd2807SJeff Garzik 2057914ed354STejun Heo /* determine max_sectors */ 2058914ed354STejun Heo dev->max_sectors = ATA_MAX_SECTORS; 2059914ed354STejun Heo if (dev->flags & ATA_DFLAG_LBA48) 2060914ed354STejun Heo dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2061914ed354STejun Heo 206293590859SAlan Cox if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 206393590859SAlan Cox /* Let the user know. We don't want to disallow opens for 206493590859SAlan Cox rescue purposes, or in case the vendor is just a blithering 206593590859SAlan Cox idiot */ 206693590859SAlan Cox if (print_info) { 206793590859SAlan Cox ata_dev_printk(dev, KERN_WARNING, 206893590859SAlan Cox "Drive reports diagnostics failure. 
This may indicate a drive\n"); 206993590859SAlan Cox ata_dev_printk(dev, KERN_WARNING, 207093590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n"); 207193590859SAlan Cox } 207293590859SAlan Cox } 207393590859SAlan Cox 2074c6fd2807SJeff Garzik /* limit bridge transfers to udma5, 200 sectors */ 2075c6fd2807SJeff Garzik if (ata_dev_knobble(dev)) { 2076c6fd2807SJeff Garzik if (ata_msg_drv(ap) && print_info) 2077c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, 2078c6fd2807SJeff Garzik "applying bridge limits\n"); 2079c6fd2807SJeff Garzik dev->udma_mask &= ATA_UDMA5; 2080c6fd2807SJeff Garzik dev->max_sectors = ATA_MAX_SECTORS; 2081c6fd2807SJeff Garzik } 2082c6fd2807SJeff Garzik 208375683fe7STejun Heo if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 208403ec52deSTejun Heo dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 208503ec52deSTejun Heo dev->max_sectors); 208618d6e9d5SAlbert Lee 2087c6fd2807SJeff Garzik if (ap->ops->dev_config) 2088cd0d3bbcSAlan ap->ops->dev_config(dev); 2089c6fd2807SJeff Garzik 2090c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 2091c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", 2092c6fd2807SJeff Garzik __FUNCTION__, ata_chk_status(ap)); 2093c6fd2807SJeff Garzik return 0; 2094c6fd2807SJeff Garzik 2095c6fd2807SJeff Garzik err_out_nosup: 2096c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 2097c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_DEBUG, 2098c6fd2807SJeff Garzik "%s: EXIT, err\n", __FUNCTION__); 2099c6fd2807SJeff Garzik return rc; 2100c6fd2807SJeff Garzik } 2101c6fd2807SJeff Garzik 2102c6fd2807SJeff Garzik /** 21032e41e8e6SAlan Cox * ata_cable_40wire - return 40 wire cable type 2104be0d18dfSAlan Cox * @ap: port 2105be0d18dfSAlan Cox * 21062e41e8e6SAlan Cox * Helper method for drivers which want to hardwire 40 wire cable 2107be0d18dfSAlan Cox * detection. 
 */

int ata_cable_40wire(struct ata_port *ap)
{
        /* Constant result: cable side is fixed by the driver, not probed. */
        return ATA_CBL_PATA40;
}

/**
 *      ata_cable_80wire - return 80 wire cable type
 *      @ap: port
 *
 *      Helper method for drivers which want to hardwire 80 wire cable
 *      detection.
 *
 *      RETURNS:
 *      ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
        return ATA_CBL_PATA80;
}

/**
 *      ata_cable_unknown - return unknown PATA cable.
 *      @ap: port
 *
 *      Helper method for drivers which have no PATA cable detection.
 *
 *      RETURNS:
 *      ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
        return ATA_CBL_PATA_UNK;
}

/**
 *      ata_cable_sata - return SATA cable type
 *      @ap: port
 *
 *      Helper method for drivers which have SATA cables.
 *
 *      RETURNS:
 *      ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
        return ATA_CBL_SATA;
}

/**
 *      ata_bus_probe - Reset and probe ATA bus
 *      @ap: Bus to probe
 *
 *      Master ATA bus probing function.
 Initiates a hardware-dependent
 *      bus reset, then attempts to identify any devices found on
 *      the bus.
 *
 *      LOCKING:
 *      PCI/etc. bus probe sem.
 *
 *      RETURNS:
 *      Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
        unsigned int classes[ATA_MAX_DEVICES];
        int tries[ATA_MAX_DEVICES];
        int rc;
        struct ata_device *dev;

        ata_port_probe(ap);

        /* Each device gets a retry budget; it is decremented on failure
         * in the fail: path below and the whole probe is restarted. */
        ata_link_for_each_dev(dev, &ap->link)
                tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
        /* reset and determine device classes */
        ap->ops->phy_reset(ap);

        /* Latch the classes reported by the reset, then mark each device
         * UNKNOWN until identify confirms it. */
        ata_link_for_each_dev(dev, &ap->link) {
                if (!(ap->flags & ATA_FLAG_DISABLED) &&
                    dev->class != ATA_DEV_UNKNOWN)
                        classes[dev->devno] = dev->class;
                else
                        classes[dev->devno] = ATA_DEV_NONE;

                dev->class = ATA_DEV_UNKNOWN;
        }

        ata_port_probe(ap);

        /* after the reset the device state is PIO 0 and the controller
           state is undefined.  Record the mode */

        ata_link_for_each_dev(dev, &ap->link)
                dev->pio_mode = XFER_PIO_0;

        /* read IDENTIFY page and configure devices. We have to do the identify
           specific sequence bass-ackwards so that PDIAG- is released by
           the slave device */

        ata_link_for_each_dev(dev, &ap->link) {
                if (tries[dev->devno])
                        dev->class = classes[dev->devno];

                if (!ata_dev_enabled(dev))
                        continue;

                rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
                                     dev->id);
                if (rc)
                        goto fail;
        }

        /* Now ask for the cable type as PDIAG- should have been released */
        if (ap->ops->cable_detect)
                ap->cbl = ap->ops->cable_detect(ap);

        /* We may have SATA bridge glue hiding here irrespective of the
           reported cable types and sensed types */
        ata_link_for_each_dev(dev, &ap->link) {
                if (!ata_dev_enabled(dev))
                        continue;
                /* SATA drives indicate we have a bridge. We don't know which
                   end of the link the bridge is which is a problem */
                if (ata_id_is_sata(dev->id))
                        ap->cbl = ATA_CBL_SATA;
        }

        /* After the identify sequence we can now set up the devices. We do
           this in the normal order so that the user doesn't get confused */

        ata_link_for_each_dev(dev, &ap->link) {
                if (!ata_dev_enabled(dev))
                        continue;

                /* PRINTINFO makes ata_dev_configure() log device details
                 * during this initial probe only. */
                ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
                rc = ata_dev_configure(dev);
                ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
                if (rc)
                        goto fail;
        }

        /* configure transfer mode */
        rc = ata_set_mode(&ap->link, &dev);
        if (rc)
                goto fail;

        ata_link_for_each_dev(dev, &ap->link)
                if (ata_dev_enabled(dev))
                        return 0;

        /* no device present, disable port */
        ata_port_disable(ap);
        return -ENODEV;

 fail:
        /* On entry @dev points at the device that failed. */
        tries[dev->devno]--;

        switch (rc) {
        case -EINVAL:
                /* eeek, something went very wrong, give up */
                tries[dev->devno] = 0;
                break;

        case -ENODEV:
                /* give it just one more chance */
                tries[dev->devno] = min(tries[dev->devno], 1);
                /* fallthrough */
        case -EIO:
                if (tries[dev->devno] == 1) {
                        /* This is the last chance, better to slow
                         * down than lose it.
                         */
                        sata_down_spd_limit(&ap->link);
                        ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
                }
        }

        if (!tries[dev->devno])
                ata_dev_disable(dev);

        goto retry;
}

/**
 *      ata_port_probe - Mark port as enabled
 *      @ap: Port for which we indicate enablement
 *
 *      Modify @ap data structure such that the system
 *      thinks that the entire port is enabled.
 *
 *      LOCKING: host lock, or some other form of
 *      serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
        ap->flags &= ~ATA_FLAG_DISABLED;
}

/**
 *      sata_print_link_status - Print SATA link status
 *      @link: SATA link to printk link status about
 *
 *      This function prints link speed and status of a SATA link.
 *
 *      LOCKING:
 *      None.
2312c6fd2807SJeff Garzik */ 2313936fd732STejun Heo void sata_print_link_status(struct ata_link *link) 2314c6fd2807SJeff Garzik { 2315c6fd2807SJeff Garzik u32 sstatus, scontrol, tmp; 2316c6fd2807SJeff Garzik 2317936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2318c6fd2807SJeff Garzik return; 2319936fd732STejun Heo sata_scr_read(link, SCR_CONTROL, &scontrol); 2320c6fd2807SJeff Garzik 2321936fd732STejun Heo if (ata_link_online(link)) { 2322c6fd2807SJeff Garzik tmp = (sstatus >> 4) & 0xf; 2323936fd732STejun Heo ata_link_printk(link, KERN_INFO, 2324c6fd2807SJeff Garzik "SATA link up %s (SStatus %X SControl %X)\n", 2325c6fd2807SJeff Garzik sata_spd_string(tmp), sstatus, scontrol); 2326c6fd2807SJeff Garzik } else { 2327936fd732STejun Heo ata_link_printk(link, KERN_INFO, 2328c6fd2807SJeff Garzik "SATA link down (SStatus %X SControl %X)\n", 2329c6fd2807SJeff Garzik sstatus, scontrol); 2330c6fd2807SJeff Garzik } 2331c6fd2807SJeff Garzik } 2332c6fd2807SJeff Garzik 2333c6fd2807SJeff Garzik /** 2334c6fd2807SJeff Garzik * __sata_phy_reset - Wake/reset a low-level SATA PHY 2335c6fd2807SJeff Garzik * @ap: SATA port associated with target SATA PHY. 2336c6fd2807SJeff Garzik * 2337c6fd2807SJeff Garzik * This function issues commands to standard SATA Sxxx 2338c6fd2807SJeff Garzik * PHY registers, to wake up the phy (and device), and 2339c6fd2807SJeff Garzik * clear any reset condition. 2340c6fd2807SJeff Garzik * 2341c6fd2807SJeff Garzik * LOCKING: 2342c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
        struct ata_link *link = &ap->link;
        /* Give the PHY up to 5 seconds to come ready. */
        unsigned long timeout = jiffies + (HZ * 5);
        u32 sstatus;

        if (ap->flags & ATA_FLAG_SATA_RESET) {
                /* issue phy wake/reset */
                sata_scr_write_flush(link, SCR_CONTROL, 0x301);
                /* Couldn't find anything in SATA I/II specs, but
                 * AHCI-1.1 10.4.2 says at least 1 ms. */
                mdelay(1);
        }
        /* phy wake/clear reset */
        sata_scr_write_flush(link, SCR_CONTROL, 0x300);

        /* wait for phy to become ready, if necessary; DET == 1 means
         * device presence detected but no communication established yet */
        do {
                msleep(200);
                sata_scr_read(link, SCR_STATUS, &sstatus);
                if ((sstatus & 0xf) != 1)
                        break;
        } while (time_before(jiffies, timeout));

        /* print link status */
        sata_print_link_status(link);

        /* TODO: phy layer with polling, timeouts, etc. */
        if (!ata_link_offline(link))
                ata_port_probe(ap);
        else
                ata_port_disable(ap);

        if (ap->flags & ATA_FLAG_DISABLED)
                return;

        /* Device behind the link must drop BSY before we can talk to it. */
        if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
                ata_port_disable(ap);
                return;
        }

        ap->cbl = ATA_CBL_SATA;
}

/**
 *      sata_phy_reset - Reset SATA bus.
 *      @ap: SATA port associated with target SATA PHY.
 *
 *      This function resets the SATA bus, and then probes
 *      the bus for devices.
 *
 *      LOCKING:
 *      PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
        __sata_phy_reset(ap);
        /* __sata_phy_reset() disables the port when the link is down
         * or the device stays busy; skip the bus reset in that case. */
        if (ap->flags & ATA_FLAG_DISABLED)
                return;
        ata_bus_reset(ap);
}

/**
 *      ata_dev_pair - return other device on cable
 *      @adev: device
 *
 *      Obtain the other device on the same cable, or if none is
 *      present NULL is returned
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
        struct ata_link *link = adev->link;
        /* devno is 0 or 1, so "1 - devno" selects the sibling slot */
        struct ata_device *pair = &link->device[1 - adev->devno];
        if (!ata_dev_enabled(pair))
                return NULL;
        return pair;
}

/**
 *      ata_port_disable - Disable port.
 *      @ap: Port to be disabled.
 *
 *      Modify @ap data structure such that the system
 *      thinks that the entire port is disabled, and should
 *      never attempt to probe or communicate with devices
 *      on this port.
 *
 *      LOCKING: host lock, or some other form of
 *      serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
        ap->link.device[0].class = ATA_DEV_NONE;
        ap->link.device[1].class = ATA_DEV_NONE;
        ap->flags |= ATA_FLAG_DISABLED;
}

/**
 *      sata_down_spd_limit - adjust SATA spd limit downward
 *      @link: Link to adjust SATA spd limit for
 *
 *      Adjust SATA spd limit of @link downward.  Note that this
 *      function only adjusts the limit.  The change must be applied
 *      using sata_set_spd().
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      RETURNS:
 *      0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
        u32 sstatus, spd, mask;
        int rc, highbit;

        if (!sata_scr_valid(link))
                return -EOPNOTSUPP;

        /* If SCR can be read, use it to determine the current SPD.
         * If not, use cached value in link->sata_spd.
         */
        rc = sata_scr_read(link, SCR_STATUS, &sstatus);
        if (rc == 0)
                spd = (sstatus >> 4) & 0xf;
        else
                spd = link->sata_spd;

        mask = link->sata_spd_limit;
        /* mask <= 1 means we are already limited to the lowest speed */
        if (mask <= 1)
                return -EINVAL;

        /* unconditionally mask off the highest bit */
        highbit = fls(mask) - 1;
        mask &= ~(1 << highbit);

        /* Mask off all speeds higher than or equal to the current
         * one.  Force 1.5Gbps if current SPD is not available.
         */
        if (spd > 1)
                mask &= (1 << (spd - 1)) - 1;
        else
                mask &= 1;

        /* were we already at the bottom? */
        if (!mask)
                return -EINVAL;

        link->sata_spd_limit = mask;

        ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
                        sata_spd_string(fls(mask)));

        return 0;
}

/* Write the spd limit from link->sata_spd_limit into the SPD field of
 * *scontrol and report whether that changed the value. */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
        u32 spd, limit;

        if (link->sata_spd_limit == UINT_MAX)
                limit = 0;
        else
                limit = fls(link->sata_spd_limit);

        spd = (*scontrol >> 4) & 0xf;
        *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

        return spd != limit;
}

/**
 *      sata_set_spd_needed - is SATA spd configuration needed
 *      @link: Link in question
 *
 *      Test whether the spd limit in SControl matches
 *      @link->sata_spd_limit.  This function is used to determine
 *      whether hardreset is necessary to apply SATA spd
 *      configuration.
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      RETURNS:
 *      1 if SATA spd configuration is needed, 0 otherwise.
2533c6fd2807SJeff Garzik */ 2534936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link) 2535c6fd2807SJeff Garzik { 2536c6fd2807SJeff Garzik u32 scontrol; 2537c6fd2807SJeff Garzik 2538936fd732STejun Heo if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2539c6fd2807SJeff Garzik return 0; 2540c6fd2807SJeff Garzik 2541936fd732STejun Heo return __sata_set_spd_needed(link, &scontrol); 2542c6fd2807SJeff Garzik } 2543c6fd2807SJeff Garzik 2544c6fd2807SJeff Garzik /** 2545c6fd2807SJeff Garzik * sata_set_spd - set SATA spd according to spd limit 2546936fd732STejun Heo * @link: Link to set SATA spd for 2547c6fd2807SJeff Garzik * 2548936fd732STejun Heo * Set SATA spd of @link according to sata_spd_limit. 2549c6fd2807SJeff Garzik * 2550c6fd2807SJeff Garzik * LOCKING: 2551c6fd2807SJeff Garzik * Inherited from caller. 2552c6fd2807SJeff Garzik * 2553c6fd2807SJeff Garzik * RETURNS: 2554c6fd2807SJeff Garzik * 0 if spd doesn't need to be changed, 1 if spd has been 2555c6fd2807SJeff Garzik * changed. Negative errno if SCR registers are inaccessible. 
 */
int sata_set_spd(struct ata_link *link)
{
        u32 scontrol;
        int rc;

        if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                return rc;

        if (!__sata_set_spd_needed(link, &scontrol))
                return 0;

        if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
                return rc;

        return 1;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Column order matches struct ata_timing:
 * { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma }
 */

static const struct ata_timing ata_timing[] = {

        { XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
        { XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
        { XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
        { XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

        { XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
        { XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
        { XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
        { XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
        { XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*      { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

        { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
        { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
        { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

        { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
        { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
        { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

        { XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
        { XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
        { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
        { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

        { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
        { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
        { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*      { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

        { 0xFF }        /* sentinel terminating the table */
};

/* Round @v up to a whole number of @unit; EZ() additionally maps 0 to 0
 * so "no requirement" entries stay zero after quantisation. */
#define ENOUGH(v,unit)          (((v)-1)/(unit)+1)
#define EZ(v,unit)              ((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in *t to bus clock counts using cycle time
 * T (PIO/DMA) and UT (UDMA), storing the result in *q.  t == q is OK. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
        q->setup   = EZ(t->setup   * 1000,  T);
        q->act8b   = EZ(t->act8b   * 1000,  T);
        q->rec8b   = EZ(t->rec8b   * 1000,  T);
        q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
        q->active  = EZ(t->active  * 1000,  T);
        q->recover = EZ(t->recover * 1000,  T);
        q->cycle   = EZ(t->cycle   * 1000,  T);
        q->udma    = EZ(t->udma    * 1000, UT);
}

/* Merge timings @a and @b into @m, taking the slower (larger) value of
 * each field selected by the ATA_TIMING_* bits in @what.  @m may alias
 * @a or @b. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
                      struct ata_timing *m, unsigned int what)
{
        if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
        if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
        if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
        if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
        if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
        if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
        if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
        if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
/* Look up the ata_timing table entry for transfer mode @speed; returns
 * NULL if the mode is not in the table (0xFF is the sentinel). */
static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
        const struct ata_timing *t;

        for (t = ata_timing; t->mode != speed; t++)
                if (t->mode == 0xFF)
                        return NULL;
        return t;
}

/* Compute quantized bus-clock timings for @adev at transfer mode @speed.
 * @t receives the result; @T and @UT are the bus cycle times in ns for
 * PIO/DMA and UDMA respectively.  Returns 0 on success, -EINVAL if
 * @speed has no table entry. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                       struct ata_timing *t, int T, int UT)
{
        const struct ata_timing *s;
        struct ata_timing p;

        /*
         * Find the mode.
         */

        if (!(s = ata_timing_find_mode(speed)))
                return -EINVAL;

        memcpy(t, s, sizeof(*s));

        /*
         * If the drive is an EIDE drive, it can tell us it needs extended
         * PIO/MW_DMA cycle timing.
         */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
                memset(&p, 0, sizeof(p));
                if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
                        if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
                                            else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
                } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
                        p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
                }
                ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
        }

        /*
         * Convert the timing to bus clock counts.
         */

        ata_timing_quantize(t, t, T, UT);

        /*
         * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
         * S.M.A.R.T * and some other commands. We have to ensure that the
         * DMA cycle timing is slower/equal than the fastest PIO timing.
         * (Recursive call computes the current PIO mode's timing.)
         */

        if (speed > XFER_PIO_6) {
                ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
                ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
        }

        /*
         * Lengthen active & recovery time so that cycle time is correct.
         */

        if (t->act8b + t->rec8b < t->cyc8b) {
                t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
                t->rec8b = t->cyc8b - t->act8b;
        }

        if (t->active + t->recover < t->cycle) {
                t->active += (t->cycle - (t->active + t->recover)) / 2;
                t->recover = t->cycle - t->active;
        }

        /* In a few cases quantisation may produce enough errors to
           leave t->cycle too low for the sum of active and recovery
           if so we must correct this */
        if (t->active + t->recover > t->cycle)
                t->cycle = t->active + t->recover;

        return 0;
}

/**
 *      ata_down_xfermask_limit - adjust dev xfer masks downward
 *      @dev: Device to adjust xfer masks
 *      @sel: ATA_DNXFER_* selector
 *
 *      Adjust xfer masks of @dev downward.  Note that this function
 *      does not apply the change.  Invoking ata_set_mode() afterwards
 *      will apply the limit.
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      RETURNS:
 *      0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
        char buf[32];
        unsigned int orig_mask, xfer_mask;
        unsigned int pio_mask, mwdma_mask, udma_mask;
        int quiet, highbit;

        /* ATA_DNXFER_QUIET is a modifier bit, not a selector value */
        quiet = !!(sel & ATA_DNXFER_QUIET);
        sel &= ~ATA_DNXFER_QUIET;

        xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
                                                  dev->mwdma_mask,
                                                  dev->udma_mask);
        ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

        switch (sel) {
        case ATA_DNXFER_PIO:
                /* drop the fastest PIO mode */
                highbit = fls(pio_mask) - 1;
                pio_mask &= ~(1 << highbit);
                break;

        case ATA_DNXFER_DMA:
                /* drop the fastest DMA mode: UDMA first, then MWDMA */
                if (udma_mask) {
                        highbit = fls(udma_mask) - 1;
                        udma_mask &= ~(1 << highbit);
                        if (!udma_mask)
                                return -ENOENT;
                } else if (mwdma_mask) {
                        highbit = fls(mwdma_mask) - 1;
                        mwdma_mask &= ~(1 << highbit);
                        if (!mwdma_mask)
                                return -ENOENT;
                }
                break;

        case ATA_DNXFER_40C:
                udma_mask &= ATA_UDMA_MASK_40C;
                break;

        case ATA_DNXFER_FORCE_PIO0:
                pio_mask &= 1;
                /* fallthrough */
        case ATA_DNXFER_FORCE_PIO:
                mwdma_mask = 0;
                udma_mask = 0;
                break;

        default:
                BUG();
        }

        xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

        /* must keep at least one PIO mode; bail if nothing changed */
        if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
                return -ENOENT;

        if (!quiet) {
                if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
                        snprintf(buf, sizeof(buf), "%s:%s",
                                 ata_mode_string(xfer_mask),
                                 ata_mode_string(xfer_mask & ATA_MASK_PIO));
                else
                        snprintf(buf, sizeof(buf), "%s",
                                 ata_mode_string(xfer_mask));

                ata_dev_printk(dev, KERN_WARNING,
                               "limiting speed to %s\n", buf);
        }

        ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
                            &dev->udma_mask);

        return 0;
}

/* Issue SET FEATURES - XFER MODE for @dev's chosen xfer_mode and
 * revalidate the device afterwards.  Returns 0 on success, -EIO if the
 * device rejected the command, or the revalidation error. */
static int ata_dev_set_mode(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        unsigned int err_mask;
        int rc;

        dev->flags &= ~ATA_DFLAG_PIO;
        if (dev->xfer_shift == ATA_SHIFT_PIO)
                dev->flags |= ATA_DFLAG_PIO;

        err_mask = ata_dev_set_xfermode(dev);
        /* Old CFA may refuse this command, which is just fine */
        if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
                err_mask &= ~AC_ERR_DEV;
        /* Some very old devices and some bad newer ones fail any kind of
           SET_XFERMODE request but support PIO0-2 timings and no IORDY */
        if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
                        dev->pio_mode <= XFER_PIO_2)
                err_mask &= ~AC_ERR_DEV;
        if (err_mask) {
                ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
                               "(err_mask=0x%x)\n", err_mask);
                return -EIO;
        }

        /* POST_SETMODE lets revalidation tolerate post-mode-change quirks */
        ehc->i.flags |= ATA_EHI_POST_SETMODE;
        rc = ata_dev_revalidate(dev, 0);
        ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
        if (rc)
                return rc;

        DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
                dev->xfer_shift, (int)dev->xfer_mode);

        ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
                       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
        return 0;
}

/**
 *      ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *      @link: link on which timings will be programmed
 *      @r_failed_dev: out parameter for failed device
 *
 *      Standard implementation of the function used to tune and set
 *      ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *	If ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the best PIO and DMA modes out of the packed masks */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		/* don't update suspended devices' xfer mode
		 * NOTE(review): only ata_dev_enabled() is tested here;
		 * no explicit suspend check is visible in this chunk -
		 * confirm whether the comment is stale.
		 */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(link, r_failed_dev);
	return ata_do_set_mode(link, r_failed_dev);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* first phase: poll quietly until @tmout_pat; a status of 0xff
	 * means an empty/absent port and stops the polling
	 */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* second phase: keep polling until the overall @tmout expires */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}

/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* 0xff with the link offline means no device present */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once after 5s, but only if at least 3s remain */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}

/*
 *	ata_bus_post_reset - wait for devices to become ready after reset
 *	@ap: port that was just reset
 *	@devmask: mask of devices (bit 0/1) found by ata_devchk
 *	@deadline: deadline jiffies for the operation
 *
 *	Waits for each present device's BSY to clear.  -ENODEV from a
 *	single device is remembered in @ret but does not abort waiting
 *	for the other device; any other error aborts immediately.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}

/*
 *	ata_bus_softreset - issue ATA SRST to the bus
 *	@ap: port to reset
 *	@devmask: mask of devices found by ata_devchk
 *	@deadline: deadline jiffies for the operation
 *
 *	Pulses SRST in the device control register, waits the
 *	traditional post-SRST delay, then hands off to
 *	ata_bus_post_reset().  -ENODEV if the bus floats to 0xFF.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA reset path assumes device 0 exists */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; -ENODEV alone is tolerated here because
	 * classification below can still find nothing attached
	 */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}

/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of @deadline and params[2] */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field is debounced */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 is only accepted once the deadline
			 * has passed (stuck-at-1 controllers)
			 */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}

/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
3363c6fd2807SJeff Garzik */ 3364936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params, 3365d4b2bab4STejun Heo unsigned long deadline) 3366c6fd2807SJeff Garzik { 3367c6fd2807SJeff Garzik u32 scontrol; 3368c6fd2807SJeff Garzik int rc; 3369c6fd2807SJeff Garzik 3370936fd732STejun Heo if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3371c6fd2807SJeff Garzik return rc; 3372c6fd2807SJeff Garzik 3373c6fd2807SJeff Garzik scontrol = (scontrol & 0x0f0) | 0x300; 3374c6fd2807SJeff Garzik 3375936fd732STejun Heo if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3376c6fd2807SJeff Garzik return rc; 3377c6fd2807SJeff Garzik 3378c6fd2807SJeff Garzik /* Some PHYs react badly if SStatus is pounded immediately 3379c6fd2807SJeff Garzik * after resuming. Delay 200ms before debouncing. 3380c6fd2807SJeff Garzik */ 3381c6fd2807SJeff Garzik msleep(200); 3382c6fd2807SJeff Garzik 3383936fd732STejun Heo return sata_link_debounce(link, params, deadline); 3384c6fd2807SJeff Garzik } 3385c6fd2807SJeff Garzik 3386c6fd2807SJeff Garzik /** 3387c6fd2807SJeff Garzik * ata_std_prereset - prepare for reset 3388cc0680a5STejun Heo * @link: ATA link to be reset 3389d4b2bab4STejun Heo * @deadline: deadline jiffies for the operation 3390c6fd2807SJeff Garzik * 3391cc0680a5STejun Heo * @link is about to be reset. Initialize it. Failure from 3392b8cffc6aSTejun Heo * prereset makes libata abort whole reset sequence and give up 3393b8cffc6aSTejun Heo * that port, so prereset should be best-effort. It does its 3394b8cffc6aSTejun Heo * best to prepare for reset sequence but if things go wrong, it 3395b8cffc6aSTejun Heo * should just whine, not fail. 3396c6fd2807SJeff Garzik * 3397c6fd2807SJeff Garzik * LOCKING: 3398c6fd2807SJeff Garzik * Kernel thread context (may sleep) 3399c6fd2807SJeff Garzik * 3400c6fd2807SJeff Garzik * RETURNS: 3401c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: controllers that need hardreset to
	 * resume the link escalate the requested action here
	 */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* -ENODEV is tolerated; anything else forces hardreset */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}

/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
3456c6fd2807SJeff Garzik */ 3457cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes, 3458d4b2bab4STejun Heo unsigned long deadline) 3459c6fd2807SJeff Garzik { 3460cc0680a5STejun Heo struct ata_port *ap = link->ap; 3461c6fd2807SJeff Garzik unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 3462d4b2bab4STejun Heo unsigned int devmask = 0; 3463d4b2bab4STejun Heo int rc; 3464c6fd2807SJeff Garzik u8 err; 3465c6fd2807SJeff Garzik 3466c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3467c6fd2807SJeff Garzik 3468936fd732STejun Heo if (ata_link_offline(link)) { 3469c6fd2807SJeff Garzik classes[0] = ATA_DEV_NONE; 3470c6fd2807SJeff Garzik goto out; 3471c6fd2807SJeff Garzik } 3472c6fd2807SJeff Garzik 3473c6fd2807SJeff Garzik /* determine if device 0/1 are present */ 3474c6fd2807SJeff Garzik if (ata_devchk(ap, 0)) 3475c6fd2807SJeff Garzik devmask |= (1 << 0); 3476c6fd2807SJeff Garzik if (slave_possible && ata_devchk(ap, 1)) 3477c6fd2807SJeff Garzik devmask |= (1 << 1); 3478c6fd2807SJeff Garzik 3479c6fd2807SJeff Garzik /* select device 0 again */ 3480c6fd2807SJeff Garzik ap->ops->dev_select(ap, 0); 3481c6fd2807SJeff Garzik 3482c6fd2807SJeff Garzik /* issue bus reset */ 3483c6fd2807SJeff Garzik DPRINTK("about to softreset, devmask=%x\n", devmask); 3484d4b2bab4STejun Heo rc = ata_bus_softreset(ap, devmask, deadline); 34859b89391cSTejun Heo /* if link is occupied, -ENODEV too is an error */ 3486936fd732STejun Heo if (rc && (rc != -ENODEV || sata_scr_valid(link))) { 3487cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); 3488d4b2bab4STejun Heo return rc; 3489c6fd2807SJeff Garzik } 3490c6fd2807SJeff Garzik 3491c6fd2807SJeff Garzik /* determine by signature whether we have ATA or ATAPI devices */ 34923f19859eSTejun Heo classes[0] = ata_dev_try_classify(&link->device[0], 34933f19859eSTejun Heo devmask & (1 << 0), &err); 3494c6fd2807SJeff Garzik if (slave_possible && err != 0x81) 34953f19859eSTejun Heo classes[1] = 
ata_dev_try_classify(&link->device[1], 34963f19859eSTejun Heo devmask & (1 << 1), &err); 3497c6fd2807SJeff Garzik 3498c6fd2807SJeff Garzik out: 3499c6fd2807SJeff Garzik DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 3500c6fd2807SJeff Garzik return 0; 3501c6fd2807SJeff Garzik } 3502c6fd2807SJeff Garzik 3503c6fd2807SJeff Garzik /** 3504cc0680a5STejun Heo * sata_link_hardreset - reset link via SATA phy reset 3505cc0680a5STejun Heo * @link: link to reset 3506b6103f6dSTejun Heo * @timing: timing parameters { interval, duratinon, timeout } in msec 3507d4b2bab4STejun Heo * @deadline: deadline jiffies for the operation 3508c6fd2807SJeff Garzik * 3509cc0680a5STejun Heo * SATA phy-reset @link using DET bits of SControl register. 3510c6fd2807SJeff Garzik * 3511c6fd2807SJeff Garzik * LOCKING: 3512c6fd2807SJeff Garzik * Kernel thread context (may sleep) 3513c6fd2807SJeff Garzik * 3514c6fd2807SJeff Garzik * RETURNS: 3515c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 3516c6fd2807SJeff Garzik */ 3517cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3518d4b2bab4STejun Heo unsigned long deadline) 3519c6fd2807SJeff Garzik { 3520c6fd2807SJeff Garzik u32 scontrol; 3521c6fd2807SJeff Garzik int rc; 3522c6fd2807SJeff Garzik 3523c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3524c6fd2807SJeff Garzik 3525936fd732STejun Heo if (sata_set_spd_needed(link)) { 3526c6fd2807SJeff Garzik /* SATA spec says nothing about how to reconfigure 3527c6fd2807SJeff Garzik * spd. To be on the safe side, turn off phy during 3528c6fd2807SJeff Garzik * reconfiguration. This works for at least ICH7 AHCI 3529c6fd2807SJeff Garzik * and Sil3124. 
3530c6fd2807SJeff Garzik */ 3531936fd732STejun Heo if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3532b6103f6dSTejun Heo goto out; 3533c6fd2807SJeff Garzik 3534cea0d336SJeff Garzik scontrol = (scontrol & 0x0f0) | 0x304; 3535c6fd2807SJeff Garzik 3536936fd732STejun Heo if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3537b6103f6dSTejun Heo goto out; 3538c6fd2807SJeff Garzik 3539936fd732STejun Heo sata_set_spd(link); 3540c6fd2807SJeff Garzik } 3541c6fd2807SJeff Garzik 3542c6fd2807SJeff Garzik /* issue phy wake/reset */ 3543936fd732STejun Heo if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3544b6103f6dSTejun Heo goto out; 3545c6fd2807SJeff Garzik 3546c6fd2807SJeff Garzik scontrol = (scontrol & 0x0f0) | 0x301; 3547c6fd2807SJeff Garzik 3548936fd732STejun Heo if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3549b6103f6dSTejun Heo goto out; 3550c6fd2807SJeff Garzik 3551c6fd2807SJeff Garzik /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3552c6fd2807SJeff Garzik * 10.4.2 says at least 1 ms. 3553c6fd2807SJeff Garzik */ 3554c6fd2807SJeff Garzik msleep(1); 3555c6fd2807SJeff Garzik 3556936fd732STejun Heo /* bring link back */ 3557936fd732STejun Heo rc = sata_link_resume(link, timing, deadline); 3558b6103f6dSTejun Heo out: 3559b6103f6dSTejun Heo DPRINTK("EXIT, rc=%d\n", rc); 3560b6103f6dSTejun Heo return rc; 3561b6103f6dSTejun Heo } 3562b6103f6dSTejun Heo 3563b6103f6dSTejun Heo /** 3564b6103f6dSTejun Heo * sata_std_hardreset - reset host port via SATA phy reset 3565cc0680a5STejun Heo * @link: link to reset 3566b6103f6dSTejun Heo * @class: resulting class of attached device 3567d4b2bab4STejun Heo * @deadline: deadline jiffies for the operation 3568b6103f6dSTejun Heo * 3569b6103f6dSTejun Heo * SATA phy-reset host port using DET bits of SControl register, 3570b6103f6dSTejun Heo * wait for !BSY and classify the attached device. 
3571b6103f6dSTejun Heo * 3572b6103f6dSTejun Heo * LOCKING: 3573b6103f6dSTejun Heo * Kernel thread context (may sleep) 3574b6103f6dSTejun Heo * 3575b6103f6dSTejun Heo * RETURNS: 3576b6103f6dSTejun Heo * 0 on success, -errno otherwise. 3577b6103f6dSTejun Heo */ 3578cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3579d4b2bab4STejun Heo unsigned long deadline) 3580b6103f6dSTejun Heo { 3581cc0680a5STejun Heo struct ata_port *ap = link->ap; 3582936fd732STejun Heo const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3583b6103f6dSTejun Heo int rc; 3584b6103f6dSTejun Heo 3585b6103f6dSTejun Heo DPRINTK("ENTER\n"); 3586b6103f6dSTejun Heo 3587b6103f6dSTejun Heo /* do hardreset */ 3588cc0680a5STejun Heo rc = sata_link_hardreset(link, timing, deadline); 3589b6103f6dSTejun Heo if (rc) { 3590cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 3591b6103f6dSTejun Heo "COMRESET failed (errno=%d)\n", rc); 3592b6103f6dSTejun Heo return rc; 3593b6103f6dSTejun Heo } 3594c6fd2807SJeff Garzik 3595c6fd2807SJeff Garzik /* TODO: phy layer with polling, timeouts, etc. 
*/ 3596936fd732STejun Heo if (ata_link_offline(link)) { 3597c6fd2807SJeff Garzik *class = ATA_DEV_NONE; 3598c6fd2807SJeff Garzik DPRINTK("EXIT, link offline\n"); 3599c6fd2807SJeff Garzik return 0; 3600c6fd2807SJeff Garzik } 3601c6fd2807SJeff Garzik 360234fee227STejun Heo /* wait a while before checking status, see SRST for more info */ 360334fee227STejun Heo msleep(150); 360434fee227STejun Heo 3605d4b2bab4STejun Heo rc = ata_wait_ready(ap, deadline); 36069b89391cSTejun Heo /* link occupied, -ENODEV too is an error */ 36079b89391cSTejun Heo if (rc) { 3608cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 3609d4b2bab4STejun Heo "COMRESET failed (errno=%d)\n", rc); 3610d4b2bab4STejun Heo return rc; 3611c6fd2807SJeff Garzik } 3612c6fd2807SJeff Garzik 3613c6fd2807SJeff Garzik ap->ops->dev_select(ap, 0); /* probably unnecessary */ 3614c6fd2807SJeff Garzik 36153f19859eSTejun Heo *class = ata_dev_try_classify(link->device, 1, NULL); 3616c6fd2807SJeff Garzik 3617c6fd2807SJeff Garzik DPRINTK("EXIT, class=%u\n", *class); 3618c6fd2807SJeff Garzik return 0; 3619c6fd2807SJeff Garzik } 3620c6fd2807SJeff Garzik 3621c6fd2807SJeff Garzik /** 3622c6fd2807SJeff Garzik * ata_std_postreset - standard postreset callback 3623cc0680a5STejun Heo * @link: the target ata_link 3624c6fd2807SJeff Garzik * @classes: classes of attached devices 3625c6fd2807SJeff Garzik * 3626c6fd2807SJeff Garzik * This function is invoked after a successful reset. Note that 3627c6fd2807SJeff Garzik * the device might have been reset more than once using 3628c6fd2807SJeff Garzik * different reset methods before postreset is invoked. 
3629c6fd2807SJeff Garzik * 3630c6fd2807SJeff Garzik * LOCKING: 3631c6fd2807SJeff Garzik * Kernel thread context (may sleep) 3632c6fd2807SJeff Garzik */ 3633cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3634c6fd2807SJeff Garzik { 3635cc0680a5STejun Heo struct ata_port *ap = link->ap; 3636c6fd2807SJeff Garzik u32 serror; 3637c6fd2807SJeff Garzik 3638c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3639c6fd2807SJeff Garzik 3640c6fd2807SJeff Garzik /* print link status */ 3641936fd732STejun Heo sata_print_link_status(link); 3642c6fd2807SJeff Garzik 3643c6fd2807SJeff Garzik /* clear SError */ 3644936fd732STejun Heo if (sata_scr_read(link, SCR_ERROR, &serror) == 0) 3645936fd732STejun Heo sata_scr_write(link, SCR_ERROR, serror); 3646c6fd2807SJeff Garzik 3647c6fd2807SJeff Garzik /* is double-select really necessary? */ 3648c6fd2807SJeff Garzik if (classes[0] != ATA_DEV_NONE) 3649c6fd2807SJeff Garzik ap->ops->dev_select(ap, 1); 3650c6fd2807SJeff Garzik if (classes[1] != ATA_DEV_NONE) 3651c6fd2807SJeff Garzik ap->ops->dev_select(ap, 0); 3652c6fd2807SJeff Garzik 3653c6fd2807SJeff Garzik /* bail out if no device is present */ 3654c6fd2807SJeff Garzik if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { 3655c6fd2807SJeff Garzik DPRINTK("EXIT, no device\n"); 3656c6fd2807SJeff Garzik return; 3657c6fd2807SJeff Garzik } 3658c6fd2807SJeff Garzik 3659c6fd2807SJeff Garzik /* set up device control */ 36600d5ff566STejun Heo if (ap->ioaddr.ctl_addr) 36610d5ff566STejun Heo iowrite8(ap->ctl, ap->ioaddr.ctl_addr); 3662c6fd2807SJeff Garzik 3663c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 3664c6fd2807SJeff Garzik } 3665c6fd2807SJeff Garzik 3666c6fd2807SJeff Garzik /** 3667c6fd2807SJeff Garzik * ata_dev_same_device - Determine whether new ID matches configured device 3668c6fd2807SJeff Garzik * @dev: device to compare against 3669c6fd2807SJeff Garzik * @new_class: class of the new device 3670c6fd2807SJeff Garzik * @new_id: IDENTIFY page of the new device 
3671c6fd2807SJeff Garzik * 3672c6fd2807SJeff Garzik * Compare @new_class and @new_id against @dev and determine 3673c6fd2807SJeff Garzik * whether @dev is the device indicated by @new_class and 3674c6fd2807SJeff Garzik * @new_id. 3675c6fd2807SJeff Garzik * 3676c6fd2807SJeff Garzik * LOCKING: 3677c6fd2807SJeff Garzik * None. 3678c6fd2807SJeff Garzik * 3679c6fd2807SJeff Garzik * RETURNS: 3680c6fd2807SJeff Garzik * 1 if @dev matches @new_class and @new_id, 0 otherwise. 3681c6fd2807SJeff Garzik */ 3682c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3683c6fd2807SJeff Garzik const u16 *new_id) 3684c6fd2807SJeff Garzik { 3685c6fd2807SJeff Garzik const u16 *old_id = dev->id; 3686a0cf733bSTejun Heo unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3687a0cf733bSTejun Heo unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3688c6fd2807SJeff Garzik 3689c6fd2807SJeff Garzik if (dev->class != new_class) { 3690c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", 3691c6fd2807SJeff Garzik dev->class, new_class); 3692c6fd2807SJeff Garzik return 0; 3693c6fd2807SJeff Garzik } 3694c6fd2807SJeff Garzik 3695a0cf733bSTejun Heo ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3696a0cf733bSTejun Heo ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3697a0cf733bSTejun Heo ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3698a0cf733bSTejun Heo ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3699c6fd2807SJeff Garzik 3700c6fd2807SJeff Garzik if (strcmp(model[0], model[1])) { 3701c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, "model number mismatch " 3702c6fd2807SJeff Garzik "'%s' != '%s'\n", model[0], model[1]); 3703c6fd2807SJeff Garzik return 0; 3704c6fd2807SJeff Garzik } 3705c6fd2807SJeff Garzik 3706c6fd2807SJeff Garzik if (strcmp(serial[0], serial[1])) { 3707c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, "serial number mismatch " 
3708c6fd2807SJeff Garzik "'%s' != '%s'\n", serial[0], serial[1]); 3709c6fd2807SJeff Garzik return 0; 3710c6fd2807SJeff Garzik } 3711c6fd2807SJeff Garzik 3712c6fd2807SJeff Garzik return 1; 3713c6fd2807SJeff Garzik } 3714c6fd2807SJeff Garzik 3715c6fd2807SJeff Garzik /** 3716fe30911bSTejun Heo * ata_dev_reread_id - Re-read IDENTIFY data 37173fae450cSHenrik Kretzschmar * @dev: target ATA device 3718bff04647STejun Heo * @readid_flags: read ID flags 3719c6fd2807SJeff Garzik * 3720c6fd2807SJeff Garzik * Re-read IDENTIFY page and make sure @dev is still attached to 3721c6fd2807SJeff Garzik * the port. 3722c6fd2807SJeff Garzik * 3723c6fd2807SJeff Garzik * LOCKING: 3724c6fd2807SJeff Garzik * Kernel thread context (may sleep) 3725c6fd2807SJeff Garzik * 3726c6fd2807SJeff Garzik * RETURNS: 3727c6fd2807SJeff Garzik * 0 on success, negative errno otherwise 3728c6fd2807SJeff Garzik */ 3729fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3730c6fd2807SJeff Garzik { 3731c6fd2807SJeff Garzik unsigned int class = dev->class; 37329af5c9c9STejun Heo u16 *id = (void *)dev->link->ap->sector_buf; 3733c6fd2807SJeff Garzik int rc; 3734c6fd2807SJeff Garzik 3735c6fd2807SJeff Garzik /* read ID data */ 3736bff04647STejun Heo rc = ata_dev_read_id(dev, &class, readid_flags, id); 3737c6fd2807SJeff Garzik if (rc) 3738fe30911bSTejun Heo return rc; 3739c6fd2807SJeff Garzik 3740c6fd2807SJeff Garzik /* is the device still there? 
*/ 3741fe30911bSTejun Heo if (!ata_dev_same_device(dev, class, id)) 3742fe30911bSTejun Heo return -ENODEV; 3743c6fd2807SJeff Garzik 3744c6fd2807SJeff Garzik memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3745fe30911bSTejun Heo return 0; 3746fe30911bSTejun Heo } 3747fe30911bSTejun Heo 3748fe30911bSTejun Heo /** 3749fe30911bSTejun Heo * ata_dev_revalidate - Revalidate ATA device 3750fe30911bSTejun Heo * @dev: device to revalidate 3751fe30911bSTejun Heo * @readid_flags: read ID flags 3752fe30911bSTejun Heo * 3753fe30911bSTejun Heo * Re-read IDENTIFY page, make sure @dev is still attached to the 3754fe30911bSTejun Heo * port and reconfigure it according to the new IDENTIFY page. 3755fe30911bSTejun Heo * 3756fe30911bSTejun Heo * LOCKING: 3757fe30911bSTejun Heo * Kernel thread context (may sleep) 3758fe30911bSTejun Heo * 3759fe30911bSTejun Heo * RETURNS: 3760fe30911bSTejun Heo * 0 on success, negative errno otherwise 3761fe30911bSTejun Heo */ 3762fe30911bSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags) 3763fe30911bSTejun Heo { 37646ddcd3b0STejun Heo u64 n_sectors = dev->n_sectors; 3765fe30911bSTejun Heo int rc; 3766fe30911bSTejun Heo 3767fe30911bSTejun Heo if (!ata_dev_enabled(dev)) 3768fe30911bSTejun Heo return -ENODEV; 3769fe30911bSTejun Heo 3770fe30911bSTejun Heo /* re-read ID */ 3771fe30911bSTejun Heo rc = ata_dev_reread_id(dev, readid_flags); 3772fe30911bSTejun Heo if (rc) 3773fe30911bSTejun Heo goto fail; 3774c6fd2807SJeff Garzik 3775c6fd2807SJeff Garzik /* configure device according to the new ID */ 3776efdaedc4STejun Heo rc = ata_dev_configure(dev); 37776ddcd3b0STejun Heo if (rc) 37786ddcd3b0STejun Heo goto fail; 37796ddcd3b0STejun Heo 37806ddcd3b0STejun Heo /* verify n_sectors hasn't changed */ 3781b54eebd6STejun Heo if (dev->class == ATA_DEV_ATA && n_sectors && 3782b54eebd6STejun Heo dev->n_sectors != n_sectors) { 37836ddcd3b0STejun Heo ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " 37846ddcd3b0STejun Heo "%llu != 
%llu\n", 37856ddcd3b0STejun Heo (unsigned long long)n_sectors, 37866ddcd3b0STejun Heo (unsigned long long)dev->n_sectors); 37878270bec4STejun Heo 37888270bec4STejun Heo /* restore original n_sectors */ 37898270bec4STejun Heo dev->n_sectors = n_sectors; 37908270bec4STejun Heo 37916ddcd3b0STejun Heo rc = -ENODEV; 37926ddcd3b0STejun Heo goto fail; 37936ddcd3b0STejun Heo } 37946ddcd3b0STejun Heo 3795c6fd2807SJeff Garzik return 0; 3796c6fd2807SJeff Garzik 3797c6fd2807SJeff Garzik fail: 3798c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); 3799c6fd2807SJeff Garzik return rc; 3800c6fd2807SJeff Garzik } 3801c6fd2807SJeff Garzik 38026919a0a6SAlan Cox struct ata_blacklist_entry { 38036919a0a6SAlan Cox const char *model_num; 38046919a0a6SAlan Cox const char *model_rev; 38056919a0a6SAlan Cox unsigned long horkage; 38066919a0a6SAlan Cox }; 38076919a0a6SAlan Cox 38086919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = { 38096919a0a6SAlan Cox /* Devices with DMA related problems under Linux */ 38106919a0a6SAlan Cox { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 38116919a0a6SAlan Cox { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 38126919a0a6SAlan Cox { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 38136919a0a6SAlan Cox { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 38146919a0a6SAlan Cox { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 38156919a0a6SAlan Cox { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 38166919a0a6SAlan Cox { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 38176919a0a6SAlan Cox { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 38186919a0a6SAlan Cox { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 38196919a0a6SAlan Cox { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, 38206919a0a6SAlan Cox { "CRD-8482B", NULL, ATA_HORKAGE_NODMA }, 38216919a0a6SAlan Cox { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 38226919a0a6SAlan Cox { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 38236919a0a6SAlan Cox { "SanDisk SDP3B-64", NULL, 
ATA_HORKAGE_NODMA }, 38246919a0a6SAlan Cox { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 38256919a0a6SAlan Cox { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 38266919a0a6SAlan Cox { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, 38276919a0a6SAlan Cox { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA }, 38286919a0a6SAlan Cox { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 38296919a0a6SAlan Cox { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 38306919a0a6SAlan Cox { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 38316919a0a6SAlan Cox { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 38326919a0a6SAlan Cox { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 38336919a0a6SAlan Cox { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 38346919a0a6SAlan Cox { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 38356919a0a6SAlan Cox { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 38366919a0a6SAlan Cox { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 38376919a0a6SAlan Cox { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 38386919a0a6SAlan Cox { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, 383939f19886SDave Jones { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 38405acd50f6STejun Heo { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ 384139ce7128STejun Heo { "IOMEGA ZIP 250 ATAPI Floppy", 384239ce7128STejun Heo NULL, ATA_HORKAGE_NODMA }, 38436919a0a6SAlan Cox 384418d6e9d5SAlbert Lee /* Weird ATAPI devices */ 384540a1d531STejun Heo { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 384618d6e9d5SAlbert Lee 38476919a0a6SAlan Cox /* Devices we expect to fail diagnostics */ 38486919a0a6SAlan Cox 38496919a0a6SAlan Cox /* Devices where NCQ should be avoided */ 38506919a0a6SAlan Cox /* NCQ is slow */ 38516919a0a6SAlan Cox { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 385209125ea6STejun Heo /* http://thread.gmane.org/gmane.linux.ide/14907 */ 385309125ea6STejun Heo { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 38547acfaf30SPaul 
Rolland /* NCQ is broken */ 3855539cc7c7SJeff Garzik { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 38560e3dbc01SAlan Cox { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 38572f8d90abSPrarit Bhargava { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI", 38582f8d90abSPrarit Bhargava ATA_HORKAGE_NONCQ }, 3859539cc7c7SJeff Garzik 386036e337d0SRobert Hancock /* Blacklist entries taken from Silicon Image 3124/3132 386136e337d0SRobert Hancock Windows driver .inf file - also several Linux problem reports */ 386236e337d0SRobert Hancock { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 386336e337d0SRobert Hancock { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 386436e337d0SRobert Hancock { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 3865bd9c5a39STejun Heo /* Drives which do spurious command completion */ 3866bd9c5a39STejun Heo { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, }, 38672f8fcebbSTejun Heo { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, 3868e14cbfa6STejun Heo { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, 38692f8fcebbSTejun Heo { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 3870a520f261STejun Heo { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, }, 38713fb6589cSTejun Heo { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, }, 38720e3dbc01SAlan Cox { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, }, 38735d6aca8dSTejun Heo { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, 38746919a0a6SAlan Cox 387516c55b03STejun Heo /* devices which puke on READ_NATIVE_MAX */ 387616c55b03STejun Heo { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 387716c55b03STejun Heo { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 387816c55b03STejun Heo { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 387916c55b03STejun Heo { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 38806919a0a6SAlan Cox 38816919a0a6SAlan Cox /* End Marker */ 38826919a0a6SAlan Cox { } 3883c6fd2807SJeff 
Garzik }; 3884c6fd2807SJeff Garzik 3885539cc7c7SJeff Garzik int strn_pattern_cmp(const char *patt, const char *name, int wildchar) 3886539cc7c7SJeff Garzik { 3887539cc7c7SJeff Garzik const char *p; 3888539cc7c7SJeff Garzik int len; 3889539cc7c7SJeff Garzik 3890539cc7c7SJeff Garzik /* 3891539cc7c7SJeff Garzik * check for trailing wildcard: *\0 3892539cc7c7SJeff Garzik */ 3893539cc7c7SJeff Garzik p = strchr(patt, wildchar); 3894539cc7c7SJeff Garzik if (p && ((*(p + 1)) == 0)) 3895539cc7c7SJeff Garzik len = p - patt; 3896539cc7c7SJeff Garzik else 3897539cc7c7SJeff Garzik len = strlen(name); 3898539cc7c7SJeff Garzik 3899539cc7c7SJeff Garzik return strncmp(patt, name, len); 3900539cc7c7SJeff Garzik } 3901539cc7c7SJeff Garzik 390275683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 3903c6fd2807SJeff Garzik { 39048bfa79fcSTejun Heo unsigned char model_num[ATA_ID_PROD_LEN + 1]; 39058bfa79fcSTejun Heo unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 39066919a0a6SAlan Cox const struct ata_blacklist_entry *ad = ata_device_blacklist; 3907c6fd2807SJeff Garzik 39088bfa79fcSTejun Heo ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 39098bfa79fcSTejun Heo ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 3910c6fd2807SJeff Garzik 39116919a0a6SAlan Cox while (ad->model_num) { 3912539cc7c7SJeff Garzik if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { 39136919a0a6SAlan Cox if (ad->model_rev == NULL) 39146919a0a6SAlan Cox return ad->horkage; 3915539cc7c7SJeff Garzik if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) 39166919a0a6SAlan Cox return ad->horkage; 3917c6fd2807SJeff Garzik } 39186919a0a6SAlan Cox ad++; 3919c6fd2807SJeff Garzik } 3920c6fd2807SJeff Garzik return 0; 3921c6fd2807SJeff Garzik } 3922c6fd2807SJeff Garzik 39236919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev) 39246919a0a6SAlan Cox { 39256919a0a6SAlan Cox /* We don't support polling DMA. 
39266919a0a6SAlan Cox * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 39276919a0a6SAlan Cox * if the LLDD handles only interrupts in the HSM_ST_LAST state. 39286919a0a6SAlan Cox */ 39299af5c9c9STejun Heo if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 39306919a0a6SAlan Cox (dev->flags & ATA_DFLAG_CDB_INTR)) 39316919a0a6SAlan Cox return 1; 393275683fe7STejun Heo return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 39336919a0a6SAlan Cox } 39346919a0a6SAlan Cox 3935c6fd2807SJeff Garzik /** 3936c6fd2807SJeff Garzik * ata_dev_xfermask - Compute supported xfermask of the given device 3937c6fd2807SJeff Garzik * @dev: Device to compute xfermask for 3938c6fd2807SJeff Garzik * 3939c6fd2807SJeff Garzik * Compute supported xfermask of @dev and store it in 3940c6fd2807SJeff Garzik * dev->*_mask. This function is responsible for applying all 3941c6fd2807SJeff Garzik * known limits including host controller limits, device 3942c6fd2807SJeff Garzik * blacklist, etc... 3943c6fd2807SJeff Garzik * 3944c6fd2807SJeff Garzik * LOCKING: 3945c6fd2807SJeff Garzik * None. 
3946c6fd2807SJeff Garzik */ 3947c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev) 3948c6fd2807SJeff Garzik { 39499af5c9c9STejun Heo struct ata_link *link = dev->link; 39509af5c9c9STejun Heo struct ata_port *ap = link->ap; 3951cca3974eSJeff Garzik struct ata_host *host = ap->host; 3952c6fd2807SJeff Garzik unsigned long xfer_mask; 3953c6fd2807SJeff Garzik 3954c6fd2807SJeff Garzik /* controller modes available */ 3955c6fd2807SJeff Garzik xfer_mask = ata_pack_xfermask(ap->pio_mask, 3956c6fd2807SJeff Garzik ap->mwdma_mask, ap->udma_mask); 3957c6fd2807SJeff Garzik 39588343f889SRobert Hancock /* drive modes available */ 3959c6fd2807SJeff Garzik xfer_mask &= ata_pack_xfermask(dev->pio_mask, 3960c6fd2807SJeff Garzik dev->mwdma_mask, dev->udma_mask); 3961c6fd2807SJeff Garzik xfer_mask &= ata_id_xfermask(dev->id); 3962c6fd2807SJeff Garzik 3963b352e57dSAlan Cox /* 3964b352e57dSAlan Cox * CFA Advanced TrueIDE timings are not allowed on a shared 3965b352e57dSAlan Cox * cable 3966b352e57dSAlan Cox */ 3967b352e57dSAlan Cox if (ata_dev_pair(dev)) { 3968b352e57dSAlan Cox /* No PIO5 or PIO6 */ 3969b352e57dSAlan Cox xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 3970b352e57dSAlan Cox /* No MWDMA3 or MWDMA 4 */ 3971b352e57dSAlan Cox xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 3972b352e57dSAlan Cox } 3973b352e57dSAlan Cox 3974c6fd2807SJeff Garzik if (ata_dma_blacklisted(dev)) { 3975c6fd2807SJeff Garzik xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3976c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 3977c6fd2807SJeff Garzik "device is on DMA blacklist, disabling DMA\n"); 3978c6fd2807SJeff Garzik } 3979c6fd2807SJeff Garzik 398014d66ab7SPetr Vandrovec if ((host->flags & ATA_HOST_SIMPLEX) && 398114d66ab7SPetr Vandrovec host->simplex_claimed && host->simplex_claimed != ap) { 3982c6fd2807SJeff Garzik xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3983c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 3984c6fd2807SJeff Garzik 
"other device, disabling DMA\n"); 3985c6fd2807SJeff Garzik } 3986c6fd2807SJeff Garzik 3987e424675fSJeff Garzik if (ap->flags & ATA_FLAG_NO_IORDY) 3988e424675fSJeff Garzik xfer_mask &= ata_pio_mask_no_iordy(dev); 3989e424675fSJeff Garzik 3990c6fd2807SJeff Garzik if (ap->ops->mode_filter) 3991a76b62caSAlan Cox xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 3992c6fd2807SJeff Garzik 39938343f889SRobert Hancock /* Apply cable rule here. Don't apply it early because when 39948343f889SRobert Hancock * we handle hot plug the cable type can itself change. 39958343f889SRobert Hancock * Check this last so that we know if the transfer rate was 39968343f889SRobert Hancock * solely limited by the cable. 39978343f889SRobert Hancock * Unknown or 80 wire cables reported host side are checked 39988343f889SRobert Hancock * drive side as well. Cases where we know a 40wire cable 39998343f889SRobert Hancock * is used safely for 80 are not checked here. 40008343f889SRobert Hancock */ 40018343f889SRobert Hancock if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 40028343f889SRobert Hancock /* UDMA/44 or higher would be available */ 40038343f889SRobert Hancock if((ap->cbl == ATA_CBL_PATA40) || 40048343f889SRobert Hancock (ata_drive_40wire(dev->id) && 40058343f889SRobert Hancock (ap->cbl == ATA_CBL_PATA_UNK || 40068343f889SRobert Hancock ap->cbl == ATA_CBL_PATA80))) { 40078343f889SRobert Hancock ata_dev_printk(dev, KERN_WARNING, 40088343f889SRobert Hancock "limited to UDMA/33 due to 40-wire cable\n"); 40098343f889SRobert Hancock xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 40108343f889SRobert Hancock } 40118343f889SRobert Hancock 4012c6fd2807SJeff Garzik ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4013c6fd2807SJeff Garzik &dev->mwdma_mask, &dev->udma_mask); 4014c6fd2807SJeff Garzik } 4015c6fd2807SJeff Garzik 4016c6fd2807SJeff Garzik /** 4017c6fd2807SJeff Garzik * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4018c6fd2807SJeff Garzik * @dev: Device to which command will be sent 
4019c6fd2807SJeff Garzik * 4020c6fd2807SJeff Garzik * Issue SET FEATURES - XFER MODE command to device @dev 4021c6fd2807SJeff Garzik * on port @ap. 4022c6fd2807SJeff Garzik * 4023c6fd2807SJeff Garzik * LOCKING: 4024c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 4025c6fd2807SJeff Garzik * 4026c6fd2807SJeff Garzik * RETURNS: 4027c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 4028c6fd2807SJeff Garzik */ 4029c6fd2807SJeff Garzik 4030c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4031c6fd2807SJeff Garzik { 4032c6fd2807SJeff Garzik struct ata_taskfile tf; 4033c6fd2807SJeff Garzik unsigned int err_mask; 4034c6fd2807SJeff Garzik 4035c6fd2807SJeff Garzik /* set up set-features taskfile */ 4036c6fd2807SJeff Garzik DPRINTK("set features - xfer mode\n"); 4037c6fd2807SJeff Garzik 4038464cf177STejun Heo /* Some controllers and ATAPI devices show flaky interrupt 4039464cf177STejun Heo * behavior after setting xfer mode. Use polling instead. 4040464cf177STejun Heo */ 4041c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 4042c6fd2807SJeff Garzik tf.command = ATA_CMD_SET_FEATURES; 4043c6fd2807SJeff Garzik tf.feature = SETFEATURES_XFER; 4044464cf177STejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4045c6fd2807SJeff Garzik tf.protocol = ATA_PROT_NODATA; 4046c6fd2807SJeff Garzik tf.nsect = dev->xfer_mode; 4047c6fd2807SJeff Garzik 4048c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 4049c6fd2807SJeff Garzik 4050c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 4051c6fd2807SJeff Garzik return err_mask; 4052c6fd2807SJeff Garzik } 4053c6fd2807SJeff Garzik 4054c6fd2807SJeff Garzik /** 40559f45cbd3SKristen Carlson Accardi * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES 40569f45cbd3SKristen Carlson Accardi * @dev: Device to which command will be sent 40579f45cbd3SKristen Carlson Accardi * @enable: Whether to enable or disable the feature 40589f45cbd3SKristen 
Carlson Accardi * 40599f45cbd3SKristen Carlson Accardi * Issue SET FEATURES - SATA FEATURES command to device @dev 40609f45cbd3SKristen Carlson Accardi * on port @ap with sector count set to indicate Asynchronous 40619f45cbd3SKristen Carlson Accardi * Notification feature 40629f45cbd3SKristen Carlson Accardi * 40639f45cbd3SKristen Carlson Accardi * LOCKING: 40649f45cbd3SKristen Carlson Accardi * PCI/etc. bus probe sem. 40659f45cbd3SKristen Carlson Accardi * 40669f45cbd3SKristen Carlson Accardi * RETURNS: 40679f45cbd3SKristen Carlson Accardi * 0 on success, AC_ERR_* mask otherwise. 40689f45cbd3SKristen Carlson Accardi */ 40699f45cbd3SKristen Carlson Accardi static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable) 40709f45cbd3SKristen Carlson Accardi { 40719f45cbd3SKristen Carlson Accardi struct ata_taskfile tf; 40729f45cbd3SKristen Carlson Accardi unsigned int err_mask; 40739f45cbd3SKristen Carlson Accardi 40749f45cbd3SKristen Carlson Accardi /* set up set-features taskfile */ 40759f45cbd3SKristen Carlson Accardi DPRINTK("set features - SATA features\n"); 40769f45cbd3SKristen Carlson Accardi 40779f45cbd3SKristen Carlson Accardi ata_tf_init(dev, &tf); 40789f45cbd3SKristen Carlson Accardi tf.command = ATA_CMD_SET_FEATURES; 40799f45cbd3SKristen Carlson Accardi tf.feature = enable; 40809f45cbd3SKristen Carlson Accardi tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 40819f45cbd3SKristen Carlson Accardi tf.protocol = ATA_PROT_NODATA; 40829f45cbd3SKristen Carlson Accardi tf.nsect = SATA_AN; 40839f45cbd3SKristen Carlson Accardi 40849f45cbd3SKristen Carlson Accardi err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); 40859f45cbd3SKristen Carlson Accardi 40869f45cbd3SKristen Carlson Accardi DPRINTK("EXIT, err_mask=%x\n", err_mask); 40879f45cbd3SKristen Carlson Accardi return err_mask; 40889f45cbd3SKristen Carlson Accardi } 40899f45cbd3SKristen Carlson Accardi 40909f45cbd3SKristen Carlson Accardi /** 4091c6fd2807SJeff Garzik * ata_dev_init_params - 
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg (undo the trim done by ata_sg_setup) */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			/* pad sg page may be highmem; map it atomically */
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}

/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg element into PRD entries that never
		 * cross a 64K boundary; a length of 0x0000 means 64K
		 * per the PRD format.
		 */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
			   /* Some PATA chipsets like the CS5530 can't
			      cope with 0x0000 meaning 64K as the spec says.
			      Split the 64K chunk into two 32K PRD entries. */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (unlikely(qc->nbytes & 15))
		return 1;

	/* give the low-level driver a chance to veto DMA for this CDB */
	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}

/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	/* nothing to do for non-DMA-mapped commands (e.g. PIO) */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

/**
 *	ata_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	/* nothing to do for non-DMA-mapped commands (e.g. PIO) */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	/* use the variant that never emits a 64K-length PRD entry */
	ata_fill_sg_dumb(qc);
}

/* no-op ->qc_prep for drivers that build descriptors elsewhere */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	/* point __sg at the embedded single-entry sg table */
	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
4401c6fd2807SJeff Garzik * @n_elem: Number of elements in s/g table. 4402c6fd2807SJeff Garzik * 4403c6fd2807SJeff Garzik * Initialize the data-related elements of queued_cmd @qc 4404c6fd2807SJeff Garzik * to point to a scatter-gather table @sg, containing @n_elem 4405c6fd2807SJeff Garzik * elements. 4406c6fd2807SJeff Garzik * 4407c6fd2807SJeff Garzik * LOCKING: 4408cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4409c6fd2807SJeff Garzik */ 4410c6fd2807SJeff Garzik 4411c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4412c6fd2807SJeff Garzik unsigned int n_elem) 4413c6fd2807SJeff Garzik { 4414c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SG; 4415c6fd2807SJeff Garzik qc->__sg = sg; 4416c6fd2807SJeff Garzik qc->n_elem = n_elem; 4417c6fd2807SJeff Garzik qc->orig_n_elem = n_elem; 4418c6fd2807SJeff Garzik } 4419c6fd2807SJeff Garzik 4420c6fd2807SJeff Garzik /** 4421c6fd2807SJeff Garzik * ata_sg_setup_one - DMA-map the memory buffer associated with a command. 4422c6fd2807SJeff Garzik * @qc: Command with memory buffer to be mapped. 4423c6fd2807SJeff Garzik * 4424c6fd2807SJeff Garzik * DMA-map the memory buffer associated with queued_cmd @qc. 4425c6fd2807SJeff Garzik * 4426c6fd2807SJeff Garzik * LOCKING: 4427cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4428c6fd2807SJeff Garzik * 4429c6fd2807SJeff Garzik * RETURNS: 4430c6fd2807SJeff Garzik * Zero on success, negative on error. 
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* padding is only expected for ATAPI transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* on writes, stage the trailing bytes into the pad buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* whole buffer fit into the pad slot; nothing left to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* padding is only expected for ATAPI transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* last sg page may be highmem; map it atomically */
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	/* everything fit into the pad slot; nothing left to map */
	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	/* n_elem may shrink if the IOMMU coalesced entries */
	qc->n_elem = n_elem;

	return 0;
}

/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		/* stage the odd byte through an aligned 16-bit word */
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}

/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* keep local interrupts off for the whole PIO burst */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}


/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this is the final sector of the command: advance HSM state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* account for the transferred sector and advance the sg cursor */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* transfer up to multi_count sectors, bounded by what's left */
		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ata_ops->data_xfer is the port's PIO routine; write the full CDB */
	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* next HSM state depends on the ATAPI protocol variant */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
4789c6fd2807SJeff Garzik * 4790c6fd2807SJeff Garzik */ 4791c6fd2807SJeff Garzik 4792c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 4793c6fd2807SJeff Garzik { 4794c6fd2807SJeff Garzik int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 4795c6fd2807SJeff Garzik struct scatterlist *sg = qc->__sg; 4796c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 4797c6fd2807SJeff Garzik struct page *page; 4798c6fd2807SJeff Garzik unsigned char *buf; 4799c6fd2807SJeff Garzik unsigned int offset, count; 4800c6fd2807SJeff Garzik 4801c6fd2807SJeff Garzik if (qc->curbytes + bytes >= qc->nbytes) 4802c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_LAST; 4803c6fd2807SJeff Garzik 4804c6fd2807SJeff Garzik next_sg: 4805c6fd2807SJeff Garzik if (unlikely(qc->cursg >= qc->n_elem)) { 4806c6fd2807SJeff Garzik /* 4807c6fd2807SJeff Garzik * The end of qc->sg is reached and the device expects 4808c6fd2807SJeff Garzik * more data to transfer. In order not to overrun qc->sg 4809c6fd2807SJeff Garzik * and fulfill length specified in the byte count register, 4810c6fd2807SJeff Garzik * - for read case, discard trailing data from the device 4811c6fd2807SJeff Garzik * - for write case, padding zero data to the device 4812c6fd2807SJeff Garzik */ 4813c6fd2807SJeff Garzik u16 pad_buf[1] = { 0 }; 4814c6fd2807SJeff Garzik unsigned int words = bytes >> 1; 4815c6fd2807SJeff Garzik unsigned int i; 4816c6fd2807SJeff Garzik 4817c6fd2807SJeff Garzik if (words) /* warning if bytes > 1 */ 4818c6fd2807SJeff Garzik ata_dev_printk(qc->dev, KERN_WARNING, 4819c6fd2807SJeff Garzik "%u bytes trailing data\n", bytes); 4820c6fd2807SJeff Garzik 4821c6fd2807SJeff Garzik for (i = 0; i < words; i++) 4822c6fd2807SJeff Garzik ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write); 4823c6fd2807SJeff Garzik 4824c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_LAST; 4825c6fd2807SJeff Garzik return; 4826c6fd2807SJeff Garzik } 4827c6fd2807SJeff Garzik 4828c6fd2807SJeff Garzik sg 
= &qc->__sg[qc->cursg]; 4829c6fd2807SJeff Garzik 4830c6fd2807SJeff Garzik page = sg->page; 4831c6fd2807SJeff Garzik offset = sg->offset + qc->cursg_ofs; 4832c6fd2807SJeff Garzik 4833c6fd2807SJeff Garzik /* get the current page and offset */ 4834c6fd2807SJeff Garzik page = nth_page(page, (offset >> PAGE_SHIFT)); 4835c6fd2807SJeff Garzik offset %= PAGE_SIZE; 4836c6fd2807SJeff Garzik 4837c6fd2807SJeff Garzik /* don't overrun current sg */ 4838c6fd2807SJeff Garzik count = min(sg->length - qc->cursg_ofs, bytes); 4839c6fd2807SJeff Garzik 4840c6fd2807SJeff Garzik /* don't cross page boundaries */ 4841c6fd2807SJeff Garzik count = min(count, (unsigned int)PAGE_SIZE - offset); 4842c6fd2807SJeff Garzik 4843c6fd2807SJeff Garzik DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 4844c6fd2807SJeff Garzik 4845c6fd2807SJeff Garzik if (PageHighMem(page)) { 4846c6fd2807SJeff Garzik unsigned long flags; 4847c6fd2807SJeff Garzik 4848c6fd2807SJeff Garzik /* FIXME: use bounce buffer */ 4849c6fd2807SJeff Garzik local_irq_save(flags); 4850c6fd2807SJeff Garzik buf = kmap_atomic(page, KM_IRQ0); 4851c6fd2807SJeff Garzik 4852c6fd2807SJeff Garzik /* do the actual data transfer */ 4853c6fd2807SJeff Garzik ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); 4854c6fd2807SJeff Garzik 4855c6fd2807SJeff Garzik kunmap_atomic(buf, KM_IRQ0); 4856c6fd2807SJeff Garzik local_irq_restore(flags); 4857c6fd2807SJeff Garzik } else { 4858c6fd2807SJeff Garzik buf = page_address(page); 4859c6fd2807SJeff Garzik ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); 4860c6fd2807SJeff Garzik } 4861c6fd2807SJeff Garzik 4862c6fd2807SJeff Garzik bytes -= count; 4863c6fd2807SJeff Garzik qc->curbytes += count; 4864c6fd2807SJeff Garzik qc->cursg_ofs += count; 4865c6fd2807SJeff Garzik 4866c6fd2807SJeff Garzik if (qc->cursg_ofs == sg->length) { 4867c6fd2807SJeff Garzik qc->cursg++; 4868c6fd2807SJeff Garzik qc->cursg_ofs = 0; 4869c6fd2807SJeff Garzik } 4870c6fd2807SJeff Garzik 
4871c6fd2807SJeff Garzik if (bytes) 4872c6fd2807SJeff Garzik goto next_sg; 4873c6fd2807SJeff Garzik } 4874c6fd2807SJeff Garzik 4875c6fd2807SJeff Garzik /** 4876c6fd2807SJeff Garzik * atapi_pio_bytes - Transfer data from/to the ATAPI device. 4877c6fd2807SJeff Garzik * @qc: Command on going 4878c6fd2807SJeff Garzik * 4879c6fd2807SJeff Garzik * Transfer Transfer data from/to the ATAPI device. 4880c6fd2807SJeff Garzik * 4881c6fd2807SJeff Garzik * LOCKING: 4882c6fd2807SJeff Garzik * Inherited from caller. 4883c6fd2807SJeff Garzik */ 4884c6fd2807SJeff Garzik 4885c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc) 4886c6fd2807SJeff Garzik { 4887c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 4888c6fd2807SJeff Garzik struct ata_device *dev = qc->dev; 4889c6fd2807SJeff Garzik unsigned int ireason, bc_lo, bc_hi, bytes; 4890c6fd2807SJeff Garzik int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 4891c6fd2807SJeff Garzik 4892c6fd2807SJeff Garzik /* Abuse qc->result_tf for temp storage of intermediate TF 4893c6fd2807SJeff Garzik * here to save some kernel stack usage. 4894c6fd2807SJeff Garzik * For normal completion, qc->result_tf is not relevant. For 4895c6fd2807SJeff Garzik * error, qc->result_tf is later overwritten by ata_qc_complete(). 4896c6fd2807SJeff Garzik * So, the correctness of qc->result_tf is not affected. 
4897c6fd2807SJeff Garzik */ 4898c6fd2807SJeff Garzik ap->ops->tf_read(ap, &qc->result_tf); 4899c6fd2807SJeff Garzik ireason = qc->result_tf.nsect; 4900c6fd2807SJeff Garzik bc_lo = qc->result_tf.lbam; 4901c6fd2807SJeff Garzik bc_hi = qc->result_tf.lbah; 4902c6fd2807SJeff Garzik bytes = (bc_hi << 8) | bc_lo; 4903c6fd2807SJeff Garzik 4904c6fd2807SJeff Garzik /* shall be cleared to zero, indicating xfer of data */ 4905c6fd2807SJeff Garzik if (ireason & (1 << 0)) 4906c6fd2807SJeff Garzik goto err_out; 4907c6fd2807SJeff Garzik 4908c6fd2807SJeff Garzik /* make sure transfer direction matches expected */ 4909c6fd2807SJeff Garzik i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 4910c6fd2807SJeff Garzik if (do_write != i_write) 4911c6fd2807SJeff Garzik goto err_out; 4912c6fd2807SJeff Garzik 491344877b4eSTejun Heo VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 4914c6fd2807SJeff Garzik 4915c6fd2807SJeff Garzik __atapi_pio_bytes(qc, bytes); 49164cc980b3SAlbert Lee ata_altstatus(ap); /* flush */ 4917c6fd2807SJeff Garzik 4918c6fd2807SJeff Garzik return; 4919c6fd2807SJeff Garzik 4920c6fd2807SJeff Garzik err_out: 4921c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n"); 4922c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 4923c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 4924c6fd2807SJeff Garzik } 4925c6fd2807SJeff Garzik 4926c6fd2807SJeff Garzik /** 4927c6fd2807SJeff Garzik * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. 4928c6fd2807SJeff Garzik * @ap: the target ata_port 4929c6fd2807SJeff Garzik * @qc: qc on going 4930c6fd2807SJeff Garzik * 4931c6fd2807SJeff Garzik * RETURNS: 4932c6fd2807SJeff Garzik * 1 if ok in workqueue, 0 otherwise. 
4933c6fd2807SJeff Garzik */ 4934c6fd2807SJeff Garzik 4935c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) 4936c6fd2807SJeff Garzik { 4937c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_POLLING) 4938c6fd2807SJeff Garzik return 1; 4939c6fd2807SJeff Garzik 4940c6fd2807SJeff Garzik if (ap->hsm_task_state == HSM_ST_FIRST) { 4941c6fd2807SJeff Garzik if (qc->tf.protocol == ATA_PROT_PIO && 4942c6fd2807SJeff Garzik (qc->tf.flags & ATA_TFLAG_WRITE)) 4943c6fd2807SJeff Garzik return 1; 4944c6fd2807SJeff Garzik 4945c6fd2807SJeff Garzik if (is_atapi_taskfile(&qc->tf) && 4946c6fd2807SJeff Garzik !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 4947c6fd2807SJeff Garzik return 1; 4948c6fd2807SJeff Garzik } 4949c6fd2807SJeff Garzik 4950c6fd2807SJeff Garzik return 0; 4951c6fd2807SJeff Garzik } 4952c6fd2807SJeff Garzik 4953c6fd2807SJeff Garzik /** 4954c6fd2807SJeff Garzik * ata_hsm_qc_complete - finish a qc running on standard HSM 4955c6fd2807SJeff Garzik * @qc: Command to complete 4956c6fd2807SJeff Garzik * @in_wq: 1 if called from workqueue, 0 otherwise 4957c6fd2807SJeff Garzik * 4958c6fd2807SJeff Garzik * Finish @qc which is running on standard HSM. 4959c6fd2807SJeff Garzik * 4960c6fd2807SJeff Garzik * LOCKING: 4961cca3974eSJeff Garzik * If @in_wq is zero, spin_lock_irqsave(host lock). 4962c6fd2807SJeff Garzik * Otherwise, none on entry and grabs host lock. 4963c6fd2807SJeff Garzik */ 4964c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 4965c6fd2807SJeff Garzik { 4966c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 4967c6fd2807SJeff Garzik unsigned long flags; 4968c6fd2807SJeff Garzik 4969c6fd2807SJeff Garzik if (ap->ops->error_handler) { 4970c6fd2807SJeff Garzik if (in_wq) { 4971c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4972c6fd2807SJeff Garzik 4973cca3974eSJeff Garzik /* EH might have kicked in while host lock is 4974cca3974eSJeff Garzik * released. 
4975c6fd2807SJeff Garzik */ 4976c6fd2807SJeff Garzik qc = ata_qc_from_tag(ap, qc->tag); 4977c6fd2807SJeff Garzik if (qc) { 4978c6fd2807SJeff Garzik if (likely(!(qc->err_mask & AC_ERR_HSM))) { 497983625006SAkira Iguchi ap->ops->irq_on(ap); 4980c6fd2807SJeff Garzik ata_qc_complete(qc); 4981c6fd2807SJeff Garzik } else 4982c6fd2807SJeff Garzik ata_port_freeze(ap); 4983c6fd2807SJeff Garzik } 4984c6fd2807SJeff Garzik 4985c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4986c6fd2807SJeff Garzik } else { 4987c6fd2807SJeff Garzik if (likely(!(qc->err_mask & AC_ERR_HSM))) 4988c6fd2807SJeff Garzik ata_qc_complete(qc); 4989c6fd2807SJeff Garzik else 4990c6fd2807SJeff Garzik ata_port_freeze(ap); 4991c6fd2807SJeff Garzik } 4992c6fd2807SJeff Garzik } else { 4993c6fd2807SJeff Garzik if (in_wq) { 4994c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 499583625006SAkira Iguchi ap->ops->irq_on(ap); 4996c6fd2807SJeff Garzik ata_qc_complete(qc); 4997c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4998c6fd2807SJeff Garzik } else 4999c6fd2807SJeff Garzik ata_qc_complete(qc); 5000c6fd2807SJeff Garzik } 5001c6fd2807SJeff Garzik } 5002c6fd2807SJeff Garzik 5003c6fd2807SJeff Garzik /** 5004c6fd2807SJeff Garzik * ata_hsm_move - move the HSM to the next state. 5005c6fd2807SJeff Garzik * @ap: the target ata_port 5006c6fd2807SJeff Garzik * @qc: qc on going 5007c6fd2807SJeff Garzik * @status: current device status 5008c6fd2807SJeff Garzik * @in_wq: 1 if called from workqueue, 0 otherwise 5009c6fd2807SJeff Garzik * 5010c6fd2807SJeff Garzik * RETURNS: 5011c6fd2807SJeff Garzik * 1 when poll next status needed, 0 otherwise. 
5012c6fd2807SJeff Garzik */ 5013c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 5014c6fd2807SJeff Garzik u8 status, int in_wq) 5015c6fd2807SJeff Garzik { 5016c6fd2807SJeff Garzik unsigned long flags = 0; 5017c6fd2807SJeff Garzik int poll_next; 5018c6fd2807SJeff Garzik 5019c6fd2807SJeff Garzik WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 5020c6fd2807SJeff Garzik 5021c6fd2807SJeff Garzik /* Make sure ata_qc_issue_prot() does not throw things 5022c6fd2807SJeff Garzik * like DMA polling into the workqueue. Notice that 5023c6fd2807SJeff Garzik * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). 5024c6fd2807SJeff Garzik */ 5025c6fd2807SJeff Garzik WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); 5026c6fd2807SJeff Garzik 5027c6fd2807SJeff Garzik fsm_start: 5028c6fd2807SJeff Garzik DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", 502944877b4eSTejun Heo ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); 5030c6fd2807SJeff Garzik 5031c6fd2807SJeff Garzik switch (ap->hsm_task_state) { 5032c6fd2807SJeff Garzik case HSM_ST_FIRST: 5033c6fd2807SJeff Garzik /* Send first data block or PACKET CDB */ 5034c6fd2807SJeff Garzik 5035c6fd2807SJeff Garzik /* If polling, we will stay in the work queue after 5036c6fd2807SJeff Garzik * sending the data. Otherwise, interrupt handler 5037c6fd2807SJeff Garzik * takes over after sending the data. 5038c6fd2807SJeff Garzik */ 5039c6fd2807SJeff Garzik poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); 5040c6fd2807SJeff Garzik 5041c6fd2807SJeff Garzik /* check device status */ 5042c6fd2807SJeff Garzik if (unlikely((status & ATA_DRQ) == 0)) { 5043c6fd2807SJeff Garzik /* handle BSY=0, DRQ=0 as error */ 5044c6fd2807SJeff Garzik if (likely(status & (ATA_ERR | ATA_DF))) 5045c6fd2807SJeff Garzik /* device stops HSM for abort/error */ 5046c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_DEV; 5047c6fd2807SJeff Garzik else 5048c6fd2807SJeff Garzik /* HSM violation. 
Let EH handle this */ 5049c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 5050c6fd2807SJeff Garzik 5051c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 5052c6fd2807SJeff Garzik goto fsm_start; 5053c6fd2807SJeff Garzik } 5054c6fd2807SJeff Garzik 5055c6fd2807SJeff Garzik /* Device should not ask for data transfer (DRQ=1) 5056c6fd2807SJeff Garzik * when it finds something wrong. 5057c6fd2807SJeff Garzik * We ignore DRQ here and stop the HSM by 5058c6fd2807SJeff Garzik * changing hsm_task_state to HSM_ST_ERR and 5059c6fd2807SJeff Garzik * let the EH abort the command or reset the device. 5060c6fd2807SJeff Garzik */ 5061c6fd2807SJeff Garzik if (unlikely(status & (ATA_ERR | ATA_DF))) { 506244877b4eSTejun Heo ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device " 506344877b4eSTejun Heo "error, dev_stat 0x%X\n", status); 5064c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 5065c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 5066c6fd2807SJeff Garzik goto fsm_start; 5067c6fd2807SJeff Garzik } 5068c6fd2807SJeff Garzik 5069c6fd2807SJeff Garzik /* Send the CDB (atapi) or the first data block (ata pio out). 5070c6fd2807SJeff Garzik * During the state transition, interrupt handler shouldn't 5071c6fd2807SJeff Garzik * be invoked before the data transfer is complete and 5072c6fd2807SJeff Garzik * hsm_task_state is changed. Hence, the following locking. 5073c6fd2807SJeff Garzik */ 5074c6fd2807SJeff Garzik if (in_wq) 5075c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 5076c6fd2807SJeff Garzik 5077c6fd2807SJeff Garzik if (qc->tf.protocol == ATA_PROT_PIO) { 5078c6fd2807SJeff Garzik /* PIO data out protocol. 5079c6fd2807SJeff Garzik * send first data block. 5080c6fd2807SJeff Garzik */ 5081c6fd2807SJeff Garzik 5082c6fd2807SJeff Garzik /* ata_pio_sectors() might change the state 5083c6fd2807SJeff Garzik * to HSM_ST_LAST. so, the state is changed here 5084c6fd2807SJeff Garzik * before ata_pio_sectors(). 
5085c6fd2807SJeff Garzik */ 5086c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST; 5087c6fd2807SJeff Garzik ata_pio_sectors(qc); 5088c6fd2807SJeff Garzik } else 5089c6fd2807SJeff Garzik /* send CDB */ 5090c6fd2807SJeff Garzik atapi_send_cdb(ap, qc); 5091c6fd2807SJeff Garzik 5092c6fd2807SJeff Garzik if (in_wq) 5093c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 5094c6fd2807SJeff Garzik 5095c6fd2807SJeff Garzik /* if polling, ata_pio_task() handles the rest. 5096c6fd2807SJeff Garzik * otherwise, interrupt handler takes over from here. 5097c6fd2807SJeff Garzik */ 5098c6fd2807SJeff Garzik break; 5099c6fd2807SJeff Garzik 5100c6fd2807SJeff Garzik case HSM_ST: 5101c6fd2807SJeff Garzik /* complete command or read/write the data register */ 5102c6fd2807SJeff Garzik if (qc->tf.protocol == ATA_PROT_ATAPI) { 5103c6fd2807SJeff Garzik /* ATAPI PIO protocol */ 5104c6fd2807SJeff Garzik if ((status & ATA_DRQ) == 0) { 5105c6fd2807SJeff Garzik /* No more data to transfer or device error. 5106c6fd2807SJeff Garzik * Device error will be tagged in HSM_ST_LAST. 5107c6fd2807SJeff Garzik */ 5108c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_LAST; 5109c6fd2807SJeff Garzik goto fsm_start; 5110c6fd2807SJeff Garzik } 5111c6fd2807SJeff Garzik 5112c6fd2807SJeff Garzik /* Device should not ask for data transfer (DRQ=1) 5113c6fd2807SJeff Garzik * when it finds something wrong. 5114c6fd2807SJeff Garzik * We ignore DRQ here and stop the HSM by 5115c6fd2807SJeff Garzik * changing hsm_task_state to HSM_ST_ERR and 5116c6fd2807SJeff Garzik * let the EH abort the command or reset the device. 
5117c6fd2807SJeff Garzik */ 5118c6fd2807SJeff Garzik if (unlikely(status & (ATA_ERR | ATA_DF))) { 511944877b4eSTejun Heo ata_port_printk(ap, KERN_WARNING, "DRQ=1 with " 512044877b4eSTejun Heo "device error, dev_stat 0x%X\n", 512144877b4eSTejun Heo status); 5122c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 5123c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 5124c6fd2807SJeff Garzik goto fsm_start; 5125c6fd2807SJeff Garzik } 5126c6fd2807SJeff Garzik 5127c6fd2807SJeff Garzik atapi_pio_bytes(qc); 5128c6fd2807SJeff Garzik 5129c6fd2807SJeff Garzik if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) 5130c6fd2807SJeff Garzik /* bad ireason reported by device */ 5131c6fd2807SJeff Garzik goto fsm_start; 5132c6fd2807SJeff Garzik 5133c6fd2807SJeff Garzik } else { 5134c6fd2807SJeff Garzik /* ATA PIO protocol */ 5135c6fd2807SJeff Garzik if (unlikely((status & ATA_DRQ) == 0)) { 5136c6fd2807SJeff Garzik /* handle BSY=0, DRQ=0 as error */ 5137c6fd2807SJeff Garzik if (likely(status & (ATA_ERR | ATA_DF))) 5138c6fd2807SJeff Garzik /* device stops HSM for abort/error */ 5139c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_DEV; 5140c6fd2807SJeff Garzik else 514155a8e2c8STejun Heo /* HSM violation. Let EH handle this. 514255a8e2c8STejun Heo * Phantom devices also trigger this 514355a8e2c8STejun Heo * condition. Mark hint. 514455a8e2c8STejun Heo */ 514555a8e2c8STejun Heo qc->err_mask |= AC_ERR_HSM | 514655a8e2c8STejun Heo AC_ERR_NODEV_HINT; 5147c6fd2807SJeff Garzik 5148c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 5149c6fd2807SJeff Garzik goto fsm_start; 5150c6fd2807SJeff Garzik } 5151c6fd2807SJeff Garzik 5152c6fd2807SJeff Garzik /* For PIO reads, some devices may ask for 5153c6fd2807SJeff Garzik * data transfer (DRQ=1) alone with ERR=1. 5154c6fd2807SJeff Garzik * We respect DRQ here and transfer one 5155c6fd2807SJeff Garzik * block of junk data before changing the 5156c6fd2807SJeff Garzik * hsm_task_state to HSM_ST_ERR. 
5157c6fd2807SJeff Garzik * 5158c6fd2807SJeff Garzik * For PIO writes, ERR=1 DRQ=1 doesn't make 5159c6fd2807SJeff Garzik * sense since the data block has been 5160c6fd2807SJeff Garzik * transferred to the device. 5161c6fd2807SJeff Garzik */ 5162c6fd2807SJeff Garzik if (unlikely(status & (ATA_ERR | ATA_DF))) { 5163c6fd2807SJeff Garzik /* data might be corrputed */ 5164c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_DEV; 5165c6fd2807SJeff Garzik 5166c6fd2807SJeff Garzik if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 5167c6fd2807SJeff Garzik ata_pio_sectors(qc); 5168c6fd2807SJeff Garzik status = ata_wait_idle(ap); 5169c6fd2807SJeff Garzik } 5170c6fd2807SJeff Garzik 5171c6fd2807SJeff Garzik if (status & (ATA_BUSY | ATA_DRQ)) 5172c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 5173c6fd2807SJeff Garzik 5174c6fd2807SJeff Garzik /* ata_pio_sectors() might change the 5175c6fd2807SJeff Garzik * state to HSM_ST_LAST. so, the state 5176c6fd2807SJeff Garzik * is changed after ata_pio_sectors(). 5177c6fd2807SJeff Garzik */ 5178c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 5179c6fd2807SJeff Garzik goto fsm_start; 5180c6fd2807SJeff Garzik } 5181c6fd2807SJeff Garzik 5182c6fd2807SJeff Garzik ata_pio_sectors(qc); 5183c6fd2807SJeff Garzik 5184c6fd2807SJeff Garzik if (ap->hsm_task_state == HSM_ST_LAST && 5185c6fd2807SJeff Garzik (!(qc->tf.flags & ATA_TFLAG_WRITE))) { 5186c6fd2807SJeff Garzik /* all data read */ 5187c6fd2807SJeff Garzik status = ata_wait_idle(ap); 5188c6fd2807SJeff Garzik goto fsm_start; 5189c6fd2807SJeff Garzik } 5190c6fd2807SJeff Garzik } 5191c6fd2807SJeff Garzik 5192c6fd2807SJeff Garzik poll_next = 1; 5193c6fd2807SJeff Garzik break; 5194c6fd2807SJeff Garzik 5195c6fd2807SJeff Garzik case HSM_ST_LAST: 5196c6fd2807SJeff Garzik if (unlikely(!ata_ok(status))) { 5197c6fd2807SJeff Garzik qc->err_mask |= __ac_err_mask(status); 5198c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 5199c6fd2807SJeff Garzik goto fsm_start; 5200c6fd2807SJeff Garzik } 5201c6fd2807SJeff Garzik 
5202c6fd2807SJeff Garzik /* no more data to transfer */ 5203c6fd2807SJeff Garzik DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", 520444877b4eSTejun Heo ap->print_id, qc->dev->devno, status); 5205c6fd2807SJeff Garzik 5206c6fd2807SJeff Garzik WARN_ON(qc->err_mask); 5207c6fd2807SJeff Garzik 5208c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_IDLE; 5209c6fd2807SJeff Garzik 5210c6fd2807SJeff Garzik /* complete taskfile transaction */ 5211c6fd2807SJeff Garzik ata_hsm_qc_complete(qc, in_wq); 5212c6fd2807SJeff Garzik 5213c6fd2807SJeff Garzik poll_next = 0; 5214c6fd2807SJeff Garzik break; 5215c6fd2807SJeff Garzik 5216c6fd2807SJeff Garzik case HSM_ST_ERR: 5217c6fd2807SJeff Garzik /* make sure qc->err_mask is available to 5218c6fd2807SJeff Garzik * know what's wrong and recover 5219c6fd2807SJeff Garzik */ 5220c6fd2807SJeff Garzik WARN_ON(qc->err_mask == 0); 5221c6fd2807SJeff Garzik 5222c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_IDLE; 5223c6fd2807SJeff Garzik 5224c6fd2807SJeff Garzik /* complete taskfile transaction */ 5225c6fd2807SJeff Garzik ata_hsm_qc_complete(qc, in_wq); 5226c6fd2807SJeff Garzik 5227c6fd2807SJeff Garzik poll_next = 0; 5228c6fd2807SJeff Garzik break; 5229c6fd2807SJeff Garzik default: 5230c6fd2807SJeff Garzik poll_next = 0; 5231c6fd2807SJeff Garzik BUG(); 5232c6fd2807SJeff Garzik } 5233c6fd2807SJeff Garzik 5234c6fd2807SJeff Garzik return poll_next; 5235c6fd2807SJeff Garzik } 5236c6fd2807SJeff Garzik 523765f27f38SDavid Howells static void ata_pio_task(struct work_struct *work) 5238c6fd2807SJeff Garzik { 523965f27f38SDavid Howells struct ata_port *ap = 524065f27f38SDavid Howells container_of(work, struct ata_port, port_task.work); 524165f27f38SDavid Howells struct ata_queued_cmd *qc = ap->port_task_data; 5242c6fd2807SJeff Garzik u8 status; 5243c6fd2807SJeff Garzik int poll_next; 5244c6fd2807SJeff Garzik 5245c6fd2807SJeff Garzik fsm_start: 5246c6fd2807SJeff Garzik WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); 5247c6fd2807SJeff Garzik 
5248c6fd2807SJeff Garzik /* 5249c6fd2807SJeff Garzik * This is purely heuristic. This is a fast path. 5250c6fd2807SJeff Garzik * Sometimes when we enter, BSY will be cleared in 5251c6fd2807SJeff Garzik * a chk-status or two. If not, the drive is probably seeking 5252c6fd2807SJeff Garzik * or something. Snooze for a couple msecs, then 5253c6fd2807SJeff Garzik * chk-status again. If still busy, queue delayed work. 5254c6fd2807SJeff Garzik */ 5255c6fd2807SJeff Garzik status = ata_busy_wait(ap, ATA_BUSY, 5); 5256c6fd2807SJeff Garzik if (status & ATA_BUSY) { 5257c6fd2807SJeff Garzik msleep(2); 5258c6fd2807SJeff Garzik status = ata_busy_wait(ap, ATA_BUSY, 10); 5259c6fd2807SJeff Garzik if (status & ATA_BUSY) { 5260c6fd2807SJeff Garzik ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE); 5261c6fd2807SJeff Garzik return; 5262c6fd2807SJeff Garzik } 5263c6fd2807SJeff Garzik } 5264c6fd2807SJeff Garzik 5265c6fd2807SJeff Garzik /* move the HSM */ 5266c6fd2807SJeff Garzik poll_next = ata_hsm_move(ap, qc, status, 1); 5267c6fd2807SJeff Garzik 5268c6fd2807SJeff Garzik /* another command or interrupt handler 5269c6fd2807SJeff Garzik * may be running at this point. 5270c6fd2807SJeff Garzik */ 5271c6fd2807SJeff Garzik if (poll_next) 5272c6fd2807SJeff Garzik goto fsm_start; 5273c6fd2807SJeff Garzik } 5274c6fd2807SJeff Garzik 5275c6fd2807SJeff Garzik /** 5276c6fd2807SJeff Garzik * ata_qc_new - Request an available ATA command, for queueing 5277c6fd2807SJeff Garzik * @ap: Port associated with device @dev 5278c6fd2807SJeff Garzik * @dev: Device from whom we request an available command structure 5279c6fd2807SJeff Garzik * 5280c6fd2807SJeff Garzik * LOCKING: 5281c6fd2807SJeff Garzik * None. 
5282c6fd2807SJeff Garzik */ 5283c6fd2807SJeff Garzik 5284c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 5285c6fd2807SJeff Garzik { 5286c6fd2807SJeff Garzik struct ata_queued_cmd *qc = NULL; 5287c6fd2807SJeff Garzik unsigned int i; 5288c6fd2807SJeff Garzik 5289c6fd2807SJeff Garzik /* no command while frozen */ 5290c6fd2807SJeff Garzik if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 5291c6fd2807SJeff Garzik return NULL; 5292c6fd2807SJeff Garzik 5293c6fd2807SJeff Garzik /* the last tag is reserved for internal command. */ 5294c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 5295c6fd2807SJeff Garzik if (!test_and_set_bit(i, &ap->qc_allocated)) { 5296c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, i); 5297c6fd2807SJeff Garzik break; 5298c6fd2807SJeff Garzik } 5299c6fd2807SJeff Garzik 5300c6fd2807SJeff Garzik if (qc) 5301c6fd2807SJeff Garzik qc->tag = i; 5302c6fd2807SJeff Garzik 5303c6fd2807SJeff Garzik return qc; 5304c6fd2807SJeff Garzik } 5305c6fd2807SJeff Garzik 5306c6fd2807SJeff Garzik /** 5307c6fd2807SJeff Garzik * ata_qc_new_init - Request an available ATA command, and initialize it 5308c6fd2807SJeff Garzik * @dev: Device from whom we request an available command structure 5309c6fd2807SJeff Garzik * 5310c6fd2807SJeff Garzik * LOCKING: 5311c6fd2807SJeff Garzik * None. 
5312c6fd2807SJeff Garzik */ 5313c6fd2807SJeff Garzik 5314c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 5315c6fd2807SJeff Garzik { 53169af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 5317c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 5318c6fd2807SJeff Garzik 5319c6fd2807SJeff Garzik qc = ata_qc_new(ap); 5320c6fd2807SJeff Garzik if (qc) { 5321c6fd2807SJeff Garzik qc->scsicmd = NULL; 5322c6fd2807SJeff Garzik qc->ap = ap; 5323c6fd2807SJeff Garzik qc->dev = dev; 5324c6fd2807SJeff Garzik 5325c6fd2807SJeff Garzik ata_qc_reinit(qc); 5326c6fd2807SJeff Garzik } 5327c6fd2807SJeff Garzik 5328c6fd2807SJeff Garzik return qc; 5329c6fd2807SJeff Garzik } 5330c6fd2807SJeff Garzik 5331c6fd2807SJeff Garzik /** 5332c6fd2807SJeff Garzik * ata_qc_free - free unused ata_queued_cmd 5333c6fd2807SJeff Garzik * @qc: Command to complete 5334c6fd2807SJeff Garzik * 5335c6fd2807SJeff Garzik * Designed to free unused ata_queued_cmd object 5336c6fd2807SJeff Garzik * in case something prevents using it. 
5337c6fd2807SJeff Garzik * 5338c6fd2807SJeff Garzik * LOCKING: 5339cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5340c6fd2807SJeff Garzik */ 5341c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc) 5342c6fd2807SJeff Garzik { 5343c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 5344c6fd2807SJeff Garzik unsigned int tag; 5345c6fd2807SJeff Garzik 5346c6fd2807SJeff Garzik WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5347c6fd2807SJeff Garzik 5348c6fd2807SJeff Garzik qc->flags = 0; 5349c6fd2807SJeff Garzik tag = qc->tag; 5350c6fd2807SJeff Garzik if (likely(ata_tag_valid(tag))) { 5351c6fd2807SJeff Garzik qc->tag = ATA_TAG_POISON; 5352c6fd2807SJeff Garzik clear_bit(tag, &ap->qc_allocated); 5353c6fd2807SJeff Garzik } 5354c6fd2807SJeff Garzik } 5355c6fd2807SJeff Garzik 5356c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc) 5357c6fd2807SJeff Garzik { 5358c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 53599af5c9c9STejun Heo struct ata_link *link = qc->dev->link; 5360c6fd2807SJeff Garzik 5361c6fd2807SJeff Garzik WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5362c6fd2807SJeff Garzik WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 5363c6fd2807SJeff Garzik 5364c6fd2807SJeff Garzik if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 5365c6fd2807SJeff Garzik ata_sg_clean(qc); 5366c6fd2807SJeff Garzik 5367c6fd2807SJeff Garzik /* command should be marked inactive atomically with qc completion */ 5368c6fd2807SJeff Garzik if (qc->tf.protocol == ATA_PROT_NCQ) 53699af5c9c9STejun Heo link->sactive &= ~(1 << qc->tag); 5370c6fd2807SJeff Garzik else 53719af5c9c9STejun Heo link->active_tag = ATA_TAG_POISON; 5372c6fd2807SJeff Garzik 5373c6fd2807SJeff Garzik /* atapi: mark qc as inactive to prevent the interrupt handler 5374c6fd2807SJeff Garzik * from completing the command twice later, before the error handler 5375c6fd2807SJeff Garzik * is called. 
(when rc != 0 and atapi request sense is needed) 5376c6fd2807SJeff Garzik */ 5377c6fd2807SJeff Garzik qc->flags &= ~ATA_QCFLAG_ACTIVE; 5378c6fd2807SJeff Garzik ap->qc_active &= ~(1 << qc->tag); 5379c6fd2807SJeff Garzik 5380c6fd2807SJeff Garzik /* call completion callback */ 5381c6fd2807SJeff Garzik qc->complete_fn(qc); 5382c6fd2807SJeff Garzik } 5383c6fd2807SJeff Garzik 538439599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc) 538539599a53STejun Heo { 538639599a53STejun Heo struct ata_port *ap = qc->ap; 538739599a53STejun Heo 538839599a53STejun Heo qc->result_tf.flags = qc->tf.flags; 53894742d54fSMark Lord ap->ops->tf_read(ap, &qc->result_tf); 539039599a53STejun Heo } 539139599a53STejun Heo 5392c6fd2807SJeff Garzik /** 5393c6fd2807SJeff Garzik * ata_qc_complete - Complete an active ATA command 5394c6fd2807SJeff Garzik * @qc: Command to complete 5395c6fd2807SJeff Garzik * @err_mask: ATA Status register contents 5396c6fd2807SJeff Garzik * 5397c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA 5398c6fd2807SJeff Garzik * command has completed, with either an ok or not-ok status. 5399c6fd2807SJeff Garzik * 5400c6fd2807SJeff Garzik * LOCKING: 5401cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5402c6fd2807SJeff Garzik */ 5403c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc) 5404c6fd2807SJeff Garzik { 5405c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 5406c6fd2807SJeff Garzik 5407c6fd2807SJeff Garzik /* XXX: New EH and old EH use different mechanisms to 5408c6fd2807SJeff Garzik * synchronize EH with regular execution path. 5409c6fd2807SJeff Garzik * 5410c6fd2807SJeff Garzik * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 5411c6fd2807SJeff Garzik * Normal execution path is responsible for not accessing a 5412c6fd2807SJeff Garzik * failed qc. libata core enforces the rule by returning NULL 5413c6fd2807SJeff Garzik * from ata_qc_from_tag() for failed qcs. 
5414c6fd2807SJeff Garzik * 5415c6fd2807SJeff Garzik * Old EH depends on ata_qc_complete() nullifying completion 5416c6fd2807SJeff Garzik * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 5417c6fd2807SJeff Garzik * not synchronize with interrupt handler. Only PIO task is 5418c6fd2807SJeff Garzik * taken care of. 5419c6fd2807SJeff Garzik */ 5420c6fd2807SJeff Garzik if (ap->ops->error_handler) { 5421c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); 5422c6fd2807SJeff Garzik 5423c6fd2807SJeff Garzik if (unlikely(qc->err_mask)) 5424c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 5425c6fd2807SJeff Garzik 5426c6fd2807SJeff Garzik if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 5427c6fd2807SJeff Garzik if (!ata_tag_internal(qc->tag)) { 5428c6fd2807SJeff Garzik /* always fill result TF for failed qc */ 542939599a53STejun Heo fill_result_tf(qc); 5430c6fd2807SJeff Garzik ata_qc_schedule_eh(qc); 5431c6fd2807SJeff Garzik return; 5432c6fd2807SJeff Garzik } 5433c6fd2807SJeff Garzik } 5434c6fd2807SJeff Garzik 5435c6fd2807SJeff Garzik /* read result TF if requested */ 5436c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_RESULT_TF) 543739599a53STejun Heo fill_result_tf(qc); 5438c6fd2807SJeff Garzik 5439c6fd2807SJeff Garzik __ata_qc_complete(qc); 5440c6fd2807SJeff Garzik } else { 5441c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5442c6fd2807SJeff Garzik return; 5443c6fd2807SJeff Garzik 5444c6fd2807SJeff Garzik /* read result TF if failed or requested */ 5445c6fd2807SJeff Garzik if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 544639599a53STejun Heo fill_result_tf(qc); 5447c6fd2807SJeff Garzik 5448c6fd2807SJeff Garzik __ata_qc_complete(qc); 5449c6fd2807SJeff Garzik } 5450c6fd2807SJeff Garzik } 5451c6fd2807SJeff Garzik 5452c6fd2807SJeff Garzik /** 5453c6fd2807SJeff Garzik * ata_qc_complete_multiple - Complete multiple qcs successfully 5454c6fd2807SJeff Garzik * @ap: port in question 5455c6fd2807SJeff Garzik * @qc_active: new qc_active mask 
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits set in ap->qc_active but clear in @qc_active are done */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit set in @qc_active that was not active before is a bug
	 * in the LLDD -- commands can only be retired here, not started
	 */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}

/*
 * ata_should_dma_map - decide whether @qc's buffer needs DMA mapping
 *
 * DMA protocols always map; PIO/ATAPI-PIO map only when the port
 * advertises ATA_FLAG_PIO_DMA (controller does PIO via DMA engine).
 * Returns 1 if the qc should be DMA-mapped, 0 otherwise.
 */
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		return 1;

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO:
		if (ap->flags & ATA_FLAG_PIO_DMA)
			return 1;

		/* fall through */

	default:
		return 0;
	}

	/* never reached */
}

/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* track the command in the link's tag bookkeeping: NCQ commands
	 * go into sactive, everything else becomes the single active_tag
	 */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* map data buffers for DMA if the protocol needs it */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* complete immediately with err_mask set; EH takes it from there */
	ata_qc_complete(qc);
}

/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command -- each protocol programs the hardware and
	 * sets the HSM state the interrupt/polling path expects next
	 */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus -- reading it does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* advance the host state machine with the observed status */
	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA transfer failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only dispatch to ata_host_intr() for an active,
			 * non-polling command; polled commands are handled
			 * by ata_pio_task(), not the irq path
			 */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@link: ATA link to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @link.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}

/**
 *	sata_scr_read - read SCR register of the specified port
 *	@link: ATA link to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @link into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	struct ata_port *ap = link->ap;

	if (sata_scr_valid(link))
		return ap->ops->scr_read(ap, reg, val);
	return -EOPNOTSUPP;
}

/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;

	if (sata_scr_valid(link))
		return ap->ops->scr_write(ap, reg, val);
	return -EOPNOTSUPP;
}

/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;
	int rc;

	if (sata_scr_valid(link)) {
		rc = ap->ops->scr_write(ap, reg, val);
		if (rc == 0)
			/* flush by reading the register back; @val is
			 * reused as scratch and the read value discarded
			 */
			rc = ap->ops->scr_read(ap, reg, &val);
		return rc;
	}
	return -EOPNOTSUPP;
}

/**
 *	ata_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  Note that this function returns
 *	0 if online status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_link_online(struct ata_link *link)
{
	u32 sstatus;

	/* SStatus DET == 0x3 means device present and phy comm established */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  Note that this function
 *	returns 0 if offline status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}

/*
 * ata_flush_cache - issue a cache-flush command to @dev
 *
 * Picks FLUSH CACHE EXT when the device supports it, plain FLUSH CACHE
 * otherwise.  Returns 0 on success (or if the device needs no flush),
 * -EIO if the flush command failed.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong.
	   On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_PM
/*
 * ata_host_request_pm - request EH to perform a PM operation on @host
 * @host: host to operate on
 * @mesg: PM message to set on each port
 * @action: EH action to request on every link
 * @ehi_flags: EH info flags to set on every link
 * @wait: if non-zero, wait for each port's EH to finish and collect rc
 *
 * Schedules EH on each port with the given action/flags.  With @wait,
 * returns the first non-zero per-port result; otherwise returns 0.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH reports its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/* wait (last arg 1) so suspend only proceeds if all ports succeed */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
#endif

/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed allocation -- freed automatically on detach */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
6170c6fd2807SJeff Garzik */ 6171c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev) 6172c6fd2807SJeff Garzik { 61739af5c9c9STejun Heo struct ata_link *link = dev->link; 61749af5c9c9STejun Heo struct ata_port *ap = link->ap; 6175c6fd2807SJeff Garzik unsigned long flags; 6176c6fd2807SJeff Garzik 6177c6fd2807SJeff Garzik /* SATA spd limit is bound to the first device */ 61789af5c9c9STejun Heo link->sata_spd_limit = link->hw_sata_spd_limit; 61799af5c9c9STejun Heo link->sata_spd = 0; 6180c6fd2807SJeff Garzik 6181c6fd2807SJeff Garzik /* High bits of dev->flags are used to record warm plug 6182c6fd2807SJeff Garzik * requests which occur asynchronously. Synchronize using 6183cca3974eSJeff Garzik * host lock. 6184c6fd2807SJeff Garzik */ 6185c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6186c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_INIT_MASK; 61873dcc323fSTejun Heo dev->horkage = 0; 6188c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 6189c6fd2807SJeff Garzik 6190c6fd2807SJeff Garzik memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0, 6191c6fd2807SJeff Garzik sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET); 6192c6fd2807SJeff Garzik dev->pio_mask = UINT_MAX; 6193c6fd2807SJeff Garzik dev->mwdma_mask = UINT_MAX; 6194c6fd2807SJeff Garzik dev->udma_mask = UINT_MAX; 6195c6fd2807SJeff Garzik } 6196c6fd2807SJeff Garzik 6197c6fd2807SJeff Garzik /** 61984fb37a25STejun Heo * ata_link_init - Initialize an ata_link structure 61994fb37a25STejun Heo * @ap: ATA port link is attached to 62004fb37a25STejun Heo * @link: Link structure to initialize 62018989805dSTejun Heo * @pmp: Port multiplier port number 62024fb37a25STejun Heo * 62034fb37a25STejun Heo * Initialize @link. 
62044fb37a25STejun Heo * 62054fb37a25STejun Heo * LOCKING: 62064fb37a25STejun Heo * Kernel thread context (may sleep) 62074fb37a25STejun Heo */ 62088989805dSTejun Heo static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 62094fb37a25STejun Heo { 62104fb37a25STejun Heo int i; 62114fb37a25STejun Heo 62124fb37a25STejun Heo /* clear everything except for devices */ 62134fb37a25STejun Heo memset(link, 0, offsetof(struct ata_link, device[0])); 62144fb37a25STejun Heo 62154fb37a25STejun Heo link->ap = ap; 62168989805dSTejun Heo link->pmp = pmp; 62174fb37a25STejun Heo link->active_tag = ATA_TAG_POISON; 62184fb37a25STejun Heo link->hw_sata_spd_limit = UINT_MAX; 62194fb37a25STejun Heo 62204fb37a25STejun Heo /* can't use iterator, ap isn't initialized yet */ 62214fb37a25STejun Heo for (i = 0; i < ATA_MAX_DEVICES; i++) { 62224fb37a25STejun Heo struct ata_device *dev = &link->device[i]; 62234fb37a25STejun Heo 62244fb37a25STejun Heo dev->link = link; 62254fb37a25STejun Heo dev->devno = dev - link->device; 62264fb37a25STejun Heo ata_dev_init(dev); 62274fb37a25STejun Heo } 62284fb37a25STejun Heo } 62294fb37a25STejun Heo 62304fb37a25STejun Heo /** 62314fb37a25STejun Heo * sata_link_init_spd - Initialize link->sata_spd_limit 62324fb37a25STejun Heo * @link: Link to configure sata_spd_limit for 62334fb37a25STejun Heo * 62344fb37a25STejun Heo * Initialize @link->[hw_]sata_spd_limit to the currently 62354fb37a25STejun Heo * configured value. 62364fb37a25STejun Heo * 62374fb37a25STejun Heo * LOCKING: 62384fb37a25STejun Heo * Kernel thread context (may sleep). 62394fb37a25STejun Heo * 62404fb37a25STejun Heo * RETURNS: 62414fb37a25STejun Heo * 0 on success, -errno on failure. 
62424fb37a25STejun Heo */ 62434fb37a25STejun Heo static int sata_link_init_spd(struct ata_link *link) 62444fb37a25STejun Heo { 62454fb37a25STejun Heo u32 scontrol, spd; 62464fb37a25STejun Heo int rc; 62474fb37a25STejun Heo 62484fb37a25STejun Heo rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 62494fb37a25STejun Heo if (rc) 62504fb37a25STejun Heo return rc; 62514fb37a25STejun Heo 62524fb37a25STejun Heo spd = (scontrol >> 4) & 0xf; 62534fb37a25STejun Heo if (spd) 62544fb37a25STejun Heo link->hw_sata_spd_limit &= (1 << spd) - 1; 62554fb37a25STejun Heo 62564fb37a25STejun Heo link->sata_spd_limit = link->hw_sata_spd_limit; 62574fb37a25STejun Heo 62584fb37a25STejun Heo return 0; 62594fb37a25STejun Heo } 62604fb37a25STejun Heo 62614fb37a25STejun Heo /** 6262f3187195STejun Heo * ata_port_alloc - allocate and initialize basic ATA port resources 6263f3187195STejun Heo * @host: ATA host this allocated port belongs to 6264c6fd2807SJeff Garzik * 6265f3187195STejun Heo * Allocate and initialize basic ATA port resources. 6266f3187195STejun Heo * 6267f3187195STejun Heo * RETURNS: 6268f3187195STejun Heo * Allocate ATA port on success, NULL on failure. 6269c6fd2807SJeff Garzik * 6270c6fd2807SJeff Garzik * LOCKING: 6271f3187195STejun Heo * Inherited from calling layer (may sleep). 
6272c6fd2807SJeff Garzik */ 6273f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host) 6274c6fd2807SJeff Garzik { 6275f3187195STejun Heo struct ata_port *ap; 6276c6fd2807SJeff Garzik 6277f3187195STejun Heo DPRINTK("ENTER\n"); 6278f3187195STejun Heo 6279f3187195STejun Heo ap = kzalloc(sizeof(*ap), GFP_KERNEL); 6280f3187195STejun Heo if (!ap) 6281f3187195STejun Heo return NULL; 6282f3187195STejun Heo 6283f4d6d004STejun Heo ap->pflags |= ATA_PFLAG_INITIALIZING; 6284cca3974eSJeff Garzik ap->lock = &host->lock; 6285c6fd2807SJeff Garzik ap->flags = ATA_FLAG_DISABLED; 6286f3187195STejun Heo ap->print_id = -1; 6287c6fd2807SJeff Garzik ap->ctl = ATA_DEVCTL_OBS; 6288cca3974eSJeff Garzik ap->host = host; 6289f3187195STejun Heo ap->dev = host->dev; 6290c6fd2807SJeff Garzik ap->last_ctl = 0xFF; 6291c6fd2807SJeff Garzik 6292c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG) 6293c6fd2807SJeff Garzik /* turn on all debugging levels */ 6294c6fd2807SJeff Garzik ap->msg_enable = 0x00FF; 6295c6fd2807SJeff Garzik #elif defined(ATA_DEBUG) 6296c6fd2807SJeff Garzik ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 6297c6fd2807SJeff Garzik #else 6298c6fd2807SJeff Garzik ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 6299c6fd2807SJeff Garzik #endif 6300c6fd2807SJeff Garzik 630165f27f38SDavid Howells INIT_DELAYED_WORK(&ap->port_task, NULL); 630265f27f38SDavid Howells INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 630365f27f38SDavid Howells INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 6304c6fd2807SJeff Garzik INIT_LIST_HEAD(&ap->eh_done_q); 6305c6fd2807SJeff Garzik init_waitqueue_head(&ap->eh_wait_q); 63065ddf24c5STejun Heo init_timer_deferrable(&ap->fastdrain_timer); 63075ddf24c5STejun Heo ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 63085ddf24c5STejun Heo ap->fastdrain_timer.data = (unsigned long)ap; 6309c6fd2807SJeff Garzik 6310c6fd2807SJeff Garzik ap->cbl = ATA_CBL_NONE; 6311c6fd2807SJeff 
Garzik 63128989805dSTejun Heo ata_link_init(ap, &ap->link, 0); 6313c6fd2807SJeff Garzik 6314c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP 6315c6fd2807SJeff Garzik ap->stats.unhandled_irq = 1; 6316c6fd2807SJeff Garzik ap->stats.idle_irq = 1; 6317c6fd2807SJeff Garzik #endif 6318c6fd2807SJeff Garzik return ap; 6319c6fd2807SJeff Garzik } 6320c6fd2807SJeff Garzik 6321f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res) 6322f0d36efdSTejun Heo { 6323f0d36efdSTejun Heo struct ata_host *host = dev_get_drvdata(gendev); 6324f0d36efdSTejun Heo int i; 6325f0d36efdSTejun Heo 6326f0d36efdSTejun Heo for (i = 0; i < host->n_ports; i++) { 6327f0d36efdSTejun Heo struct ata_port *ap = host->ports[i]; 6328f0d36efdSTejun Heo 6329ecef7253STejun Heo if (!ap) 6330ecef7253STejun Heo continue; 6331ecef7253STejun Heo 6332ecef7253STejun Heo if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop) 6333f0d36efdSTejun Heo ap->ops->port_stop(ap); 6334f0d36efdSTejun Heo } 6335f0d36efdSTejun Heo 6336ecef7253STejun Heo if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop) 6337f0d36efdSTejun Heo host->ops->host_stop(host); 63381aa56ccaSTejun Heo 63391aa506e4STejun Heo for (i = 0; i < host->n_ports; i++) { 63401aa506e4STejun Heo struct ata_port *ap = host->ports[i]; 63411aa506e4STejun Heo 63424911487aSTejun Heo if (!ap) 63434911487aSTejun Heo continue; 63444911487aSTejun Heo 63454911487aSTejun Heo if (ap->scsi_host) 63461aa506e4STejun Heo scsi_host_put(ap->scsi_host); 63471aa506e4STejun Heo 63484911487aSTejun Heo kfree(ap); 63491aa506e4STejun Heo host->ports[i] = NULL; 63501aa506e4STejun Heo } 63511aa506e4STejun Heo 63521aa56ccaSTejun Heo dev_set_drvdata(gendev, NULL); 6353f0d36efdSTejun Heo } 6354f0d36efdSTejun Heo 6355c6fd2807SJeff Garzik /** 6356f3187195STejun Heo * ata_host_alloc - allocate and init basic ATA host resources 6357f3187195STejun Heo * @dev: generic device this host is associated with 6358f3187195STejun Heo * @max_ports: maximum number of ATA ports 
associated with this host 6359f3187195STejun Heo * 6360f3187195STejun Heo * Allocate and initialize basic ATA host resources. LLD calls 6361f3187195STejun Heo * this function to allocate a host, initializes it fully and 6362f3187195STejun Heo * attaches it using ata_host_register(). 6363f3187195STejun Heo * 6364f3187195STejun Heo * @max_ports ports are allocated and host->n_ports is 6365f3187195STejun Heo * initialized to @max_ports. The caller is allowed to decrease 6366f3187195STejun Heo * host->n_ports before calling ata_host_register(). The unused 6367f3187195STejun Heo * ports will be automatically freed on registration. 6368f3187195STejun Heo * 6369f3187195STejun Heo * RETURNS: 6370f3187195STejun Heo * Allocate ATA host on success, NULL on failure. 6371f3187195STejun Heo * 6372f3187195STejun Heo * LOCKING: 6373f3187195STejun Heo * Inherited from calling layer (may sleep). 6374f3187195STejun Heo */ 6375f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 6376f3187195STejun Heo { 6377f3187195STejun Heo struct ata_host *host; 6378f3187195STejun Heo size_t sz; 6379f3187195STejun Heo int i; 6380f3187195STejun Heo 6381f3187195STejun Heo DPRINTK("ENTER\n"); 6382f3187195STejun Heo 6383f3187195STejun Heo if (!devres_open_group(dev, NULL, GFP_KERNEL)) 6384f3187195STejun Heo return NULL; 6385f3187195STejun Heo 6386f3187195STejun Heo /* alloc a container for our list of ATA ports (buses) */ 6387f3187195STejun Heo sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 6388f3187195STejun Heo /* alloc a container for our list of ATA ports (buses) */ 6389f3187195STejun Heo host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 6390f3187195STejun Heo if (!host) 6391f3187195STejun Heo goto err_out; 6392f3187195STejun Heo 6393f3187195STejun Heo devres_add(dev, host); 6394f3187195STejun Heo dev_set_drvdata(dev, host); 6395f3187195STejun Heo 6396f3187195STejun Heo spin_lock_init(&host->lock); 6397f3187195STejun Heo host->dev = dev; 
6398f3187195STejun Heo host->n_ports = max_ports; 6399f3187195STejun Heo 6400f3187195STejun Heo /* allocate ports bound to this host */ 6401f3187195STejun Heo for (i = 0; i < max_ports; i++) { 6402f3187195STejun Heo struct ata_port *ap; 6403f3187195STejun Heo 6404f3187195STejun Heo ap = ata_port_alloc(host); 6405f3187195STejun Heo if (!ap) 6406f3187195STejun Heo goto err_out; 6407f3187195STejun Heo 6408f3187195STejun Heo ap->port_no = i; 6409f3187195STejun Heo host->ports[i] = ap; 6410f3187195STejun Heo } 6411f3187195STejun Heo 6412f3187195STejun Heo devres_remove_group(dev, NULL); 6413f3187195STejun Heo return host; 6414f3187195STejun Heo 6415f3187195STejun Heo err_out: 6416f3187195STejun Heo devres_release_group(dev, NULL); 6417f3187195STejun Heo return NULL; 6418f3187195STejun Heo } 6419f3187195STejun Heo 6420f3187195STejun Heo /** 6421f5cda257STejun Heo * ata_host_alloc_pinfo - alloc host and init with port_info array 6422f5cda257STejun Heo * @dev: generic device this host is associated with 6423f5cda257STejun Heo * @ppi: array of ATA port_info to initialize host with 6424f5cda257STejun Heo * @n_ports: number of ATA ports attached to this host 6425f5cda257STejun Heo * 6426f5cda257STejun Heo * Allocate ATA host and initialize with info from @ppi. If NULL 6427f5cda257STejun Heo * terminated, @ppi may contain fewer entries than @n_ports. The 6428f5cda257STejun Heo * last entry will be used for the remaining ports. 6429f5cda257STejun Heo * 6430f5cda257STejun Heo * RETURNS: 6431f5cda257STejun Heo * Allocate ATA host on success, NULL on failure. 6432f5cda257STejun Heo * 6433f5cda257STejun Heo * LOCKING: 6434f5cda257STejun Heo * Inherited from calling layer (may sleep). 
6435f5cda257STejun Heo */ 6436f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev, 6437f5cda257STejun Heo const struct ata_port_info * const * ppi, 6438f5cda257STejun Heo int n_ports) 6439f5cda257STejun Heo { 6440f5cda257STejun Heo const struct ata_port_info *pi; 6441f5cda257STejun Heo struct ata_host *host; 6442f5cda257STejun Heo int i, j; 6443f5cda257STejun Heo 6444f5cda257STejun Heo host = ata_host_alloc(dev, n_ports); 6445f5cda257STejun Heo if (!host) 6446f5cda257STejun Heo return NULL; 6447f5cda257STejun Heo 6448f5cda257STejun Heo for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 6449f5cda257STejun Heo struct ata_port *ap = host->ports[i]; 6450f5cda257STejun Heo 6451f5cda257STejun Heo if (ppi[j]) 6452f5cda257STejun Heo pi = ppi[j++]; 6453f5cda257STejun Heo 6454f5cda257STejun Heo ap->pio_mask = pi->pio_mask; 6455f5cda257STejun Heo ap->mwdma_mask = pi->mwdma_mask; 6456f5cda257STejun Heo ap->udma_mask = pi->udma_mask; 6457f5cda257STejun Heo ap->flags |= pi->flags; 64580c88758bSTejun Heo ap->link.flags |= pi->link_flags; 6459f5cda257STejun Heo ap->ops = pi->port_ops; 6460f5cda257STejun Heo 6461f5cda257STejun Heo if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 6462f5cda257STejun Heo host->ops = pi->port_ops; 6463f5cda257STejun Heo if (!host->private_data && pi->private_data) 6464f5cda257STejun Heo host->private_data = pi->private_data; 6465f5cda257STejun Heo } 6466f5cda257STejun Heo 6467f5cda257STejun Heo return host; 6468f5cda257STejun Heo } 6469f5cda257STejun Heo 6470f5cda257STejun Heo /** 6471ecef7253STejun Heo * ata_host_start - start and freeze ports of an ATA host 6472ecef7253STejun Heo * @host: ATA host to start ports for 6473ecef7253STejun Heo * 6474ecef7253STejun Heo * Start and then freeze ports of @host. Started status is 6475ecef7253STejun Heo * recorded in host->flags, so this function can be called 6476ecef7253STejun Heo * multiple times. Ports are guaranteed to get started only 6477f3187195STejun Heo * once. 
If host->ops isn't initialized yet, its set to the 6478f3187195STejun Heo * first non-dummy port ops. 6479ecef7253STejun Heo * 6480ecef7253STejun Heo * LOCKING: 6481ecef7253STejun Heo * Inherited from calling layer (may sleep). 6482ecef7253STejun Heo * 6483ecef7253STejun Heo * RETURNS: 6484ecef7253STejun Heo * 0 if all ports are started successfully, -errno otherwise. 6485ecef7253STejun Heo */ 6486ecef7253STejun Heo int ata_host_start(struct ata_host *host) 6487ecef7253STejun Heo { 6488ecef7253STejun Heo int i, rc; 6489ecef7253STejun Heo 6490ecef7253STejun Heo if (host->flags & ATA_HOST_STARTED) 6491ecef7253STejun Heo return 0; 6492ecef7253STejun Heo 6493ecef7253STejun Heo for (i = 0; i < host->n_ports; i++) { 6494ecef7253STejun Heo struct ata_port *ap = host->ports[i]; 6495ecef7253STejun Heo 6496f3187195STejun Heo if (!host->ops && !ata_port_is_dummy(ap)) 6497f3187195STejun Heo host->ops = ap->ops; 6498f3187195STejun Heo 6499ecef7253STejun Heo if (ap->ops->port_start) { 6500ecef7253STejun Heo rc = ap->ops->port_start(ap); 6501ecef7253STejun Heo if (rc) { 6502ecef7253STejun Heo ata_port_printk(ap, KERN_ERR, "failed to " 6503ecef7253STejun Heo "start port (errno=%d)\n", rc); 6504ecef7253STejun Heo goto err_out; 6505ecef7253STejun Heo } 6506ecef7253STejun Heo } 6507ecef7253STejun Heo 6508ecef7253STejun Heo ata_eh_freeze_port(ap); 6509ecef7253STejun Heo } 6510ecef7253STejun Heo 6511ecef7253STejun Heo host->flags |= ATA_HOST_STARTED; 6512ecef7253STejun Heo return 0; 6513ecef7253STejun Heo 6514ecef7253STejun Heo err_out: 6515ecef7253STejun Heo while (--i >= 0) { 6516ecef7253STejun Heo struct ata_port *ap = host->ports[i]; 6517ecef7253STejun Heo 6518ecef7253STejun Heo if (ap->ops->port_stop) 6519ecef7253STejun Heo ap->ops->port_stop(ap); 6520ecef7253STejun Heo } 6521ecef7253STejun Heo return rc; 6522ecef7253STejun Heo } 6523ecef7253STejun Heo 6524ecef7253STejun Heo /** 6525cca3974eSJeff Garzik * ata_sas_host_init - Initialize a host struct 6526cca3974eSJeff Garzik * 
@host: host to initialize 6527cca3974eSJeff Garzik * @dev: device host is attached to 6528cca3974eSJeff Garzik * @flags: host flags 6529c6fd2807SJeff Garzik * @ops: port_ops 6530c6fd2807SJeff Garzik * 6531c6fd2807SJeff Garzik * LOCKING: 6532c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 6533c6fd2807SJeff Garzik * 6534c6fd2807SJeff Garzik */ 6535f3187195STejun Heo /* KILLME - the only user left is ipr */ 6536cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev, 6537cca3974eSJeff Garzik unsigned long flags, const struct ata_port_operations *ops) 6538c6fd2807SJeff Garzik { 6539cca3974eSJeff Garzik spin_lock_init(&host->lock); 6540cca3974eSJeff Garzik host->dev = dev; 6541cca3974eSJeff Garzik host->flags = flags; 6542cca3974eSJeff Garzik host->ops = ops; 6543c6fd2807SJeff Garzik } 6544c6fd2807SJeff Garzik 6545c6fd2807SJeff Garzik /** 6546f3187195STejun Heo * ata_host_register - register initialized ATA host 6547f3187195STejun Heo * @host: ATA host to register 6548f3187195STejun Heo * @sht: template for SCSI host 6549c6fd2807SJeff Garzik * 6550f3187195STejun Heo * Register initialized ATA host. @host is allocated using 6551f3187195STejun Heo * ata_host_alloc() and fully initialized by LLD. This function 6552f3187195STejun Heo * starts ports, registers @host with ATA and SCSI layers and 6553f3187195STejun Heo * probe registered devices. 6554c6fd2807SJeff Garzik * 6555c6fd2807SJeff Garzik * LOCKING: 6556f3187195STejun Heo * Inherited from calling layer (may sleep). 6557c6fd2807SJeff Garzik * 6558c6fd2807SJeff Garzik * RETURNS: 6559f3187195STejun Heo * 0 on success, -errno otherwise. 
6560c6fd2807SJeff Garzik */ 6561f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6562c6fd2807SJeff Garzik { 6563f3187195STejun Heo int i, rc; 6564c6fd2807SJeff Garzik 6565f3187195STejun Heo /* host must have been started */ 6566f3187195STejun Heo if (!(host->flags & ATA_HOST_STARTED)) { 6567f3187195STejun Heo dev_printk(KERN_ERR, host->dev, 6568f3187195STejun Heo "BUG: trying to register unstarted host\n"); 6569f3187195STejun Heo WARN_ON(1); 6570f3187195STejun Heo return -EINVAL; 657102f076aaSAlan Cox } 6572f0d36efdSTejun Heo 6573f3187195STejun Heo /* Blow away unused ports. This happens when LLD can't 6574f3187195STejun Heo * determine the exact number of ports to allocate at 6575f3187195STejun Heo * allocation time. 6576f3187195STejun Heo */ 6577f3187195STejun Heo for (i = host->n_ports; host->ports[i]; i++) 6578f3187195STejun Heo kfree(host->ports[i]); 6579f0d36efdSTejun Heo 6580f3187195STejun Heo /* give ports names and add SCSI hosts */ 6581f3187195STejun Heo for (i = 0; i < host->n_ports; i++) 6582f3187195STejun Heo host->ports[i]->print_id = ata_print_id++; 6583c6fd2807SJeff Garzik 6584f3187195STejun Heo rc = ata_scsi_add_hosts(host, sht); 6585ecef7253STejun Heo if (rc) 6586f3187195STejun Heo return rc; 6587ecef7253STejun Heo 6588fafbae87STejun Heo /* associate with ACPI nodes */ 6589fafbae87STejun Heo ata_acpi_associate(host); 6590fafbae87STejun Heo 6591f3187195STejun Heo /* set cable, sata_spd_limit and report */ 6592cca3974eSJeff Garzik for (i = 0; i < host->n_ports; i++) { 6593cca3974eSJeff Garzik struct ata_port *ap = host->ports[i]; 6594f3187195STejun Heo unsigned long xfer_mask; 6595f3187195STejun Heo 6596f3187195STejun Heo /* set SATA cable type if still unset */ 6597f3187195STejun Heo if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6598f3187195STejun Heo ap->cbl = ATA_CBL_SATA; 6599c6fd2807SJeff Garzik 6600c6fd2807SJeff Garzik /* init sata_spd_limit to the current value */ 66014fb37a25STejun 
Heo sata_link_init_spd(&ap->link); 6602c6fd2807SJeff Garzik 6603cbcdd875STejun Heo /* print per-port info to dmesg */ 6604f3187195STejun Heo xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6605f3187195STejun Heo ap->udma_mask); 6606f3187195STejun Heo 6607f3187195STejun Heo if (!ata_port_is_dummy(ap)) 6608cbcdd875STejun Heo ata_port_printk(ap, KERN_INFO, 6609cbcdd875STejun Heo "%cATA max %s %s\n", 6610a16abc0bSTejun Heo (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', 6611f3187195STejun Heo ata_mode_string(xfer_mask), 6612cbcdd875STejun Heo ap->link.eh_info.desc); 6613f3187195STejun Heo else 6614f3187195STejun Heo ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 6615c6fd2807SJeff Garzik } 6616c6fd2807SJeff Garzik 6617f3187195STejun Heo /* perform each probe synchronously */ 6618f3187195STejun Heo DPRINTK("probe begin\n"); 6619f3187195STejun Heo for (i = 0; i < host->n_ports; i++) { 6620f3187195STejun Heo struct ata_port *ap = host->ports[i]; 6621f3187195STejun Heo int rc; 6622f3187195STejun Heo 6623f3187195STejun Heo /* probe */ 6624c6fd2807SJeff Garzik if (ap->ops->error_handler) { 66259af5c9c9STejun Heo struct ata_eh_info *ehi = &ap->link.eh_info; 6626c6fd2807SJeff Garzik unsigned long flags; 6627c6fd2807SJeff Garzik 6628c6fd2807SJeff Garzik ata_port_probe(ap); 6629c6fd2807SJeff Garzik 6630c6fd2807SJeff Garzik /* kick EH for boot probing */ 6631c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6632c6fd2807SJeff Garzik 6633f58229f8STejun Heo ehi->probe_mask = 6634f58229f8STejun Heo (1 << ata_link_max_devices(&ap->link)) - 1; 6635c6fd2807SJeff Garzik ehi->action |= ATA_EH_SOFTRESET; 6636c6fd2807SJeff Garzik ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6637c6fd2807SJeff Garzik 6638f4d6d004STejun Heo ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6639c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_LOADING; 6640c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 6641c6fd2807SJeff Garzik 6642c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 6643c6fd2807SJeff 
Garzik 6644c6fd2807SJeff Garzik /* wait for EH to finish */ 6645c6fd2807SJeff Garzik ata_port_wait_eh(ap); 6646c6fd2807SJeff Garzik } else { 664744877b4eSTejun Heo DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6648c6fd2807SJeff Garzik rc = ata_bus_probe(ap); 664944877b4eSTejun Heo DPRINTK("ata%u: bus probe end\n", ap->print_id); 6650c6fd2807SJeff Garzik 6651c6fd2807SJeff Garzik if (rc) { 6652c6fd2807SJeff Garzik /* FIXME: do something useful here? 6653c6fd2807SJeff Garzik * Current libata behavior will 6654c6fd2807SJeff Garzik * tear down everything when 6655c6fd2807SJeff Garzik * the module is removed 6656c6fd2807SJeff Garzik * or the h/w is unplugged. 6657c6fd2807SJeff Garzik */ 6658c6fd2807SJeff Garzik } 6659c6fd2807SJeff Garzik } 6660c6fd2807SJeff Garzik } 6661c6fd2807SJeff Garzik 6662c6fd2807SJeff Garzik /* probes are done, now scan each port's disk(s) */ 6663c6fd2807SJeff Garzik DPRINTK("host probe begin\n"); 6664cca3974eSJeff Garzik for (i = 0; i < host->n_ports; i++) { 6665cca3974eSJeff Garzik struct ata_port *ap = host->ports[i]; 6666c6fd2807SJeff Garzik 66671ae46317STejun Heo ata_scsi_scan_host(ap, 1); 6668c6fd2807SJeff Garzik } 6669c6fd2807SJeff Garzik 6670f3187195STejun Heo return 0; 6671f3187195STejun Heo } 6672f3187195STejun Heo 6673f3187195STejun Heo /** 6674f5cda257STejun Heo * ata_host_activate - start host, request IRQ and register it 6675f5cda257STejun Heo * @host: target ATA host 6676f5cda257STejun Heo * @irq: IRQ to request 6677f5cda257STejun Heo * @irq_handler: irq_handler used when requesting IRQ 6678f5cda257STejun Heo * @irq_flags: irq_flags used when requesting IRQ 6679f5cda257STejun Heo * @sht: scsi_host_template to use when registering the host 6680f5cda257STejun Heo * 6681f5cda257STejun Heo * After allocating an ATA host and initializing it, most libata 6682f5cda257STejun Heo * LLDs perform three steps to activate the host - start host, 6683f5cda257STejun Heo * request IRQ and register it. 
This helper takes necessasry 6684f5cda257STejun Heo * arguments and performs the three steps in one go. 6685f5cda257STejun Heo * 6686f5cda257STejun Heo * LOCKING: 6687f5cda257STejun Heo * Inherited from calling layer (may sleep). 6688f5cda257STejun Heo * 6689f5cda257STejun Heo * RETURNS: 6690f5cda257STejun Heo * 0 on success, -errno otherwise. 6691f5cda257STejun Heo */ 6692f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq, 6693f5cda257STejun Heo irq_handler_t irq_handler, unsigned long irq_flags, 6694f5cda257STejun Heo struct scsi_host_template *sht) 6695f5cda257STejun Heo { 6696cbcdd875STejun Heo int i, rc; 6697f5cda257STejun Heo 6698f5cda257STejun Heo rc = ata_host_start(host); 6699f5cda257STejun Heo if (rc) 6700f5cda257STejun Heo return rc; 6701f5cda257STejun Heo 6702f5cda257STejun Heo rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6703f5cda257STejun Heo dev_driver_string(host->dev), host); 6704f5cda257STejun Heo if (rc) 6705f5cda257STejun Heo return rc; 6706f5cda257STejun Heo 6707cbcdd875STejun Heo for (i = 0; i < host->n_ports; i++) 6708cbcdd875STejun Heo ata_port_desc(host->ports[i], "irq %d", irq); 67094031826bSTejun Heo 6710f5cda257STejun Heo rc = ata_host_register(host, sht); 6711f5cda257STejun Heo /* if failed, just free the IRQ and leave ports alone */ 6712f5cda257STejun Heo if (rc) 6713f5cda257STejun Heo devm_free_irq(host->dev, irq, host); 6714f5cda257STejun Heo 6715f5cda257STejun Heo return rc; 6716f5cda257STejun Heo } 6717f5cda257STejun Heo 6718f5cda257STejun Heo /** 6719c6fd2807SJeff Garzik * ata_port_detach - Detach ATA port in prepration of device removal 6720c6fd2807SJeff Garzik * @ap: ATA port to be detached 6721c6fd2807SJeff Garzik * 6722c6fd2807SJeff Garzik * Detach all ATA devices and the associated SCSI devices of @ap; 6723c6fd2807SJeff Garzik * then, remove the associated SCSI host. @ap is guaranteed to 6724c6fd2807SJeff Garzik * be quiescent on return from this function. 
6725c6fd2807SJeff Garzik * 6726c6fd2807SJeff Garzik * LOCKING: 6727c6fd2807SJeff Garzik * Kernel thread context (may sleep). 6728c6fd2807SJeff Garzik */ 6729c6fd2807SJeff Garzik void ata_port_detach(struct ata_port *ap) 6730c6fd2807SJeff Garzik { 6731c6fd2807SJeff Garzik unsigned long flags; 673241bda9c9STejun Heo struct ata_link *link; 6733f58229f8STejun Heo struct ata_device *dev; 6734c6fd2807SJeff Garzik 6735c6fd2807SJeff Garzik if (!ap->ops->error_handler) 6736c6fd2807SJeff Garzik goto skip_eh; 6737c6fd2807SJeff Garzik 6738c6fd2807SJeff Garzik /* tell EH we're leaving & flush EH */ 6739c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6740c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_UNLOADING; 6741c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 6742c6fd2807SJeff Garzik 6743c6fd2807SJeff Garzik ata_port_wait_eh(ap); 6744c6fd2807SJeff Garzik 6745c6fd2807SJeff Garzik /* EH is now guaranteed to see UNLOADING, so no new device 6746c6fd2807SJeff Garzik * will be attached. Disable all existing devices. 6747c6fd2807SJeff Garzik */ 6748c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6749c6fd2807SJeff Garzik 675041bda9c9STejun Heo ata_port_for_each_link(link, ap) { 675141bda9c9STejun Heo ata_link_for_each_dev(dev, link) 6752f58229f8STejun Heo ata_dev_disable(dev); 675341bda9c9STejun Heo } 6754c6fd2807SJeff Garzik 6755c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 6756c6fd2807SJeff Garzik 6757c6fd2807SJeff Garzik /* Final freeze & EH. All in-flight commands are aborted. EH 6758c6fd2807SJeff Garzik * will be skipped and retrials will be terminated with bad 6759c6fd2807SJeff Garzik * target. 
6760c6fd2807SJeff Garzik */ 6761c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6762c6fd2807SJeff Garzik ata_port_freeze(ap); /* won't be thawed */ 6763c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 6764c6fd2807SJeff Garzik 6765c6fd2807SJeff Garzik ata_port_wait_eh(ap); 676645a66c1cSOleg Nesterov cancel_rearming_delayed_work(&ap->hotplug_task); 6767c6fd2807SJeff Garzik 6768c6fd2807SJeff Garzik skip_eh: 6769c6fd2807SJeff Garzik /* remove the associated SCSI host */ 6770cca3974eSJeff Garzik scsi_remove_host(ap->scsi_host); 6771c6fd2807SJeff Garzik } 6772c6fd2807SJeff Garzik 6773c6fd2807SJeff Garzik /** 67740529c159STejun Heo * ata_host_detach - Detach all ports of an ATA host 67750529c159STejun Heo * @host: Host to detach 67760529c159STejun Heo * 67770529c159STejun Heo * Detach all ports of @host. 67780529c159STejun Heo * 67790529c159STejun Heo * LOCKING: 67800529c159STejun Heo * Kernel thread context (may sleep). 67810529c159STejun Heo */ 67820529c159STejun Heo void ata_host_detach(struct ata_host *host) 67830529c159STejun Heo { 67840529c159STejun Heo int i; 67850529c159STejun Heo 67860529c159STejun Heo for (i = 0; i < host->n_ports; i++) 67870529c159STejun Heo ata_port_detach(host->ports[i]); 67880529c159STejun Heo } 67890529c159STejun Heo 6790c6fd2807SJeff Garzik /** 6791c6fd2807SJeff Garzik * ata_std_ports - initialize ioaddr with standard port offsets. 6792c6fd2807SJeff Garzik * @ioaddr: IO address structure to be initialized 6793c6fd2807SJeff Garzik * 6794c6fd2807SJeff Garzik * Utility function which initializes data_addr, error_addr, 6795c6fd2807SJeff Garzik * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, 6796c6fd2807SJeff Garzik * device_addr, status_addr, and command_addr to standard offsets 6797c6fd2807SJeff Garzik * relative to cmd_addr. 6798c6fd2807SJeff Garzik * 6799c6fd2807SJeff Garzik * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. 
6800c6fd2807SJeff Garzik */ 6801c6fd2807SJeff Garzik 6802c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr) 6803c6fd2807SJeff Garzik { 6804c6fd2807SJeff Garzik ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; 6805c6fd2807SJeff Garzik ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; 6806c6fd2807SJeff Garzik ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; 6807c6fd2807SJeff Garzik ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; 6808c6fd2807SJeff Garzik ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; 6809c6fd2807SJeff Garzik ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; 6810c6fd2807SJeff Garzik ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; 6811c6fd2807SJeff Garzik ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; 6812c6fd2807SJeff Garzik ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 6813c6fd2807SJeff Garzik ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 6814c6fd2807SJeff Garzik } 6815c6fd2807SJeff Garzik 6816c6fd2807SJeff Garzik 6817c6fd2807SJeff Garzik #ifdef CONFIG_PCI 6818c6fd2807SJeff Garzik 6819c6fd2807SJeff Garzik /** 6820c6fd2807SJeff Garzik * ata_pci_remove_one - PCI layer callback for device removal 6821c6fd2807SJeff Garzik * @pdev: PCI device that was removed 6822c6fd2807SJeff Garzik * 6823b878ca5dSTejun Heo * PCI layer indicates to libata via this hook that hot-unplug or 6824b878ca5dSTejun Heo * module unload event has occurred. Detach all ports. Resource 6825b878ca5dSTejun Heo * release is handled via devres. 6826c6fd2807SJeff Garzik * 6827c6fd2807SJeff Garzik * LOCKING: 6828c6fd2807SJeff Garzik * Inherited from PCI layer (may sleep). 
6829c6fd2807SJeff Garzik */ 6830c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev) 6831c6fd2807SJeff Garzik { 6832c6fd2807SJeff Garzik struct device *dev = pci_dev_to_dev(pdev); 6833cca3974eSJeff Garzik struct ata_host *host = dev_get_drvdata(dev); 6834c6fd2807SJeff Garzik 6835f0d36efdSTejun Heo ata_host_detach(host); 6836c6fd2807SJeff Garzik } 6837c6fd2807SJeff Garzik 6838c6fd2807SJeff Garzik /* move to PCI subsystem */ 6839c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6840c6fd2807SJeff Garzik { 6841c6fd2807SJeff Garzik unsigned long tmp = 0; 6842c6fd2807SJeff Garzik 6843c6fd2807SJeff Garzik switch (bits->width) { 6844c6fd2807SJeff Garzik case 1: { 6845c6fd2807SJeff Garzik u8 tmp8 = 0; 6846c6fd2807SJeff Garzik pci_read_config_byte(pdev, bits->reg, &tmp8); 6847c6fd2807SJeff Garzik tmp = tmp8; 6848c6fd2807SJeff Garzik break; 6849c6fd2807SJeff Garzik } 6850c6fd2807SJeff Garzik case 2: { 6851c6fd2807SJeff Garzik u16 tmp16 = 0; 6852c6fd2807SJeff Garzik pci_read_config_word(pdev, bits->reg, &tmp16); 6853c6fd2807SJeff Garzik tmp = tmp16; 6854c6fd2807SJeff Garzik break; 6855c6fd2807SJeff Garzik } 6856c6fd2807SJeff Garzik case 4: { 6857c6fd2807SJeff Garzik u32 tmp32 = 0; 6858c6fd2807SJeff Garzik pci_read_config_dword(pdev, bits->reg, &tmp32); 6859c6fd2807SJeff Garzik tmp = tmp32; 6860c6fd2807SJeff Garzik break; 6861c6fd2807SJeff Garzik } 6862c6fd2807SJeff Garzik 6863c6fd2807SJeff Garzik default: 6864c6fd2807SJeff Garzik return -EINVAL; 6865c6fd2807SJeff Garzik } 6866c6fd2807SJeff Garzik 6867c6fd2807SJeff Garzik tmp &= bits->mask; 6868c6fd2807SJeff Garzik 6869c6fd2807SJeff Garzik return (tmp == bits->val) ? 
1 : 0; 6870c6fd2807SJeff Garzik } 6871c6fd2807SJeff Garzik 68726ffa01d8STejun Heo #ifdef CONFIG_PM 6873c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6874c6fd2807SJeff Garzik { 6875c6fd2807SJeff Garzik pci_save_state(pdev); 6876c6fd2807SJeff Garzik pci_disable_device(pdev); 68774c90d971STejun Heo 68784c90d971STejun Heo if (mesg.event == PM_EVENT_SUSPEND) 6879c6fd2807SJeff Garzik pci_set_power_state(pdev, PCI_D3hot); 6880c6fd2807SJeff Garzik } 6881c6fd2807SJeff Garzik 6882553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev) 6883c6fd2807SJeff Garzik { 6884553c4aa6STejun Heo int rc; 6885553c4aa6STejun Heo 6886c6fd2807SJeff Garzik pci_set_power_state(pdev, PCI_D0); 6887c6fd2807SJeff Garzik pci_restore_state(pdev); 6888553c4aa6STejun Heo 6889f0d36efdSTejun Heo rc = pcim_enable_device(pdev); 6890553c4aa6STejun Heo if (rc) { 6891553c4aa6STejun Heo dev_printk(KERN_ERR, &pdev->dev, 6892553c4aa6STejun Heo "failed to enable device after resume (%d)\n", rc); 6893553c4aa6STejun Heo return rc; 6894553c4aa6STejun Heo } 6895553c4aa6STejun Heo 6896c6fd2807SJeff Garzik pci_set_master(pdev); 6897553c4aa6STejun Heo return 0; 6898c6fd2807SJeff Garzik } 6899c6fd2807SJeff Garzik 6900c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6901c6fd2807SJeff Garzik { 6902cca3974eSJeff Garzik struct ata_host *host = dev_get_drvdata(&pdev->dev); 6903c6fd2807SJeff Garzik int rc = 0; 6904c6fd2807SJeff Garzik 6905cca3974eSJeff Garzik rc = ata_host_suspend(host, mesg); 6906c6fd2807SJeff Garzik if (rc) 6907c6fd2807SJeff Garzik return rc; 6908c6fd2807SJeff Garzik 6909c6fd2807SJeff Garzik ata_pci_device_do_suspend(pdev, mesg); 6910c6fd2807SJeff Garzik 6911c6fd2807SJeff Garzik return 0; 6912c6fd2807SJeff Garzik } 6913c6fd2807SJeff Garzik 6914c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev) 6915c6fd2807SJeff Garzik { 6916cca3974eSJeff Garzik struct ata_host *host = 
dev_get_drvdata(&pdev->dev); 6917553c4aa6STejun Heo int rc; 6918c6fd2807SJeff Garzik 6919553c4aa6STejun Heo rc = ata_pci_device_do_resume(pdev); 6920553c4aa6STejun Heo if (rc == 0) 6921cca3974eSJeff Garzik ata_host_resume(host); 6922553c4aa6STejun Heo return rc; 6923c6fd2807SJeff Garzik } 69246ffa01d8STejun Heo #endif /* CONFIG_PM */ 69256ffa01d8STejun Heo 6926c6fd2807SJeff Garzik #endif /* CONFIG_PCI */ 6927c6fd2807SJeff Garzik 6928c6fd2807SJeff Garzik 6929c6fd2807SJeff Garzik static int __init ata_init(void) 6930c6fd2807SJeff Garzik { 6931c6fd2807SJeff Garzik ata_probe_timeout *= HZ; 6932c6fd2807SJeff Garzik ata_wq = create_workqueue("ata"); 6933c6fd2807SJeff Garzik if (!ata_wq) 6934c6fd2807SJeff Garzik return -ENOMEM; 6935c6fd2807SJeff Garzik 6936c6fd2807SJeff Garzik ata_aux_wq = create_singlethread_workqueue("ata_aux"); 6937c6fd2807SJeff Garzik if (!ata_aux_wq) { 6938c6fd2807SJeff Garzik destroy_workqueue(ata_wq); 6939c6fd2807SJeff Garzik return -ENOMEM; 6940c6fd2807SJeff Garzik } 6941c6fd2807SJeff Garzik 6942c6fd2807SJeff Garzik printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6943c6fd2807SJeff Garzik return 0; 6944c6fd2807SJeff Garzik } 6945c6fd2807SJeff Garzik 6946c6fd2807SJeff Garzik static void __exit ata_exit(void) 6947c6fd2807SJeff Garzik { 6948c6fd2807SJeff Garzik destroy_workqueue(ata_wq); 6949c6fd2807SJeff Garzik destroy_workqueue(ata_aux_wq); 6950c6fd2807SJeff Garzik } 6951c6fd2807SJeff Garzik 6952a4625085SBrian King subsys_initcall(ata_init); 6953c6fd2807SJeff Garzik module_exit(ata_exit); 6954c6fd2807SJeff Garzik 6955c6fd2807SJeff Garzik static unsigned long ratelimit_time; 6956c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock); 6957c6fd2807SJeff Garzik 6958c6fd2807SJeff Garzik int ata_ratelimit(void) 6959c6fd2807SJeff Garzik { 6960c6fd2807SJeff Garzik int rc; 6961c6fd2807SJeff Garzik unsigned long flags; 6962c6fd2807SJeff Garzik 6963c6fd2807SJeff Garzik spin_lock_irqsave(&ata_ratelimit_lock, flags); 6964c6fd2807SJeff 
Garzik 6965c6fd2807SJeff Garzik if (time_after(jiffies, ratelimit_time)) { 6966c6fd2807SJeff Garzik rc = 1; 6967c6fd2807SJeff Garzik ratelimit_time = jiffies + (HZ/5); 6968c6fd2807SJeff Garzik } else 6969c6fd2807SJeff Garzik rc = 0; 6970c6fd2807SJeff Garzik 6971c6fd2807SJeff Garzik spin_unlock_irqrestore(&ata_ratelimit_lock, flags); 6972c6fd2807SJeff Garzik 6973c6fd2807SJeff Garzik return rc; 6974c6fd2807SJeff Garzik } 6975c6fd2807SJeff Garzik 6976c6fd2807SJeff Garzik /** 6977c6fd2807SJeff Garzik * ata_wait_register - wait until register value changes 6978c6fd2807SJeff Garzik * @reg: IO-mapped register 6979c6fd2807SJeff Garzik * @mask: Mask to apply to read register value 6980c6fd2807SJeff Garzik * @val: Wait condition 6981c6fd2807SJeff Garzik * @interval_msec: polling interval in milliseconds 6982c6fd2807SJeff Garzik * @timeout_msec: timeout in milliseconds 6983c6fd2807SJeff Garzik * 6984c6fd2807SJeff Garzik * Waiting for some bits of register to change is a common 6985c6fd2807SJeff Garzik * operation for ATA controllers. This function reads 32bit LE 6986c6fd2807SJeff Garzik * IO-mapped register @reg and tests for the following condition. 6987c6fd2807SJeff Garzik * 6988c6fd2807SJeff Garzik * (*@reg & mask) != val 6989c6fd2807SJeff Garzik * 6990c6fd2807SJeff Garzik * If the condition is met, it returns; otherwise, the process is 6991c6fd2807SJeff Garzik * repeated after @interval_msec until timeout. 6992c6fd2807SJeff Garzik * 6993c6fd2807SJeff Garzik * LOCKING: 6994c6fd2807SJeff Garzik * Kernel thread context (may sleep) 6995c6fd2807SJeff Garzik * 6996c6fd2807SJeff Garzik * RETURNS: 6997c6fd2807SJeff Garzik * The final register value. 
6998c6fd2807SJeff Garzik */ 6999c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 7000c6fd2807SJeff Garzik unsigned long interval_msec, 7001c6fd2807SJeff Garzik unsigned long timeout_msec) 7002c6fd2807SJeff Garzik { 7003c6fd2807SJeff Garzik unsigned long timeout; 7004c6fd2807SJeff Garzik u32 tmp; 7005c6fd2807SJeff Garzik 7006c6fd2807SJeff Garzik tmp = ioread32(reg); 7007c6fd2807SJeff Garzik 7008c6fd2807SJeff Garzik /* Calculate timeout _after_ the first read to make sure 7009c6fd2807SJeff Garzik * preceding writes reach the controller before starting to 7010c6fd2807SJeff Garzik * eat away the timeout. 7011c6fd2807SJeff Garzik */ 7012c6fd2807SJeff Garzik timeout = jiffies + (timeout_msec * HZ) / 1000; 7013c6fd2807SJeff Garzik 7014c6fd2807SJeff Garzik while ((tmp & mask) == val && time_before(jiffies, timeout)) { 7015c6fd2807SJeff Garzik msleep(interval_msec); 7016c6fd2807SJeff Garzik tmp = ioread32(reg); 7017c6fd2807SJeff Garzik } 7018c6fd2807SJeff Garzik 7019c6fd2807SJeff Garzik return tmp; 7020c6fd2807SJeff Garzik } 7021c6fd2807SJeff Garzik 7022c6fd2807SJeff Garzik /* 7023c6fd2807SJeff Garzik * Dummy port_ops 7024c6fd2807SJeff Garzik */ 7025c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap) { } 7026c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap) { return 0; } 7027c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { } 7028c6fd2807SJeff Garzik 7029c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap) 7030c6fd2807SJeff Garzik { 7031c6fd2807SJeff Garzik return ATA_DRDY; 7032c6fd2807SJeff Garzik } 7033c6fd2807SJeff Garzik 7034c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 7035c6fd2807SJeff Garzik { 7036c6fd2807SJeff Garzik return AC_ERR_SYSTEM; 7037c6fd2807SJeff Garzik } 7038c6fd2807SJeff Garzik 7039c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = { 7040c6fd2807SJeff Garzik .check_status 
= ata_dummy_check_status, 7041c6fd2807SJeff Garzik .check_altstatus = ata_dummy_check_status, 7042c6fd2807SJeff Garzik .dev_select = ata_noop_dev_select, 7043c6fd2807SJeff Garzik .qc_prep = ata_noop_qc_prep, 7044c6fd2807SJeff Garzik .qc_issue = ata_dummy_qc_issue, 7045c6fd2807SJeff Garzik .freeze = ata_dummy_noret, 7046c6fd2807SJeff Garzik .thaw = ata_dummy_noret, 7047c6fd2807SJeff Garzik .error_handler = ata_dummy_noret, 7048c6fd2807SJeff Garzik .post_internal_cmd = ata_dummy_qc_noret, 7049c6fd2807SJeff Garzik .irq_clear = ata_dummy_noret, 7050c6fd2807SJeff Garzik .port_start = ata_dummy_ret0, 7051c6fd2807SJeff Garzik .port_stop = ata_dummy_noret, 7052c6fd2807SJeff Garzik }; 7053c6fd2807SJeff Garzik 705421b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = { 705521b0ad4fSTejun Heo .port_ops = &ata_dummy_port_ops, 705621b0ad4fSTejun Heo }; 705721b0ad4fSTejun Heo 7058c6fd2807SJeff Garzik /* 7059c6fd2807SJeff Garzik * libata is essentially a library of internal helper functions for 7060c6fd2807SJeff Garzik * low-level ATA host controller drivers. As such, the API/ABI is 7061c6fd2807SJeff Garzik * likely to change as new drivers are added and updated. 7062c6fd2807SJeff Garzik * Do not depend on ABI/API stability. 
7063c6fd2807SJeff Garzik */ 7064c6fd2807SJeff Garzik 7065c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 7066c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 7067c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long); 7068c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 706921b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info); 7070c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param); 7071c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports); 7072cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init); 7073f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc); 7074f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 7075ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start); 7076f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register); 7077f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate); 70780529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach); 7079c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init); 7080c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one); 7081c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move); 7082c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete); 7083c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7084c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 7085c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load); 7086c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read); 7087c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select); 7088c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select); 708943727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status); 7090c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7091c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7092c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status); 7093c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus); 7094c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command); 7095c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start); 7096d92e74d3SAlan Cox 
EXPORT_SYMBOL_GPL(ata_sff_port_start); 7097c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt); 709804351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode); 70990d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer); 71000d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); 7101c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep); 7102d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); 7103c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7104c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup); 7105c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start); 7106c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 7107c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status); 7108c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop); 7109c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze); 7110c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw); 7111c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh); 7112c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); 7113c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 7114c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe); 711510305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable); 7116c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd); 7117936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce); 7118936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume); 7119c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset); 7120c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset); 7121c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset); 7122c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset); 7123c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset); 7124cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset); 7125c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset); 7126c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset); 7127c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify); 7128c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair); 
7129c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable); 7130c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit); 7131c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register); 7132c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep); 7133d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready); 7134c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task); 7135c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 7136c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7137c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7138c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 7139c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 7140c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr); 7141c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid); 7142c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read); 7143c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write); 7144c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7145936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online); 7146936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline); 71476ffa01d8STejun Heo #ifdef CONFIG_PM 7148cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend); 7149cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume); 71506ffa01d8STejun Heo #endif /* CONFIG_PM */ 7151c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string); 7152c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string); 715310305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode); 7154c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7155c6fd2807SJeff Garzik 7156c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7157c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute); 7158c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge); 7159c6fd2807SJeff Garzik 7160c6fd2807SJeff Garzik #ifdef CONFIG_PCI 7161c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits); 7162d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host); 
71631626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma); 7164d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host); 7165c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one); 7166c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one); 71676ffa01d8STejun Heo #ifdef CONFIG_PM 7168c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 7169c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 7170c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 7171c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume); 71726ffa01d8STejun Heo #endif /* CONFIG_PM */ 7173c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter); 7174c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 7175c6fd2807SJeff Garzik #endif /* CONFIG_PCI */ 7176c6fd2807SJeff Garzik 7177b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7178b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7179b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7180cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc); 7181cbcdd875STejun Heo #ifdef CONFIG_PCI 7182cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 7183cbcdd875STejun Heo #endif /* CONFIG_PCI */ 7184c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout); 7185c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7186dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort); 7187c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort); 7188c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze); 7189c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7190c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7191c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7192c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7193c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh); 719483625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on); 7195a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify); 7196be0d18dfSAlan Cox 7197be0d18dfSAlan Cox 
EXPORT_SYMBOL_GPL(ata_cable_40wire); 7198be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire); 7199be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown); 7200be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata); 7201