1c6fd2807SJeff Garzik /* 2c6fd2807SJeff Garzik * libata-core.c - helper library for ATA 3c6fd2807SJeff Garzik * 4c6fd2807SJeff Garzik * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5c6fd2807SJeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org 6c6fd2807SJeff Garzik * on emails. 7c6fd2807SJeff Garzik * 8c6fd2807SJeff Garzik * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9c6fd2807SJeff Garzik * Copyright 2003-2004 Jeff Garzik 10c6fd2807SJeff Garzik * 11c6fd2807SJeff Garzik * 12c6fd2807SJeff Garzik * This program is free software; you can redistribute it and/or modify 13c6fd2807SJeff Garzik * it under the terms of the GNU General Public License as published by 14c6fd2807SJeff Garzik * the Free Software Foundation; either version 2, or (at your option) 15c6fd2807SJeff Garzik * any later version. 16c6fd2807SJeff Garzik * 17c6fd2807SJeff Garzik * This program is distributed in the hope that it will be useful, 18c6fd2807SJeff Garzik * but WITHOUT ANY WARRANTY; without even the implied warranty of 19c6fd2807SJeff Garzik * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20c6fd2807SJeff Garzik * GNU General Public License for more details. 21c6fd2807SJeff Garzik * 22c6fd2807SJeff Garzik * You should have received a copy of the GNU General Public License 23c6fd2807SJeff Garzik * along with this program; see the file COPYING. If not, write to 24c6fd2807SJeff Garzik * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
25c6fd2807SJeff Garzik * 26c6fd2807SJeff Garzik * 27c6fd2807SJeff Garzik * libata documentation is available via 'make {ps|pdf}docs', 28c6fd2807SJeff Garzik * as Documentation/DocBook/libata.* 29c6fd2807SJeff Garzik * 30c6fd2807SJeff Garzik * Hardware documentation available from http://www.t13.org/ and 31c6fd2807SJeff Garzik * http://www.sata-io.org/ 32c6fd2807SJeff Garzik * 33c6fd2807SJeff Garzik */ 34c6fd2807SJeff Garzik 35c6fd2807SJeff Garzik #include <linux/kernel.h> 36c6fd2807SJeff Garzik #include <linux/module.h> 37c6fd2807SJeff Garzik #include <linux/pci.h> 38c6fd2807SJeff Garzik #include <linux/init.h> 39c6fd2807SJeff Garzik #include <linux/list.h> 40c6fd2807SJeff Garzik #include <linux/mm.h> 41c6fd2807SJeff Garzik #include <linux/highmem.h> 42c6fd2807SJeff Garzik #include <linux/spinlock.h> 43c6fd2807SJeff Garzik #include <linux/blkdev.h> 44c6fd2807SJeff Garzik #include <linux/delay.h> 45c6fd2807SJeff Garzik #include <linux/timer.h> 46c6fd2807SJeff Garzik #include <linux/interrupt.h> 47c6fd2807SJeff Garzik #include <linux/completion.h> 48c6fd2807SJeff Garzik #include <linux/suspend.h> 49c6fd2807SJeff Garzik #include <linux/workqueue.h> 50c6fd2807SJeff Garzik #include <linux/jiffies.h> 51c6fd2807SJeff Garzik #include <linux/scatterlist.h> 522dcb407eSJeff Garzik #include <linux/io.h> 53c6fd2807SJeff Garzik #include <scsi/scsi.h> 54c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h> 55c6fd2807SJeff Garzik #include <scsi/scsi_host.h> 56c6fd2807SJeff Garzik #include <linux/libata.h> 57c6fd2807SJeff Garzik #include <asm/semaphore.h> 58c6fd2807SJeff Garzik #include <asm/byteorder.h> 59c6fd2807SJeff Garzik 60c6fd2807SJeff Garzik #include "libata.h" 61c6fd2807SJeff Garzik 62fda0efc5SJeff Garzik 63c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */ 64c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 65c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 67c6fd2807SJeff Garzik 68c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev, 69c6fd2807SJeff Garzik u16 heads, u16 sectors); 70c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 71218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, 72218f3d30SJeff Garzik u8 enable, u8 feature); 73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev); 7475683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 75c6fd2807SJeff Garzik 76f3187195STejun Heo unsigned int ata_print_id = 1; 77c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq; 78c6fd2807SJeff Garzik 79c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq; 80c6fd2807SJeff Garzik 81c6fd2807SJeff Garzik int atapi_enabled = 1; 82c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444); 83c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 84c6fd2807SJeff Garzik 85c6fd2807SJeff Garzik int atapi_dmadir = 0; 86c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444); 87c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 88c6fd2807SJeff Garzik 89baf4fdfaSMark Lord int atapi_passthru16 = 1; 90baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444); 91baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); 92baf4fdfaSMark Lord 93c6fd2807SJeff Garzik int libata_fua = 0; 94c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444); 95c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 96c6fd2807SJeff Garzik 972dcb407eSJeff Garzik static int ata_ignore_hpa; 981e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 991e999736SAlan Cox 
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 1001e999736SAlan Cox 101b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA; 102b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444); 103b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); 104b3a70601SAlan Cox 105c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; 106c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444); 107c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); 108c6fd2807SJeff Garzik 1096ebe9d86SJeff Garzik int libata_noacpi = 0; 110d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444); 1116ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set"); 11211ef697bSKristen Carlson Accardi 113c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik"); 114c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices"); 115c6fd2807SJeff Garzik MODULE_LICENSE("GPL"); 116c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION); 117c6fd2807SJeff Garzik 118c6fd2807SJeff Garzik 119c6fd2807SJeff Garzik /** 120c6fd2807SJeff Garzik * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 121c6fd2807SJeff Garzik * @tf: Taskfile to convert 122c6fd2807SJeff Garzik * @pmp: Port multiplier port 1239977126cSTejun Heo * @is_cmd: This FIS is for command 1249977126cSTejun Heo * @fis: Buffer into which data will output 125c6fd2807SJeff Garzik * 126c6fd2807SJeff Garzik * Converts a standard ATA taskfile to a Serial ATA 127c6fd2807SJeff Garzik * FIS structure (Register - Host to Device). 128c6fd2807SJeff Garzik * 129c6fd2807SJeff Garzik * LOCKING: 130c6fd2807SJeff Garzik * Inherited from caller. 
131c6fd2807SJeff Garzik */ 1329977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 133c6fd2807SJeff Garzik { 134c6fd2807SJeff Garzik fis[0] = 0x27; /* Register - Host to Device FIS */ 1359977126cSTejun Heo fis[1] = pmp & 0xf; /* Port multiplier number*/ 1369977126cSTejun Heo if (is_cmd) 1379977126cSTejun Heo fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 1389977126cSTejun Heo 139c6fd2807SJeff Garzik fis[2] = tf->command; 140c6fd2807SJeff Garzik fis[3] = tf->feature; 141c6fd2807SJeff Garzik 142c6fd2807SJeff Garzik fis[4] = tf->lbal; 143c6fd2807SJeff Garzik fis[5] = tf->lbam; 144c6fd2807SJeff Garzik fis[6] = tf->lbah; 145c6fd2807SJeff Garzik fis[7] = tf->device; 146c6fd2807SJeff Garzik 147c6fd2807SJeff Garzik fis[8] = tf->hob_lbal; 148c6fd2807SJeff Garzik fis[9] = tf->hob_lbam; 149c6fd2807SJeff Garzik fis[10] = tf->hob_lbah; 150c6fd2807SJeff Garzik fis[11] = tf->hob_feature; 151c6fd2807SJeff Garzik 152c6fd2807SJeff Garzik fis[12] = tf->nsect; 153c6fd2807SJeff Garzik fis[13] = tf->hob_nsect; 154c6fd2807SJeff Garzik fis[14] = 0; 155c6fd2807SJeff Garzik fis[15] = tf->ctl; 156c6fd2807SJeff Garzik 157c6fd2807SJeff Garzik fis[16] = 0; 158c6fd2807SJeff Garzik fis[17] = 0; 159c6fd2807SJeff Garzik fis[18] = 0; 160c6fd2807SJeff Garzik fis[19] = 0; 161c6fd2807SJeff Garzik } 162c6fd2807SJeff Garzik 163c6fd2807SJeff Garzik /** 164c6fd2807SJeff Garzik * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 165c6fd2807SJeff Garzik * @fis: Buffer from which data will be input 166c6fd2807SJeff Garzik * @tf: Taskfile to output 167c6fd2807SJeff Garzik * 168c6fd2807SJeff Garzik * Converts a serial ATA FIS structure to a standard ATA taskfile. 169c6fd2807SJeff Garzik * 170c6fd2807SJeff Garzik * LOCKING: 171c6fd2807SJeff Garzik * Inherited from caller. 
172c6fd2807SJeff Garzik */ 173c6fd2807SJeff Garzik 174c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 175c6fd2807SJeff Garzik { 176c6fd2807SJeff Garzik tf->command = fis[2]; /* status */ 177c6fd2807SJeff Garzik tf->feature = fis[3]; /* error */ 178c6fd2807SJeff Garzik 179c6fd2807SJeff Garzik tf->lbal = fis[4]; 180c6fd2807SJeff Garzik tf->lbam = fis[5]; 181c6fd2807SJeff Garzik tf->lbah = fis[6]; 182c6fd2807SJeff Garzik tf->device = fis[7]; 183c6fd2807SJeff Garzik 184c6fd2807SJeff Garzik tf->hob_lbal = fis[8]; 185c6fd2807SJeff Garzik tf->hob_lbam = fis[9]; 186c6fd2807SJeff Garzik tf->hob_lbah = fis[10]; 187c6fd2807SJeff Garzik 188c6fd2807SJeff Garzik tf->nsect = fis[12]; 189c6fd2807SJeff Garzik tf->hob_nsect = fis[13]; 190c6fd2807SJeff Garzik } 191c6fd2807SJeff Garzik 192c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = { 193c6fd2807SJeff Garzik /* pio multi */ 194c6fd2807SJeff Garzik ATA_CMD_READ_MULTI, 195c6fd2807SJeff Garzik ATA_CMD_WRITE_MULTI, 196c6fd2807SJeff Garzik ATA_CMD_READ_MULTI_EXT, 197c6fd2807SJeff Garzik ATA_CMD_WRITE_MULTI_EXT, 198c6fd2807SJeff Garzik 0, 199c6fd2807SJeff Garzik 0, 200c6fd2807SJeff Garzik 0, 201c6fd2807SJeff Garzik ATA_CMD_WRITE_MULTI_FUA_EXT, 202c6fd2807SJeff Garzik /* pio */ 203c6fd2807SJeff Garzik ATA_CMD_PIO_READ, 204c6fd2807SJeff Garzik ATA_CMD_PIO_WRITE, 205c6fd2807SJeff Garzik ATA_CMD_PIO_READ_EXT, 206c6fd2807SJeff Garzik ATA_CMD_PIO_WRITE_EXT, 207c6fd2807SJeff Garzik 0, 208c6fd2807SJeff Garzik 0, 209c6fd2807SJeff Garzik 0, 210c6fd2807SJeff Garzik 0, 211c6fd2807SJeff Garzik /* dma */ 212c6fd2807SJeff Garzik ATA_CMD_READ, 213c6fd2807SJeff Garzik ATA_CMD_WRITE, 214c6fd2807SJeff Garzik ATA_CMD_READ_EXT, 215c6fd2807SJeff Garzik ATA_CMD_WRITE_EXT, 216c6fd2807SJeff Garzik 0, 217c6fd2807SJeff Garzik 0, 218c6fd2807SJeff Garzik 0, 219c6fd2807SJeff Garzik ATA_CMD_WRITE_FUA_EXT 220c6fd2807SJeff Garzik }; 221c6fd2807SJeff Garzik 222c6fd2807SJeff Garzik /** 223c6fd2807SJeff Garzik * ata_rwcmd_protocol - 
set taskfile r/w commands and protocol 224bd056d7eSTejun Heo * @tf: command to examine and configure 225bd056d7eSTejun Heo * @dev: device tf belongs to 226c6fd2807SJeff Garzik * 227c6fd2807SJeff Garzik * Examine the device configuration and tf->flags to calculate 228c6fd2807SJeff Garzik * the proper read/write commands and protocol to use. 229c6fd2807SJeff Garzik * 230c6fd2807SJeff Garzik * LOCKING: 231c6fd2807SJeff Garzik * caller. 232c6fd2807SJeff Garzik */ 233bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 234c6fd2807SJeff Garzik { 235c6fd2807SJeff Garzik u8 cmd; 236c6fd2807SJeff Garzik 237c6fd2807SJeff Garzik int index, fua, lba48, write; 238c6fd2807SJeff Garzik 239c6fd2807SJeff Garzik fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 240c6fd2807SJeff Garzik lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 241c6fd2807SJeff Garzik write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 242c6fd2807SJeff Garzik 243c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_PIO) { 244c6fd2807SJeff Garzik tf->protocol = ATA_PROT_PIO; 245c6fd2807SJeff Garzik index = dev->multi_count ? 0 : 8; 2469af5c9c9STejun Heo } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 247c6fd2807SJeff Garzik /* Unable to use DMA due to host limitation */ 248c6fd2807SJeff Garzik tf->protocol = ATA_PROT_PIO; 249c6fd2807SJeff Garzik index = dev->multi_count ? 
0 : 8; 250c6fd2807SJeff Garzik } else { 251c6fd2807SJeff Garzik tf->protocol = ATA_PROT_DMA; 252c6fd2807SJeff Garzik index = 16; 253c6fd2807SJeff Garzik } 254c6fd2807SJeff Garzik 255c6fd2807SJeff Garzik cmd = ata_rw_cmds[index + fua + lba48 + write]; 256c6fd2807SJeff Garzik if (cmd) { 257c6fd2807SJeff Garzik tf->command = cmd; 258c6fd2807SJeff Garzik return 0; 259c6fd2807SJeff Garzik } 260c6fd2807SJeff Garzik return -1; 261c6fd2807SJeff Garzik } 262c6fd2807SJeff Garzik 263c6fd2807SJeff Garzik /** 26435b649feSTejun Heo * ata_tf_read_block - Read block address from ATA taskfile 26535b649feSTejun Heo * @tf: ATA taskfile of interest 26635b649feSTejun Heo * @dev: ATA device @tf belongs to 26735b649feSTejun Heo * 26835b649feSTejun Heo * LOCKING: 26935b649feSTejun Heo * None. 27035b649feSTejun Heo * 27135b649feSTejun Heo * Read block address from @tf. This function can handle all 27235b649feSTejun Heo * three address formats - LBA, LBA48 and CHS. tf->protocol and 27335b649feSTejun Heo * flags select the address format to use. 27435b649feSTejun Heo * 27535b649feSTejun Heo * RETURNS: 27635b649feSTejun Heo * Block address read from @tf. 
27735b649feSTejun Heo */ 27835b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 27935b649feSTejun Heo { 28035b649feSTejun Heo u64 block = 0; 28135b649feSTejun Heo 28235b649feSTejun Heo if (tf->flags & ATA_TFLAG_LBA) { 28335b649feSTejun Heo if (tf->flags & ATA_TFLAG_LBA48) { 28435b649feSTejun Heo block |= (u64)tf->hob_lbah << 40; 28535b649feSTejun Heo block |= (u64)tf->hob_lbam << 32; 28635b649feSTejun Heo block |= tf->hob_lbal << 24; 28735b649feSTejun Heo } else 28835b649feSTejun Heo block |= (tf->device & 0xf) << 24; 28935b649feSTejun Heo 29035b649feSTejun Heo block |= tf->lbah << 16; 29135b649feSTejun Heo block |= tf->lbam << 8; 29235b649feSTejun Heo block |= tf->lbal; 29335b649feSTejun Heo } else { 29435b649feSTejun Heo u32 cyl, head, sect; 29535b649feSTejun Heo 29635b649feSTejun Heo cyl = tf->lbam | (tf->lbah << 8); 29735b649feSTejun Heo head = tf->device & 0xf; 29835b649feSTejun Heo sect = tf->lbal; 29935b649feSTejun Heo 30035b649feSTejun Heo block = (cyl * dev->heads + head) * dev->sectors + sect; 30135b649feSTejun Heo } 30235b649feSTejun Heo 30335b649feSTejun Heo return block; 30435b649feSTejun Heo } 30535b649feSTejun Heo 30635b649feSTejun Heo /** 307bd056d7eSTejun Heo * ata_build_rw_tf - Build ATA taskfile for given read/write request 308bd056d7eSTejun Heo * @tf: Target ATA taskfile 309bd056d7eSTejun Heo * @dev: ATA device @tf belongs to 310bd056d7eSTejun Heo * @block: Block address 311bd056d7eSTejun Heo * @n_block: Number of blocks 312bd056d7eSTejun Heo * @tf_flags: RW/FUA etc... 313bd056d7eSTejun Heo * @tag: tag 314bd056d7eSTejun Heo * 315bd056d7eSTejun Heo * LOCKING: 316bd056d7eSTejun Heo * None. 317bd056d7eSTejun Heo * 318bd056d7eSTejun Heo * Build ATA taskfile @tf for read/write request described by 319bd056d7eSTejun Heo * @block, @n_block, @tf_flags and @tag on @dev. 
320bd056d7eSTejun Heo * 321bd056d7eSTejun Heo * RETURNS: 322bd056d7eSTejun Heo * 323bd056d7eSTejun Heo * 0 on success, -ERANGE if the request is too large for @dev, 324bd056d7eSTejun Heo * -EINVAL if the request is invalid. 325bd056d7eSTejun Heo */ 326bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 327bd056d7eSTejun Heo u64 block, u32 n_block, unsigned int tf_flags, 328bd056d7eSTejun Heo unsigned int tag) 329bd056d7eSTejun Heo { 330bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 331bd056d7eSTejun Heo tf->flags |= tf_flags; 332bd056d7eSTejun Heo 3336d1245bfSTejun Heo if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 334bd056d7eSTejun Heo /* yay, NCQ */ 335bd056d7eSTejun Heo if (!lba_48_ok(block, n_block)) 336bd056d7eSTejun Heo return -ERANGE; 337bd056d7eSTejun Heo 338bd056d7eSTejun Heo tf->protocol = ATA_PROT_NCQ; 339bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 340bd056d7eSTejun Heo 341bd056d7eSTejun Heo if (tf->flags & ATA_TFLAG_WRITE) 342bd056d7eSTejun Heo tf->command = ATA_CMD_FPDMA_WRITE; 343bd056d7eSTejun Heo else 344bd056d7eSTejun Heo tf->command = ATA_CMD_FPDMA_READ; 345bd056d7eSTejun Heo 346bd056d7eSTejun Heo tf->nsect = tag << 3; 347bd056d7eSTejun Heo tf->hob_feature = (n_block >> 8) & 0xff; 348bd056d7eSTejun Heo tf->feature = n_block & 0xff; 349bd056d7eSTejun Heo 350bd056d7eSTejun Heo tf->hob_lbah = (block >> 40) & 0xff; 351bd056d7eSTejun Heo tf->hob_lbam = (block >> 32) & 0xff; 352bd056d7eSTejun Heo tf->hob_lbal = (block >> 24) & 0xff; 353bd056d7eSTejun Heo tf->lbah = (block >> 16) & 0xff; 354bd056d7eSTejun Heo tf->lbam = (block >> 8) & 0xff; 355bd056d7eSTejun Heo tf->lbal = block & 0xff; 356bd056d7eSTejun Heo 357bd056d7eSTejun Heo tf->device = 1 << 6; 358bd056d7eSTejun Heo if (tf->flags & ATA_TFLAG_FUA) 359bd056d7eSTejun Heo tf->device |= 1 << 7; 360bd056d7eSTejun Heo } else if (dev->flags & ATA_DFLAG_LBA) { 361bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_LBA; 
362bd056d7eSTejun Heo 363bd056d7eSTejun Heo if (lba_28_ok(block, n_block)) { 364bd056d7eSTejun Heo /* use LBA28 */ 365bd056d7eSTejun Heo tf->device |= (block >> 24) & 0xf; 366bd056d7eSTejun Heo } else if (lba_48_ok(block, n_block)) { 367bd056d7eSTejun Heo if (!(dev->flags & ATA_DFLAG_LBA48)) 368bd056d7eSTejun Heo return -ERANGE; 369bd056d7eSTejun Heo 370bd056d7eSTejun Heo /* use LBA48 */ 371bd056d7eSTejun Heo tf->flags |= ATA_TFLAG_LBA48; 372bd056d7eSTejun Heo 373bd056d7eSTejun Heo tf->hob_nsect = (n_block >> 8) & 0xff; 374bd056d7eSTejun Heo 375bd056d7eSTejun Heo tf->hob_lbah = (block >> 40) & 0xff; 376bd056d7eSTejun Heo tf->hob_lbam = (block >> 32) & 0xff; 377bd056d7eSTejun Heo tf->hob_lbal = (block >> 24) & 0xff; 378bd056d7eSTejun Heo } else 379bd056d7eSTejun Heo /* request too large even for LBA48 */ 380bd056d7eSTejun Heo return -ERANGE; 381bd056d7eSTejun Heo 382bd056d7eSTejun Heo if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 383bd056d7eSTejun Heo return -EINVAL; 384bd056d7eSTejun Heo 385bd056d7eSTejun Heo tf->nsect = n_block & 0xff; 386bd056d7eSTejun Heo 387bd056d7eSTejun Heo tf->lbah = (block >> 16) & 0xff; 388bd056d7eSTejun Heo tf->lbam = (block >> 8) & 0xff; 389bd056d7eSTejun Heo tf->lbal = block & 0xff; 390bd056d7eSTejun Heo 391bd056d7eSTejun Heo tf->device |= ATA_LBA; 392bd056d7eSTejun Heo } else { 393bd056d7eSTejun Heo /* CHS */ 394bd056d7eSTejun Heo u32 sect, head, cyl, track; 395bd056d7eSTejun Heo 396bd056d7eSTejun Heo /* The request -may- be too large for CHS addressing. 
*/ 397bd056d7eSTejun Heo if (!lba_28_ok(block, n_block)) 398bd056d7eSTejun Heo return -ERANGE; 399bd056d7eSTejun Heo 400bd056d7eSTejun Heo if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 401bd056d7eSTejun Heo return -EINVAL; 402bd056d7eSTejun Heo 403bd056d7eSTejun Heo /* Convert LBA to CHS */ 404bd056d7eSTejun Heo track = (u32)block / dev->sectors; 405bd056d7eSTejun Heo cyl = track / dev->heads; 406bd056d7eSTejun Heo head = track % dev->heads; 407bd056d7eSTejun Heo sect = (u32)block % dev->sectors + 1; 408bd056d7eSTejun Heo 409bd056d7eSTejun Heo DPRINTK("block %u track %u cyl %u head %u sect %u\n", 410bd056d7eSTejun Heo (u32)block, track, cyl, head, sect); 411bd056d7eSTejun Heo 412bd056d7eSTejun Heo /* Check whether the converted CHS can fit. 413bd056d7eSTejun Heo Cylinder: 0-65535 414bd056d7eSTejun Heo Head: 0-15 415bd056d7eSTejun Heo Sector: 1-255*/ 416bd056d7eSTejun Heo if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 417bd056d7eSTejun Heo return -ERANGE; 418bd056d7eSTejun Heo 419bd056d7eSTejun Heo tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 420bd056d7eSTejun Heo tf->lbal = sect; 421bd056d7eSTejun Heo tf->lbam = cyl; 422bd056d7eSTejun Heo tf->lbah = cyl >> 8; 423bd056d7eSTejun Heo tf->device |= head; 424bd056d7eSTejun Heo } 425bd056d7eSTejun Heo 426bd056d7eSTejun Heo return 0; 427bd056d7eSTejun Heo } 428bd056d7eSTejun Heo 429bd056d7eSTejun Heo /** 430c6fd2807SJeff Garzik * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 431c6fd2807SJeff Garzik * @pio_mask: pio_mask 432c6fd2807SJeff Garzik * @mwdma_mask: mwdma_mask 433c6fd2807SJeff Garzik * @udma_mask: udma_mask 434c6fd2807SJeff Garzik * 435c6fd2807SJeff Garzik * Pack @pio_mask, @mwdma_mask and @udma_mask into a single 436c6fd2807SJeff Garzik * unsigned int xfer_mask. 437c6fd2807SJeff Garzik * 438c6fd2807SJeff Garzik * LOCKING: 439c6fd2807SJeff Garzik * None. 440c6fd2807SJeff Garzik * 441c6fd2807SJeff Garzik * RETURNS: 442c6fd2807SJeff Garzik * Packed xfer_mask. 
443c6fd2807SJeff Garzik */ 444c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask, 445c6fd2807SJeff Garzik unsigned int mwdma_mask, 446c6fd2807SJeff Garzik unsigned int udma_mask) 447c6fd2807SJeff Garzik { 448c6fd2807SJeff Garzik return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | 449c6fd2807SJeff Garzik ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | 450c6fd2807SJeff Garzik ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); 451c6fd2807SJeff Garzik } 452c6fd2807SJeff Garzik 453c6fd2807SJeff Garzik /** 454c6fd2807SJeff Garzik * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks 455c6fd2807SJeff Garzik * @xfer_mask: xfer_mask to unpack 456c6fd2807SJeff Garzik * @pio_mask: resulting pio_mask 457c6fd2807SJeff Garzik * @mwdma_mask: resulting mwdma_mask 458c6fd2807SJeff Garzik * @udma_mask: resulting udma_mask 459c6fd2807SJeff Garzik * 460c6fd2807SJeff Garzik * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. 461c6fd2807SJeff Garzik * Any NULL distination masks will be ignored. 
462c6fd2807SJeff Garzik */ 463c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask, 464c6fd2807SJeff Garzik unsigned int *pio_mask, 465c6fd2807SJeff Garzik unsigned int *mwdma_mask, 466c6fd2807SJeff Garzik unsigned int *udma_mask) 467c6fd2807SJeff Garzik { 468c6fd2807SJeff Garzik if (pio_mask) 469c6fd2807SJeff Garzik *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; 470c6fd2807SJeff Garzik if (mwdma_mask) 471c6fd2807SJeff Garzik *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; 472c6fd2807SJeff Garzik if (udma_mask) 473c6fd2807SJeff Garzik *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; 474c6fd2807SJeff Garzik } 475c6fd2807SJeff Garzik 476c6fd2807SJeff Garzik static const struct ata_xfer_ent { 477c6fd2807SJeff Garzik int shift, bits; 478c6fd2807SJeff Garzik u8 base; 479c6fd2807SJeff Garzik } ata_xfer_tbl[] = { 480c6fd2807SJeff Garzik { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 }, 481c6fd2807SJeff Garzik { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 }, 482c6fd2807SJeff Garzik { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 }, 483c6fd2807SJeff Garzik { -1, }, 484c6fd2807SJeff Garzik }; 485c6fd2807SJeff Garzik 486c6fd2807SJeff Garzik /** 487c6fd2807SJeff Garzik * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask 488c6fd2807SJeff Garzik * @xfer_mask: xfer_mask of interest 489c6fd2807SJeff Garzik * 490c6fd2807SJeff Garzik * Return matching XFER_* value for @xfer_mask. Only the highest 491c6fd2807SJeff Garzik * bit of @xfer_mask is considered. 492c6fd2807SJeff Garzik * 493c6fd2807SJeff Garzik * LOCKING: 494c6fd2807SJeff Garzik * None. 495c6fd2807SJeff Garzik * 496c6fd2807SJeff Garzik * RETURNS: 497c6fd2807SJeff Garzik * Matching XFER_* value, 0 if no match found. 
498c6fd2807SJeff Garzik */ 499c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask) 500c6fd2807SJeff Garzik { 501c6fd2807SJeff Garzik int highbit = fls(xfer_mask) - 1; 502c6fd2807SJeff Garzik const struct ata_xfer_ent *ent; 503c6fd2807SJeff Garzik 504c6fd2807SJeff Garzik for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 505c6fd2807SJeff Garzik if (highbit >= ent->shift && highbit < ent->shift + ent->bits) 506c6fd2807SJeff Garzik return ent->base + highbit - ent->shift; 507c6fd2807SJeff Garzik return 0; 508c6fd2807SJeff Garzik } 509c6fd2807SJeff Garzik 510c6fd2807SJeff Garzik /** 511c6fd2807SJeff Garzik * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* 512c6fd2807SJeff Garzik * @xfer_mode: XFER_* of interest 513c6fd2807SJeff Garzik * 514c6fd2807SJeff Garzik * Return matching xfer_mask for @xfer_mode. 515c6fd2807SJeff Garzik * 516c6fd2807SJeff Garzik * LOCKING: 517c6fd2807SJeff Garzik * None. 518c6fd2807SJeff Garzik * 519c6fd2807SJeff Garzik * RETURNS: 520c6fd2807SJeff Garzik * Matching xfer_mask, 0 if no match found. 521c6fd2807SJeff Garzik */ 522c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode) 523c6fd2807SJeff Garzik { 524c6fd2807SJeff Garzik const struct ata_xfer_ent *ent; 525c6fd2807SJeff Garzik 526c6fd2807SJeff Garzik for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 527c6fd2807SJeff Garzik if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 528c6fd2807SJeff Garzik return 1 << (ent->shift + xfer_mode - ent->base); 529c6fd2807SJeff Garzik return 0; 530c6fd2807SJeff Garzik } 531c6fd2807SJeff Garzik 532c6fd2807SJeff Garzik /** 533c6fd2807SJeff Garzik * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* 534c6fd2807SJeff Garzik * @xfer_mode: XFER_* of interest 535c6fd2807SJeff Garzik * 536c6fd2807SJeff Garzik * Return matching xfer_shift for @xfer_mode. 537c6fd2807SJeff Garzik * 538c6fd2807SJeff Garzik * LOCKING: 539c6fd2807SJeff Garzik * None. 
540c6fd2807SJeff Garzik * 541c6fd2807SJeff Garzik * RETURNS: 542c6fd2807SJeff Garzik * Matching xfer_shift, -1 if no match found. 543c6fd2807SJeff Garzik */ 544c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode) 545c6fd2807SJeff Garzik { 546c6fd2807SJeff Garzik const struct ata_xfer_ent *ent; 547c6fd2807SJeff Garzik 548c6fd2807SJeff Garzik for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 549c6fd2807SJeff Garzik if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 550c6fd2807SJeff Garzik return ent->shift; 551c6fd2807SJeff Garzik return -1; 552c6fd2807SJeff Garzik } 553c6fd2807SJeff Garzik 554c6fd2807SJeff Garzik /** 555c6fd2807SJeff Garzik * ata_mode_string - convert xfer_mask to string 556c6fd2807SJeff Garzik * @xfer_mask: mask of bits supported; only highest bit counts. 557c6fd2807SJeff Garzik * 558c6fd2807SJeff Garzik * Determine string which represents the highest speed 559c6fd2807SJeff Garzik * (highest bit in @modemask). 560c6fd2807SJeff Garzik * 561c6fd2807SJeff Garzik * LOCKING: 562c6fd2807SJeff Garzik * None. 563c6fd2807SJeff Garzik * 564c6fd2807SJeff Garzik * RETURNS: 565c6fd2807SJeff Garzik * Constant C string representing highest speed listed in 566c6fd2807SJeff Garzik * @mode_mask, or the constant C string "<n/a>". 
567c6fd2807SJeff Garzik */ 568c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask) 569c6fd2807SJeff Garzik { 570c6fd2807SJeff Garzik static const char * const xfer_mode_str[] = { 571c6fd2807SJeff Garzik "PIO0", 572c6fd2807SJeff Garzik "PIO1", 573c6fd2807SJeff Garzik "PIO2", 574c6fd2807SJeff Garzik "PIO3", 575c6fd2807SJeff Garzik "PIO4", 576b352e57dSAlan Cox "PIO5", 577b352e57dSAlan Cox "PIO6", 578c6fd2807SJeff Garzik "MWDMA0", 579c6fd2807SJeff Garzik "MWDMA1", 580c6fd2807SJeff Garzik "MWDMA2", 581b352e57dSAlan Cox "MWDMA3", 582b352e57dSAlan Cox "MWDMA4", 583c6fd2807SJeff Garzik "UDMA/16", 584c6fd2807SJeff Garzik "UDMA/25", 585c6fd2807SJeff Garzik "UDMA/33", 586c6fd2807SJeff Garzik "UDMA/44", 587c6fd2807SJeff Garzik "UDMA/66", 588c6fd2807SJeff Garzik "UDMA/100", 589c6fd2807SJeff Garzik "UDMA/133", 590c6fd2807SJeff Garzik "UDMA7", 591c6fd2807SJeff Garzik }; 592c6fd2807SJeff Garzik int highbit; 593c6fd2807SJeff Garzik 594c6fd2807SJeff Garzik highbit = fls(xfer_mask) - 1; 595c6fd2807SJeff Garzik if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) 596c6fd2807SJeff Garzik return xfer_mode_str[highbit]; 597c6fd2807SJeff Garzik return "<n/a>"; 598c6fd2807SJeff Garzik } 599c6fd2807SJeff Garzik 600c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd) 601c6fd2807SJeff Garzik { 602c6fd2807SJeff Garzik static const char * const spd_str[] = { 603c6fd2807SJeff Garzik "1.5 Gbps", 604c6fd2807SJeff Garzik "3.0 Gbps", 605c6fd2807SJeff Garzik }; 606c6fd2807SJeff Garzik 607c6fd2807SJeff Garzik if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) 608c6fd2807SJeff Garzik return "<unknown>"; 609c6fd2807SJeff Garzik return spd_str[spd - 1]; 610c6fd2807SJeff Garzik } 611c6fd2807SJeff Garzik 612c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev) 613c6fd2807SJeff Garzik { 61409d7f9b0STejun Heo if (ata_dev_enabled(dev)) { 6159af5c9c9STejun Heo if (ata_msg_drv(dev->link->ap)) 616c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 
"disabled\n"); 6174ae72a1eSTejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | 6184ae72a1eSTejun Heo ATA_DNXFER_QUIET); 619c6fd2807SJeff Garzik dev->class++; 620c6fd2807SJeff Garzik } 621c6fd2807SJeff Garzik } 622c6fd2807SJeff Garzik 623c6fd2807SJeff Garzik /** 624c6fd2807SJeff Garzik * ata_devchk - PATA device presence detection 625c6fd2807SJeff Garzik * @ap: ATA channel to examine 626c6fd2807SJeff Garzik * @device: Device to examine (starting at zero) 627c6fd2807SJeff Garzik * 6280d5ff566STejun Heo * This technique was originally described in 6290d5ff566STejun Heo * Hale Landis's ATADRVR (www.ata-atapi.com), and 6300d5ff566STejun Heo * later found its way into the ATA/ATAPI spec. 6310d5ff566STejun Heo * 6320d5ff566STejun Heo * Write a pattern to the ATA shadow registers, 6330d5ff566STejun Heo * and if a device is present, it will respond by 6340d5ff566STejun Heo * correctly storing and echoing back the 6350d5ff566STejun Heo * ATA shadow register contents. 636c6fd2807SJeff Garzik * 637c6fd2807SJeff Garzik * LOCKING: 638c6fd2807SJeff Garzik * caller. 
639c6fd2807SJeff Garzik */ 640c6fd2807SJeff Garzik 6410d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) 642c6fd2807SJeff Garzik { 6430d5ff566STejun Heo struct ata_ioports *ioaddr = &ap->ioaddr; 6440d5ff566STejun Heo u8 nsect, lbal; 6450d5ff566STejun Heo 6460d5ff566STejun Heo ap->ops->dev_select(ap, device); 6470d5ff566STejun Heo 6480d5ff566STejun Heo iowrite8(0x55, ioaddr->nsect_addr); 6490d5ff566STejun Heo iowrite8(0xaa, ioaddr->lbal_addr); 6500d5ff566STejun Heo 6510d5ff566STejun Heo iowrite8(0xaa, ioaddr->nsect_addr); 6520d5ff566STejun Heo iowrite8(0x55, ioaddr->lbal_addr); 6530d5ff566STejun Heo 6540d5ff566STejun Heo iowrite8(0x55, ioaddr->nsect_addr); 6550d5ff566STejun Heo iowrite8(0xaa, ioaddr->lbal_addr); 6560d5ff566STejun Heo 6570d5ff566STejun Heo nsect = ioread8(ioaddr->nsect_addr); 6580d5ff566STejun Heo lbal = ioread8(ioaddr->lbal_addr); 6590d5ff566STejun Heo 6600d5ff566STejun Heo if ((nsect == 0x55) && (lbal == 0xaa)) 6610d5ff566STejun Heo return 1; /* we found a device */ 6620d5ff566STejun Heo 6630d5ff566STejun Heo return 0; /* nothing found */ 664c6fd2807SJeff Garzik } 665c6fd2807SJeff Garzik 666c6fd2807SJeff Garzik /** 667c6fd2807SJeff Garzik * ata_dev_classify - determine device type based on ATA-spec signature 668c6fd2807SJeff Garzik * @tf: ATA taskfile register set for device to be identified 669c6fd2807SJeff Garzik * 670c6fd2807SJeff Garzik * Determine from taskfile register contents whether a device is 671c6fd2807SJeff Garzik * ATA or ATAPI, as per "Signature and persistence" section 672c6fd2807SJeff Garzik * of ATA/PI spec (volume 1, sect 5.14). 673c6fd2807SJeff Garzik * 674c6fd2807SJeff Garzik * LOCKING: 675c6fd2807SJeff Garzik * None. 676c6fd2807SJeff Garzik * 677c6fd2807SJeff Garzik * RETURNS: 678633273a3STejun Heo * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or 679633273a3STejun Heo * %ATA_DEV_UNKNOWN the event of failure. 
680c6fd2807SJeff Garzik */ 681c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf) 682c6fd2807SJeff Garzik { 683c6fd2807SJeff Garzik /* Apple's open source Darwin code hints that some devices only 684c6fd2807SJeff Garzik * put a proper signature into the LBA mid/high registers, 685c6fd2807SJeff Garzik * So, we only check those. It's sufficient for uniqueness. 686633273a3STejun Heo * 687633273a3STejun Heo * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate 688633273a3STejun Heo * signatures for ATA and ATAPI devices attached on SerialATA, 689633273a3STejun Heo * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA 690633273a3STejun Heo * spec has never mentioned about using different signatures 691633273a3STejun Heo * for ATA/ATAPI devices. Then, Serial ATA II: Port 692633273a3STejun Heo * Multiplier specification began to use 0x69/0x96 to identify 693633273a3STejun Heo * port multpliers and 0x3c/0xc3 to identify SEMB device. 694633273a3STejun Heo * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and 695633273a3STejun Heo * 0x69/0x96 shortly and described them as reserved for 696633273a3STejun Heo * SerialATA. 697633273a3STejun Heo * 698633273a3STejun Heo * We follow the current spec and consider that 0x69/0x96 699633273a3STejun Heo * identifies a port multiplier and 0x3c/0xc3 a SEMB device. 
700c6fd2807SJeff Garzik */ 701633273a3STejun Heo if ((tf->lbam == 0) && (tf->lbah == 0)) { 702c6fd2807SJeff Garzik DPRINTK("found ATA device by sig\n"); 703c6fd2807SJeff Garzik return ATA_DEV_ATA; 704c6fd2807SJeff Garzik } 705c6fd2807SJeff Garzik 706633273a3STejun Heo if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { 707c6fd2807SJeff Garzik DPRINTK("found ATAPI device by sig\n"); 708c6fd2807SJeff Garzik return ATA_DEV_ATAPI; 709c6fd2807SJeff Garzik } 710c6fd2807SJeff Garzik 711633273a3STejun Heo if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { 712633273a3STejun Heo DPRINTK("found PMP device by sig\n"); 713633273a3STejun Heo return ATA_DEV_PMP; 714633273a3STejun Heo } 715633273a3STejun Heo 716633273a3STejun Heo if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 7172dcb407eSJeff Garzik printk(KERN_INFO "ata: SEMB device ignored\n"); 718633273a3STejun Heo return ATA_DEV_SEMB_UNSUP; /* not yet */ 719633273a3STejun Heo } 720633273a3STejun Heo 721c6fd2807SJeff Garzik DPRINTK("unknown device\n"); 722c6fd2807SJeff Garzik return ATA_DEV_UNKNOWN; 723c6fd2807SJeff Garzik } 724c6fd2807SJeff Garzik 725c6fd2807SJeff Garzik /** 726c6fd2807SJeff Garzik * ata_dev_try_classify - Parse returned ATA device signature 7273f19859eSTejun Heo * @dev: ATA device to classify (starting at zero) 7283f19859eSTejun Heo * @present: device seems present 729c6fd2807SJeff Garzik * @r_err: Value of error register on completion 730c6fd2807SJeff Garzik * 731c6fd2807SJeff Garzik * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 732c6fd2807SJeff Garzik * an ATA/ATAPI-defined set of values is placed in the ATA 733c6fd2807SJeff Garzik * shadow registers, indicating the results of device detection 734c6fd2807SJeff Garzik * and diagnostics. 735c6fd2807SJeff Garzik * 736c6fd2807SJeff Garzik * Select the ATA device, and read the values from the ATA shadow 737c6fd2807SJeff Garzik * registers. 
Then parse according to the Error register value, 738c6fd2807SJeff Garzik * and the spec-defined values examined by ata_dev_classify(). 739c6fd2807SJeff Garzik * 740c6fd2807SJeff Garzik * LOCKING: 741c6fd2807SJeff Garzik * caller. 742c6fd2807SJeff Garzik * 743c6fd2807SJeff Garzik * RETURNS: 744c6fd2807SJeff Garzik * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. 745c6fd2807SJeff Garzik */ 7463f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present, 7473f19859eSTejun Heo u8 *r_err) 748c6fd2807SJeff Garzik { 7493f19859eSTejun Heo struct ata_port *ap = dev->link->ap; 750c6fd2807SJeff Garzik struct ata_taskfile tf; 751c6fd2807SJeff Garzik unsigned int class; 752c6fd2807SJeff Garzik u8 err; 753c6fd2807SJeff Garzik 7543f19859eSTejun Heo ap->ops->dev_select(ap, dev->devno); 755c6fd2807SJeff Garzik 756c6fd2807SJeff Garzik memset(&tf, 0, sizeof(tf)); 757c6fd2807SJeff Garzik 758c6fd2807SJeff Garzik ap->ops->tf_read(ap, &tf); 759c6fd2807SJeff Garzik err = tf.feature; 760c6fd2807SJeff Garzik if (r_err) 761c6fd2807SJeff Garzik *r_err = err; 762c6fd2807SJeff Garzik 76393590859SAlan Cox /* see if device passed diags: if master then continue and warn later */ 7643f19859eSTejun Heo if (err == 0 && dev->devno == 0) 76593590859SAlan Cox /* diagnostic fail : do nothing _YET_ */ 7663f19859eSTejun Heo dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; 76793590859SAlan Cox else if (err == 1) 768c6fd2807SJeff Garzik /* do nothing */ ; 7693f19859eSTejun Heo else if ((dev->devno == 0) && (err == 0x81)) 770c6fd2807SJeff Garzik /* do nothing */ ; 771c6fd2807SJeff Garzik else 772c6fd2807SJeff Garzik return ATA_DEV_NONE; 773c6fd2807SJeff Garzik 774c6fd2807SJeff Garzik /* determine if device is ATA or ATAPI */ 775c6fd2807SJeff Garzik class = ata_dev_classify(&tf); 776c6fd2807SJeff Garzik 777d7fbee05STejun Heo if (class == ATA_DEV_UNKNOWN) { 778d7fbee05STejun Heo /* If the device failed diagnostic, it's likely to 779d7fbee05STejun Heo * have reported incorrect 
device signature too. 780d7fbee05STejun Heo * Assume ATA device if the device seems present but 781d7fbee05STejun Heo * device signature is invalid with diagnostic 782d7fbee05STejun Heo * failure. 783d7fbee05STejun Heo */ 784d7fbee05STejun Heo if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) 785d7fbee05STejun Heo class = ATA_DEV_ATA; 786d7fbee05STejun Heo else 787d7fbee05STejun Heo class = ATA_DEV_NONE; 788d7fbee05STejun Heo } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 789d7fbee05STejun Heo class = ATA_DEV_NONE; 790d7fbee05STejun Heo 791c6fd2807SJeff Garzik return class; 792c6fd2807SJeff Garzik } 793c6fd2807SJeff Garzik 794c6fd2807SJeff Garzik /** 795c6fd2807SJeff Garzik * ata_id_string - Convert IDENTIFY DEVICE page into string 796c6fd2807SJeff Garzik * @id: IDENTIFY DEVICE results we will examine 797c6fd2807SJeff Garzik * @s: string into which data is output 798c6fd2807SJeff Garzik * @ofs: offset into identify device page 799c6fd2807SJeff Garzik * @len: length of string to return. must be an even number. 800c6fd2807SJeff Garzik * 801c6fd2807SJeff Garzik * The strings in the IDENTIFY DEVICE page are broken up into 802c6fd2807SJeff Garzik * 16-bit chunks. Run through the string, and output each 803c6fd2807SJeff Garzik * 8-bit chunk linearly, regardless of platform. 804c6fd2807SJeff Garzik * 805c6fd2807SJeff Garzik * LOCKING: 806c6fd2807SJeff Garzik * caller. 
807c6fd2807SJeff Garzik */ 808c6fd2807SJeff Garzik 809c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s, 810c6fd2807SJeff Garzik unsigned int ofs, unsigned int len) 811c6fd2807SJeff Garzik { 812c6fd2807SJeff Garzik unsigned int c; 813c6fd2807SJeff Garzik 814c6fd2807SJeff Garzik while (len > 0) { 815c6fd2807SJeff Garzik c = id[ofs] >> 8; 816c6fd2807SJeff Garzik *s = c; 817c6fd2807SJeff Garzik s++; 818c6fd2807SJeff Garzik 819c6fd2807SJeff Garzik c = id[ofs] & 0xff; 820c6fd2807SJeff Garzik *s = c; 821c6fd2807SJeff Garzik s++; 822c6fd2807SJeff Garzik 823c6fd2807SJeff Garzik ofs++; 824c6fd2807SJeff Garzik len -= 2; 825c6fd2807SJeff Garzik } 826c6fd2807SJeff Garzik } 827c6fd2807SJeff Garzik 828c6fd2807SJeff Garzik /** 829c6fd2807SJeff Garzik * ata_id_c_string - Convert IDENTIFY DEVICE page into C string 830c6fd2807SJeff Garzik * @id: IDENTIFY DEVICE results we will examine 831c6fd2807SJeff Garzik * @s: string into which data is output 832c6fd2807SJeff Garzik * @ofs: offset into identify device page 833c6fd2807SJeff Garzik * @len: length of string to return. must be an odd number. 834c6fd2807SJeff Garzik * 835c6fd2807SJeff Garzik * This function is identical to ata_id_string except that it 836c6fd2807SJeff Garzik * trims trailing spaces and terminates the resulting string with 837c6fd2807SJeff Garzik * null. @len must be actual maximum length (even number) + 1. 838c6fd2807SJeff Garzik * 839c6fd2807SJeff Garzik * LOCKING: 840c6fd2807SJeff Garzik * caller. 
841c6fd2807SJeff Garzik */ 842c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s, 843c6fd2807SJeff Garzik unsigned int ofs, unsigned int len) 844c6fd2807SJeff Garzik { 845c6fd2807SJeff Garzik unsigned char *p; 846c6fd2807SJeff Garzik 847c6fd2807SJeff Garzik WARN_ON(!(len & 1)); 848c6fd2807SJeff Garzik 849c6fd2807SJeff Garzik ata_id_string(id, s, ofs, len - 1); 850c6fd2807SJeff Garzik 851c6fd2807SJeff Garzik p = s + strnlen(s, len - 1); 852c6fd2807SJeff Garzik while (p > s && p[-1] == ' ') 853c6fd2807SJeff Garzik p--; 854c6fd2807SJeff Garzik *p = '\0'; 855c6fd2807SJeff Garzik } 856c6fd2807SJeff Garzik 857db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id) 858db6f8759STejun Heo { 859db6f8759STejun Heo if (ata_id_has_lba(id)) { 860db6f8759STejun Heo if (ata_id_has_lba48(id)) 861db6f8759STejun Heo return ata_id_u64(id, 100); 862db6f8759STejun Heo else 863db6f8759STejun Heo return ata_id_u32(id, 60); 864db6f8759STejun Heo } else { 865db6f8759STejun Heo if (ata_id_current_chs_valid(id)) 866db6f8759STejun Heo return ata_id_u32(id, 57); 867db6f8759STejun Heo else 868db6f8759STejun Heo return id[1] * id[3] * id[6]; 869db6f8759STejun Heo } 870db6f8759STejun Heo } 871db6f8759STejun Heo 8721e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf) 8731e999736SAlan Cox { 8741e999736SAlan Cox u64 sectors = 0; 8751e999736SAlan Cox 8761e999736SAlan Cox sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 8771e999736SAlan Cox sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 8781e999736SAlan Cox sectors |= (tf->hob_lbal & 0xff) << 24; 8791e999736SAlan Cox sectors |= (tf->lbah & 0xff) << 16; 8801e999736SAlan Cox sectors |= (tf->lbam & 0xff) << 8; 8811e999736SAlan Cox sectors |= (tf->lbal & 0xff); 8821e999736SAlan Cox 8831e999736SAlan Cox return ++sectors; 8841e999736SAlan Cox } 8851e999736SAlan Cox 8861e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf) 8871e999736SAlan Cox { 8881e999736SAlan Cox u64 sectors = 0; 8891e999736SAlan Cox 
8901e999736SAlan Cox sectors |= (tf->device & 0x0f) << 24; 8911e999736SAlan Cox sectors |= (tf->lbah & 0xff) << 16; 8921e999736SAlan Cox sectors |= (tf->lbam & 0xff) << 8; 8931e999736SAlan Cox sectors |= (tf->lbal & 0xff); 8941e999736SAlan Cox 8951e999736SAlan Cox return ++sectors; 8961e999736SAlan Cox } 8971e999736SAlan Cox 8981e999736SAlan Cox /** 899c728a914STejun Heo * ata_read_native_max_address - Read native max address 900c728a914STejun Heo * @dev: target device 901c728a914STejun Heo * @max_sectors: out parameter for the result native max address 9021e999736SAlan Cox * 903c728a914STejun Heo * Perform an LBA48 or LBA28 native size query upon the device in 904c728a914STejun Heo * question. 905c728a914STejun Heo * 906c728a914STejun Heo * RETURNS: 907c728a914STejun Heo * 0 on success, -EACCES if command is aborted by the drive. 908c728a914STejun Heo * -EIO on other errors. 9091e999736SAlan Cox */ 910c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) 9111e999736SAlan Cox { 912c728a914STejun Heo unsigned int err_mask; 9131e999736SAlan Cox struct ata_taskfile tf; 914c728a914STejun Heo int lba48 = ata_id_has_lba48(dev->id); 9151e999736SAlan Cox 9161e999736SAlan Cox ata_tf_init(dev, &tf); 9171e999736SAlan Cox 918c728a914STejun Heo /* always clear all address registers */ 9191e999736SAlan Cox tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 920c728a914STejun Heo 921c728a914STejun Heo if (lba48) { 922c728a914STejun Heo tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; 923c728a914STejun Heo tf.flags |= ATA_TFLAG_LBA48; 924c728a914STejun Heo } else 925c728a914STejun Heo tf.command = ATA_CMD_READ_NATIVE_MAX; 926c728a914STejun Heo 9271e999736SAlan Cox tf.protocol |= ATA_PROT_NODATA; 928c728a914STejun Heo tf.device |= ATA_LBA; 9291e999736SAlan Cox 9302b789108STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 931c728a914STejun Heo if (err_mask) { 932c728a914STejun Heo ata_dev_printk(dev, KERN_WARNING, 
"failed to read native " 933c728a914STejun Heo "max address (err_mask=0x%x)\n", err_mask); 934c728a914STejun Heo if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 935c728a914STejun Heo return -EACCES; 936c728a914STejun Heo return -EIO; 937c728a914STejun Heo } 938c728a914STejun Heo 939c728a914STejun Heo if (lba48) 940c728a914STejun Heo *max_sectors = ata_tf_to_lba48(&tf); 941c728a914STejun Heo else 942c728a914STejun Heo *max_sectors = ata_tf_to_lba(&tf); 94393328e11SAlan Cox if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 94493328e11SAlan Cox (*max_sectors)--; 9451e999736SAlan Cox return 0; 9461e999736SAlan Cox } 9471e999736SAlan Cox 9481e999736SAlan Cox /** 949c728a914STejun Heo * ata_set_max_sectors - Set max sectors 950c728a914STejun Heo * @dev: target device 9516b38d1d1SRandy Dunlap * @new_sectors: new max sectors value to set for the device 9521e999736SAlan Cox * 953c728a914STejun Heo * Set max sectors of @dev to @new_sectors. 954c728a914STejun Heo * 955c728a914STejun Heo * RETURNS: 956c728a914STejun Heo * 0 on success, -EACCES if command is aborted or denied (due to 957c728a914STejun Heo * previous non-volatile SET_MAX) by the drive. -EIO on other 958c728a914STejun Heo * errors. 
9591e999736SAlan Cox */ 96005027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) 9611e999736SAlan Cox { 962c728a914STejun Heo unsigned int err_mask; 9631e999736SAlan Cox struct ata_taskfile tf; 964c728a914STejun Heo int lba48 = ata_id_has_lba48(dev->id); 9651e999736SAlan Cox 9661e999736SAlan Cox new_sectors--; 9671e999736SAlan Cox 9681e999736SAlan Cox ata_tf_init(dev, &tf); 9691e999736SAlan Cox 970c728a914STejun Heo tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 9711e999736SAlan Cox 972c728a914STejun Heo if (lba48) { 973c728a914STejun Heo tf.command = ATA_CMD_SET_MAX_EXT; 974c728a914STejun Heo tf.flags |= ATA_TFLAG_LBA48; 9751e999736SAlan Cox 9761e999736SAlan Cox tf.hob_lbal = (new_sectors >> 24) & 0xff; 9771e999736SAlan Cox tf.hob_lbam = (new_sectors >> 32) & 0xff; 9781e999736SAlan Cox tf.hob_lbah = (new_sectors >> 40) & 0xff; 9791e582ba4STejun Heo } else { 9801e999736SAlan Cox tf.command = ATA_CMD_SET_MAX; 981c728a914STejun Heo 9821e582ba4STejun Heo tf.device |= (new_sectors >> 24) & 0xf; 9831e582ba4STejun Heo } 9841e582ba4STejun Heo 9851e999736SAlan Cox tf.protocol |= ATA_PROT_NODATA; 986c728a914STejun Heo tf.device |= ATA_LBA; 9871e999736SAlan Cox 9881e999736SAlan Cox tf.lbal = (new_sectors >> 0) & 0xff; 9891e999736SAlan Cox tf.lbam = (new_sectors >> 8) & 0xff; 9901e999736SAlan Cox tf.lbah = (new_sectors >> 16) & 0xff; 9911e999736SAlan Cox 9922b789108STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 993c728a914STejun Heo if (err_mask) { 994c728a914STejun Heo ata_dev_printk(dev, KERN_WARNING, "failed to set " 995c728a914STejun Heo "max address (err_mask=0x%x)\n", err_mask); 996c728a914STejun Heo if (err_mask == AC_ERR_DEV && 997c728a914STejun Heo (tf.feature & (ATA_ABORTED | ATA_IDNF))) 998c728a914STejun Heo return -EACCES; 999c728a914STejun Heo return -EIO; 1000c728a914STejun Heo } 1001c728a914STejun Heo 10021e999736SAlan Cox return 0; 10031e999736SAlan Cox } 10041e999736SAlan Cox 
10051e999736SAlan Cox /** 10061e999736SAlan Cox * ata_hpa_resize - Resize a device with an HPA set 10071e999736SAlan Cox * @dev: Device to resize 10081e999736SAlan Cox * 10091e999736SAlan Cox * Read the size of an LBA28 or LBA48 disk with HPA features and resize 10101e999736SAlan Cox * it if required to the full size of the media. The caller must check 10111e999736SAlan Cox * the drive has the HPA feature set enabled. 101205027adcSTejun Heo * 101305027adcSTejun Heo * RETURNS: 101405027adcSTejun Heo * 0 on success, -errno on failure. 10151e999736SAlan Cox */ 101605027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev) 10171e999736SAlan Cox { 101805027adcSTejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 101905027adcSTejun Heo int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 102005027adcSTejun Heo u64 sectors = ata_id_n_sectors(dev->id); 102105027adcSTejun Heo u64 native_sectors; 1022c728a914STejun Heo int rc; 10231e999736SAlan Cox 102405027adcSTejun Heo /* do we need to do it? */ 102505027adcSTejun Heo if (dev->class != ATA_DEV_ATA || 102605027adcSTejun Heo !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || 102705027adcSTejun Heo (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) 1028c728a914STejun Heo return 0; 10291e999736SAlan Cox 103005027adcSTejun Heo /* read native max address */ 103105027adcSTejun Heo rc = ata_read_native_max_address(dev, &native_sectors); 103205027adcSTejun Heo if (rc) { 103305027adcSTejun Heo /* If HPA isn't going to be unlocked, skip HPA 103405027adcSTejun Heo * resizing from the next try. 
103505027adcSTejun Heo */ 103605027adcSTejun Heo if (!ata_ignore_hpa) { 103705027adcSTejun Heo ata_dev_printk(dev, KERN_WARNING, "HPA support seems " 103805027adcSTejun Heo "broken, will skip HPA handling\n"); 103905027adcSTejun Heo dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 104005027adcSTejun Heo 104105027adcSTejun Heo /* we can continue if device aborted the command */ 104205027adcSTejun Heo if (rc == -EACCES) 104305027adcSTejun Heo rc = 0; 104405027adcSTejun Heo } 104505027adcSTejun Heo 104605027adcSTejun Heo return rc; 104705027adcSTejun Heo } 104805027adcSTejun Heo 104905027adcSTejun Heo /* nothing to do? */ 105005027adcSTejun Heo if (native_sectors <= sectors || !ata_ignore_hpa) { 105105027adcSTejun Heo if (!print_info || native_sectors == sectors) 105205027adcSTejun Heo return 0; 105305027adcSTejun Heo 105405027adcSTejun Heo if (native_sectors > sectors) 10551e999736SAlan Cox ata_dev_printk(dev, KERN_INFO, 105605027adcSTejun Heo "HPA detected: current %llu, native %llu\n", 105705027adcSTejun Heo (unsigned long long)sectors, 105805027adcSTejun Heo (unsigned long long)native_sectors); 105905027adcSTejun Heo else if (native_sectors < sectors) 106005027adcSTejun Heo ata_dev_printk(dev, KERN_WARNING, 106105027adcSTejun Heo "native sectors (%llu) is smaller than " 106205027adcSTejun Heo "sectors (%llu)\n", 106305027adcSTejun Heo (unsigned long long)native_sectors, 106405027adcSTejun Heo (unsigned long long)sectors); 106505027adcSTejun Heo return 0; 10661e999736SAlan Cox } 106737301a55STejun Heo 106805027adcSTejun Heo /* let's unlock HPA */ 106905027adcSTejun Heo rc = ata_set_max_sectors(dev, native_sectors); 107005027adcSTejun Heo if (rc == -EACCES) { 107105027adcSTejun Heo /* if device aborted the command, skip HPA resizing */ 107205027adcSTejun Heo ata_dev_printk(dev, KERN_WARNING, "device aborted resize " 107305027adcSTejun Heo "(%llu -> %llu), skipping HPA handling\n", 107405027adcSTejun Heo (unsigned long long)sectors, 107505027adcSTejun Heo (unsigned long 
long)native_sectors); 107605027adcSTejun Heo dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 107705027adcSTejun Heo return 0; 107805027adcSTejun Heo } else if (rc) 107905027adcSTejun Heo return rc; 108005027adcSTejun Heo 108105027adcSTejun Heo /* re-read IDENTIFY data */ 108205027adcSTejun Heo rc = ata_dev_reread_id(dev, 0); 108305027adcSTejun Heo if (rc) { 108405027adcSTejun Heo ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY " 108505027adcSTejun Heo "data after HPA resizing\n"); 108605027adcSTejun Heo return rc; 108705027adcSTejun Heo } 108805027adcSTejun Heo 108905027adcSTejun Heo if (print_info) { 109005027adcSTejun Heo u64 new_sectors = ata_id_n_sectors(dev->id); 109105027adcSTejun Heo ata_dev_printk(dev, KERN_INFO, 109205027adcSTejun Heo "HPA unlocked: %llu -> %llu, native %llu\n", 109305027adcSTejun Heo (unsigned long long)sectors, 109405027adcSTejun Heo (unsigned long long)new_sectors, 109505027adcSTejun Heo (unsigned long long)native_sectors); 109605027adcSTejun Heo } 109705027adcSTejun Heo 109805027adcSTejun Heo return 0; 10991e999736SAlan Cox } 11001e999736SAlan Cox 1101c6fd2807SJeff Garzik /** 110210305f0fSAlan * ata_id_to_dma_mode - Identify DMA mode from id block 110310305f0fSAlan * @dev: device to identify 1104cc261267SRandy Dunlap * @unknown: mode to assume if we cannot tell 110510305f0fSAlan * 110610305f0fSAlan * Set up the timing values for the device based upon the identify 110710305f0fSAlan * reported values for the DMA mode. This function is used by drivers 110810305f0fSAlan * which rely upon firmware configured modes, but wish to report the 110910305f0fSAlan * mode correctly when possible. 111010305f0fSAlan * 111110305f0fSAlan * In addition we emit similarly formatted messages to the default 111210305f0fSAlan * ata_dev_set_mode handler, in order to provide consistency of 111310305f0fSAlan * presentation. 
111410305f0fSAlan */ 111510305f0fSAlan 111610305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown) 111710305f0fSAlan { 111810305f0fSAlan unsigned int mask; 111910305f0fSAlan u8 mode; 112010305f0fSAlan 112110305f0fSAlan /* Pack the DMA modes */ 112210305f0fSAlan mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA; 112310305f0fSAlan if (dev->id[53] & 0x04) 112410305f0fSAlan mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA; 112510305f0fSAlan 112610305f0fSAlan /* Select the mode in use */ 112710305f0fSAlan mode = ata_xfer_mask2mode(mask); 112810305f0fSAlan 112910305f0fSAlan if (mode != 0) { 113010305f0fSAlan ata_dev_printk(dev, KERN_INFO, "configured for %s\n", 113110305f0fSAlan ata_mode_string(mask)); 113210305f0fSAlan } else { 113310305f0fSAlan /* SWDMA perhaps ? */ 113410305f0fSAlan mode = unknown; 113510305f0fSAlan ata_dev_printk(dev, KERN_INFO, "configured for DMA\n"); 113610305f0fSAlan } 113710305f0fSAlan 113810305f0fSAlan /* Configure the device reporting */ 113910305f0fSAlan dev->xfer_mode = mode; 114010305f0fSAlan dev->xfer_shift = ata_xfer_mode2shift(mode); 114110305f0fSAlan } 114210305f0fSAlan 114310305f0fSAlan /** 1144c6fd2807SJeff Garzik * ata_noop_dev_select - Select device 0/1 on ATA bus 1145c6fd2807SJeff Garzik * @ap: ATA channel to manipulate 1146c6fd2807SJeff Garzik * @device: ATA device (numbered from zero) to select 1147c6fd2807SJeff Garzik * 1148c6fd2807SJeff Garzik * This function performs no actual function. 1149c6fd2807SJeff Garzik * 1150c6fd2807SJeff Garzik * May be used as the dev_select() entry in ata_port_operations. 1151c6fd2807SJeff Garzik * 1152c6fd2807SJeff Garzik * LOCKING: 1153c6fd2807SJeff Garzik * caller. 
1154c6fd2807SJeff Garzik */ 1155c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device) 1156c6fd2807SJeff Garzik { 1157c6fd2807SJeff Garzik } 1158c6fd2807SJeff Garzik 1159c6fd2807SJeff Garzik 1160c6fd2807SJeff Garzik /** 1161c6fd2807SJeff Garzik * ata_std_dev_select - Select device 0/1 on ATA bus 1162c6fd2807SJeff Garzik * @ap: ATA channel to manipulate 1163c6fd2807SJeff Garzik * @device: ATA device (numbered from zero) to select 1164c6fd2807SJeff Garzik * 1165c6fd2807SJeff Garzik * Use the method defined in the ATA specification to 1166c6fd2807SJeff Garzik * make either device 0, or device 1, active on the 1167c6fd2807SJeff Garzik * ATA channel. Works with both PIO and MMIO. 1168c6fd2807SJeff Garzik * 1169c6fd2807SJeff Garzik * May be used as the dev_select() entry in ata_port_operations. 1170c6fd2807SJeff Garzik * 1171c6fd2807SJeff Garzik * LOCKING: 1172c6fd2807SJeff Garzik * caller. 1173c6fd2807SJeff Garzik */ 1174c6fd2807SJeff Garzik 1175c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device) 1176c6fd2807SJeff Garzik { 1177c6fd2807SJeff Garzik u8 tmp; 1178c6fd2807SJeff Garzik 1179c6fd2807SJeff Garzik if (device == 0) 1180c6fd2807SJeff Garzik tmp = ATA_DEVICE_OBS; 1181c6fd2807SJeff Garzik else 1182c6fd2807SJeff Garzik tmp = ATA_DEVICE_OBS | ATA_DEV1; 1183c6fd2807SJeff Garzik 11840d5ff566STejun Heo iowrite8(tmp, ap->ioaddr.device_addr); 1185c6fd2807SJeff Garzik ata_pause(ap); /* needed; also flushes, for mmio */ 1186c6fd2807SJeff Garzik } 1187c6fd2807SJeff Garzik 1188c6fd2807SJeff Garzik /** 1189c6fd2807SJeff Garzik * ata_dev_select - Select device 0/1 on ATA bus 1190c6fd2807SJeff Garzik * @ap: ATA channel to manipulate 1191c6fd2807SJeff Garzik * @device: ATA device (numbered from zero) to select 1192c6fd2807SJeff Garzik * @wait: non-zero to wait for Status register BSY bit to clear 1193c6fd2807SJeff Garzik * @can_sleep: non-zero if context allows sleeping 1194c6fd2807SJeff Garzik * 1195c6fd2807SJeff 
Garzik * Use the method defined in the ATA specification to 1196c6fd2807SJeff Garzik * make either device 0, or device 1, active on the 1197c6fd2807SJeff Garzik * ATA channel. 1198c6fd2807SJeff Garzik * 1199c6fd2807SJeff Garzik * This is a high-level version of ata_std_dev_select(), 1200c6fd2807SJeff Garzik * which additionally provides the services of inserting 1201c6fd2807SJeff Garzik * the proper pauses and status polling, where needed. 1202c6fd2807SJeff Garzik * 1203c6fd2807SJeff Garzik * LOCKING: 1204c6fd2807SJeff Garzik * caller. 1205c6fd2807SJeff Garzik */ 1206c6fd2807SJeff Garzik 1207c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device, 1208c6fd2807SJeff Garzik unsigned int wait, unsigned int can_sleep) 1209c6fd2807SJeff Garzik { 1210c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 121144877b4eSTejun Heo ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " 121244877b4eSTejun Heo "device %u, wait %u\n", device, wait); 1213c6fd2807SJeff Garzik 1214c6fd2807SJeff Garzik if (wait) 1215c6fd2807SJeff Garzik ata_wait_idle(ap); 1216c6fd2807SJeff Garzik 1217c6fd2807SJeff Garzik ap->ops->dev_select(ap, device); 1218c6fd2807SJeff Garzik 1219c6fd2807SJeff Garzik if (wait) { 12209af5c9c9STejun Heo if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) 1221c6fd2807SJeff Garzik msleep(150); 1222c6fd2807SJeff Garzik ata_wait_idle(ap); 1223c6fd2807SJeff Garzik } 1224c6fd2807SJeff Garzik } 1225c6fd2807SJeff Garzik 1226c6fd2807SJeff Garzik /** 1227c6fd2807SJeff Garzik * ata_dump_id - IDENTIFY DEVICE info debugging output 1228c6fd2807SJeff Garzik * @id: IDENTIFY DEVICE page to dump 1229c6fd2807SJeff Garzik * 1230c6fd2807SJeff Garzik * Dump selected 16-bit words from the given IDENTIFY DEVICE 1231c6fd2807SJeff Garzik * page. 1232c6fd2807SJeff Garzik * 1233c6fd2807SJeff Garzik * LOCKING: 1234c6fd2807SJeff Garzik * caller. 
1235c6fd2807SJeff Garzik */ 1236c6fd2807SJeff Garzik 1237c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id) 1238c6fd2807SJeff Garzik { 1239c6fd2807SJeff Garzik DPRINTK("49==0x%04x " 1240c6fd2807SJeff Garzik "53==0x%04x " 1241c6fd2807SJeff Garzik "63==0x%04x " 1242c6fd2807SJeff Garzik "64==0x%04x " 1243c6fd2807SJeff Garzik "75==0x%04x \n", 1244c6fd2807SJeff Garzik id[49], 1245c6fd2807SJeff Garzik id[53], 1246c6fd2807SJeff Garzik id[63], 1247c6fd2807SJeff Garzik id[64], 1248c6fd2807SJeff Garzik id[75]); 1249c6fd2807SJeff Garzik DPRINTK("80==0x%04x " 1250c6fd2807SJeff Garzik "81==0x%04x " 1251c6fd2807SJeff Garzik "82==0x%04x " 1252c6fd2807SJeff Garzik "83==0x%04x " 1253c6fd2807SJeff Garzik "84==0x%04x \n", 1254c6fd2807SJeff Garzik id[80], 1255c6fd2807SJeff Garzik id[81], 1256c6fd2807SJeff Garzik id[82], 1257c6fd2807SJeff Garzik id[83], 1258c6fd2807SJeff Garzik id[84]); 1259c6fd2807SJeff Garzik DPRINTK("88==0x%04x " 1260c6fd2807SJeff Garzik "93==0x%04x\n", 1261c6fd2807SJeff Garzik id[88], 1262c6fd2807SJeff Garzik id[93]); 1263c6fd2807SJeff Garzik } 1264c6fd2807SJeff Garzik 1265c6fd2807SJeff Garzik /** 1266c6fd2807SJeff Garzik * ata_id_xfermask - Compute xfermask from the given IDENTIFY data 1267c6fd2807SJeff Garzik * @id: IDENTIFY data to compute xfer mask from 1268c6fd2807SJeff Garzik * 1269c6fd2807SJeff Garzik * Compute the xfermask for this device. This is not as trivial 1270c6fd2807SJeff Garzik * as it seems if we must consider early devices correctly. 1271c6fd2807SJeff Garzik * 1272c6fd2807SJeff Garzik * FIXME: pre IDE drive timing (do we care ?). 1273c6fd2807SJeff Garzik * 1274c6fd2807SJeff Garzik * LOCKING: 1275c6fd2807SJeff Garzik * None. 
1276c6fd2807SJeff Garzik * 1277c6fd2807SJeff Garzik * RETURNS: 1278c6fd2807SJeff Garzik * Computed xfermask 1279c6fd2807SJeff Garzik */ 1280c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id) 1281c6fd2807SJeff Garzik { 1282c6fd2807SJeff Garzik unsigned int pio_mask, mwdma_mask, udma_mask; 1283c6fd2807SJeff Garzik 1284c6fd2807SJeff Garzik /* Usual case. Word 53 indicates word 64 is valid */ 1285c6fd2807SJeff Garzik if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { 1286c6fd2807SJeff Garzik pio_mask = id[ATA_ID_PIO_MODES] & 0x03; 1287c6fd2807SJeff Garzik pio_mask <<= 3; 1288c6fd2807SJeff Garzik pio_mask |= 0x7; 1289c6fd2807SJeff Garzik } else { 1290c6fd2807SJeff Garzik /* If word 64 isn't valid then Word 51 high byte holds 1291c6fd2807SJeff Garzik * the PIO timing number for the maximum. Turn it into 1292c6fd2807SJeff Garzik * a mask. 1293c6fd2807SJeff Garzik */ 12947a0f1c8aSLennert Buytenhek u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 129546767aebSAlan Cox if (mode < 5) /* Valid PIO range */ 129646767aebSAlan Cox pio_mask = (2 << mode) - 1; 129746767aebSAlan Cox else 129846767aebSAlan Cox pio_mask = 1; 1299c6fd2807SJeff Garzik 1300c6fd2807SJeff Garzik /* But wait.. there's more. Design your standards by 1301c6fd2807SJeff Garzik * committee and you too can get a free iordy field to 1302c6fd2807SJeff Garzik * process. However its the speeds not the modes that 1303c6fd2807SJeff Garzik * are supported... 
Note drivers using the timing API 1304c6fd2807SJeff Garzik * will get this right anyway 1305c6fd2807SJeff Garzik */ 1306c6fd2807SJeff Garzik } 1307c6fd2807SJeff Garzik 1308c6fd2807SJeff Garzik mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; 1309c6fd2807SJeff Garzik 1310b352e57dSAlan Cox if (ata_id_is_cfa(id)) { 1311b352e57dSAlan Cox /* 1312b352e57dSAlan Cox * Process compact flash extended modes 1313b352e57dSAlan Cox */ 1314b352e57dSAlan Cox int pio = id[163] & 0x7; 1315b352e57dSAlan Cox int dma = (id[163] >> 3) & 7; 1316b352e57dSAlan Cox 1317b352e57dSAlan Cox if (pio) 1318b352e57dSAlan Cox pio_mask |= (1 << 5); 1319b352e57dSAlan Cox if (pio > 1) 1320b352e57dSAlan Cox pio_mask |= (1 << 6); 1321b352e57dSAlan Cox if (dma) 1322b352e57dSAlan Cox mwdma_mask |= (1 << 3); 1323b352e57dSAlan Cox if (dma > 1) 1324b352e57dSAlan Cox mwdma_mask |= (1 << 4); 1325b352e57dSAlan Cox } 1326b352e57dSAlan Cox 1327c6fd2807SJeff Garzik udma_mask = 0; 1328c6fd2807SJeff Garzik if (id[ATA_ID_FIELD_VALID] & (1 << 2)) 1329c6fd2807SJeff Garzik udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; 1330c6fd2807SJeff Garzik 1331c6fd2807SJeff Garzik return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 1332c6fd2807SJeff Garzik } 1333c6fd2807SJeff Garzik 1334c6fd2807SJeff Garzik /** 1335c6fd2807SJeff Garzik * ata_port_queue_task - Queue port_task 1336c6fd2807SJeff Garzik * @ap: The ata_port to queue port_task for 1337c6fd2807SJeff Garzik * @fn: workqueue function to be scheduled 133865f27f38SDavid Howells * @data: data for @fn to use 1339c6fd2807SJeff Garzik * @delay: delay time for workqueue function 1340c6fd2807SJeff Garzik * 1341c6fd2807SJeff Garzik * Schedule @fn(@data) for execution after @delay jiffies using 1342c6fd2807SJeff Garzik * port_task. There is one port_task per port and it's the 1343c6fd2807SJeff Garzik * user(low level driver)'s responsibility to make sure that only 1344c6fd2807SJeff Garzik * one task is active at any given time. 
1345c6fd2807SJeff Garzik * 1346c6fd2807SJeff Garzik * libata core layer takes care of synchronization between 1347c6fd2807SJeff Garzik * port_task and EH. ata_port_queue_task() may be ignored for EH 1348c6fd2807SJeff Garzik * synchronization. 1349c6fd2807SJeff Garzik * 1350c6fd2807SJeff Garzik * LOCKING: 1351c6fd2807SJeff Garzik * Inherited from caller. 1352c6fd2807SJeff Garzik */ 135365f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, 1354c6fd2807SJeff Garzik unsigned long delay) 1355c6fd2807SJeff Garzik { 135665f27f38SDavid Howells PREPARE_DELAYED_WORK(&ap->port_task, fn); 135765f27f38SDavid Howells ap->port_task_data = data; 1358c6fd2807SJeff Garzik 135945a66c1cSOleg Nesterov /* may fail if ata_port_flush_task() in progress */ 136045a66c1cSOleg Nesterov queue_delayed_work(ata_wq, &ap->port_task, delay); 1361c6fd2807SJeff Garzik } 1362c6fd2807SJeff Garzik 1363c6fd2807SJeff Garzik /** 1364c6fd2807SJeff Garzik * ata_port_flush_task - Flush port_task 1365c6fd2807SJeff Garzik * @ap: The ata_port to flush port_task for 1366c6fd2807SJeff Garzik * 1367c6fd2807SJeff Garzik * After this function completes, port_task is guaranteed not to 1368c6fd2807SJeff Garzik * be running or scheduled.
1369c6fd2807SJeff Garzik * 1370c6fd2807SJeff Garzik * LOCKING: 1371c6fd2807SJeff Garzik * Kernel thread context (may sleep) 1372c6fd2807SJeff Garzik */ 1373c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap) 1374c6fd2807SJeff Garzik { 1375c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1376c6fd2807SJeff Garzik 137745a66c1cSOleg Nesterov cancel_rearming_delayed_work(&ap->port_task); 1378c6fd2807SJeff Garzik 1379c6fd2807SJeff Garzik if (ata_msg_ctl(ap)) 1380c6fd2807SJeff Garzik ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); 1381c6fd2807SJeff Garzik } 1382c6fd2807SJeff Garzik /* Completion callback for internal commands: wakes the waiter sleeping in ata_exec_internal_sg(). */ 13837102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1384c6fd2807SJeff Garzik { 1385c6fd2807SJeff Garzik struct completion *waiting = qc->private_data; 1386c6fd2807SJeff Garzik 1387c6fd2807SJeff Garzik complete(waiting); 1388c6fd2807SJeff Garzik } 1389c6fd2807SJeff Garzik 1390c6fd2807SJeff Garzik /** 13912432697bSTejun Heo * ata_exec_internal_sg - execute libata internal command 1392c6fd2807SJeff Garzik * @dev: Device to which the command is sent 1393c6fd2807SJeff Garzik * @tf: Taskfile registers for the command and the result 1394c6fd2807SJeff Garzik * @cdb: CDB for packet command 1395c6fd2807SJeff Garzik * @dma_dir: Data transfer direction of the command 13965c1ad8b3SRandy Dunlap * @sgl: sg list for the data buffer of the command 13972432697bSTejun Heo * @n_elem: Number of sg entries 13982b789108STejun Heo * @timeout: Timeout in msecs (0 for default) 1399c6fd2807SJeff Garzik * 1400c6fd2807SJeff Garzik * Executes libata internal command with timeout. @tf contains 1401c6fd2807SJeff Garzik * command on entry and result on return. Timeout and error 1402c6fd2807SJeff Garzik * conditions are reported via return value. No recovery action 1403c6fd2807SJeff Garzik * is taken after a command times out. It's caller's duty to 1404c6fd2807SJeff Garzik * clean up after timeout.
1405c6fd2807SJeff Garzik * 1406c6fd2807SJeff Garzik * LOCKING: 1407c6fd2807SJeff Garzik * None. Should be called with kernel context, might sleep. 1408c6fd2807SJeff Garzik * 1409c6fd2807SJeff Garzik * RETURNS: 1410c6fd2807SJeff Garzik * Zero on success, AC_ERR_* mask on failure 1411c6fd2807SJeff Garzik */ 14122432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev, 1413c6fd2807SJeff Garzik struct ata_taskfile *tf, const u8 *cdb, 141487260216SJens Axboe int dma_dir, struct scatterlist *sgl, 14152b789108STejun Heo unsigned int n_elem, unsigned long timeout) 1416c6fd2807SJeff Garzik { 14179af5c9c9STejun Heo struct ata_link *link = dev->link; 14189af5c9c9STejun Heo struct ata_port *ap = link->ap; 1419c6fd2807SJeff Garzik u8 command = tf->command; 1420c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1421c6fd2807SJeff Garzik unsigned int tag, preempted_tag; 1422c6fd2807SJeff Garzik u32 preempted_sactive, preempted_qc_active; 1423da917d69STejun Heo int preempted_nr_active_links; 1424c6fd2807SJeff Garzik DECLARE_COMPLETION_ONSTACK(wait); 1425c6fd2807SJeff Garzik unsigned long flags; 1426c6fd2807SJeff Garzik unsigned int err_mask; 1427c6fd2807SJeff Garzik int rc; 1428c6fd2807SJeff Garzik 1429c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1430c6fd2807SJeff Garzik 1431c6fd2807SJeff Garzik /* no internal command while frozen */ 1432c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) { 1433c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1434c6fd2807SJeff Garzik return AC_ERR_SYSTEM; 1435c6fd2807SJeff Garzik } 1436c6fd2807SJeff Garzik 1437c6fd2807SJeff Garzik /* initialize internal qc */ 1438c6fd2807SJeff Garzik 1439c6fd2807SJeff Garzik /* XXX: Tag 0 is used for drivers with legacy EH as some 1440c6fd2807SJeff Garzik * drivers choke if any other tag is given. This breaks 1441c6fd2807SJeff Garzik * ata_tag_internal() test for those drivers. Don't use new 1442c6fd2807SJeff Garzik * EH stuff without converting to it.
1443c6fd2807SJeff Garzik */ 1444c6fd2807SJeff Garzik if (ap->ops->error_handler) 1445c6fd2807SJeff Garzik tag = ATA_TAG_INTERNAL; 1446c6fd2807SJeff Garzik else 1447c6fd2807SJeff Garzik tag = 0; 1448c6fd2807SJeff Garzik 1449c6fd2807SJeff Garzik if (test_and_set_bit(tag, &ap->qc_allocated)) 1450c6fd2807SJeff Garzik BUG(); 1451c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1452c6fd2807SJeff Garzik 1453c6fd2807SJeff Garzik qc->tag = tag; 1454c6fd2807SJeff Garzik qc->scsicmd = NULL; 1455c6fd2807SJeff Garzik qc->ap = ap; 1456c6fd2807SJeff Garzik qc->dev = dev; 1457c6fd2807SJeff Garzik ata_qc_reinit(qc); 1458c6fd2807SJeff Garzik /* save and clear per-link/per-port active command state so the internal command runs alone; restored below */ 14599af5c9c9STejun Heo preempted_tag = link->active_tag; 14609af5c9c9STejun Heo preempted_sactive = link->sactive; 1461c6fd2807SJeff Garzik preempted_qc_active = ap->qc_active; 1462da917d69STejun Heo preempted_nr_active_links = ap->nr_active_links; 14639af5c9c9STejun Heo link->active_tag = ATA_TAG_POISON; 14649af5c9c9STejun Heo link->sactive = 0; 1465c6fd2807SJeff Garzik ap->qc_active = 0; 1466da917d69STejun Heo ap->nr_active_links = 0; 1467c6fd2807SJeff Garzik 1468c6fd2807SJeff Garzik /* prepare & issue qc */ 1469c6fd2807SJeff Garzik qc->tf = *tf; 1470c6fd2807SJeff Garzik if (cdb) 1471c6fd2807SJeff Garzik memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1472c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_RESULT_TF; 1473c6fd2807SJeff Garzik qc->dma_dir = dma_dir; 1474c6fd2807SJeff Garzik if (dma_dir != DMA_NONE) { 14752432697bSTejun Heo unsigned int i, buflen = 0; 147687260216SJens Axboe struct scatterlist *sg; 14772432697bSTejun Heo 147887260216SJens Axboe for_each_sg(sgl, sg, n_elem, i) 147987260216SJens Axboe buflen += sg->length; 14802432697bSTejun Heo 148187260216SJens Axboe ata_sg_init(qc, sgl, n_elem); 148249c80429SBrian King qc->nbytes = buflen; 1483c6fd2807SJeff Garzik } 1484c6fd2807SJeff Garzik 1485c6fd2807SJeff Garzik qc->private_data = &wait; 1486c6fd2807SJeff Garzik qc->complete_fn = ata_qc_complete_internal; 1487c6fd2807SJeff Garzik
1488c6fd2807SJeff Garzik ata_qc_issue(qc); 1489c6fd2807SJeff Garzik 1490c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1491c6fd2807SJeff Garzik /* @timeout == 0: fall back to the default, ata_probe_timeout (jiffies) converted to msecs */ 14922b789108STejun Heo if (!timeout) 14932b789108STejun Heo timeout = ata_probe_timeout * 1000 / HZ; 14942b789108STejun Heo 14952b789108STejun Heo rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1496c6fd2807SJeff Garzik 1497c6fd2807SJeff Garzik ata_port_flush_task(ap); 1498c6fd2807SJeff Garzik 1499c6fd2807SJeff Garzik if (!rc) { 1500c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1501c6fd2807SJeff Garzik 1502c6fd2807SJeff Garzik /* We're racing with irq here. If we lose, the 1503c6fd2807SJeff Garzik * following test prevents us from completing the qc 1504c6fd2807SJeff Garzik * twice. If we win, the port is frozen and will be 1505c6fd2807SJeff Garzik * cleaned up by ->post_internal_cmd(). 1506c6fd2807SJeff Garzik */ 1507c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_ACTIVE) { 1508c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 1509c6fd2807SJeff Garzik 1510c6fd2807SJeff Garzik if (ap->ops->error_handler) 1511c6fd2807SJeff Garzik ata_port_freeze(ap); 1512c6fd2807SJeff Garzik else 1513c6fd2807SJeff Garzik ata_qc_complete(qc); 1514c6fd2807SJeff Garzik 1515c6fd2807SJeff Garzik if (ata_msg_warn(ap)) 1516c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1517c6fd2807SJeff Garzik "qc timeout (cmd 0x%x)\n", command); 1518c6fd2807SJeff Garzik } 1519c6fd2807SJeff Garzik 1520c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1521c6fd2807SJeff Garzik } 1522c6fd2807SJeff Garzik 1523c6fd2807SJeff Garzik /* do post_internal_cmd */ 1524c6fd2807SJeff Garzik if (ap->ops->post_internal_cmd) 1525c6fd2807SJeff Garzik ap->ops->post_internal_cmd(qc); 1526c6fd2807SJeff Garzik 1527a51d644aSTejun Heo /* perform minimal error analysis */ 1528a51d644aSTejun Heo if (qc->flags & ATA_QCFLAG_FAILED) { 1529a51d644aSTejun Heo if (qc->result_tf.command & (ATA_ERR | ATA_DF)) 1530a51d644aSTejun Heo
qc->err_mask |= AC_ERR_DEV; 1531a51d644aSTejun Heo 1532a51d644aSTejun Heo if (!qc->err_mask) 1533c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_OTHER; 1534a51d644aSTejun Heo 1535a51d644aSTejun Heo if (qc->err_mask & ~AC_ERR_OTHER) 1536a51d644aSTejun Heo qc->err_mask &= ~AC_ERR_OTHER; 1537c6fd2807SJeff Garzik } 1538c6fd2807SJeff Garzik 1539c6fd2807SJeff Garzik /* finish up */ 1540c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1541c6fd2807SJeff Garzik 1542c6fd2807SJeff Garzik *tf = qc->result_tf; 1543c6fd2807SJeff Garzik err_mask = qc->err_mask; 1544c6fd2807SJeff Garzik 1545c6fd2807SJeff Garzik ata_qc_free(qc); 15469af5c9c9STejun Heo link->active_tag = preempted_tag; 15479af5c9c9STejun Heo link->sactive = preempted_sactive; 1548c6fd2807SJeff Garzik ap->qc_active = preempted_qc_active; 1549da917d69STejun Heo ap->nr_active_links = preempted_nr_active_links; 1550c6fd2807SJeff Garzik 1551c6fd2807SJeff Garzik /* XXX - Some LLDDs (sata_mv) disable port on command failure. 1552c6fd2807SJeff Garzik * Until those drivers are fixed, we detect the condition 1553c6fd2807SJeff Garzik * here, fail the command with AC_ERR_SYSTEM and reenable the 1554c6fd2807SJeff Garzik * port. 1555c6fd2807SJeff Garzik * 1556c6fd2807SJeff Garzik * Note that this doesn't change any behavior as internal 1557c6fd2807SJeff Garzik * command failure results in disabling the device in the 1558c6fd2807SJeff Garzik * higher layer for LLDDs without new reset/EH callbacks. 1559c6fd2807SJeff Garzik * 1560c6fd2807SJeff Garzik * Kill the following code as soon as those drivers are fixed.
1561c6fd2807SJeff Garzik */ 1562c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_DISABLED) { 1563c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1564c6fd2807SJeff Garzik ata_port_probe(ap); 1565c6fd2807SJeff Garzik } 1566c6fd2807SJeff Garzik 1567c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1568c6fd2807SJeff Garzik 1569c6fd2807SJeff Garzik return err_mask; 1570c6fd2807SJeff Garzik } 1571c6fd2807SJeff Garzik 1572c6fd2807SJeff Garzik /** 157333480a0eSTejun Heo * ata_exec_internal - execute libata internal command 15742432697bSTejun Heo * @dev: Device to which the command is sent 15752432697bSTejun Heo * @tf: Taskfile registers for the command and the result 15762432697bSTejun Heo * @cdb: CDB for packet command 15772432697bSTejun Heo * @dma_dir: Data transfer direction of the command 15782432697bSTejun Heo * @buf: Data buffer of the command 15792432697bSTejun Heo * @buflen: Length of data buffer 15802b789108STejun Heo * @timeout: Timeout in msecs (0 for default) 15812432697bSTejun Heo * 15822432697bSTejun Heo * Wrapper around ata_exec_internal_sg() which takes simple 15832432697bSTejun Heo * buffer instead of sg list. 15842432697bSTejun Heo * 15852432697bSTejun Heo * LOCKING: 15862432697bSTejun Heo * None. Should be called with kernel context, might sleep.
15872432697bSTejun Heo * 15882432697bSTejun Heo * RETURNS: 15892432697bSTejun Heo * Zero on success, AC_ERR_* mask on failure 15902432697bSTejun Heo */ 15912432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev, 15922432697bSTejun Heo struct ata_taskfile *tf, const u8 *cdb, 15932b789108STejun Heo int dma_dir, void *buf, unsigned int buflen, 15942b789108STejun Heo unsigned long timeout) 15952432697bSTejun Heo { 159633480a0eSTejun Heo struct scatterlist *psg = NULL, sg; 159733480a0eSTejun Heo unsigned int n_elem = 0; 15982432697bSTejun Heo 159933480a0eSTejun Heo if (dma_dir != DMA_NONE) { 160033480a0eSTejun Heo WARN_ON(!buf); 16012432697bSTejun Heo sg_init_one(&sg, buf, buflen); 160233480a0eSTejun Heo psg = &sg; 160333480a0eSTejun Heo n_elem++; 160433480a0eSTejun Heo } 16052432697bSTejun Heo 16062b789108STejun Heo return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, 16072b789108STejun Heo timeout); 16082432697bSTejun Heo } 16092432697bSTejun Heo 16102432697bSTejun Heo /** 1611c6fd2807SJeff Garzik * ata_do_simple_cmd - execute simple internal command 1612c6fd2807SJeff Garzik * @dev: Device to which the command is sent 1613c6fd2807SJeff Garzik * @cmd: Opcode to execute 1614c6fd2807SJeff Garzik * 1615c6fd2807SJeff Garzik * Execute a 'simple' command, that only consists of the opcode 1616c6fd2807SJeff Garzik * 'cmd' itself, without filling any other registers 1617c6fd2807SJeff Garzik * 1618c6fd2807SJeff Garzik * LOCKING: 1619c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1620c6fd2807SJeff Garzik * 1621c6fd2807SJeff Garzik * RETURNS: 1622c6fd2807SJeff Garzik * Zero on success, AC_ERR_* mask on failure 1623c6fd2807SJeff Garzik */ 1624c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) 1625c6fd2807SJeff Garzik { 1626c6fd2807SJeff Garzik struct ata_taskfile tf; 1627c6fd2807SJeff Garzik 1628c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1629c6fd2807SJeff Garzik 1630c6fd2807SJeff Garzik tf.command = cmd; 1631c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_DEVICE; 1632c6fd2807SJeff Garzik tf.protocol = ATA_PROT_NODATA; 1633c6fd2807SJeff Garzik 16342b789108STejun Heo return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1635c6fd2807SJeff Garzik } 1636c6fd2807SJeff Garzik 1637c6fd2807SJeff Garzik /** 1638c6fd2807SJeff Garzik * ata_pio_need_iordy - check if iordy needed 1639c6fd2807SJeff Garzik * @adev: ATA device 1640c6fd2807SJeff Garzik * 1641c6fd2807SJeff Garzik * Check if the current speed of the device requires IORDY. Used 1642c6fd2807SJeff Garzik * by various controllers for chip configuration. 1643c6fd2807SJeff Garzik */ 1644c6fd2807SJeff Garzik 1645c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev) 1646c6fd2807SJeff Garzik { 1647432729f0SAlan Cox /* Controller doesn't support IORDY. 
Probably a pointless check 1648432729f0SAlan Cox as the caller should know this */ 16499af5c9c9STejun Heo if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 1650c6fd2807SJeff Garzik return 0; 1651432729f0SAlan Cox /* PIO3 and higher it is mandatory */ 1652432729f0SAlan Cox if (adev->pio_mode > XFER_PIO_2) 1653c6fd2807SJeff Garzik return 1; 1654432729f0SAlan Cox /* We turn it on when possible */ 1655432729f0SAlan Cox if (ata_id_has_iordy(adev->id)) 1656432729f0SAlan Cox return 1; 1657432729f0SAlan Cox return 0; 1658432729f0SAlan Cox } 1659c6fd2807SJeff Garzik 1660432729f0SAlan Cox /** 1661432729f0SAlan Cox * ata_pio_mask_no_iordy - Return the non IORDY mask 1662432729f0SAlan Cox * @adev: ATA device 1663432729f0SAlan Cox * 1664432729f0SAlan Cox * Compute the highest mode possible if we are not using iordy. Return 1665432729f0SAlan Cox * -1 if no iordy mode is available. 1666432729f0SAlan Cox */ 1667432729f0SAlan Cox 1668432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) 1669432729f0SAlan Cox { 1670c6fd2807SJeff Garzik /* If we have no drive specific rule, then PIO 2 is non IORDY */ 1671c6fd2807SJeff Garzik if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ 1672432729f0SAlan Cox u16 pio = adev->id[ATA_ID_EIDE_PIO]; 1673c6fd2807SJeff Garzik /* Is the speed faster than the drive allows non IORDY ? */ 1674c6fd2807SJeff Garzik if (pio) { 1675c6fd2807SJeff Garzik /* This is cycle times not frequency - watch the logic! 
*/ 1676c6fd2807SJeff Garzik if (pio > 240) /* PIO2 is 240nS per cycle */ 1677432729f0SAlan Cox return 3 << ATA_SHIFT_PIO; 1678432729f0SAlan Cox return 7 << ATA_SHIFT_PIO; 1679c6fd2807SJeff Garzik } 1680c6fd2807SJeff Garzik } 1681432729f0SAlan Cox return 3 << ATA_SHIFT_PIO; 1682c6fd2807SJeff Garzik } 1683c6fd2807SJeff Garzik 1684c6fd2807SJeff Garzik /** 1685c6fd2807SJeff Garzik * ata_dev_read_id - Read ID data from the specified device 1686c6fd2807SJeff Garzik * @dev: target device 1687c6fd2807SJeff Garzik * @p_class: pointer to class of the target device (may be changed) 1688bff04647STejun Heo * @flags: ATA_READID_* flags 1689c6fd2807SJeff Garzik * @id: buffer to read IDENTIFY data into 1690c6fd2807SJeff Garzik * 1691c6fd2807SJeff Garzik * Read ID data from the specified device. ATA_CMD_ID_ATA is 1692c6fd2807SJeff Garzik * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1693c6fd2807SJeff Garzik * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 1694c6fd2807SJeff Garzik * for pre-ATA4 drives. 1695c6fd2807SJeff Garzik * 169650a99018SAlan Cox * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 169750a99018SAlan Cox * now we abort if we hit that case. 169850a99018SAlan Cox * 1699c6fd2807SJeff Garzik * LOCKING: 1700c6fd2807SJeff Garzik * Kernel thread context (may sleep) 1701c6fd2807SJeff Garzik * 1702c6fd2807SJeff Garzik * RETURNS: 1703c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
1704c6fd2807SJeff Garzik */ 1705c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1706bff04647STejun Heo unsigned int flags, u16 *id) 1707c6fd2807SJeff Garzik { 17089af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1709c6fd2807SJeff Garzik unsigned int class = *p_class; 1710c6fd2807SJeff Garzik struct ata_taskfile tf; 1711c6fd2807SJeff Garzik unsigned int err_mask = 0; 1712c6fd2807SJeff Garzik const char *reason; 171354936f8bSTejun Heo int may_fallback = 1, tried_spinup = 0; 1714c6fd2807SJeff Garzik int rc; 1715c6fd2807SJeff Garzik 1716c6fd2807SJeff Garzik if (ata_msg_ctl(ap)) 171744877b4eSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); 1718c6fd2807SJeff Garzik 1719c6fd2807SJeff Garzik ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 1720c6fd2807SJeff Garzik retry: 1721c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1722c6fd2807SJeff Garzik 1723c6fd2807SJeff Garzik switch (class) { 1724c6fd2807SJeff Garzik case ATA_DEV_ATA: 1725c6fd2807SJeff Garzik tf.command = ATA_CMD_ID_ATA; 1726c6fd2807SJeff Garzik break; 1727c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1728c6fd2807SJeff Garzik tf.command = ATA_CMD_ID_ATAPI; 1729c6fd2807SJeff Garzik break; 1730c6fd2807SJeff Garzik default: 1731c6fd2807SJeff Garzik rc = -ENODEV; 1732c6fd2807SJeff Garzik reason = "unsupported class"; 1733c6fd2807SJeff Garzik goto err_out; 1734c6fd2807SJeff Garzik } 1735c6fd2807SJeff Garzik 1736c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 173781afe893STejun Heo 173881afe893STejun Heo /* Some devices choke if TF registers contain garbage. Make 173981afe893STejun Heo * sure those are properly initialized. 174081afe893STejun Heo */ 174181afe893STejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 174281afe893STejun Heo 174381afe893STejun Heo /* Device presence detection is unreliable on some 174481afe893STejun Heo * controllers. Always poll IDENTIFY if available.
174581afe893STejun Heo */ 174681afe893STejun Heo tf.flags |= ATA_TFLAG_POLLING; 1747c6fd2807SJeff Garzik 1748c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 17492b789108STejun Heo id, sizeof(id[0]) * ATA_ID_WORDS, 0); 1750c6fd2807SJeff Garzik if (err_mask) { 1751800b3996STejun Heo if (err_mask & AC_ERR_NODEV_HINT) { 175255a8e2c8STejun Heo DPRINTK("ata%u.%d: NODEV after polling detection\n", 175344877b4eSTejun Heo ap->print_id, dev->devno); 175455a8e2c8STejun Heo return -ENOENT; 175555a8e2c8STejun Heo } 175655a8e2c8STejun Heo 175754936f8bSTejun Heo /* Device or controller might have reported the wrong 175854936f8bSTejun Heo * device class. Give a shot at the other IDENTIFY if 175954936f8bSTejun Heo * the current one is aborted by the device. 176054936f8bSTejun Heo */ 176154936f8bSTejun Heo if (may_fallback && 176254936f8bSTejun Heo (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 176354936f8bSTejun Heo may_fallback = 0; 176454936f8bSTejun Heo 176554936f8bSTejun Heo if (class == ATA_DEV_ATA) 176654936f8bSTejun Heo class = ATA_DEV_ATAPI; 176754936f8bSTejun Heo else 176854936f8bSTejun Heo class = ATA_DEV_ATA; 176954936f8bSTejun Heo goto retry; 177054936f8bSTejun Heo } 177154936f8bSTejun Heo 1772c6fd2807SJeff Garzik rc = -EIO; 1773c6fd2807SJeff Garzik reason = "I/O error"; 1774c6fd2807SJeff Garzik goto err_out; 1775c6fd2807SJeff Garzik } 1776c6fd2807SJeff Garzik 177754936f8bSTejun Heo /* Falling back doesn't make sense if ID data was read 177854936f8bSTejun Heo * successfully at least once.
177954936f8bSTejun Heo */ 178054936f8bSTejun Heo may_fallback = 0; 178154936f8bSTejun Heo 1782c6fd2807SJeff Garzik swap_buf_le16(id, ATA_ID_WORDS); 1783c6fd2807SJeff Garzik 1784c6fd2807SJeff Garzik /* sanity check */ 1785c6fd2807SJeff Garzik rc = -EINVAL; 17866070068bSAlan Cox reason = "device reports invalid type"; 17874a3381feSJeff Garzik 17884a3381feSJeff Garzik if (class == ATA_DEV_ATA) { 17894a3381feSJeff Garzik if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 17904a3381feSJeff Garzik goto err_out; 17914a3381feSJeff Garzik } else { 17924a3381feSJeff Garzik if (ata_id_is_ata(id)) 1793c6fd2807SJeff Garzik goto err_out; 1794c6fd2807SJeff Garzik } 1795c6fd2807SJeff Garzik 1796169439c2SMark Lord if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 1797169439c2SMark Lord tried_spinup = 1; 1798169439c2SMark Lord /* 1799169439c2SMark Lord * Drive powered-up in standby mode, and requires a specific 1800169439c2SMark Lord * SET_FEATURES spin-up subcommand before it will accept 1801169439c2SMark Lord * anything other than the original IDENTIFY command. 1802169439c2SMark Lord */ 1803218f3d30SJeff Garzik err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); 1804fb0582f9SRyan Power if (err_mask && id[2] != 0x738c) { 1805169439c2SMark Lord rc = -EIO; 1806169439c2SMark Lord reason = "SPINUP failed"; 1807169439c2SMark Lord goto err_out; 1808169439c2SMark Lord } 1809169439c2SMark Lord /* 1810169439c2SMark Lord * If the drive initially returned incomplete IDENTIFY info, 1811169439c2SMark Lord * we now must reissue the IDENTIFY command.
1812169439c2SMark Lord */ 1813169439c2SMark Lord if (id[2] == 0x37c8) 1814169439c2SMark Lord goto retry; 1815169439c2SMark Lord } 1816169439c2SMark Lord 1817bff04647STejun Heo if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { 1818c6fd2807SJeff Garzik /* 1819c6fd2807SJeff Garzik * The exact sequence expected by certain pre-ATA4 drives is: 1820c6fd2807SJeff Garzik * SRST RESET 182150a99018SAlan Cox * IDENTIFY (optional in early ATA) 182250a99018SAlan Cox * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 1823c6fd2807SJeff Garzik * anything else.. 1824c6fd2807SJeff Garzik * Some drives were very specific about that exact sequence. 182550a99018SAlan Cox * 182650a99018SAlan Cox * Note that ATA4 says lba is mandatory so the second check 182750a99018SAlan Cox * should never trigger. 1828c6fd2807SJeff Garzik */ 1829c6fd2807SJeff Garzik if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 1830c6fd2807SJeff Garzik err_mask = ata_dev_init_params(dev, id[3], id[6]); 1831c6fd2807SJeff Garzik if (err_mask) { 1832c6fd2807SJeff Garzik rc = -EIO; 1833c6fd2807SJeff Garzik reason = "INIT_DEV_PARAMS failed"; 1834c6fd2807SJeff Garzik goto err_out; 1835c6fd2807SJeff Garzik } 1836c6fd2807SJeff Garzik 1837c6fd2807SJeff Garzik /* current CHS translation info (id[53-58]) might be 1838c6fd2807SJeff Garzik * changed. reread the identify device info.
1839c6fd2807SJeff Garzik */ 1840bff04647STejun Heo flags &= ~ATA_READID_POSTRESET; 1841c6fd2807SJeff Garzik goto retry; 1842c6fd2807SJeff Garzik } 1843c6fd2807SJeff Garzik } 1844c6fd2807SJeff Garzik 1845c6fd2807SJeff Garzik *p_class = class; 1846c6fd2807SJeff Garzik 1847c6fd2807SJeff Garzik return 0; 1848c6fd2807SJeff Garzik 1849c6fd2807SJeff Garzik err_out: 1850c6fd2807SJeff Garzik if (ata_msg_warn(ap)) 1851c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " 1852c6fd2807SJeff Garzik "(%s, err_mask=0x%x)\n", reason, err_mask); 1853c6fd2807SJeff Garzik return rc; 1854c6fd2807SJeff Garzik } 1855c6fd2807SJeff Garzik /* True when the port cable is SATA but the device's IDENTIFY data does not report SATA -- ata_dev_configure() uses this to apply bridge limits. */ 1856c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev) 1857c6fd2807SJeff Garzik { 18589af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 18599af5c9c9STejun Heo return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 1860c6fd2807SJeff Garzik } 1861c6fd2807SJeff Garzik /* Build a human-readable NCQ description in @desc and set ATA_DFLAG_NCQ when both device and port support queueing. */ 1862c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev, 1863c6fd2807SJeff Garzik char *desc, size_t desc_sz) 1864c6fd2807SJeff Garzik { 18659af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1866c6fd2807SJeff Garzik int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 1867c6fd2807SJeff Garzik 1868c6fd2807SJeff Garzik if (!ata_id_has_ncq(dev->id)) { 1869c6fd2807SJeff Garzik desc[0] = '\0'; 1870c6fd2807SJeff Garzik return; 1871c6fd2807SJeff Garzik } 187275683fe7STejun Heo if (dev->horkage & ATA_HORKAGE_NONCQ) { 18736919a0a6SAlan Cox snprintf(desc, desc_sz, "NCQ (not used)"); 18746919a0a6SAlan Cox return; 18756919a0a6SAlan Cox } 1876c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_NCQ) { 1877cca3974eSJeff Garzik hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 1878c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_NCQ; 1879c6fd2807SJeff Garzik } 1880c6fd2807SJeff Garzik 1881c6fd2807SJeff Garzik if (hdepth >= ddepth) 1882c6fd2807SJeff Garzik snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1883c6fd2807SJeff Garzik else 1884c6fd2807SJeff Garzik snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); 1885c6fd2807SJeff Garzik } 1886c6fd2807SJeff Garzik 1887c6fd2807SJeff Garzik /** 1888c6fd2807SJeff Garzik * ata_dev_configure - Configure the specified ATA/ATAPI device 1889c6fd2807SJeff Garzik * @dev: Target device to configure 1890c6fd2807SJeff Garzik * 1891c6fd2807SJeff Garzik * Configure @dev according to @dev->id. Generic and low-level 1892c6fd2807SJeff Garzik * driver specific fixups are also applied. 1893c6fd2807SJeff Garzik * 1894c6fd2807SJeff Garzik * LOCKING: 1895c6fd2807SJeff Garzik * Kernel thread context (may sleep) 1896c6fd2807SJeff Garzik * 1897c6fd2807SJeff Garzik * RETURNS: 1898c6fd2807SJeff Garzik * 0 on success, -errno otherwise 1899c6fd2807SJeff Garzik */ 1900efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev) 1901c6fd2807SJeff Garzik { 19029af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 19039af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 19046746544cSTejun Heo int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 1905c6fd2807SJeff Garzik const u16 *id = dev->id; 1906c6fd2807SJeff Garzik unsigned int xfer_mask; 1907b352e57dSAlan Cox char revbuf[7]; /* XYZ-99\0 */ 19083f64f565SEric D. Mudama char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 19093f64f565SEric D. 
Mudama char modelbuf[ATA_ID_PROD_LEN+1]; 1910c6fd2807SJeff Garzik int rc; 1911c6fd2807SJeff Garzik 1912c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 191344877b4eSTejun Heo ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", 191444877b4eSTejun Heo __FUNCTION__); 1915c6fd2807SJeff Garzik return 0; 1916c6fd2807SJeff Garzik } 1917c6fd2807SJeff Garzik 1918c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 191944877b4eSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); 1920c6fd2807SJeff Garzik 192175683fe7STejun Heo /* set horkage */ 192275683fe7STejun Heo dev->horkage |= ata_dev_blacklisted(dev); 192375683fe7STejun Heo 19246746544cSTejun Heo /* let ACPI work its magic */ 19256746544cSTejun Heo rc = ata_acpi_on_devcfg(dev); 19266746544cSTejun Heo if (rc) 19276746544cSTejun Heo return rc; 192808573a86SKristen Carlson Accardi 192905027adcSTejun Heo /* massage HPA, do it early as it might change IDENTIFY data */ 193005027adcSTejun Heo rc = ata_hpa_resize(dev); 193105027adcSTejun Heo if (rc) 193205027adcSTejun Heo return rc; 193305027adcSTejun Heo 1934c6fd2807SJeff Garzik /* print device capabilities */ 1935c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 1936c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_DEBUG, 1937c6fd2807SJeff Garzik "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 1938c6fd2807SJeff Garzik "85:%04x 86:%04x 87:%04x 88:%04x\n", 1939c6fd2807SJeff Garzik __FUNCTION__, 1940c6fd2807SJeff Garzik id[49], id[82], id[83], id[84], 1941c6fd2807SJeff Garzik id[85], id[86], id[87], id[88]); 1942c6fd2807SJeff Garzik 1943c6fd2807SJeff Garzik /* initialize to-be-configured parameters */ 1944c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_CFG_MASK; 1945c6fd2807SJeff Garzik dev->max_sectors = 0; 1946c6fd2807SJeff Garzik dev->cdb_len = 0; 1947c6fd2807SJeff Garzik dev->n_sectors = 0; 1948c6fd2807SJeff Garzik dev->cylinders = 0; 1949c6fd2807SJeff Garzik dev->heads = 0; 1950c6fd2807SJeff Garzik dev->sectors = 0; 1951c6fd2807SJeff Garzik 
1952c6fd2807SJeff Garzik /* 1953c6fd2807SJeff Garzik * common ATA, ATAPI feature tests 1954c6fd2807SJeff Garzik */ 1955c6fd2807SJeff Garzik 1956c6fd2807SJeff Garzik /* find max transfer mode; for printk only */ 1957c6fd2807SJeff Garzik xfer_mask = ata_id_xfermask(id); 1958c6fd2807SJeff Garzik 1959c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 1960c6fd2807SJeff Garzik ata_dump_id(id); 1961c6fd2807SJeff Garzik 1962ef143d57SAlbert Lee /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 1963ef143d57SAlbert Lee ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 1964ef143d57SAlbert Lee sizeof(fwrevbuf)); 1965ef143d57SAlbert Lee 1966ef143d57SAlbert Lee ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 1967ef143d57SAlbert Lee sizeof(modelbuf)); 1968ef143d57SAlbert Lee 1969c6fd2807SJeff Garzik /* ATA-specific feature tests */ 1970c6fd2807SJeff Garzik if (dev->class == ATA_DEV_ATA) { 1971b352e57dSAlan Cox if (ata_id_is_cfa(id)) { 1972b352e57dSAlan Cox if (id[162] & 1) /* CPRM may make this media unusable */ 197344877b4eSTejun Heo ata_dev_printk(dev, KERN_WARNING, 197444877b4eSTejun Heo "supports DRM functions and may " 197544877b4eSTejun Heo "not be fully accessable.\n"); 1976b352e57dSAlan Cox snprintf(revbuf, 7, "CFA"); 19772dcb407eSJeff Garzik } else 1978b352e57dSAlan Cox snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 1979b352e57dSAlan Cox 1980c6fd2807SJeff Garzik dev->n_sectors = ata_id_n_sectors(id); 1981c6fd2807SJeff Garzik 19823f64f565SEric D. Mudama if (dev->id[59] & 0x100) 19833f64f565SEric D. Mudama dev->multi_count = dev->id[59] & 0xff; 19843f64f565SEric D. 
Mudama 1985c6fd2807SJeff Garzik if (ata_id_has_lba(id)) { 1986c6fd2807SJeff Garzik const char *lba_desc; 1987c6fd2807SJeff Garzik char ncq_desc[20]; 1988c6fd2807SJeff Garzik 1989c6fd2807SJeff Garzik lba_desc = "LBA"; 1990c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_LBA; 1991c6fd2807SJeff Garzik if (ata_id_has_lba48(id)) { 1992c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_LBA48; 1993c6fd2807SJeff Garzik lba_desc = "LBA48"; 19946fc49adbSTejun Heo 19956fc49adbSTejun Heo if (dev->n_sectors >= (1UL << 28) && 19966fc49adbSTejun Heo ata_id_has_flush_ext(id)) 19976fc49adbSTejun Heo dev->flags |= ATA_DFLAG_FLUSH_EXT; 1998c6fd2807SJeff Garzik } 1999c6fd2807SJeff Garzik 2000c6fd2807SJeff Garzik /* config NCQ */ 2001c6fd2807SJeff Garzik ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2002c6fd2807SJeff Garzik 2003c6fd2807SJeff Garzik /* print device info to dmesg */ 20043f64f565SEric D. Mudama if (ata_msg_drv(ap) && print_info) { 20053f64f565SEric D. Mudama ata_dev_printk(dev, KERN_INFO, 20063f64f565SEric D. Mudama "%s: %s, %s, max %s\n", 20073f64f565SEric D. Mudama revbuf, modelbuf, fwrevbuf, 20083f64f565SEric D. Mudama ata_mode_string(xfer_mask)); 20093f64f565SEric D. Mudama ata_dev_printk(dev, KERN_INFO, 20103f64f565SEric D. Mudama "%Lu sectors, multi %u: %s %s\n", 2011c6fd2807SJeff Garzik (unsigned long long)dev->n_sectors, 20123f64f565SEric D. Mudama dev->multi_count, lba_desc, ncq_desc); 20133f64f565SEric D. Mudama } 2014c6fd2807SJeff Garzik } else { 2015c6fd2807SJeff Garzik /* CHS */ 2016c6fd2807SJeff Garzik 2017c6fd2807SJeff Garzik /* Default translation */ 2018c6fd2807SJeff Garzik dev->cylinders = id[1]; 2019c6fd2807SJeff Garzik dev->heads = id[3]; 2020c6fd2807SJeff Garzik dev->sectors = id[6]; 2021c6fd2807SJeff Garzik 2022c6fd2807SJeff Garzik if (ata_id_current_chs_valid(id)) { 2023c6fd2807SJeff Garzik /* Current CHS translation is valid. 
*/ 2024c6fd2807SJeff Garzik dev->cylinders = id[54]; 2025c6fd2807SJeff Garzik dev->heads = id[55]; 2026c6fd2807SJeff Garzik dev->sectors = id[56]; 2027c6fd2807SJeff Garzik } 2028c6fd2807SJeff Garzik 2029c6fd2807SJeff Garzik /* print device info to dmesg */ 20303f64f565SEric D. Mudama if (ata_msg_drv(ap) && print_info) { 2031c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, 20323f64f565SEric D. Mudama "%s: %s, %s, max %s\n", 20333f64f565SEric D. Mudama revbuf, modelbuf, fwrevbuf, 20343f64f565SEric D. Mudama ata_mode_string(xfer_mask)); 20353f64f565SEric D. Mudama ata_dev_printk(dev, KERN_INFO, 20363f64f565SEric D. Mudama "%Lu sectors, multi %u, CHS %u/%u/%u\n", 20373f64f565SEric D. Mudama (unsigned long long)dev->n_sectors, 20383f64f565SEric D. Mudama dev->multi_count, dev->cylinders, 20393f64f565SEric D. Mudama dev->heads, dev->sectors); 20403f64f565SEric D. Mudama } 2041c6fd2807SJeff Garzik } 2042c6fd2807SJeff Garzik 2043c6fd2807SJeff Garzik dev->cdb_len = 16; 2044c6fd2807SJeff Garzik } 2045c6fd2807SJeff Garzik 2046c6fd2807SJeff Garzik /* ATAPI-specific feature tests */ 2047c6fd2807SJeff Garzik else if (dev->class == ATA_DEV_ATAPI) { 2048854c73a2STejun Heo const char *cdb_intr_string = ""; 2049854c73a2STejun Heo const char *atapi_an_string = ""; 20507d77b247STejun Heo u32 sntf; 2051c6fd2807SJeff Garzik 2052c6fd2807SJeff Garzik rc = atapi_cdb_len(id); 2053c6fd2807SJeff Garzik if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2054c6fd2807SJeff Garzik if (ata_msg_warn(ap)) 2055c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 2056c6fd2807SJeff Garzik "unsupported CDB len\n"); 2057c6fd2807SJeff Garzik rc = -EINVAL; 2058c6fd2807SJeff Garzik goto err_out_nosup; 2059c6fd2807SJeff Garzik } 2060c6fd2807SJeff Garzik dev->cdb_len = (unsigned int) rc; 2061c6fd2807SJeff Garzik 20627d77b247STejun Heo /* Enable ATAPI AN if both the host and device have 20637d77b247STejun Heo * the support. 
If PMP is attached, SNTF is required 20647d77b247STejun Heo * to enable ATAPI AN to discern between PHY status 20657d77b247STejun Heo * changed notifications and ATAPI ANs. 20669f45cbd3SKristen Carlson Accardi */ 20677d77b247STejun Heo if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 20687d77b247STejun Heo (!ap->nr_pmp_links || 20697d77b247STejun Heo sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2070854c73a2STejun Heo unsigned int err_mask; 2071854c73a2STejun Heo 20729f45cbd3SKristen Carlson Accardi /* issue SET feature command to turn this on */ 2073218f3d30SJeff Garzik err_mask = ata_dev_set_feature(dev, 2074218f3d30SJeff Garzik SETFEATURES_SATA_ENABLE, SATA_AN); 2075854c73a2STejun Heo if (err_mask) 20769f45cbd3SKristen Carlson Accardi ata_dev_printk(dev, KERN_ERR, 2077854c73a2STejun Heo "failed to enable ATAPI AN " 2078854c73a2STejun Heo "(err_mask=0x%x)\n", err_mask); 2079854c73a2STejun Heo else { 20809f45cbd3SKristen Carlson Accardi dev->flags |= ATA_DFLAG_AN; 2081854c73a2STejun Heo atapi_an_string = ", ATAPI AN"; 2082854c73a2STejun Heo } 20839f45cbd3SKristen Carlson Accardi } 20849f45cbd3SKristen Carlson Accardi 2085c6fd2807SJeff Garzik if (ata_id_cdb_intr(dev->id)) { 2086c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_CDB_INTR; 2087c6fd2807SJeff Garzik cdb_intr_string = ", CDB intr"; 2088c6fd2807SJeff Garzik } 2089c6fd2807SJeff Garzik 2090c6fd2807SJeff Garzik /* print device info to dmesg */ 2091c6fd2807SJeff Garzik if (ata_msg_drv(ap) && print_info) 2092ef143d57SAlbert Lee ata_dev_printk(dev, KERN_INFO, 2093854c73a2STejun Heo "ATAPI: %s, %s, max %s%s%s\n", 2094ef143d57SAlbert Lee modelbuf, fwrevbuf, 2095c6fd2807SJeff Garzik ata_mode_string(xfer_mask), 2096854c73a2STejun Heo cdb_intr_string, atapi_an_string); 2097c6fd2807SJeff Garzik } 2098c6fd2807SJeff Garzik 2099914ed354STejun Heo /* determine max_sectors */ 2100914ed354STejun Heo dev->max_sectors = ATA_MAX_SECTORS; 2101914ed354STejun Heo if (dev->flags & ATA_DFLAG_LBA48) 
2102914ed354STejun Heo dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2103914ed354STejun Heo 210493590859SAlan Cox if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 210593590859SAlan Cox /* Let the user know. We don't want to disallow opens for 210693590859SAlan Cox rescue purposes, or in case the vendor is just a blithering 210793590859SAlan Cox idiot */ 210893590859SAlan Cox if (print_info) { 210993590859SAlan Cox ata_dev_printk(dev, KERN_WARNING, 211093590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n"); 211193590859SAlan Cox ata_dev_printk(dev, KERN_WARNING, 211293590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n"); 211393590859SAlan Cox } 211493590859SAlan Cox } 211593590859SAlan Cox 2116c6fd2807SJeff Garzik /* limit bridge transfers to udma5, 200 sectors */ 2117c6fd2807SJeff Garzik if (ata_dev_knobble(dev)) { 2118c6fd2807SJeff Garzik if (ata_msg_drv(ap) && print_info) 2119c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, 2120c6fd2807SJeff Garzik "applying bridge limits\n"); 2121c6fd2807SJeff Garzik dev->udma_mask &= ATA_UDMA5; 2122c6fd2807SJeff Garzik dev->max_sectors = ATA_MAX_SECTORS; 2123c6fd2807SJeff Garzik } 2124c6fd2807SJeff Garzik 212575683fe7STejun Heo if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 212603ec52deSTejun Heo dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 212703ec52deSTejun Heo dev->max_sectors); 212818d6e9d5SAlbert Lee 2129c6fd2807SJeff Garzik if (ap->ops->dev_config) 2130cd0d3bbcSAlan ap->ops->dev_config(dev); 2131c6fd2807SJeff Garzik 2132c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 2133c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", 2134c6fd2807SJeff Garzik __FUNCTION__, ata_chk_status(ap)); 2135c6fd2807SJeff Garzik return 0; 2136c6fd2807SJeff Garzik 2137c6fd2807SJeff Garzik err_out_nosup: 2138c6fd2807SJeff Garzik if (ata_msg_probe(ap)) 2139c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_DEBUG, 2140c6fd2807SJeff Garzik "%s: EXIT, err\n", 
__FUNCTION__); 2141c6fd2807SJeff Garzik return rc; 2142c6fd2807SJeff Garzik } 2143c6fd2807SJeff Garzik 2144c6fd2807SJeff Garzik /** 21452e41e8e6SAlan Cox * ata_cable_40wire - return 40 wire cable type 2146be0d18dfSAlan Cox * @ap: port 2147be0d18dfSAlan Cox * 21482e41e8e6SAlan Cox * Helper method for drivers which want to hardwire 40 wire cable 2149be0d18dfSAlan Cox * detection. 2150be0d18dfSAlan Cox */ 2151be0d18dfSAlan Cox 2152be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap) 2153be0d18dfSAlan Cox { 2154be0d18dfSAlan Cox return ATA_CBL_PATA40; 2155be0d18dfSAlan Cox } 2156be0d18dfSAlan Cox 2157be0d18dfSAlan Cox /** 21582e41e8e6SAlan Cox * ata_cable_80wire - return 80 wire cable type 2159be0d18dfSAlan Cox * @ap: port 2160be0d18dfSAlan Cox * 21612e41e8e6SAlan Cox * Helper method for drivers which want to hardwire 80 wire cable 2162be0d18dfSAlan Cox * detection. 2163be0d18dfSAlan Cox */ 2164be0d18dfSAlan Cox 2165be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap) 2166be0d18dfSAlan Cox { 2167be0d18dfSAlan Cox return ATA_CBL_PATA80; 2168be0d18dfSAlan Cox } 2169be0d18dfSAlan Cox 2170be0d18dfSAlan Cox /** 2171be0d18dfSAlan Cox * ata_cable_unknown - return unknown PATA cable. 2172be0d18dfSAlan Cox * @ap: port 2173be0d18dfSAlan Cox * 2174be0d18dfSAlan Cox * Helper method for drivers which have no PATA cable detection. 
2175be0d18dfSAlan Cox */ 2176be0d18dfSAlan Cox 2177be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap) 2178be0d18dfSAlan Cox { 2179be0d18dfSAlan Cox return ATA_CBL_PATA_UNK; 2180be0d18dfSAlan Cox } 2181be0d18dfSAlan Cox 2182be0d18dfSAlan Cox /** 2183be0d18dfSAlan Cox * ata_cable_sata - return SATA cable type 2184be0d18dfSAlan Cox * @ap: port 2185be0d18dfSAlan Cox * 2186be0d18dfSAlan Cox * Helper method for drivers which have SATA cables 2187be0d18dfSAlan Cox */ 2188be0d18dfSAlan Cox 2189be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap) 2190be0d18dfSAlan Cox { 2191be0d18dfSAlan Cox return ATA_CBL_SATA; 2192be0d18dfSAlan Cox } 2193be0d18dfSAlan Cox 2194be0d18dfSAlan Cox /** 2195c6fd2807SJeff Garzik * ata_bus_probe - Reset and probe ATA bus 2196c6fd2807SJeff Garzik * @ap: Bus to probe 2197c6fd2807SJeff Garzik * 2198c6fd2807SJeff Garzik * Master ATA bus probing function. Initiates a hardware-dependent 2199c6fd2807SJeff Garzik * bus reset, then attempts to identify any devices found on 2200c6fd2807SJeff Garzik * the bus. 2201c6fd2807SJeff Garzik * 2202c6fd2807SJeff Garzik * LOCKING: 2203c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 2204c6fd2807SJeff Garzik * 2205c6fd2807SJeff Garzik * RETURNS: 2206c6fd2807SJeff Garzik * Zero on success, negative errno otherwise. 
2207c6fd2807SJeff Garzik */ 2208c6fd2807SJeff Garzik 2209c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap) 2210c6fd2807SJeff Garzik { 2211c6fd2807SJeff Garzik unsigned int classes[ATA_MAX_DEVICES]; 2212c6fd2807SJeff Garzik int tries[ATA_MAX_DEVICES]; 2213f58229f8STejun Heo int rc; 2214c6fd2807SJeff Garzik struct ata_device *dev; 2215c6fd2807SJeff Garzik 2216c6fd2807SJeff Garzik ata_port_probe(ap); 2217c6fd2807SJeff Garzik 2218f58229f8STejun Heo ata_link_for_each_dev(dev, &ap->link) 2219f58229f8STejun Heo tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2220c6fd2807SJeff Garzik 2221c6fd2807SJeff Garzik retry: 2222cdeab114STejun Heo ata_link_for_each_dev(dev, &ap->link) { 2223cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2224cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2225cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2226cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2227cdeab114STejun Heo * suitable controller mode we should not touch the 2228cdeab114STejun Heo * bus as we may be talking too fast. 2229cdeab114STejun Heo */ 2230cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2231cdeab114STejun Heo 2232cdeab114STejun Heo /* If the controller has a pio mode setup function 2233cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2234cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2235cdeab114STejun Heo * configuring devices. 
2236cdeab114STejun Heo */ 2237cdeab114STejun Heo if (ap->ops->set_piomode) 2238cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2239cdeab114STejun Heo } 2240cdeab114STejun Heo 2241c6fd2807SJeff Garzik /* reset and determine device classes */ 2242c6fd2807SJeff Garzik ap->ops->phy_reset(ap); 2243c6fd2807SJeff Garzik 2244f58229f8STejun Heo ata_link_for_each_dev(dev, &ap->link) { 2245c6fd2807SJeff Garzik if (!(ap->flags & ATA_FLAG_DISABLED) && 2246c6fd2807SJeff Garzik dev->class != ATA_DEV_UNKNOWN) 2247c6fd2807SJeff Garzik classes[dev->devno] = dev->class; 2248c6fd2807SJeff Garzik else 2249c6fd2807SJeff Garzik classes[dev->devno] = ATA_DEV_NONE; 2250c6fd2807SJeff Garzik 2251c6fd2807SJeff Garzik dev->class = ATA_DEV_UNKNOWN; 2252c6fd2807SJeff Garzik } 2253c6fd2807SJeff Garzik 2254c6fd2807SJeff Garzik ata_port_probe(ap); 2255c6fd2807SJeff Garzik 2256f31f0cc2SJeff Garzik /* read IDENTIFY page and configure devices. We have to do the identify 2257f31f0cc2SJeff Garzik specific sequence bass-ackwards so that PDIAG- is released by 2258f31f0cc2SJeff Garzik the slave device */ 2259f31f0cc2SJeff Garzik 2260f58229f8STejun Heo ata_link_for_each_dev(dev, &ap->link) { 2261f58229f8STejun Heo if (tries[dev->devno]) 2262f58229f8STejun Heo dev->class = classes[dev->devno]; 2263c6fd2807SJeff Garzik 2264c6fd2807SJeff Garzik if (!ata_dev_enabled(dev)) 2265c6fd2807SJeff Garzik continue; 2266c6fd2807SJeff Garzik 2267bff04647STejun Heo rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2268bff04647STejun Heo dev->id); 2269c6fd2807SJeff Garzik if (rc) 2270c6fd2807SJeff Garzik goto fail; 2271f31f0cc2SJeff Garzik } 2272f31f0cc2SJeff Garzik 2273be0d18dfSAlan Cox /* Now ask for the cable type as PDIAG- should have been released */ 2274be0d18dfSAlan Cox if (ap->ops->cable_detect) 2275be0d18dfSAlan Cox ap->cbl = ap->ops->cable_detect(ap); 2276be0d18dfSAlan Cox 2277614fe29bSAlan Cox /* We may have SATA bridge glue hiding here irrespective of the 2278614fe29bSAlan Cox reported cable types 
and sensed types */ 2279614fe29bSAlan Cox ata_link_for_each_dev(dev, &ap->link) { 2280614fe29bSAlan Cox if (!ata_dev_enabled(dev)) 2281614fe29bSAlan Cox continue; 2282614fe29bSAlan Cox /* SATA drives indicate we have a bridge. We don't know which 2283614fe29bSAlan Cox end of the link the bridge is which is a problem */ 2284614fe29bSAlan Cox if (ata_id_is_sata(dev->id)) 2285614fe29bSAlan Cox ap->cbl = ATA_CBL_SATA; 2286614fe29bSAlan Cox } 2287614fe29bSAlan Cox 2288f31f0cc2SJeff Garzik /* After the identify sequence we can now set up the devices. We do 2289f31f0cc2SJeff Garzik this in the normal order so that the user doesn't get confused */ 2290f31f0cc2SJeff Garzik 2291f58229f8STejun Heo ata_link_for_each_dev(dev, &ap->link) { 2292f31f0cc2SJeff Garzik if (!ata_dev_enabled(dev)) 2293f31f0cc2SJeff Garzik continue; 2294c6fd2807SJeff Garzik 22959af5c9c9STejun Heo ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2296efdaedc4STejun Heo rc = ata_dev_configure(dev); 22979af5c9c9STejun Heo ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2298c6fd2807SJeff Garzik if (rc) 2299c6fd2807SJeff Garzik goto fail; 2300c6fd2807SJeff Garzik } 2301c6fd2807SJeff Garzik 2302c6fd2807SJeff Garzik /* configure transfer mode */ 23030260731fSTejun Heo rc = ata_set_mode(&ap->link, &dev); 23044ae72a1eSTejun Heo if (rc) 2305c6fd2807SJeff Garzik goto fail; 2306c6fd2807SJeff Garzik 2307f58229f8STejun Heo ata_link_for_each_dev(dev, &ap->link) 2308f58229f8STejun Heo if (ata_dev_enabled(dev)) 2309c6fd2807SJeff Garzik return 0; 2310c6fd2807SJeff Garzik 2311c6fd2807SJeff Garzik /* no device present, disable port */ 2312c6fd2807SJeff Garzik ata_port_disable(ap); 2313c6fd2807SJeff Garzik return -ENODEV; 2314c6fd2807SJeff Garzik 2315c6fd2807SJeff Garzik fail: 23164ae72a1eSTejun Heo tries[dev->devno]--; 23174ae72a1eSTejun Heo 2318c6fd2807SJeff Garzik switch (rc) { 2319c6fd2807SJeff Garzik case -EINVAL: 23204ae72a1eSTejun Heo /* eeek, something went very wrong, give up */ 2321c6fd2807SJeff Garzik 
tries[dev->devno] = 0; 2322c6fd2807SJeff Garzik break; 23234ae72a1eSTejun Heo 23244ae72a1eSTejun Heo case -ENODEV: 23254ae72a1eSTejun Heo /* give it just one more chance */ 23264ae72a1eSTejun Heo tries[dev->devno] = min(tries[dev->devno], 1); 2327c6fd2807SJeff Garzik case -EIO: 23284ae72a1eSTejun Heo if (tries[dev->devno] == 1) { 23294ae72a1eSTejun Heo /* This is the last chance, better to slow 23304ae72a1eSTejun Heo * down than lose it. 23314ae72a1eSTejun Heo */ 2332936fd732STejun Heo sata_down_spd_limit(&ap->link); 23334ae72a1eSTejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 23344ae72a1eSTejun Heo } 2335c6fd2807SJeff Garzik } 2336c6fd2807SJeff Garzik 23374ae72a1eSTejun Heo if (!tries[dev->devno]) 2338c6fd2807SJeff Garzik ata_dev_disable(dev); 2339c6fd2807SJeff Garzik 2340c6fd2807SJeff Garzik goto retry; 2341c6fd2807SJeff Garzik } 2342c6fd2807SJeff Garzik 2343c6fd2807SJeff Garzik /** 2344c6fd2807SJeff Garzik * ata_port_probe - Mark port as enabled 2345c6fd2807SJeff Garzik * @ap: Port for which we indicate enablement 2346c6fd2807SJeff Garzik * 2347c6fd2807SJeff Garzik * Modify @ap data structure such that the system 2348c6fd2807SJeff Garzik * thinks that the entire port is enabled. 2349c6fd2807SJeff Garzik * 2350cca3974eSJeff Garzik * LOCKING: host lock, or some other form of 2351c6fd2807SJeff Garzik * serialization. 2352c6fd2807SJeff Garzik */ 2353c6fd2807SJeff Garzik 2354c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap) 2355c6fd2807SJeff Garzik { 2356c6fd2807SJeff Garzik ap->flags &= ~ATA_FLAG_DISABLED; 2357c6fd2807SJeff Garzik } 2358c6fd2807SJeff Garzik 2359c6fd2807SJeff Garzik /** 2360c6fd2807SJeff Garzik * sata_print_link_status - Print SATA link status 2361936fd732STejun Heo * @link: SATA link to printk link status about 2362c6fd2807SJeff Garzik * 2363c6fd2807SJeff Garzik * This function prints link speed and status of a SATA link. 2364c6fd2807SJeff Garzik * 2365c6fd2807SJeff Garzik * LOCKING: 2366c6fd2807SJeff Garzik * None. 
2367c6fd2807SJeff Garzik */ 2368936fd732STejun Heo void sata_print_link_status(struct ata_link *link) 2369c6fd2807SJeff Garzik { 2370c6fd2807SJeff Garzik u32 sstatus, scontrol, tmp; 2371c6fd2807SJeff Garzik 2372936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2373c6fd2807SJeff Garzik return; 2374936fd732STejun Heo sata_scr_read(link, SCR_CONTROL, &scontrol); 2375c6fd2807SJeff Garzik 2376936fd732STejun Heo if (ata_link_online(link)) { 2377c6fd2807SJeff Garzik tmp = (sstatus >> 4) & 0xf; 2378936fd732STejun Heo ata_link_printk(link, KERN_INFO, 2379c6fd2807SJeff Garzik "SATA link up %s (SStatus %X SControl %X)\n", 2380c6fd2807SJeff Garzik sata_spd_string(tmp), sstatus, scontrol); 2381c6fd2807SJeff Garzik } else { 2382936fd732STejun Heo ata_link_printk(link, KERN_INFO, 2383c6fd2807SJeff Garzik "SATA link down (SStatus %X SControl %X)\n", 2384c6fd2807SJeff Garzik sstatus, scontrol); 2385c6fd2807SJeff Garzik } 2386c6fd2807SJeff Garzik } 2387c6fd2807SJeff Garzik 2388c6fd2807SJeff Garzik /** 2389c6fd2807SJeff Garzik * __sata_phy_reset - Wake/reset a low-level SATA PHY 2390c6fd2807SJeff Garzik * @ap: SATA port associated with target SATA PHY. 2391c6fd2807SJeff Garzik * 2392c6fd2807SJeff Garzik * This function issues commands to standard SATA Sxxx 2393c6fd2807SJeff Garzik * PHY registers, to wake up the phy (and device), and 2394c6fd2807SJeff Garzik * clear any reset condition. 2395c6fd2807SJeff Garzik * 2396c6fd2807SJeff Garzik * LOCKING: 2397c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 
2398c6fd2807SJeff Garzik * 2399c6fd2807SJeff Garzik */ 2400c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap) 2401c6fd2807SJeff Garzik { 2402936fd732STejun Heo struct ata_link *link = &ap->link; 2403c6fd2807SJeff Garzik unsigned long timeout = jiffies + (HZ * 5); 2404936fd732STejun Heo u32 sstatus; 2405c6fd2807SJeff Garzik 2406c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_SATA_RESET) { 2407c6fd2807SJeff Garzik /* issue phy wake/reset */ 2408936fd732STejun Heo sata_scr_write_flush(link, SCR_CONTROL, 0x301); 2409c6fd2807SJeff Garzik /* Couldn't find anything in SATA I/II specs, but 2410c6fd2807SJeff Garzik * AHCI-1.1 10.4.2 says at least 1 ms. */ 2411c6fd2807SJeff Garzik mdelay(1); 2412c6fd2807SJeff Garzik } 2413c6fd2807SJeff Garzik /* phy wake/clear reset */ 2414936fd732STejun Heo sata_scr_write_flush(link, SCR_CONTROL, 0x300); 2415c6fd2807SJeff Garzik 2416c6fd2807SJeff Garzik /* wait for phy to become ready, if necessary */ 2417c6fd2807SJeff Garzik do { 2418c6fd2807SJeff Garzik msleep(200); 2419936fd732STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus); 2420c6fd2807SJeff Garzik if ((sstatus & 0xf) != 1) 2421c6fd2807SJeff Garzik break; 2422c6fd2807SJeff Garzik } while (time_before(jiffies, timeout)); 2423c6fd2807SJeff Garzik 2424c6fd2807SJeff Garzik /* print link status */ 2425936fd732STejun Heo sata_print_link_status(link); 2426c6fd2807SJeff Garzik 2427c6fd2807SJeff Garzik /* TODO: phy layer with polling, timeouts, etc. 
*/ 2428936fd732STejun Heo if (!ata_link_offline(link)) 2429c6fd2807SJeff Garzik ata_port_probe(ap); 2430c6fd2807SJeff Garzik else 2431c6fd2807SJeff Garzik ata_port_disable(ap); 2432c6fd2807SJeff Garzik 2433c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_DISABLED) 2434c6fd2807SJeff Garzik return; 2435c6fd2807SJeff Garzik 2436c6fd2807SJeff Garzik if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 2437c6fd2807SJeff Garzik ata_port_disable(ap); 2438c6fd2807SJeff Garzik return; 2439c6fd2807SJeff Garzik } 2440c6fd2807SJeff Garzik 2441c6fd2807SJeff Garzik ap->cbl = ATA_CBL_SATA; 2442c6fd2807SJeff Garzik } 2443c6fd2807SJeff Garzik 2444c6fd2807SJeff Garzik /** 2445c6fd2807SJeff Garzik * sata_phy_reset - Reset SATA bus. 2446c6fd2807SJeff Garzik * @ap: SATA port associated with target SATA PHY. 2447c6fd2807SJeff Garzik * 2448c6fd2807SJeff Garzik * This function resets the SATA bus, and then probes 2449c6fd2807SJeff Garzik * the bus for devices. 2450c6fd2807SJeff Garzik * 2451c6fd2807SJeff Garzik * LOCKING: 2452c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 
2453c6fd2807SJeff Garzik * 2454c6fd2807SJeff Garzik */ 2455c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap) 2456c6fd2807SJeff Garzik { 2457c6fd2807SJeff Garzik __sata_phy_reset(ap); 2458c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_DISABLED) 2459c6fd2807SJeff Garzik return; 2460c6fd2807SJeff Garzik ata_bus_reset(ap); 2461c6fd2807SJeff Garzik } 2462c6fd2807SJeff Garzik 2463c6fd2807SJeff Garzik /** 2464c6fd2807SJeff Garzik * ata_dev_pair - return other device on cable 2465c6fd2807SJeff Garzik * @adev: device 2466c6fd2807SJeff Garzik * 2467c6fd2807SJeff Garzik * Obtain the other device on the same cable, or if none is 2468c6fd2807SJeff Garzik * present NULL is returned 2469c6fd2807SJeff Garzik */ 2470c6fd2807SJeff Garzik 2471c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev) 2472c6fd2807SJeff Garzik { 24739af5c9c9STejun Heo struct ata_link *link = adev->link; 24749af5c9c9STejun Heo struct ata_device *pair = &link->device[1 - adev->devno]; 2475c6fd2807SJeff Garzik if (!ata_dev_enabled(pair)) 2476c6fd2807SJeff Garzik return NULL; 2477c6fd2807SJeff Garzik return pair; 2478c6fd2807SJeff Garzik } 2479c6fd2807SJeff Garzik 2480c6fd2807SJeff Garzik /** 2481c6fd2807SJeff Garzik * ata_port_disable - Disable port. 2482c6fd2807SJeff Garzik * @ap: Port to be disabled. 2483c6fd2807SJeff Garzik * 2484c6fd2807SJeff Garzik * Modify @ap data structure such that the system 2485c6fd2807SJeff Garzik * thinks that the entire port is disabled, and should 2486c6fd2807SJeff Garzik * never attempt to probe or communicate with devices 2487c6fd2807SJeff Garzik * on this port. 2488c6fd2807SJeff Garzik * 2489cca3974eSJeff Garzik * LOCKING: host lock, or some other form of 2490c6fd2807SJeff Garzik * serialization. 
2491c6fd2807SJeff Garzik */ 2492c6fd2807SJeff Garzik 2493c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap) 2494c6fd2807SJeff Garzik { 24959af5c9c9STejun Heo ap->link.device[0].class = ATA_DEV_NONE; 24969af5c9c9STejun Heo ap->link.device[1].class = ATA_DEV_NONE; 2497c6fd2807SJeff Garzik ap->flags |= ATA_FLAG_DISABLED; 2498c6fd2807SJeff Garzik } 2499c6fd2807SJeff Garzik 2500c6fd2807SJeff Garzik /** 2501c6fd2807SJeff Garzik * sata_down_spd_limit - adjust SATA spd limit downward 2502936fd732STejun Heo * @link: Link to adjust SATA spd limit for 2503c6fd2807SJeff Garzik * 2504936fd732STejun Heo * Adjust SATA spd limit of @link downward. Note that this 2505c6fd2807SJeff Garzik * function only adjusts the limit. The change must be applied 2506c6fd2807SJeff Garzik * using sata_set_spd(). 2507c6fd2807SJeff Garzik * 2508c6fd2807SJeff Garzik * LOCKING: 2509c6fd2807SJeff Garzik * Inherited from caller. 2510c6fd2807SJeff Garzik * 2511c6fd2807SJeff Garzik * RETURNS: 2512c6fd2807SJeff Garzik * 0 on success, negative errno on failure 2513c6fd2807SJeff Garzik */ 2514936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link) 2515c6fd2807SJeff Garzik { 2516c6fd2807SJeff Garzik u32 sstatus, spd, mask; 2517c6fd2807SJeff Garzik int rc, highbit; 2518c6fd2807SJeff Garzik 2519936fd732STejun Heo if (!sata_scr_valid(link)) 2520008a7896STejun Heo return -EOPNOTSUPP; 2521008a7896STejun Heo 2522008a7896STejun Heo /* If SCR can be read, use it to determine the current SPD. 2523936fd732STejun Heo * If not, use cached value in link->sata_spd. 
2524008a7896STejun Heo */ 2525936fd732STejun Heo rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2526008a7896STejun Heo if (rc == 0) 2527008a7896STejun Heo spd = (sstatus >> 4) & 0xf; 2528008a7896STejun Heo else 2529936fd732STejun Heo spd = link->sata_spd; 2530c6fd2807SJeff Garzik 2531936fd732STejun Heo mask = link->sata_spd_limit; 2532c6fd2807SJeff Garzik if (mask <= 1) 2533c6fd2807SJeff Garzik return -EINVAL; 2534008a7896STejun Heo 2535008a7896STejun Heo /* unconditionally mask off the highest bit */ 2536c6fd2807SJeff Garzik highbit = fls(mask) - 1; 2537c6fd2807SJeff Garzik mask &= ~(1 << highbit); 2538c6fd2807SJeff Garzik 2539008a7896STejun Heo /* Mask off all speeds higher than or equal to the current 2540008a7896STejun Heo * one. Force 1.5Gbps if current SPD is not available. 2541008a7896STejun Heo */ 2542008a7896STejun Heo if (spd > 1) 2543008a7896STejun Heo mask &= (1 << (spd - 1)) - 1; 2544008a7896STejun Heo else 2545008a7896STejun Heo mask &= 1; 2546008a7896STejun Heo 2547008a7896STejun Heo /* were we already at the bottom? 
*/ 2548c6fd2807SJeff Garzik if (!mask) 2549c6fd2807SJeff Garzik return -EINVAL; 2550c6fd2807SJeff Garzik 2551936fd732STejun Heo link->sata_spd_limit = mask; 2552c6fd2807SJeff Garzik 2553936fd732STejun Heo ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", 2554c6fd2807SJeff Garzik sata_spd_string(fls(mask))); 2555c6fd2807SJeff Garzik 2556c6fd2807SJeff Garzik return 0; 2557c6fd2807SJeff Garzik } 2558c6fd2807SJeff Garzik 2559936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2560c6fd2807SJeff Garzik { 2561c6fd2807SJeff Garzik u32 spd, limit; 2562c6fd2807SJeff Garzik 2563936fd732STejun Heo if (link->sata_spd_limit == UINT_MAX) 2564c6fd2807SJeff Garzik limit = 0; 2565c6fd2807SJeff Garzik else 2566936fd732STejun Heo limit = fls(link->sata_spd_limit); 2567c6fd2807SJeff Garzik 2568c6fd2807SJeff Garzik spd = (*scontrol >> 4) & 0xf; 2569c6fd2807SJeff Garzik *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4); 2570c6fd2807SJeff Garzik 2571c6fd2807SJeff Garzik return spd != limit; 2572c6fd2807SJeff Garzik } 2573c6fd2807SJeff Garzik 2574c6fd2807SJeff Garzik /** 2575c6fd2807SJeff Garzik * sata_set_spd_needed - is SATA spd configuration needed 2576936fd732STejun Heo * @link: Link in question 2577c6fd2807SJeff Garzik * 2578c6fd2807SJeff Garzik * Test whether the spd limit in SControl matches 2579936fd732STejun Heo * @link->sata_spd_limit. This function is used to determine 2580c6fd2807SJeff Garzik * whether hardreset is necessary to apply SATA spd 2581c6fd2807SJeff Garzik * configuration. 2582c6fd2807SJeff Garzik * 2583c6fd2807SJeff Garzik * LOCKING: 2584c6fd2807SJeff Garzik * Inherited from caller. 2585c6fd2807SJeff Garzik * 2586c6fd2807SJeff Garzik * RETURNS: 2587c6fd2807SJeff Garzik * 1 if SATA spd configuration is needed, 0 otherwise. 
2588c6fd2807SJeff Garzik */ 2589936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link) 2590c6fd2807SJeff Garzik { 2591c6fd2807SJeff Garzik u32 scontrol; 2592c6fd2807SJeff Garzik 2593936fd732STejun Heo if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2594c6fd2807SJeff Garzik return 0; 2595c6fd2807SJeff Garzik 2596936fd732STejun Heo return __sata_set_spd_needed(link, &scontrol); 2597c6fd2807SJeff Garzik } 2598c6fd2807SJeff Garzik 2599c6fd2807SJeff Garzik /** 2600c6fd2807SJeff Garzik * sata_set_spd - set SATA spd according to spd limit 2601936fd732STejun Heo * @link: Link to set SATA spd for 2602c6fd2807SJeff Garzik * 2603936fd732STejun Heo * Set SATA spd of @link according to sata_spd_limit. 2604c6fd2807SJeff Garzik * 2605c6fd2807SJeff Garzik * LOCKING: 2606c6fd2807SJeff Garzik * Inherited from caller. 2607c6fd2807SJeff Garzik * 2608c6fd2807SJeff Garzik * RETURNS: 2609c6fd2807SJeff Garzik * 0 if spd doesn't need to be changed, 1 if spd has been 2610c6fd2807SJeff Garzik * changed. Negative errno if SCR registers are inaccessible. 
2611c6fd2807SJeff Garzik */ 2612936fd732STejun Heo int sata_set_spd(struct ata_link *link) 2613c6fd2807SJeff Garzik { 2614c6fd2807SJeff Garzik u32 scontrol; 2615c6fd2807SJeff Garzik int rc; 2616c6fd2807SJeff Garzik 2617936fd732STejun Heo if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2618c6fd2807SJeff Garzik return rc; 2619c6fd2807SJeff Garzik 2620936fd732STejun Heo if (!__sata_set_spd_needed(link, &scontrol)) 2621c6fd2807SJeff Garzik return 0; 2622c6fd2807SJeff Garzik 2623936fd732STejun Heo if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2624c6fd2807SJeff Garzik return rc; 2625c6fd2807SJeff Garzik 2626c6fd2807SJeff Garzik return 1; 2627c6fd2807SJeff Garzik } 2628c6fd2807SJeff Garzik 2629c6fd2807SJeff Garzik /* 2630c6fd2807SJeff Garzik * This mode timing computation functionality is ported over from 2631c6fd2807SJeff Garzik * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2632c6fd2807SJeff Garzik */ 2633c6fd2807SJeff Garzik /* 2634b352e57dSAlan Cox * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 2635c6fd2807SJeff Garzik * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2636b352e57dSAlan Cox * for UDMA6, which is currently supported only by Maxtor drives. 2637b352e57dSAlan Cox * 2638b352e57dSAlan Cox * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
2639c6fd2807SJeff Garzik */ 2640c6fd2807SJeff Garzik 2641c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = { 2642c6fd2807SJeff Garzik 2643c6fd2807SJeff Garzik { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, 2644c6fd2807SJeff Garzik { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, 2645c6fd2807SJeff Garzik { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, 2646c6fd2807SJeff Garzik { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, 2647c6fd2807SJeff Garzik 2648b352e57dSAlan Cox { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, 2649b352e57dSAlan Cox { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, 2650c6fd2807SJeff Garzik { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, 2651c6fd2807SJeff Garzik { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, 2652c6fd2807SJeff Garzik { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2653c6fd2807SJeff Garzik 2654c6fd2807SJeff Garzik /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2655c6fd2807SJeff Garzik 2656c6fd2807SJeff Garzik { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, 2657c6fd2807SJeff Garzik { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, 2658c6fd2807SJeff Garzik { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, 2659c6fd2807SJeff Garzik 2660c6fd2807SJeff Garzik { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, 2661c6fd2807SJeff Garzik { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 2662c6fd2807SJeff Garzik { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, 2663c6fd2807SJeff Garzik 2664b352e57dSAlan Cox { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, 2665b352e57dSAlan Cox { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, 2666c6fd2807SJeff Garzik { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, 2667c6fd2807SJeff Garzik { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, 2668c6fd2807SJeff Garzik 2669c6fd2807SJeff Garzik { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, 2670c6fd2807SJeff Garzik { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, 2671c6fd2807SJeff Garzik { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, 2672c6fd2807SJeff Garzik 2673c6fd2807SJeff 
Garzik /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ 2674c6fd2807SJeff Garzik 2675c6fd2807SJeff Garzik { 0xFF } 2676c6fd2807SJeff Garzik }; 2677c6fd2807SJeff Garzik 2678c6fd2807SJeff Garzik #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2679c6fd2807SJeff Garzik #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2680c6fd2807SJeff Garzik 2681c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2682c6fd2807SJeff Garzik { 2683c6fd2807SJeff Garzik q->setup = EZ(t->setup * 1000, T); 2684c6fd2807SJeff Garzik q->act8b = EZ(t->act8b * 1000, T); 2685c6fd2807SJeff Garzik q->rec8b = EZ(t->rec8b * 1000, T); 2686c6fd2807SJeff Garzik q->cyc8b = EZ(t->cyc8b * 1000, T); 2687c6fd2807SJeff Garzik q->active = EZ(t->active * 1000, T); 2688c6fd2807SJeff Garzik q->recover = EZ(t->recover * 1000, T); 2689c6fd2807SJeff Garzik q->cycle = EZ(t->cycle * 1000, T); 2690c6fd2807SJeff Garzik q->udma = EZ(t->udma * 1000, UT); 2691c6fd2807SJeff Garzik } 2692c6fd2807SJeff Garzik 2693c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 2694c6fd2807SJeff Garzik struct ata_timing *m, unsigned int what) 2695c6fd2807SJeff Garzik { 2696c6fd2807SJeff Garzik if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 2697c6fd2807SJeff Garzik if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 2698c6fd2807SJeff Garzik if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 2699c6fd2807SJeff Garzik if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 2700c6fd2807SJeff Garzik if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 2701c6fd2807SJeff Garzik if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 2702c6fd2807SJeff Garzik if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 2703c6fd2807SJeff Garzik if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2704c6fd2807SJeff Garzik } 
2705c6fd2807SJeff Garzik 2706c6fd2807SJeff Garzik static const struct ata_timing *ata_timing_find_mode(unsigned short speed) 2707c6fd2807SJeff Garzik { 2708c6fd2807SJeff Garzik const struct ata_timing *t; 2709c6fd2807SJeff Garzik 2710c6fd2807SJeff Garzik for (t = ata_timing; t->mode != speed; t++) 2711c6fd2807SJeff Garzik if (t->mode == 0xFF) 2712c6fd2807SJeff Garzik return NULL; 2713c6fd2807SJeff Garzik return t; 2714c6fd2807SJeff Garzik } 2715c6fd2807SJeff Garzik 2716c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed, 2717c6fd2807SJeff Garzik struct ata_timing *t, int T, int UT) 2718c6fd2807SJeff Garzik { 2719c6fd2807SJeff Garzik const struct ata_timing *s; 2720c6fd2807SJeff Garzik struct ata_timing p; 2721c6fd2807SJeff Garzik 2722c6fd2807SJeff Garzik /* 2723c6fd2807SJeff Garzik * Find the mode. 2724c6fd2807SJeff Garzik */ 2725c6fd2807SJeff Garzik 2726c6fd2807SJeff Garzik if (!(s = ata_timing_find_mode(speed))) 2727c6fd2807SJeff Garzik return -EINVAL; 2728c6fd2807SJeff Garzik 2729c6fd2807SJeff Garzik memcpy(t, s, sizeof(*s)); 2730c6fd2807SJeff Garzik 2731c6fd2807SJeff Garzik /* 2732c6fd2807SJeff Garzik * If the drive is an EIDE drive, it can tell us it needs extended 2733c6fd2807SJeff Garzik * PIO/MW_DMA cycle timing. 
2734c6fd2807SJeff Garzik */ 2735c6fd2807SJeff Garzik 2736c6fd2807SJeff Garzik if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 2737c6fd2807SJeff Garzik memset(&p, 0, sizeof(p)); 2738c6fd2807SJeff Garzik if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 2739c6fd2807SJeff Garzik if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 2740c6fd2807SJeff Garzik else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 2741c6fd2807SJeff Garzik } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 2742c6fd2807SJeff Garzik p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 2743c6fd2807SJeff Garzik } 2744c6fd2807SJeff Garzik ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 2745c6fd2807SJeff Garzik } 2746c6fd2807SJeff Garzik 2747c6fd2807SJeff Garzik /* 2748c6fd2807SJeff Garzik * Convert the timing to bus clock counts. 2749c6fd2807SJeff Garzik */ 2750c6fd2807SJeff Garzik 2751c6fd2807SJeff Garzik ata_timing_quantize(t, t, T, UT); 2752c6fd2807SJeff Garzik 2753c6fd2807SJeff Garzik /* 2754c6fd2807SJeff Garzik * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 2755c6fd2807SJeff Garzik * S.M.A.R.T * and some other commands. We have to ensure that the 2756c6fd2807SJeff Garzik * DMA cycle timing is slower/equal than the fastest PIO timing. 2757c6fd2807SJeff Garzik */ 2758c6fd2807SJeff Garzik 2759fd3367afSAlan if (speed > XFER_PIO_6) { 2760c6fd2807SJeff Garzik ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 2761c6fd2807SJeff Garzik ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 2762c6fd2807SJeff Garzik } 2763c6fd2807SJeff Garzik 2764c6fd2807SJeff Garzik /* 2765c6fd2807SJeff Garzik * Lengthen active & recovery time so that cycle time is correct. 
2766c6fd2807SJeff Garzik */ 2767c6fd2807SJeff Garzik 2768c6fd2807SJeff Garzik if (t->act8b + t->rec8b < t->cyc8b) { 2769c6fd2807SJeff Garzik t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 2770c6fd2807SJeff Garzik t->rec8b = t->cyc8b - t->act8b; 2771c6fd2807SJeff Garzik } 2772c6fd2807SJeff Garzik 2773c6fd2807SJeff Garzik if (t->active + t->recover < t->cycle) { 2774c6fd2807SJeff Garzik t->active += (t->cycle - (t->active + t->recover)) / 2; 2775c6fd2807SJeff Garzik t->recover = t->cycle - t->active; 2776c6fd2807SJeff Garzik } 27774f701d1eSAlan Cox 27784f701d1eSAlan Cox /* In a few cases quantisation may produce enough errors to 27794f701d1eSAlan Cox leave t->cycle too low for the sum of active and recovery 27804f701d1eSAlan Cox if so we must correct this */ 27814f701d1eSAlan Cox if (t->active + t->recover > t->cycle) 27824f701d1eSAlan Cox t->cycle = t->active + t->recover; 2783c6fd2807SJeff Garzik 2784c6fd2807SJeff Garzik return 0; 2785c6fd2807SJeff Garzik } 2786c6fd2807SJeff Garzik 2787c6fd2807SJeff Garzik /** 2788c6fd2807SJeff Garzik * ata_down_xfermask_limit - adjust dev xfer masks downward 2789c6fd2807SJeff Garzik * @dev: Device to adjust xfer masks 2790458337dbSTejun Heo * @sel: ATA_DNXFER_* selector 2791c6fd2807SJeff Garzik * 2792c6fd2807SJeff Garzik * Adjust xfer masks of @dev downward. Note that this function 2793c6fd2807SJeff Garzik * does not apply the change. Invoking ata_set_mode() afterwards 2794c6fd2807SJeff Garzik * will apply the limit. 2795c6fd2807SJeff Garzik * 2796c6fd2807SJeff Garzik * LOCKING: 2797c6fd2807SJeff Garzik * Inherited from caller. 
2798c6fd2807SJeff Garzik * 2799c6fd2807SJeff Garzik * RETURNS: 2800c6fd2807SJeff Garzik * 0 on success, negative errno on failure 2801c6fd2807SJeff Garzik */ 2802458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 2803c6fd2807SJeff Garzik { 2804458337dbSTejun Heo char buf[32]; 2805458337dbSTejun Heo unsigned int orig_mask, xfer_mask; 2806458337dbSTejun Heo unsigned int pio_mask, mwdma_mask, udma_mask; 2807458337dbSTejun Heo int quiet, highbit; 2808c6fd2807SJeff Garzik 2809458337dbSTejun Heo quiet = !!(sel & ATA_DNXFER_QUIET); 2810458337dbSTejun Heo sel &= ~ATA_DNXFER_QUIET; 2811458337dbSTejun Heo 2812458337dbSTejun Heo xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 2813458337dbSTejun Heo dev->mwdma_mask, 2814c6fd2807SJeff Garzik dev->udma_mask); 2815458337dbSTejun Heo ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 2816c6fd2807SJeff Garzik 2817458337dbSTejun Heo switch (sel) { 2818458337dbSTejun Heo case ATA_DNXFER_PIO: 2819458337dbSTejun Heo highbit = fls(pio_mask) - 1; 2820458337dbSTejun Heo pio_mask &= ~(1 << highbit); 2821458337dbSTejun Heo break; 2822458337dbSTejun Heo 2823458337dbSTejun Heo case ATA_DNXFER_DMA: 2824458337dbSTejun Heo if (udma_mask) { 2825458337dbSTejun Heo highbit = fls(udma_mask) - 1; 2826458337dbSTejun Heo udma_mask &= ~(1 << highbit); 2827458337dbSTejun Heo if (!udma_mask) 2828458337dbSTejun Heo return -ENOENT; 2829458337dbSTejun Heo } else if (mwdma_mask) { 2830458337dbSTejun Heo highbit = fls(mwdma_mask) - 1; 2831458337dbSTejun Heo mwdma_mask &= ~(1 << highbit); 2832458337dbSTejun Heo if (!mwdma_mask) 2833458337dbSTejun Heo return -ENOENT; 2834458337dbSTejun Heo } 2835458337dbSTejun Heo break; 2836458337dbSTejun Heo 2837458337dbSTejun Heo case ATA_DNXFER_40C: 2838458337dbSTejun Heo udma_mask &= ATA_UDMA_MASK_40C; 2839458337dbSTejun Heo break; 2840458337dbSTejun Heo 2841458337dbSTejun Heo case ATA_DNXFER_FORCE_PIO0: 2842458337dbSTejun Heo pio_mask &= 1; 2843458337dbSTejun 
Heo case ATA_DNXFER_FORCE_PIO: 2844458337dbSTejun Heo mwdma_mask = 0; 2845458337dbSTejun Heo udma_mask = 0; 2846458337dbSTejun Heo break; 2847458337dbSTejun Heo 2848458337dbSTejun Heo default: 2849458337dbSTejun Heo BUG(); 2850458337dbSTejun Heo } 2851458337dbSTejun Heo 2852458337dbSTejun Heo xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 2853458337dbSTejun Heo 2854458337dbSTejun Heo if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 2855458337dbSTejun Heo return -ENOENT; 2856458337dbSTejun Heo 2857458337dbSTejun Heo if (!quiet) { 2858458337dbSTejun Heo if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 2859458337dbSTejun Heo snprintf(buf, sizeof(buf), "%s:%s", 2860458337dbSTejun Heo ata_mode_string(xfer_mask), 2861458337dbSTejun Heo ata_mode_string(xfer_mask & ATA_MASK_PIO)); 2862458337dbSTejun Heo else 2863458337dbSTejun Heo snprintf(buf, sizeof(buf), "%s", 2864458337dbSTejun Heo ata_mode_string(xfer_mask)); 2865458337dbSTejun Heo 2866458337dbSTejun Heo ata_dev_printk(dev, KERN_WARNING, 2867458337dbSTejun Heo "limiting speed to %s\n", buf); 2868458337dbSTejun Heo } 2869c6fd2807SJeff Garzik 2870c6fd2807SJeff Garzik ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 2871c6fd2807SJeff Garzik &dev->udma_mask); 2872c6fd2807SJeff Garzik 2873c6fd2807SJeff Garzik return 0; 2874c6fd2807SJeff Garzik } 2875c6fd2807SJeff Garzik 2876c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev) 2877c6fd2807SJeff Garzik { 28789af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 2879c6fd2807SJeff Garzik unsigned int err_mask; 2880c6fd2807SJeff Garzik int rc; 2881c6fd2807SJeff Garzik 2882c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_PIO; 2883c6fd2807SJeff Garzik if (dev->xfer_shift == ATA_SHIFT_PIO) 2884c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_PIO; 2885c6fd2807SJeff Garzik 2886c6fd2807SJeff Garzik err_mask = ata_dev_set_xfermode(dev); 28872dcb407eSJeff Garzik 288811750a40SAlan /* Old CFA may refuse this 
command, which is just fine */ 288911750a40SAlan if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 289011750a40SAlan err_mask &= ~AC_ERR_DEV; 28912dcb407eSJeff Garzik 28920bc2a79aSAlan Cox /* Some very old devices and some bad newer ones fail any kind of 28930bc2a79aSAlan Cox SET_XFERMODE request but support PIO0-2 timings and no IORDY */ 28940bc2a79aSAlan Cox if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && 28950bc2a79aSAlan Cox dev->pio_mode <= XFER_PIO_2) 28960bc2a79aSAlan Cox err_mask &= ~AC_ERR_DEV; 28972dcb407eSJeff Garzik 28983acaf94bSAlan Cox /* Early MWDMA devices do DMA but don't allow DMA mode setting. 28993acaf94bSAlan Cox Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 29003acaf94bSAlan Cox if (dev->xfer_shift == ATA_SHIFT_MWDMA && 29013acaf94bSAlan Cox dev->dma_mode == XFER_MW_DMA_0 && 29023acaf94bSAlan Cox (dev->id[63] >> 8) & 1) 29033acaf94bSAlan Cox err_mask &= ~AC_ERR_DEV; 29043acaf94bSAlan Cox 2905c6fd2807SJeff Garzik if (err_mask) { 2906c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 2907c6fd2807SJeff Garzik "(err_mask=0x%x)\n", err_mask); 2908c6fd2807SJeff Garzik return -EIO; 2909c6fd2807SJeff Garzik } 2910c6fd2807SJeff Garzik 2911baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_POST_SETMODE; 2912422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 2913baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 2914c6fd2807SJeff Garzik if (rc) 2915c6fd2807SJeff Garzik return rc; 2916c6fd2807SJeff Garzik 2917c6fd2807SJeff Garzik DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 2918c6fd2807SJeff Garzik dev->xfer_shift, (int)dev->xfer_mode); 2919c6fd2807SJeff Garzik 2920c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_INFO, "configured for %s\n", 2921c6fd2807SJeff Garzik ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode))); 2922c6fd2807SJeff Garzik return 0; 2923c6fd2807SJeff Garzik } 2924c6fd2807SJeff Garzik 2925c6fd2807SJeff Garzik /** 292604351821SAlan * 
ata_do_set_mode - Program timings and issue SET FEATURES - XFER 29270260731fSTejun Heo * @link: link on which timings will be programmed 2928c6fd2807SJeff Garzik * @r_failed_dev: out paramter for failed device 2929c6fd2807SJeff Garzik * 293004351821SAlan * Standard implementation of the function used to tune and set 293104351821SAlan * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 293204351821SAlan * ata_dev_set_mode() fails, pointer to the failing device is 2933c6fd2807SJeff Garzik * returned in @r_failed_dev. 2934c6fd2807SJeff Garzik * 2935c6fd2807SJeff Garzik * LOCKING: 2936c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 2937c6fd2807SJeff Garzik * 2938c6fd2807SJeff Garzik * RETURNS: 2939c6fd2807SJeff Garzik * 0 on success, negative errno otherwise 2940c6fd2807SJeff Garzik */ 294104351821SAlan 29420260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 2943c6fd2807SJeff Garzik { 29440260731fSTejun Heo struct ata_port *ap = link->ap; 2945c6fd2807SJeff Garzik struct ata_device *dev; 2946f58229f8STejun Heo int rc = 0, used_dma = 0, found = 0; 2947c6fd2807SJeff Garzik 2948c6fd2807SJeff Garzik /* step 1: calculate xfer_mask */ 2949f58229f8STejun Heo ata_link_for_each_dev(dev, link) { 2950c6fd2807SJeff Garzik unsigned int pio_mask, dma_mask; 2951b3a70601SAlan Cox unsigned int mode_mask; 2952c6fd2807SJeff Garzik 2953c6fd2807SJeff Garzik if (!ata_dev_enabled(dev)) 2954c6fd2807SJeff Garzik continue; 2955c6fd2807SJeff Garzik 2956b3a70601SAlan Cox mode_mask = ATA_DMA_MASK_ATA; 2957b3a70601SAlan Cox if (dev->class == ATA_DEV_ATAPI) 2958b3a70601SAlan Cox mode_mask = ATA_DMA_MASK_ATAPI; 2959b3a70601SAlan Cox else if (ata_id_is_cfa(dev->id)) 2960b3a70601SAlan Cox mode_mask = ATA_DMA_MASK_CFA; 2961b3a70601SAlan Cox 2962c6fd2807SJeff Garzik ata_dev_xfermask(dev); 2963c6fd2807SJeff Garzik 2964c6fd2807SJeff Garzik pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 2965c6fd2807SJeff Garzik dma_mask = ata_pack_xfermask(0, 
dev->mwdma_mask, dev->udma_mask); 2966b3a70601SAlan Cox 2967b3a70601SAlan Cox if (libata_dma_mask & mode_mask) 2968b3a70601SAlan Cox dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 2969b3a70601SAlan Cox else 2970b3a70601SAlan Cox dma_mask = 0; 2971b3a70601SAlan Cox 2972c6fd2807SJeff Garzik dev->pio_mode = ata_xfer_mask2mode(pio_mask); 2973c6fd2807SJeff Garzik dev->dma_mode = ata_xfer_mask2mode(dma_mask); 2974c6fd2807SJeff Garzik 2975c6fd2807SJeff Garzik found = 1; 2976c6fd2807SJeff Garzik if (dev->dma_mode) 2977c6fd2807SJeff Garzik used_dma = 1; 2978c6fd2807SJeff Garzik } 2979c6fd2807SJeff Garzik if (!found) 2980c6fd2807SJeff Garzik goto out; 2981c6fd2807SJeff Garzik 2982c6fd2807SJeff Garzik /* step 2: always set host PIO timings */ 2983f58229f8STejun Heo ata_link_for_each_dev(dev, link) { 2984c6fd2807SJeff Garzik if (!ata_dev_enabled(dev)) 2985c6fd2807SJeff Garzik continue; 2986c6fd2807SJeff Garzik 2987c6fd2807SJeff Garzik if (!dev->pio_mode) { 2988c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); 2989c6fd2807SJeff Garzik rc = -EINVAL; 2990c6fd2807SJeff Garzik goto out; 2991c6fd2807SJeff Garzik } 2992c6fd2807SJeff Garzik 2993c6fd2807SJeff Garzik dev->xfer_mode = dev->pio_mode; 2994c6fd2807SJeff Garzik dev->xfer_shift = ATA_SHIFT_PIO; 2995c6fd2807SJeff Garzik if (ap->ops->set_piomode) 2996c6fd2807SJeff Garzik ap->ops->set_piomode(ap, dev); 2997c6fd2807SJeff Garzik } 2998c6fd2807SJeff Garzik 2999c6fd2807SJeff Garzik /* step 3: set host DMA timings */ 3000f58229f8STejun Heo ata_link_for_each_dev(dev, link) { 3001c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) || !dev->dma_mode) 3002c6fd2807SJeff Garzik continue; 3003c6fd2807SJeff Garzik 3004c6fd2807SJeff Garzik dev->xfer_mode = dev->dma_mode; 3005c6fd2807SJeff Garzik dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3006c6fd2807SJeff Garzik if (ap->ops->set_dmamode) 3007c6fd2807SJeff Garzik ap->ops->set_dmamode(ap, dev); 3008c6fd2807SJeff Garzik } 3009c6fd2807SJeff 
Garzik 3010c6fd2807SJeff Garzik /* step 4: update devices' xfer mode */ 3011f58229f8STejun Heo ata_link_for_each_dev(dev, link) { 301218d90debSAlan /* don't update suspended devices' xfer mode */ 30139666f400STejun Heo if (!ata_dev_enabled(dev)) 3014c6fd2807SJeff Garzik continue; 3015c6fd2807SJeff Garzik 3016c6fd2807SJeff Garzik rc = ata_dev_set_mode(dev); 3017c6fd2807SJeff Garzik if (rc) 3018c6fd2807SJeff Garzik goto out; 3019c6fd2807SJeff Garzik } 3020c6fd2807SJeff Garzik 3021c6fd2807SJeff Garzik /* Record simplex status. If we selected DMA then the other 3022c6fd2807SJeff Garzik * host channels are not permitted to do so. 3023c6fd2807SJeff Garzik */ 3024cca3974eSJeff Garzik if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3025032af1ceSAlan ap->host->simplex_claimed = ap; 3026c6fd2807SJeff Garzik 3027c6fd2807SJeff Garzik out: 3028c6fd2807SJeff Garzik if (rc) 3029c6fd2807SJeff Garzik *r_failed_dev = dev; 3030c6fd2807SJeff Garzik return rc; 3031c6fd2807SJeff Garzik } 3032c6fd2807SJeff Garzik 3033c6fd2807SJeff Garzik /** 303404351821SAlan * ata_set_mode - Program timings and issue SET FEATURES - XFER 30350260731fSTejun Heo * @link: link on which timings will be programmed 303604351821SAlan * @r_failed_dev: out paramter for failed device 303704351821SAlan * 303804351821SAlan * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 303904351821SAlan * ata_set_mode() fails, pointer to the failing device is 304004351821SAlan * returned in @r_failed_dev. 304104351821SAlan * 304204351821SAlan * LOCKING: 304304351821SAlan * PCI/etc. bus probe sem. 304404351821SAlan * 304504351821SAlan * RETURNS: 304604351821SAlan * 0 on success, negative errno otherwise 304704351821SAlan */ 30480260731fSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 304904351821SAlan { 30500260731fSTejun Heo struct ata_port *ap = link->ap; 30510260731fSTejun Heo 305204351821SAlan /* has private set_mode? 
*/ 305304351821SAlan if (ap->ops->set_mode) 30540260731fSTejun Heo return ap->ops->set_mode(link, r_failed_dev); 30550260731fSTejun Heo return ata_do_set_mode(link, r_failed_dev); 305604351821SAlan } 305704351821SAlan 305804351821SAlan /** 3059c6fd2807SJeff Garzik * ata_tf_to_host - issue ATA taskfile to host controller 3060c6fd2807SJeff Garzik * @ap: port to which command is being issued 3061c6fd2807SJeff Garzik * @tf: ATA taskfile register set 3062c6fd2807SJeff Garzik * 3063c6fd2807SJeff Garzik * Issues ATA taskfile register set to ATA host controller, 3064c6fd2807SJeff Garzik * with proper synchronization with interrupt handler and 3065c6fd2807SJeff Garzik * other threads. 3066c6fd2807SJeff Garzik * 3067c6fd2807SJeff Garzik * LOCKING: 3068cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 3069c6fd2807SJeff Garzik */ 3070c6fd2807SJeff Garzik 3071c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap, 3072c6fd2807SJeff Garzik const struct ata_taskfile *tf) 3073c6fd2807SJeff Garzik { 3074c6fd2807SJeff Garzik ap->ops->tf_load(ap, tf); 3075c6fd2807SJeff Garzik ap->ops->exec_command(ap, tf); 3076c6fd2807SJeff Garzik } 3077c6fd2807SJeff Garzik 3078c6fd2807SJeff Garzik /** 3079c6fd2807SJeff Garzik * ata_busy_sleep - sleep until BSY clears, or timeout 3080c6fd2807SJeff Garzik * @ap: port containing status register to be polled 3081c6fd2807SJeff Garzik * @tmout_pat: impatience timeout 3082c6fd2807SJeff Garzik * @tmout: overall timeout 3083c6fd2807SJeff Garzik * 3084c6fd2807SJeff Garzik * Sleep until ATA Status register bit BSY clears, 3085c6fd2807SJeff Garzik * or a timeout occurs. 3086c6fd2807SJeff Garzik * 3087d1adc1bbSTejun Heo * LOCKING: 3088d1adc1bbSTejun Heo * Kernel thread context (may sleep). 3089d1adc1bbSTejun Heo * 3090d1adc1bbSTejun Heo * RETURNS: 3091d1adc1bbSTejun Heo * 0 on success, -errno otherwise. 
3092c6fd2807SJeff Garzik */ 3093d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap, 3094c6fd2807SJeff Garzik unsigned long tmout_pat, unsigned long tmout) 3095c6fd2807SJeff Garzik { 3096c6fd2807SJeff Garzik unsigned long timer_start, timeout; 3097c6fd2807SJeff Garzik u8 status; 3098c6fd2807SJeff Garzik 3099c6fd2807SJeff Garzik status = ata_busy_wait(ap, ATA_BUSY, 300); 3100c6fd2807SJeff Garzik timer_start = jiffies; 3101c6fd2807SJeff Garzik timeout = timer_start + tmout_pat; 3102d1adc1bbSTejun Heo while (status != 0xff && (status & ATA_BUSY) && 3103d1adc1bbSTejun Heo time_before(jiffies, timeout)) { 3104c6fd2807SJeff Garzik msleep(50); 3105c6fd2807SJeff Garzik status = ata_busy_wait(ap, ATA_BUSY, 3); 3106c6fd2807SJeff Garzik } 3107c6fd2807SJeff Garzik 3108d1adc1bbSTejun Heo if (status != 0xff && (status & ATA_BUSY)) 3109c6fd2807SJeff Garzik ata_port_printk(ap, KERN_WARNING, 311035aa7a43SJeff Garzik "port is slow to respond, please be patient " 311135aa7a43SJeff Garzik "(Status 0x%x)\n", status); 3112c6fd2807SJeff Garzik 3113c6fd2807SJeff Garzik timeout = timer_start + tmout; 3114d1adc1bbSTejun Heo while (status != 0xff && (status & ATA_BUSY) && 3115d1adc1bbSTejun Heo time_before(jiffies, timeout)) { 3116c6fd2807SJeff Garzik msleep(50); 3117c6fd2807SJeff Garzik status = ata_chk_status(ap); 3118c6fd2807SJeff Garzik } 3119c6fd2807SJeff Garzik 3120d1adc1bbSTejun Heo if (status == 0xff) 3121d1adc1bbSTejun Heo return -ENODEV; 3122d1adc1bbSTejun Heo 3123c6fd2807SJeff Garzik if (status & ATA_BUSY) { 3124c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "port failed to respond " 312535aa7a43SJeff Garzik "(%lu secs, Status 0x%x)\n", 312635aa7a43SJeff Garzik tmout / HZ, status); 3127d1adc1bbSTejun Heo return -EBUSY; 3128c6fd2807SJeff Garzik } 3129c6fd2807SJeff Garzik 3130c6fd2807SJeff Garzik return 0; 3131c6fd2807SJeff Garzik } 3132c6fd2807SJeff Garzik 3133d4b2bab4STejun Heo /** 313488ff6eafSTejun Heo * ata_wait_after_reset - wait before checking status after reset 
313588ff6eafSTejun Heo * @ap: port containing status register to be polled 313688ff6eafSTejun Heo * @deadline: deadline jiffies for the operation 313788ff6eafSTejun Heo * 313888ff6eafSTejun Heo * After reset, we need to pause a while before reading status. 313988ff6eafSTejun Heo * Also, certain combination of controller and device report 0xff 314088ff6eafSTejun Heo * for some duration (e.g. until SATA PHY is up and running) 314188ff6eafSTejun Heo * which is interpreted as empty port in ATA world. This 314288ff6eafSTejun Heo * function also waits for such devices to get out of 0xff 314388ff6eafSTejun Heo * status. 314488ff6eafSTejun Heo * 314588ff6eafSTejun Heo * LOCKING: 314688ff6eafSTejun Heo * Kernel thread context (may sleep). 314788ff6eafSTejun Heo */ 314888ff6eafSTejun Heo void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline) 314988ff6eafSTejun Heo { 315088ff6eafSTejun Heo unsigned long until = jiffies + ATA_TMOUT_FF_WAIT; 315188ff6eafSTejun Heo 315288ff6eafSTejun Heo if (time_before(until, deadline)) 315388ff6eafSTejun Heo deadline = until; 315488ff6eafSTejun Heo 315588ff6eafSTejun Heo /* Spec mandates ">= 2ms" before checking status. We wait 315688ff6eafSTejun Heo * 150ms, because that was the magic delay used for ATAPI 315788ff6eafSTejun Heo * devices in Hale Landis's ATADRVR, for the period of time 315888ff6eafSTejun Heo * between when the ATA command register is written, and then 315988ff6eafSTejun Heo * status is checked. Because waiting for "a while" before 316088ff6eafSTejun Heo * checking status is fine, post SRST, we perform this magic 316188ff6eafSTejun Heo * delay here as well. 316288ff6eafSTejun Heo * 316388ff6eafSTejun Heo * Old drivers/ide uses the 2mS rule and then waits for ready. 316488ff6eafSTejun Heo */ 316588ff6eafSTejun Heo msleep(150); 316688ff6eafSTejun Heo 316788ff6eafSTejun Heo /* Wait for 0xff to clear. Some SATA devices take a long time 316888ff6eafSTejun Heo * to clear 0xff after reset. 
For example, HHD424020F7SV00 316988ff6eafSTejun Heo * iVDR needs >= 800ms while. Quantum GoVault needs even more 317088ff6eafSTejun Heo * than that. 317188ff6eafSTejun Heo */ 317288ff6eafSTejun Heo while (1) { 317388ff6eafSTejun Heo u8 status = ata_chk_status(ap); 317488ff6eafSTejun Heo 317588ff6eafSTejun Heo if (status != 0xff || time_after(jiffies, deadline)) 317688ff6eafSTejun Heo return; 317788ff6eafSTejun Heo 317888ff6eafSTejun Heo msleep(50); 317988ff6eafSTejun Heo } 318088ff6eafSTejun Heo } 318188ff6eafSTejun Heo 318288ff6eafSTejun Heo /** 3183d4b2bab4STejun Heo * ata_wait_ready - sleep until BSY clears, or timeout 3184d4b2bab4STejun Heo * @ap: port containing status register to be polled 3185d4b2bab4STejun Heo * @deadline: deadline jiffies for the operation 3186d4b2bab4STejun Heo * 3187d4b2bab4STejun Heo * Sleep until ATA Status register bit BSY clears, or timeout 3188d4b2bab4STejun Heo * occurs. 3189d4b2bab4STejun Heo * 3190d4b2bab4STejun Heo * LOCKING: 3191d4b2bab4STejun Heo * Kernel thread context (may sleep). 3192d4b2bab4STejun Heo * 3193d4b2bab4STejun Heo * RETURNS: 3194d4b2bab4STejun Heo * 0 on success, -errno otherwise. 
3195d4b2bab4STejun Heo */ 3196d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline) 3197d4b2bab4STejun Heo { 3198d4b2bab4STejun Heo unsigned long start = jiffies; 3199d4b2bab4STejun Heo int warned = 0; 3200d4b2bab4STejun Heo 3201d4b2bab4STejun Heo while (1) { 3202d4b2bab4STejun Heo u8 status = ata_chk_status(ap); 3203d4b2bab4STejun Heo unsigned long now = jiffies; 3204d4b2bab4STejun Heo 3205d4b2bab4STejun Heo if (!(status & ATA_BUSY)) 3206d4b2bab4STejun Heo return 0; 3207936fd732STejun Heo if (!ata_link_online(&ap->link) && status == 0xff) 3208d4b2bab4STejun Heo return -ENODEV; 3209d4b2bab4STejun Heo if (time_after(now, deadline)) 3210d4b2bab4STejun Heo return -EBUSY; 3211d4b2bab4STejun Heo 3212d4b2bab4STejun Heo if (!warned && time_after(now, start + 5 * HZ) && 3213d4b2bab4STejun Heo (deadline - now > 3 * HZ)) { 3214d4b2bab4STejun Heo ata_port_printk(ap, KERN_WARNING, 3215d4b2bab4STejun Heo "port is slow to respond, please be patient " 3216d4b2bab4STejun Heo "(Status 0x%x)\n", status); 3217d4b2bab4STejun Heo warned = 1; 3218d4b2bab4STejun Heo } 3219d4b2bab4STejun Heo 3220d4b2bab4STejun Heo msleep(50); 3221d4b2bab4STejun Heo } 3222d4b2bab4STejun Heo } 3223d4b2bab4STejun Heo 3224d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask, 3225d4b2bab4STejun Heo unsigned long deadline) 3226c6fd2807SJeff Garzik { 3227c6fd2807SJeff Garzik struct ata_ioports *ioaddr = &ap->ioaddr; 3228c6fd2807SJeff Garzik unsigned int dev0 = devmask & (1 << 0); 3229c6fd2807SJeff Garzik unsigned int dev1 = devmask & (1 << 1); 32309b89391cSTejun Heo int rc, ret = 0; 3231c6fd2807SJeff Garzik 3232c6fd2807SJeff Garzik /* if device 0 was found in ata_devchk, wait for its 3233c6fd2807SJeff Garzik * BSY bit to clear 3234c6fd2807SJeff Garzik */ 3235d4b2bab4STejun Heo if (dev0) { 3236d4b2bab4STejun Heo rc = ata_wait_ready(ap, deadline); 32379b89391cSTejun Heo if (rc) { 32389b89391cSTejun Heo if (rc != -ENODEV) 3239d4b2bab4STejun Heo 
return rc; 32409b89391cSTejun Heo ret = rc; 32419b89391cSTejun Heo } 3242d4b2bab4STejun Heo } 3243c6fd2807SJeff Garzik 3244e141d999STejun Heo /* if device 1 was found in ata_devchk, wait for register 3245e141d999STejun Heo * access briefly, then wait for BSY to clear. 3246c6fd2807SJeff Garzik */ 3247e141d999STejun Heo if (dev1) { 3248e141d999STejun Heo int i; 3249c6fd2807SJeff Garzik 3250c6fd2807SJeff Garzik ap->ops->dev_select(ap, 1); 3251e141d999STejun Heo 3252e141d999STejun Heo /* Wait for register access. Some ATAPI devices fail 3253e141d999STejun Heo * to set nsect/lbal after reset, so don't waste too 3254e141d999STejun Heo * much time on it. We're gonna wait for !BSY anyway. 3255e141d999STejun Heo */ 3256e141d999STejun Heo for (i = 0; i < 2; i++) { 3257e141d999STejun Heo u8 nsect, lbal; 3258e141d999STejun Heo 32590d5ff566STejun Heo nsect = ioread8(ioaddr->nsect_addr); 32600d5ff566STejun Heo lbal = ioread8(ioaddr->lbal_addr); 3261c6fd2807SJeff Garzik if ((nsect == 1) && (lbal == 1)) 3262c6fd2807SJeff Garzik break; 3263c6fd2807SJeff Garzik msleep(50); /* give drive a breather */ 3264c6fd2807SJeff Garzik } 3265e141d999STejun Heo 3266d4b2bab4STejun Heo rc = ata_wait_ready(ap, deadline); 32679b89391cSTejun Heo if (rc) { 32689b89391cSTejun Heo if (rc != -ENODEV) 3269d4b2bab4STejun Heo return rc; 32709b89391cSTejun Heo ret = rc; 32719b89391cSTejun Heo } 3272d4b2bab4STejun Heo } 3273c6fd2807SJeff Garzik 3274c6fd2807SJeff Garzik /* is all this really necessary? 
*/ 3275c6fd2807SJeff Garzik ap->ops->dev_select(ap, 0); 3276c6fd2807SJeff Garzik if (dev1) 3277c6fd2807SJeff Garzik ap->ops->dev_select(ap, 1); 3278c6fd2807SJeff Garzik if (dev0) 3279c6fd2807SJeff Garzik ap->ops->dev_select(ap, 0); 3280d4b2bab4STejun Heo 32819b89391cSTejun Heo return ret; 3282c6fd2807SJeff Garzik } 3283c6fd2807SJeff Garzik 3284d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, 3285d4b2bab4STejun Heo unsigned long deadline) 3286c6fd2807SJeff Garzik { 3287c6fd2807SJeff Garzik struct ata_ioports *ioaddr = &ap->ioaddr; 3288c6fd2807SJeff Garzik 328944877b4eSTejun Heo DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); 3290c6fd2807SJeff Garzik 3291c6fd2807SJeff Garzik /* software reset. causes dev0 to be selected */ 32920d5ff566STejun Heo iowrite8(ap->ctl, ioaddr->ctl_addr); 3293c6fd2807SJeff Garzik udelay(20); /* FIXME: flush */ 32940d5ff566STejun Heo iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); 3295c6fd2807SJeff Garzik udelay(20); /* FIXME: flush */ 32960d5ff566STejun Heo iowrite8(ap->ctl, ioaddr->ctl_addr); 3297c6fd2807SJeff Garzik 329888ff6eafSTejun Heo /* wait a while before checking status */ 329988ff6eafSTejun Heo ata_wait_after_reset(ap, deadline); 3300c6fd2807SJeff Garzik 3301c6fd2807SJeff Garzik /* Before we perform post reset processing we want to see if 3302c6fd2807SJeff Garzik * the bus shows 0xFF because the odd clown forgets the D7 3303c6fd2807SJeff Garzik * pulldown resistor. 
3304c6fd2807SJeff Garzik */ 3305150981b0SAlan Cox if (ata_chk_status(ap) == 0xFF) 33069b89391cSTejun Heo return -ENODEV; 3307c6fd2807SJeff Garzik 3308d4b2bab4STejun Heo return ata_bus_post_reset(ap, devmask, deadline); 3309c6fd2807SJeff Garzik } 3310c6fd2807SJeff Garzik 3311c6fd2807SJeff Garzik /** 3312c6fd2807SJeff Garzik * ata_bus_reset - reset host port and associated ATA channel 3313c6fd2807SJeff Garzik * @ap: port to reset 3314c6fd2807SJeff Garzik * 3315c6fd2807SJeff Garzik * This is typically the first time we actually start issuing 3316c6fd2807SJeff Garzik * commands to the ATA channel. We wait for BSY to clear, then 3317c6fd2807SJeff Garzik * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its 3318c6fd2807SJeff Garzik * result. Determine what devices, if any, are on the channel 3319c6fd2807SJeff Garzik * by looking at the device 0/1 error register. Look at the signature 3320c6fd2807SJeff Garzik * stored in each device's taskfile registers, to determine if 3321c6fd2807SJeff Garzik * the device is ATA or ATAPI. 3322c6fd2807SJeff Garzik * 3323c6fd2807SJeff Garzik * LOCKING: 3324c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 3325cca3974eSJeff Garzik * Obtains host lock. 3326c6fd2807SJeff Garzik * 3327c6fd2807SJeff Garzik * SIDE EFFECTS: 3328c6fd2807SJeff Garzik * Sets ATA_FLAG_DISABLED if bus reset fails. 
3329c6fd2807SJeff Garzik */ 3330c6fd2807SJeff Garzik 3331c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap) 3332c6fd2807SJeff Garzik { 33339af5c9c9STejun Heo struct ata_device *device = ap->link.device; 3334c6fd2807SJeff Garzik struct ata_ioports *ioaddr = &ap->ioaddr; 3335c6fd2807SJeff Garzik unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 3336c6fd2807SJeff Garzik u8 err; 3337c6fd2807SJeff Garzik unsigned int dev0, dev1 = 0, devmask = 0; 33389b89391cSTejun Heo int rc; 3339c6fd2807SJeff Garzik 334044877b4eSTejun Heo DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); 3341c6fd2807SJeff Garzik 3342c6fd2807SJeff Garzik /* determine if device 0/1 are present */ 3343c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_SATA_RESET) 3344c6fd2807SJeff Garzik dev0 = 1; 3345c6fd2807SJeff Garzik else { 3346c6fd2807SJeff Garzik dev0 = ata_devchk(ap, 0); 3347c6fd2807SJeff Garzik if (slave_possible) 3348c6fd2807SJeff Garzik dev1 = ata_devchk(ap, 1); 3349c6fd2807SJeff Garzik } 3350c6fd2807SJeff Garzik 3351c6fd2807SJeff Garzik if (dev0) 3352c6fd2807SJeff Garzik devmask |= (1 << 0); 3353c6fd2807SJeff Garzik if (dev1) 3354c6fd2807SJeff Garzik devmask |= (1 << 1); 3355c6fd2807SJeff Garzik 3356c6fd2807SJeff Garzik /* select device 0 again */ 3357c6fd2807SJeff Garzik ap->ops->dev_select(ap, 0); 3358c6fd2807SJeff Garzik 3359c6fd2807SJeff Garzik /* issue bus reset */ 33609b89391cSTejun Heo if (ap->flags & ATA_FLAG_SRST) { 33619b89391cSTejun Heo rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); 33629b89391cSTejun Heo if (rc && rc != -ENODEV) 3363c6fd2807SJeff Garzik goto err_out; 33649b89391cSTejun Heo } 3365c6fd2807SJeff Garzik 3366c6fd2807SJeff Garzik /* 3367c6fd2807SJeff Garzik * determine by signature whether we have ATA or ATAPI devices 3368c6fd2807SJeff Garzik */ 33693f19859eSTejun Heo device[0].class = ata_dev_try_classify(&device[0], dev0, &err); 3370c6fd2807SJeff Garzik if ((slave_possible) && (err != 0x81)) 33713f19859eSTejun Heo 
device[1].class = ata_dev_try_classify(&device[1], dev1, &err); 3372c6fd2807SJeff Garzik 3373c6fd2807SJeff Garzik /* is double-select really necessary? */ 33749af5c9c9STejun Heo if (device[1].class != ATA_DEV_NONE) 3375c6fd2807SJeff Garzik ap->ops->dev_select(ap, 1); 33769af5c9c9STejun Heo if (device[0].class != ATA_DEV_NONE) 3377c6fd2807SJeff Garzik ap->ops->dev_select(ap, 0); 3378c6fd2807SJeff Garzik 3379c6fd2807SJeff Garzik /* if no devices were detected, disable this port */ 33809af5c9c9STejun Heo if ((device[0].class == ATA_DEV_NONE) && 33819af5c9c9STejun Heo (device[1].class == ATA_DEV_NONE)) 3382c6fd2807SJeff Garzik goto err_out; 3383c6fd2807SJeff Garzik 3384c6fd2807SJeff Garzik if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 3385c6fd2807SJeff Garzik /* set up device control for ATA_FLAG_SATA_RESET */ 33860d5ff566STejun Heo iowrite8(ap->ctl, ioaddr->ctl_addr); 3387c6fd2807SJeff Garzik } 3388c6fd2807SJeff Garzik 3389c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 3390c6fd2807SJeff Garzik return; 3391c6fd2807SJeff Garzik 3392c6fd2807SJeff Garzik err_out: 3393c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "disabling port\n"); 3394ac8869d5SJeff Garzik ata_port_disable(ap); 3395c6fd2807SJeff Garzik 3396c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 3397c6fd2807SJeff Garzik } 3398c6fd2807SJeff Garzik 3399c6fd2807SJeff Garzik /** 3400936fd732STejun Heo * sata_link_debounce - debounce SATA phy status 3401936fd732STejun Heo * @link: ATA link to debounce SATA phy status for 3402c6fd2807SJeff Garzik * @params: timing parameters { interval, duratinon, timeout } in msec 3403d4b2bab4STejun Heo * @deadline: deadline jiffies for the operation 3404c6fd2807SJeff Garzik * 3405936fd732STejun Heo * Make sure SStatus of @link reaches stable state, determined by 3406c6fd2807SJeff Garzik * holding the same value where DET is not 1 for @duration polled 3407c6fd2807SJeff Garzik * every @interval, before @timeout. 
 *	@timeout constrains the beginning of the stable state.  Because
 *	DET gets stuck at 1 on some controllers after hot unplugging,
 *	this function waits until timeout then returns 0 if DET is
 *	stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* overall timeout from params[2]; clamp to the caller's deadline */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	/* only the DET field (low nibble of SStatus) is debounced */
	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET stuck at 1: keep waiting until deadline,
			 * then fall through and report success (see
			 * function comment above).
			 */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}

/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* clear DET, keep the 0x0f0 field; 0x300 goes into the IPM bits
	 * (NOTE(review): exact IPM semantics per SATA spec — not shown here)
	 */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}

/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some links can only be resumed by hardreset */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* -ENODEV (no device) is fine here; anything else
		 * escalates to hardreset rather than failing prereset.
		 */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}

/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, report ATA_DEV_NONE and succeed */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	/* err == 0x81 means classify of device 0 already proved the
	 * slave absent (per diagnostic code); skip classifying it then
	 */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}

/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET=4: disable the phy while spd is reconfigured */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset (DET=1: COMRESET) */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		/* best-effort wait; result deliberately ignored */
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}

/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear, so write back what was read) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}

/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	/* [0] = old/configured identity, [1] = freshly read identity */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}

/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch sector buffer for the ID page */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there?
*/ 3875fe30911bSTejun Heo if (!ata_dev_same_device(dev, class, id)) 3876fe30911bSTejun Heo return -ENODEV; 3877c6fd2807SJeff Garzik 3878c6fd2807SJeff Garzik memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3879fe30911bSTejun Heo return 0; 3880fe30911bSTejun Heo } 3881fe30911bSTejun Heo 3882fe30911bSTejun Heo /** 3883fe30911bSTejun Heo * ata_dev_revalidate - Revalidate ATA device 3884fe30911bSTejun Heo * @dev: device to revalidate 3885422c9daaSTejun Heo * @new_class: new class code 3886fe30911bSTejun Heo * @readid_flags: read ID flags 3887fe30911bSTejun Heo * 3888fe30911bSTejun Heo * Re-read IDENTIFY page, make sure @dev is still attached to the 3889fe30911bSTejun Heo * port and reconfigure it according to the new IDENTIFY page. 3890fe30911bSTejun Heo * 3891fe30911bSTejun Heo * LOCKING: 3892fe30911bSTejun Heo * Kernel thread context (may sleep) 3893fe30911bSTejun Heo * 3894fe30911bSTejun Heo * RETURNS: 3895fe30911bSTejun Heo * 0 on success, negative errno otherwise 3896fe30911bSTejun Heo */ 3897422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 3898422c9daaSTejun Heo unsigned int readid_flags) 3899fe30911bSTejun Heo { 39006ddcd3b0STejun Heo u64 n_sectors = dev->n_sectors; 3901fe30911bSTejun Heo int rc; 3902fe30911bSTejun Heo 3903fe30911bSTejun Heo if (!ata_dev_enabled(dev)) 3904fe30911bSTejun Heo return -ENODEV; 3905fe30911bSTejun Heo 3906422c9daaSTejun Heo /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 3907422c9daaSTejun Heo if (ata_class_enabled(new_class) && 3908422c9daaSTejun Heo new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) { 3909422c9daaSTejun Heo ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", 3910422c9daaSTejun Heo dev->class, new_class); 3911422c9daaSTejun Heo rc = -ENODEV; 3912422c9daaSTejun Heo goto fail; 3913422c9daaSTejun Heo } 3914422c9daaSTejun Heo 3915fe30911bSTejun Heo /* re-read ID */ 3916fe30911bSTejun Heo rc = ata_dev_reread_id(dev, readid_flags); 
3917fe30911bSTejun Heo if (rc) 3918fe30911bSTejun Heo goto fail; 3919c6fd2807SJeff Garzik 3920c6fd2807SJeff Garzik /* configure device according to the new ID */ 3921efdaedc4STejun Heo rc = ata_dev_configure(dev); 39226ddcd3b0STejun Heo if (rc) 39236ddcd3b0STejun Heo goto fail; 39246ddcd3b0STejun Heo 39256ddcd3b0STejun Heo /* verify n_sectors hasn't changed */ 3926b54eebd6STejun Heo if (dev->class == ATA_DEV_ATA && n_sectors && 3927b54eebd6STejun Heo dev->n_sectors != n_sectors) { 39286ddcd3b0STejun Heo ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " 39296ddcd3b0STejun Heo "%llu != %llu\n", 39306ddcd3b0STejun Heo (unsigned long long)n_sectors, 39316ddcd3b0STejun Heo (unsigned long long)dev->n_sectors); 39328270bec4STejun Heo 39338270bec4STejun Heo /* restore original n_sectors */ 39348270bec4STejun Heo dev->n_sectors = n_sectors; 39358270bec4STejun Heo 39366ddcd3b0STejun Heo rc = -ENODEV; 39376ddcd3b0STejun Heo goto fail; 39386ddcd3b0STejun Heo } 39396ddcd3b0STejun Heo 3940c6fd2807SJeff Garzik return 0; 3941c6fd2807SJeff Garzik 3942c6fd2807SJeff Garzik fail: 3943c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); 3944c6fd2807SJeff Garzik return rc; 3945c6fd2807SJeff Garzik } 3946c6fd2807SJeff Garzik 39476919a0a6SAlan Cox struct ata_blacklist_entry { 39486919a0a6SAlan Cox const char *model_num; 39496919a0a6SAlan Cox const char *model_rev; 39506919a0a6SAlan Cox unsigned long horkage; 39516919a0a6SAlan Cox }; 39526919a0a6SAlan Cox 39536919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = { 39546919a0a6SAlan Cox /* Devices with DMA related problems under Linux */ 39556919a0a6SAlan Cox { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 39566919a0a6SAlan Cox { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 39576919a0a6SAlan Cox { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 39586919a0a6SAlan Cox { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 39596919a0a6SAlan Cox { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B",	NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A",	NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7225SBSUN250G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "HDT722516DLA380",	"V43OA96A",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.ALD",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CCD",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* End Marker */
	{ }
};

/**
 *	strn_pattern_cmp - compare @name against pattern @patt
 *	@patt: pattern, optionally ending in wildcard @wildchar
 *	@name: string to match
 *	@wildchar: wildcard character (callers pass '*')
 *
 *	If @patt ends in @wildchar, only the prefix before the wildcard
 *	is compared.  Otherwise at most strlen(@name) characters are
 *	compared (note: @name being a strict prefix of @patt therefore
 *	still compares equal over that length).  An empty @name matches
 *	only an empty @patt.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strncmp() convention).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;
	int len;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		len = p - patt;
	else {
		len = strlen(name);
		if (!len) {
			if (!*patt)
				return 0;
			return -1;
		}
	}

	return strncmp(patt, name, len);
}

/**
 *	ata_dev_blacklisted - look up quirk flags for @dev
 *	@dev: ATA device to look up
 *
 *	Scan ata_device_blacklist[] for an entry whose model number
 *	pattern (and, if non-NULL, firmware revision pattern) matches
 *	@dev's IDENTIFY data.
 *
 *	RETURNS:
 *	horkage flags of the first matching entry, 0 if none match.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
40916919a0a6SAlan Cox */ 40929af5c9c9STejun Heo if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 40936919a0a6SAlan Cox (dev->flags & ATA_DFLAG_CDB_INTR)) 40946919a0a6SAlan Cox return 1; 409575683fe7STejun Heo return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 40966919a0a6SAlan Cox } 40976919a0a6SAlan Cox 4098c6fd2807SJeff Garzik /** 4099c6fd2807SJeff Garzik * ata_dev_xfermask - Compute supported xfermask of the given device 4100c6fd2807SJeff Garzik * @dev: Device to compute xfermask for 4101c6fd2807SJeff Garzik * 4102c6fd2807SJeff Garzik * Compute supported xfermask of @dev and store it in 4103c6fd2807SJeff Garzik * dev->*_mask. This function is responsible for applying all 4104c6fd2807SJeff Garzik * known limits including host controller limits, device 4105c6fd2807SJeff Garzik * blacklist, etc... 4106c6fd2807SJeff Garzik * 4107c6fd2807SJeff Garzik * LOCKING: 4108c6fd2807SJeff Garzik * None. 4109c6fd2807SJeff Garzik */ 4110c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev) 4111c6fd2807SJeff Garzik { 41129af5c9c9STejun Heo struct ata_link *link = dev->link; 41139af5c9c9STejun Heo struct ata_port *ap = link->ap; 4114cca3974eSJeff Garzik struct ata_host *host = ap->host; 4115c6fd2807SJeff Garzik unsigned long xfer_mask; 4116c6fd2807SJeff Garzik 4117c6fd2807SJeff Garzik /* controller modes available */ 4118c6fd2807SJeff Garzik xfer_mask = ata_pack_xfermask(ap->pio_mask, 4119c6fd2807SJeff Garzik ap->mwdma_mask, ap->udma_mask); 4120c6fd2807SJeff Garzik 41218343f889SRobert Hancock /* drive modes available */ 4122c6fd2807SJeff Garzik xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4123c6fd2807SJeff Garzik dev->mwdma_mask, dev->udma_mask); 4124c6fd2807SJeff Garzik xfer_mask &= ata_id_xfermask(dev->id); 4125c6fd2807SJeff Garzik 4126b352e57dSAlan Cox /* 4127b352e57dSAlan Cox * CFA Advanced TrueIDE timings are not allowed on a shared 4128b352e57dSAlan Cox * cable 4129b352e57dSAlan Cox */ 4130b352e57dSAlan Cox if (ata_dev_pair(dev)) { 4131b352e57dSAlan 
Cox /* No PIO5 or PIO6 */ 4132b352e57dSAlan Cox xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4133b352e57dSAlan Cox /* No MWDMA3 or MWDMA 4 */ 4134b352e57dSAlan Cox xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4135b352e57dSAlan Cox } 4136b352e57dSAlan Cox 4137c6fd2807SJeff Garzik if (ata_dma_blacklisted(dev)) { 4138c6fd2807SJeff Garzik xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4139c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 4140c6fd2807SJeff Garzik "device is on DMA blacklist, disabling DMA\n"); 4141c6fd2807SJeff Garzik } 4142c6fd2807SJeff Garzik 414314d66ab7SPetr Vandrovec if ((host->flags & ATA_HOST_SIMPLEX) && 414414d66ab7SPetr Vandrovec host->simplex_claimed && host->simplex_claimed != ap) { 4145c6fd2807SJeff Garzik xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4146c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4147c6fd2807SJeff Garzik "other device, disabling DMA\n"); 4148c6fd2807SJeff Garzik } 4149c6fd2807SJeff Garzik 4150e424675fSJeff Garzik if (ap->flags & ATA_FLAG_NO_IORDY) 4151e424675fSJeff Garzik xfer_mask &= ata_pio_mask_no_iordy(dev); 4152e424675fSJeff Garzik 4153c6fd2807SJeff Garzik if (ap->ops->mode_filter) 4154a76b62caSAlan Cox xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4155c6fd2807SJeff Garzik 41568343f889SRobert Hancock /* Apply cable rule here. Don't apply it early because when 41578343f889SRobert Hancock * we handle hot plug the cable type can itself change. 41588343f889SRobert Hancock * Check this last so that we know if the transfer rate was 41598343f889SRobert Hancock * solely limited by the cable. 41608343f889SRobert Hancock * Unknown or 80 wire cables reported host side are checked 41618343f889SRobert Hancock * drive side as well. Cases where we know a 40wire cable 41628343f889SRobert Hancock * is used safely for 80 are not checked here. 
41638343f889SRobert Hancock */ 41648343f889SRobert Hancock if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 41658343f889SRobert Hancock /* UDMA/44 or higher would be available */ 41668343f889SRobert Hancock if ((ap->cbl == ATA_CBL_PATA40) || 41678343f889SRobert Hancock (ata_drive_40wire(dev->id) && 41688343f889SRobert Hancock (ap->cbl == ATA_CBL_PATA_UNK || 41698343f889SRobert Hancock ap->cbl == ATA_CBL_PATA80))) { 41708343f889SRobert Hancock ata_dev_printk(dev, KERN_WARNING, 41718343f889SRobert Hancock "limited to UDMA/33 due to 40-wire cable\n"); 41728343f889SRobert Hancock xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 41738343f889SRobert Hancock } 41748343f889SRobert Hancock 4175c6fd2807SJeff Garzik ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4176c6fd2807SJeff Garzik &dev->mwdma_mask, &dev->udma_mask); 4177c6fd2807SJeff Garzik } 4178c6fd2807SJeff Garzik 4179c6fd2807SJeff Garzik /** 4180c6fd2807SJeff Garzik * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4181c6fd2807SJeff Garzik * @dev: Device to which command will be sent 4182c6fd2807SJeff Garzik * 4183c6fd2807SJeff Garzik * Issue SET FEATURES - XFER MODE command to device @dev 4184c6fd2807SJeff Garzik * on port @ap. 4185c6fd2807SJeff Garzik * 4186c6fd2807SJeff Garzik * LOCKING: 4187c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 4188c6fd2807SJeff Garzik * 4189c6fd2807SJeff Garzik * RETURNS: 4190c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 
4191c6fd2807SJeff Garzik */ 4192c6fd2807SJeff Garzik 4193c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4194c6fd2807SJeff Garzik { 4195c6fd2807SJeff Garzik struct ata_taskfile tf; 4196c6fd2807SJeff Garzik unsigned int err_mask; 4197c6fd2807SJeff Garzik 4198c6fd2807SJeff Garzik /* set up set-features taskfile */ 4199c6fd2807SJeff Garzik DPRINTK("set features - xfer mode\n"); 4200c6fd2807SJeff Garzik 4201464cf177STejun Heo /* Some controllers and ATAPI devices show flaky interrupt 4202464cf177STejun Heo * behavior after setting xfer mode. Use polling instead. 4203464cf177STejun Heo */ 4204c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 4205c6fd2807SJeff Garzik tf.command = ATA_CMD_SET_FEATURES; 4206c6fd2807SJeff Garzik tf.feature = SETFEATURES_XFER; 4207464cf177STejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4208c6fd2807SJeff Garzik tf.protocol = ATA_PROT_NODATA; 4209c6fd2807SJeff Garzik tf.nsect = dev->xfer_mode; 4210c6fd2807SJeff Garzik 42112b789108STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4212c6fd2807SJeff Garzik 4213c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 4214c6fd2807SJeff Garzik return err_mask; 4215c6fd2807SJeff Garzik } 4216c6fd2807SJeff Garzik /** 4217218f3d30SJeff Garzik * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 42189f45cbd3SKristen Carlson Accardi * @dev: Device to which command will be sent 42199f45cbd3SKristen Carlson Accardi * @enable: Whether to enable or disable the feature 4220218f3d30SJeff Garzik * @feature: The sector count represents the feature to set 42219f45cbd3SKristen Carlson Accardi * 42229f45cbd3SKristen Carlson Accardi * Issue SET FEATURES - SATA FEATURES command to device @dev 4223218f3d30SJeff Garzik * on port @ap with sector count 42249f45cbd3SKristen Carlson Accardi * 42259f45cbd3SKristen Carlson Accardi * LOCKING: 42269f45cbd3SKristen Carlson Accardi * PCI/etc. bus probe sem. 
42279f45cbd3SKristen Carlson Accardi * 42289f45cbd3SKristen Carlson Accardi * RETURNS: 42299f45cbd3SKristen Carlson Accardi * 0 on success, AC_ERR_* mask otherwise. 42309f45cbd3SKristen Carlson Accardi */ 4231218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, 4232218f3d30SJeff Garzik u8 feature) 42339f45cbd3SKristen Carlson Accardi { 42349f45cbd3SKristen Carlson Accardi struct ata_taskfile tf; 42359f45cbd3SKristen Carlson Accardi unsigned int err_mask; 42369f45cbd3SKristen Carlson Accardi 42379f45cbd3SKristen Carlson Accardi /* set up set-features taskfile */ 42389f45cbd3SKristen Carlson Accardi DPRINTK("set features - SATA features\n"); 42399f45cbd3SKristen Carlson Accardi 42409f45cbd3SKristen Carlson Accardi ata_tf_init(dev, &tf); 42419f45cbd3SKristen Carlson Accardi tf.command = ATA_CMD_SET_FEATURES; 42429f45cbd3SKristen Carlson Accardi tf.feature = enable; 42439f45cbd3SKristen Carlson Accardi tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 42449f45cbd3SKristen Carlson Accardi tf.protocol = ATA_PROT_NODATA; 4245218f3d30SJeff Garzik tf.nsect = feature; 42469f45cbd3SKristen Carlson Accardi 42472b789108STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 42489f45cbd3SKristen Carlson Accardi 42499f45cbd3SKristen Carlson Accardi DPRINTK("EXIT, err_mask=%x\n", err_mask); 42509f45cbd3SKristen Carlson Accardi return err_mask; 42519f45cbd3SKristen Carlson Accardi } 42529f45cbd3SKristen Carlson Accardi 42539f45cbd3SKristen Carlson Accardi /** 4254c6fd2807SJeff Garzik * ata_dev_init_params - Issue INIT DEV PARAMS command 4255c6fd2807SJeff Garzik * @dev: Device to which command will be sent 4256c6fd2807SJeff Garzik * @heads: Number of heads (taskfile parameter) 4257c6fd2807SJeff Garzik * @sectors: Number of sectors (taskfile parameter) 4258c6fd2807SJeff Garzik * 4259c6fd2807SJeff Garzik * LOCKING: 4260c6fd2807SJeff Garzik * Kernel thread context (may sleep) 4261c6fd2807SJeff Garzik * 4262c6fd2807SJeff 
Garzik * RETURNS: 4263c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 4264c6fd2807SJeff Garzik */ 4265c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev, 4266c6fd2807SJeff Garzik u16 heads, u16 sectors) 4267c6fd2807SJeff Garzik { 4268c6fd2807SJeff Garzik struct ata_taskfile tf; 4269c6fd2807SJeff Garzik unsigned int err_mask; 4270c6fd2807SJeff Garzik 4271c6fd2807SJeff Garzik /* Number of sectors per track 1-255. Number of heads 1-16 */ 4272c6fd2807SJeff Garzik if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4273c6fd2807SJeff Garzik return AC_ERR_INVALID; 4274c6fd2807SJeff Garzik 4275c6fd2807SJeff Garzik /* set up init dev params taskfile */ 4276c6fd2807SJeff Garzik DPRINTK("init dev params \n"); 4277c6fd2807SJeff Garzik 4278c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 4279c6fd2807SJeff Garzik tf.command = ATA_CMD_INIT_DEV_PARAMS; 4280c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4281c6fd2807SJeff Garzik tf.protocol = ATA_PROT_NODATA; 4282c6fd2807SJeff Garzik tf.nsect = sectors; 4283c6fd2807SJeff Garzik tf.device |= (heads - 1) & 0x0f; /* max head = num. 
of heads - 1 */ 4284c6fd2807SJeff Garzik 42852b789108STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 428618b2466cSAlan Cox /* A clean abort indicates an original or just out of spec drive 428718b2466cSAlan Cox and we should continue as we issue the setup based on the 428818b2466cSAlan Cox drive reported working geometry */ 428918b2466cSAlan Cox if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 429018b2466cSAlan Cox err_mask = 0; 4291c6fd2807SJeff Garzik 4292c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 4293c6fd2807SJeff Garzik return err_mask; 4294c6fd2807SJeff Garzik } 4295c6fd2807SJeff Garzik 4296c6fd2807SJeff Garzik /** 4297c6fd2807SJeff Garzik * ata_sg_clean - Unmap DMA memory associated with command 4298c6fd2807SJeff Garzik * @qc: Command containing DMA memory to be released 4299c6fd2807SJeff Garzik * 4300c6fd2807SJeff Garzik * Unmap all mapped DMA memory associated with this command. 4301c6fd2807SJeff Garzik * 4302c6fd2807SJeff Garzik * LOCKING: 4303cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4304c6fd2807SJeff Garzik */ 430570e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc) 4306c6fd2807SJeff Garzik { 4307c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 4308c6fd2807SJeff Garzik struct scatterlist *sg = qc->__sg; 4309c6fd2807SJeff Garzik int dir = qc->dma_dir; 4310c6fd2807SJeff Garzik void *pad_buf = NULL; 4311c6fd2807SJeff Garzik 4312c6fd2807SJeff Garzik WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); 4313c6fd2807SJeff Garzik WARN_ON(sg == NULL); 4314c6fd2807SJeff Garzik 4315c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SINGLE) 4316c6fd2807SJeff Garzik WARN_ON(qc->n_elem > 1); 4317c6fd2807SJeff Garzik 4318c6fd2807SJeff Garzik VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4319c6fd2807SJeff Garzik 4320c6fd2807SJeff Garzik /* if we padded the buffer out to 32-bit bound, and data 4321c6fd2807SJeff Garzik * xfer direction is from-device, we must copy from the 4322c6fd2807SJeff Garzik * 
pad buffer back into the supplied buffer 4323c6fd2807SJeff Garzik */ 4324c6fd2807SJeff Garzik if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) 4325c6fd2807SJeff Garzik pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 4326c6fd2807SJeff Garzik 4327c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SG) { 4328c6fd2807SJeff Garzik if (qc->n_elem) 4329c6fd2807SJeff Garzik dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); 4330c6fd2807SJeff Garzik /* restore last sg */ 433187260216SJens Axboe sg_last(sg, qc->orig_n_elem)->length += qc->pad_len; 4332c6fd2807SJeff Garzik if (pad_buf) { 4333c6fd2807SJeff Garzik struct scatterlist *psg = &qc->pad_sgent; 433445711f1aSJens Axboe void *addr = kmap_atomic(sg_page(psg), KM_IRQ0); 4335c6fd2807SJeff Garzik memcpy(addr + psg->offset, pad_buf, qc->pad_len); 4336c6fd2807SJeff Garzik kunmap_atomic(addr, KM_IRQ0); 4337c6fd2807SJeff Garzik } 4338c6fd2807SJeff Garzik } else { 4339c6fd2807SJeff Garzik if (qc->n_elem) 4340c6fd2807SJeff Garzik dma_unmap_single(ap->dev, 4341c6fd2807SJeff Garzik sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), 4342c6fd2807SJeff Garzik dir); 4343c6fd2807SJeff Garzik /* restore sg */ 4344c6fd2807SJeff Garzik sg->length += qc->pad_len; 4345c6fd2807SJeff Garzik if (pad_buf) 4346c6fd2807SJeff Garzik memcpy(qc->buf_virt + sg->length - qc->pad_len, 4347c6fd2807SJeff Garzik pad_buf, qc->pad_len); 4348c6fd2807SJeff Garzik } 4349c6fd2807SJeff Garzik 4350c6fd2807SJeff Garzik qc->flags &= ~ATA_QCFLAG_DMAMAP; 4351c6fd2807SJeff Garzik qc->__sg = NULL; 4352c6fd2807SJeff Garzik } 4353c6fd2807SJeff Garzik 4354c6fd2807SJeff Garzik /** 4355c6fd2807SJeff Garzik * ata_fill_sg - Fill PCI IDE PRD table 4356c6fd2807SJeff Garzik * @qc: Metadata associated with taskfile to be transferred 4357c6fd2807SJeff Garzik * 4358c6fd2807SJeff Garzik * Fill PCI IDE PRD (scatter-gather) table with segments 4359c6fd2807SJeff Garzik * associated with the current disk command. 
4360c6fd2807SJeff Garzik * 4361c6fd2807SJeff Garzik * LOCKING: 4362cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4363c6fd2807SJeff Garzik * 4364c6fd2807SJeff Garzik */ 4365c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc) 4366c6fd2807SJeff Garzik { 4367c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 4368c6fd2807SJeff Garzik struct scatterlist *sg; 4369c6fd2807SJeff Garzik unsigned int idx; 4370c6fd2807SJeff Garzik 4371c6fd2807SJeff Garzik WARN_ON(qc->__sg == NULL); 4372c6fd2807SJeff Garzik WARN_ON(qc->n_elem == 0 && qc->pad_len == 0); 4373c6fd2807SJeff Garzik 4374c6fd2807SJeff Garzik idx = 0; 4375c6fd2807SJeff Garzik ata_for_each_sg(sg, qc) { 4376c6fd2807SJeff Garzik u32 addr, offset; 4377c6fd2807SJeff Garzik u32 sg_len, len; 4378c6fd2807SJeff Garzik 4379c6fd2807SJeff Garzik /* determine if physical DMA addr spans 64K boundary. 4380c6fd2807SJeff Garzik * Note h/w doesn't support 64-bit, so we unconditionally 4381c6fd2807SJeff Garzik * truncate dma_addr_t to u32. 
4382c6fd2807SJeff Garzik */ 4383c6fd2807SJeff Garzik addr = (u32) sg_dma_address(sg); 4384c6fd2807SJeff Garzik sg_len = sg_dma_len(sg); 4385c6fd2807SJeff Garzik 4386c6fd2807SJeff Garzik while (sg_len) { 4387c6fd2807SJeff Garzik offset = addr & 0xffff; 4388c6fd2807SJeff Garzik len = sg_len; 4389c6fd2807SJeff Garzik if ((offset + sg_len) > 0x10000) 4390c6fd2807SJeff Garzik len = 0x10000 - offset; 4391c6fd2807SJeff Garzik 4392c6fd2807SJeff Garzik ap->prd[idx].addr = cpu_to_le32(addr); 4393c6fd2807SJeff Garzik ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); 4394c6fd2807SJeff Garzik VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 4395c6fd2807SJeff Garzik 4396c6fd2807SJeff Garzik idx++; 4397c6fd2807SJeff Garzik sg_len -= len; 4398c6fd2807SJeff Garzik addr += len; 4399c6fd2807SJeff Garzik } 4400c6fd2807SJeff Garzik } 4401c6fd2807SJeff Garzik 4402c6fd2807SJeff Garzik if (idx) 4403c6fd2807SJeff Garzik ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 4404c6fd2807SJeff Garzik } 4405b9a4197eSTejun Heo 4406c6fd2807SJeff Garzik /** 4407d26fc955SAlan Cox * ata_fill_sg_dumb - Fill PCI IDE PRD table 4408d26fc955SAlan Cox * @qc: Metadata associated with taskfile to be transferred 4409d26fc955SAlan Cox * 4410d26fc955SAlan Cox * Fill PCI IDE PRD (scatter-gather) table with segments 4411d26fc955SAlan Cox * associated with the current disk command. Perform the fill 4412d26fc955SAlan Cox * so that we avoid writing any length 64K records for 4413d26fc955SAlan Cox * controllers that don't follow the spec. 
4414d26fc955SAlan Cox * 4415d26fc955SAlan Cox * LOCKING: 4416d26fc955SAlan Cox * spin_lock_irqsave(host lock) 4417d26fc955SAlan Cox * 4418d26fc955SAlan Cox */ 4419d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) 4420d26fc955SAlan Cox { 4421d26fc955SAlan Cox struct ata_port *ap = qc->ap; 4422d26fc955SAlan Cox struct scatterlist *sg; 4423d26fc955SAlan Cox unsigned int idx; 4424d26fc955SAlan Cox 4425d26fc955SAlan Cox WARN_ON(qc->__sg == NULL); 4426d26fc955SAlan Cox WARN_ON(qc->n_elem == 0 && qc->pad_len == 0); 4427d26fc955SAlan Cox 4428d26fc955SAlan Cox idx = 0; 4429d26fc955SAlan Cox ata_for_each_sg(sg, qc) { 4430d26fc955SAlan Cox u32 addr, offset; 4431d26fc955SAlan Cox u32 sg_len, len, blen; 4432d26fc955SAlan Cox 4433d26fc955SAlan Cox /* determine if physical DMA addr spans 64K boundary. 4434d26fc955SAlan Cox * Note h/w doesn't support 64-bit, so we unconditionally 4435d26fc955SAlan Cox * truncate dma_addr_t to u32. 4436d26fc955SAlan Cox */ 4437d26fc955SAlan Cox addr = (u32) sg_dma_address(sg); 4438d26fc955SAlan Cox sg_len = sg_dma_len(sg); 4439d26fc955SAlan Cox 4440d26fc955SAlan Cox while (sg_len) { 4441d26fc955SAlan Cox offset = addr & 0xffff; 4442d26fc955SAlan Cox len = sg_len; 4443d26fc955SAlan Cox if ((offset + sg_len) > 0x10000) 4444d26fc955SAlan Cox len = 0x10000 - offset; 4445d26fc955SAlan Cox 4446d26fc955SAlan Cox blen = len & 0xffff; 4447d26fc955SAlan Cox ap->prd[idx].addr = cpu_to_le32(addr); 4448d26fc955SAlan Cox if (blen == 0) { 4449d26fc955SAlan Cox /* Some PATA chipsets like the CS5530 can't 4450d26fc955SAlan Cox cope with 0x0000 meaning 64K as the spec says */ 4451d26fc955SAlan Cox ap->prd[idx].flags_len = cpu_to_le32(0x8000); 4452d26fc955SAlan Cox blen = 0x8000; 4453d26fc955SAlan Cox ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000); 4454d26fc955SAlan Cox } 4455d26fc955SAlan Cox ap->prd[idx].flags_len = cpu_to_le32(blen); 4456d26fc955SAlan Cox VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 4457d26fc955SAlan Cox 
4458d26fc955SAlan Cox idx++; 4459d26fc955SAlan Cox sg_len -= len; 4460d26fc955SAlan Cox addr += len; 4461d26fc955SAlan Cox } 4462d26fc955SAlan Cox } 4463d26fc955SAlan Cox 4464d26fc955SAlan Cox if (idx) 4465d26fc955SAlan Cox ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 4466d26fc955SAlan Cox } 4467d26fc955SAlan Cox 4468d26fc955SAlan Cox /** 4469c6fd2807SJeff Garzik * ata_check_atapi_dma - Check whether ATAPI DMA can be supported 4470c6fd2807SJeff Garzik * @qc: Metadata associated with taskfile to check 4471c6fd2807SJeff Garzik * 4472c6fd2807SJeff Garzik * Allow low-level driver to filter ATA PACKET commands, returning 4473c6fd2807SJeff Garzik * a status indicating whether or not it is OK to use DMA for the 4474c6fd2807SJeff Garzik * supplied PACKET command. 4475c6fd2807SJeff Garzik * 4476c6fd2807SJeff Garzik * LOCKING: 4477cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4478c6fd2807SJeff Garzik * 4479c6fd2807SJeff Garzik * RETURNS: 0 when ATAPI DMA can be used 4480c6fd2807SJeff Garzik * nonzero otherwise 4481c6fd2807SJeff Garzik */ 4482c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc) 4483c6fd2807SJeff Garzik { 4484c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 4485c6fd2807SJeff Garzik 4486b9a4197eSTejun Heo /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4487b9a4197eSTejun Heo * few ATAPI devices choke on such DMA requests. 
4488b9a4197eSTejun Heo */ 4489b9a4197eSTejun Heo if (unlikely(qc->nbytes & 15)) 44906f23a31dSAlbert Lee return 1; 44916f23a31dSAlbert Lee 4492c6fd2807SJeff Garzik if (ap->ops->check_atapi_dma) 4493b9a4197eSTejun Heo return ap->ops->check_atapi_dma(qc); 4494c6fd2807SJeff Garzik 4495b9a4197eSTejun Heo return 0; 4496c6fd2807SJeff Garzik } 4497b9a4197eSTejun Heo 4498c6fd2807SJeff Garzik /** 449931cc23b3STejun Heo * ata_std_qc_defer - Check whether a qc needs to be deferred 450031cc23b3STejun Heo * @qc: ATA command in question 450131cc23b3STejun Heo * 450231cc23b3STejun Heo * Non-NCQ commands cannot run with any other command, NCQ or 450331cc23b3STejun Heo * not. As upper layer only knows the queue depth, we are 450431cc23b3STejun Heo * responsible for maintaining exclusion. This function checks 450531cc23b3STejun Heo * whether a new command @qc can be issued. 450631cc23b3STejun Heo * 450731cc23b3STejun Heo * LOCKING: 450831cc23b3STejun Heo * spin_lock_irqsave(host lock) 450931cc23b3STejun Heo * 451031cc23b3STejun Heo * RETURNS: 451131cc23b3STejun Heo * ATA_DEFER_* if deferring is needed, 0 otherwise. 
451231cc23b3STejun Heo */ 451331cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc) 451431cc23b3STejun Heo { 451531cc23b3STejun Heo struct ata_link *link = qc->dev->link; 451631cc23b3STejun Heo 451731cc23b3STejun Heo if (qc->tf.protocol == ATA_PROT_NCQ) { 451831cc23b3STejun Heo if (!ata_tag_valid(link->active_tag)) 451931cc23b3STejun Heo return 0; 452031cc23b3STejun Heo } else { 452131cc23b3STejun Heo if (!ata_tag_valid(link->active_tag) && !link->sactive) 452231cc23b3STejun Heo return 0; 452331cc23b3STejun Heo } 452431cc23b3STejun Heo 452531cc23b3STejun Heo return ATA_DEFER_LINK; 452631cc23b3STejun Heo } 452731cc23b3STejun Heo 452831cc23b3STejun Heo /** 4529c6fd2807SJeff Garzik * ata_qc_prep - Prepare taskfile for submission 4530c6fd2807SJeff Garzik * @qc: Metadata associated with taskfile to be prepared 4531c6fd2807SJeff Garzik * 4532c6fd2807SJeff Garzik * Prepare ATA taskfile for submission. 4533c6fd2807SJeff Garzik * 4534c6fd2807SJeff Garzik * LOCKING: 4535cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4536c6fd2807SJeff Garzik */ 4537c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc) 4538c6fd2807SJeff Garzik { 4539c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 4540c6fd2807SJeff Garzik return; 4541c6fd2807SJeff Garzik 4542c6fd2807SJeff Garzik ata_fill_sg(qc); 4543c6fd2807SJeff Garzik } 4544c6fd2807SJeff Garzik 4545d26fc955SAlan Cox /** 4546d26fc955SAlan Cox * ata_dumb_qc_prep - Prepare taskfile for submission 4547d26fc955SAlan Cox * @qc: Metadata associated with taskfile to be prepared 4548d26fc955SAlan Cox * 4549d26fc955SAlan Cox * Prepare ATA taskfile for submission. 
4550d26fc955SAlan Cox * 4551d26fc955SAlan Cox * LOCKING: 4552d26fc955SAlan Cox * spin_lock_irqsave(host lock) 4553d26fc955SAlan Cox */ 4554d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc) 4555d26fc955SAlan Cox { 4556d26fc955SAlan Cox if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 4557d26fc955SAlan Cox return; 4558d26fc955SAlan Cox 4559d26fc955SAlan Cox ata_fill_sg_dumb(qc); 4560d26fc955SAlan Cox } 4561d26fc955SAlan Cox 4562c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4563c6fd2807SJeff Garzik 4564c6fd2807SJeff Garzik /** 4565c6fd2807SJeff Garzik * ata_sg_init_one - Associate command with memory buffer 4566c6fd2807SJeff Garzik * @qc: Command to be associated 4567c6fd2807SJeff Garzik * @buf: Memory buffer 4568c6fd2807SJeff Garzik * @buflen: Length of memory buffer, in bytes. 4569c6fd2807SJeff Garzik * 4570c6fd2807SJeff Garzik * Initialize the data-related elements of queued_cmd @qc 4571c6fd2807SJeff Garzik * to point to a single memory buffer, @buf of byte length @buflen. 4572c6fd2807SJeff Garzik * 4573c6fd2807SJeff Garzik * LOCKING: 4574cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4575c6fd2807SJeff Garzik */ 4576c6fd2807SJeff Garzik 4577c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) 4578c6fd2807SJeff Garzik { 4579c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SINGLE; 4580c6fd2807SJeff Garzik 4581c6fd2807SJeff Garzik qc->__sg = &qc->sgent; 4582c6fd2807SJeff Garzik qc->n_elem = 1; 4583c6fd2807SJeff Garzik qc->orig_n_elem = 1; 4584c6fd2807SJeff Garzik qc->buf_virt = buf; 4585c6fd2807SJeff Garzik qc->nbytes = buflen; 458687260216SJens Axboe qc->cursg = qc->__sg; 4587c6fd2807SJeff Garzik 458861c0596cSTejun Heo sg_init_one(&qc->sgent, buf, buflen); 4589c6fd2807SJeff Garzik } 4590c6fd2807SJeff Garzik 4591c6fd2807SJeff Garzik /** 4592c6fd2807SJeff Garzik * ata_sg_init - Associate command with scatter-gather table. 
4593c6fd2807SJeff Garzik * @qc: Command to be associated 4594c6fd2807SJeff Garzik * @sg: Scatter-gather table. 4595c6fd2807SJeff Garzik * @n_elem: Number of elements in s/g table. 4596c6fd2807SJeff Garzik * 4597c6fd2807SJeff Garzik * Initialize the data-related elements of queued_cmd @qc 4598c6fd2807SJeff Garzik * to point to a scatter-gather table @sg, containing @n_elem 4599c6fd2807SJeff Garzik * elements. 4600c6fd2807SJeff Garzik * 4601c6fd2807SJeff Garzik * LOCKING: 4602cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4603c6fd2807SJeff Garzik */ 4604c6fd2807SJeff Garzik 4605c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4606c6fd2807SJeff Garzik unsigned int n_elem) 4607c6fd2807SJeff Garzik { 4608c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SG; 4609c6fd2807SJeff Garzik qc->__sg = sg; 4610c6fd2807SJeff Garzik qc->n_elem = n_elem; 4611c6fd2807SJeff Garzik qc->orig_n_elem = n_elem; 461287260216SJens Axboe qc->cursg = qc->__sg; 4613c6fd2807SJeff Garzik } 4614c6fd2807SJeff Garzik 4615c6fd2807SJeff Garzik /** 4616c6fd2807SJeff Garzik * ata_sg_setup_one - DMA-map the memory buffer associated with a command. 4617c6fd2807SJeff Garzik * @qc: Command with memory buffer to be mapped. 4618c6fd2807SJeff Garzik * 4619c6fd2807SJeff Garzik * DMA-map the memory buffer associated with queued_cmd @qc. 4620c6fd2807SJeff Garzik * 4621c6fd2807SJeff Garzik * LOCKING: 4622cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4623c6fd2807SJeff Garzik * 4624c6fd2807SJeff Garzik * RETURNS: 4625c6fd2807SJeff Garzik * Zero on success, negative on error. 
4626c6fd2807SJeff Garzik */ 4627c6fd2807SJeff Garzik 4628c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc) 4629c6fd2807SJeff Garzik { 4630c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 4631c6fd2807SJeff Garzik int dir = qc->dma_dir; 4632c6fd2807SJeff Garzik struct scatterlist *sg = qc->__sg; 4633c6fd2807SJeff Garzik dma_addr_t dma_address; 4634c6fd2807SJeff Garzik int trim_sg = 0; 4635c6fd2807SJeff Garzik 4636c6fd2807SJeff Garzik /* we must lengthen transfers to end on a 32-bit boundary */ 4637c6fd2807SJeff Garzik qc->pad_len = sg->length & 3; 4638c6fd2807SJeff Garzik if (qc->pad_len) { 4639c6fd2807SJeff Garzik void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 4640c6fd2807SJeff Garzik struct scatterlist *psg = &qc->pad_sgent; 4641c6fd2807SJeff Garzik 4642c6fd2807SJeff Garzik WARN_ON(qc->dev->class != ATA_DEV_ATAPI); 4643c6fd2807SJeff Garzik 4644c6fd2807SJeff Garzik memset(pad_buf, 0, ATA_DMA_PAD_SZ); 4645c6fd2807SJeff Garzik 4646c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_WRITE) 4647c6fd2807SJeff Garzik memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, 4648c6fd2807SJeff Garzik qc->pad_len); 4649c6fd2807SJeff Garzik 4650c6fd2807SJeff Garzik sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); 4651c6fd2807SJeff Garzik sg_dma_len(psg) = ATA_DMA_PAD_SZ; 4652c6fd2807SJeff Garzik /* trim sg */ 4653c6fd2807SJeff Garzik sg->length -= qc->pad_len; 4654c6fd2807SJeff Garzik if (sg->length == 0) 4655c6fd2807SJeff Garzik trim_sg = 1; 4656c6fd2807SJeff Garzik 4657c6fd2807SJeff Garzik DPRINTK("padding done, sg->length=%u pad_len=%u\n", 4658c6fd2807SJeff Garzik sg->length, qc->pad_len); 4659c6fd2807SJeff Garzik } 4660c6fd2807SJeff Garzik 4661c6fd2807SJeff Garzik if (trim_sg) { 4662c6fd2807SJeff Garzik qc->n_elem--; 4663c6fd2807SJeff Garzik goto skip_map; 4664c6fd2807SJeff Garzik } 4665c6fd2807SJeff Garzik 4666c6fd2807SJeff Garzik dma_address = dma_map_single(ap->dev, qc->buf_virt, 4667c6fd2807SJeff Garzik sg->length, dir); 
4668c6fd2807SJeff Garzik if (dma_mapping_error(dma_address)) { 4669c6fd2807SJeff Garzik /* restore sg */ 4670c6fd2807SJeff Garzik sg->length += qc->pad_len; 4671c6fd2807SJeff Garzik return -1; 4672c6fd2807SJeff Garzik } 4673c6fd2807SJeff Garzik 4674c6fd2807SJeff Garzik sg_dma_address(sg) = dma_address; 4675c6fd2807SJeff Garzik sg_dma_len(sg) = sg->length; 4676c6fd2807SJeff Garzik 4677c6fd2807SJeff Garzik skip_map: 4678c6fd2807SJeff Garzik DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), 4679c6fd2807SJeff Garzik qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 4680c6fd2807SJeff Garzik 4681c6fd2807SJeff Garzik return 0; 4682c6fd2807SJeff Garzik } 4683c6fd2807SJeff Garzik 4684c6fd2807SJeff Garzik /** 4685c6fd2807SJeff Garzik * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4686c6fd2807SJeff Garzik * @qc: Command with scatter-gather table to be mapped. 4687c6fd2807SJeff Garzik * 4688c6fd2807SJeff Garzik * DMA-map the scatter-gather table associated with queued_cmd @qc. 4689c6fd2807SJeff Garzik * 4690c6fd2807SJeff Garzik * LOCKING: 4691cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 4692c6fd2807SJeff Garzik * 4693c6fd2807SJeff Garzik * RETURNS: 4694c6fd2807SJeff Garzik * Zero on success, negative on error. 
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag scratch buffer set aside in the port's pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI devices are expected to issue unaligned
		 * transfers; ATA sector-sized I/O is always a multiple of 4
		 */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
			    qc->pad_len, offset_in_page(offset));

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* copy the tail bytes into the pad buffer before the
			 * last sg entry is trimmed below
			 */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg so the caller sees the original list */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}

/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	/* no-op on little-endian hosts; data is already in wire order */
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
4802c6fd2807SJeff Garzik */ 48030d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf, 4804c6fd2807SJeff Garzik unsigned int buflen, int write_data) 4805c6fd2807SJeff Garzik { 48069af5c9c9STejun Heo struct ata_port *ap = adev->link->ap; 4807c6fd2807SJeff Garzik unsigned int words = buflen >> 1; 4808c6fd2807SJeff Garzik 4809c6fd2807SJeff Garzik /* Transfer multiple of 2 bytes */ 4810c6fd2807SJeff Garzik if (write_data) 48110d5ff566STejun Heo iowrite16_rep(ap->ioaddr.data_addr, buf, words); 4812c6fd2807SJeff Garzik else 48130d5ff566STejun Heo ioread16_rep(ap->ioaddr.data_addr, buf, words); 4814c6fd2807SJeff Garzik 4815c6fd2807SJeff Garzik /* Transfer trailing 1 byte, if any. */ 4816c6fd2807SJeff Garzik if (unlikely(buflen & 0x01)) { 4817c6fd2807SJeff Garzik u16 align_buf[1] = { 0 }; 4818c6fd2807SJeff Garzik unsigned char *trailing_buf = buf + buflen - 1; 4819c6fd2807SJeff Garzik 4820c6fd2807SJeff Garzik if (write_data) { 4821c6fd2807SJeff Garzik memcpy(align_buf, trailing_buf, 1); 48220d5ff566STejun Heo iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr); 4823c6fd2807SJeff Garzik } else { 48240d5ff566STejun Heo align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr)); 4825c6fd2807SJeff Garzik memcpy(trailing_buf, align_buf, 1); 4826c6fd2807SJeff Garzik } 4827c6fd2807SJeff Garzik } 4828c6fd2807SJeff Garzik } 4829c6fd2807SJeff Garzik 4830c6fd2807SJeff Garzik /** 48310d5ff566STejun Heo * ata_data_xfer_noirq - Transfer data by PIO 4832c6fd2807SJeff Garzik * @adev: device to target 4833c6fd2807SJeff Garzik * @buf: data buffer 4834c6fd2807SJeff Garzik * @buflen: buffer length 4835c6fd2807SJeff Garzik * @write_data: read/write 4836c6fd2807SJeff Garzik * 4837c6fd2807SJeff Garzik * Transfer data from/to the device data register by PIO. Do the 4838c6fd2807SJeff Garzik * transfer with interrupts disabled. 4839c6fd2807SJeff Garzik * 4840c6fd2807SJeff Garzik * LOCKING: 4841c6fd2807SJeff Garzik * Inherited from caller. 
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}


/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this is the final sector of the command -> last HSM state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ?
		"write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted -> advance to the next one
	 * NOTE(review): assumes sect_size never straddles an sg entry,
	 * i.e. sg lengths are sector-multiples -- enforced by the caller's
	 * sg setup, not visible here.
	 */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
4918c6fd2807SJeff Garzik */ 4919c6fd2807SJeff Garzik 4920c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc) 4921c6fd2807SJeff Garzik { 4922c6fd2807SJeff Garzik if (is_multi_taskfile(&qc->tf)) { 4923c6fd2807SJeff Garzik /* READ/WRITE MULTIPLE */ 4924c6fd2807SJeff Garzik unsigned int nsect; 4925c6fd2807SJeff Garzik 4926c6fd2807SJeff Garzik WARN_ON(qc->dev->multi_count == 0); 4927c6fd2807SJeff Garzik 49285a5dbd18SMark Lord nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, 4929726f0785STejun Heo qc->dev->multi_count); 4930c6fd2807SJeff Garzik while (nsect--) 4931c6fd2807SJeff Garzik ata_pio_sector(qc); 4932c6fd2807SJeff Garzik } else 4933c6fd2807SJeff Garzik ata_pio_sector(qc); 49344cc980b3SAlbert Lee 49354cc980b3SAlbert Lee ata_altstatus(qc->ap); /* flush */ 4936c6fd2807SJeff Garzik } 4937c6fd2807SJeff Garzik 4938c6fd2807SJeff Garzik /** 4939c6fd2807SJeff Garzik * atapi_send_cdb - Write CDB bytes to hardware 4940c6fd2807SJeff Garzik * @ap: Port to which ATAPI device is attached. 4941c6fd2807SJeff Garzik * @qc: Taskfile currently active 4942c6fd2807SJeff Garzik * 4943c6fd2807SJeff Garzik * When device has indicated its readiness to accept 4944c6fd2807SJeff Garzik * a CDB, this function is called. Send the CDB. 4945c6fd2807SJeff Garzik * 4946c6fd2807SJeff Garzik * LOCKING: 4947c6fd2807SJeff Garzik * caller. 
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	/* the CDB always goes out as a write through the data register */
	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* next HSM state depends on how the payload will move */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;
	int no_more_sg = 0;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(no_more_sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one 16-bit word at a time through pad_buf */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = qc->cursg;

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ?
		"write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		/* remember when the last sg entry is consumed so the
		 * drain/pad branch above fires on the next iteration
		 */
		if (qc->cursg == lsg)
			no_more_sg = 1;

		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants to move this DRQ, from LBA mid/high */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */

static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* polled commands always run from the workqueue */
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		/* PIO-out sends its first data block from process context */
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		/* ATAPI without CDB interrupt sends the CDB from the wq */
		if (is_atapi_taskfile(&qc->tf) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.  Re-fetch the qc by tag; it may be
			 * gone already.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* caller already holds the host lock */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* no EH available: complete unconditionally */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}

/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}

/**
 *	ata_pio_task - polled HSM driver running from the port workqueue
 *	@work: work struct embedded in the target ata_port
 *
 *	Wait for BSY to clear (short busy-wait, then a brief sleep,
 *	then requeue as delayed work if still busy) and step the HSM
 *	via ata_hsm_move(), looping while it asks for another poll.
 *
 *	LOCKING:
 *	None on entry; ata_hsm_move() grabs the host lock as needed.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
5481c6fd2807SJeff Garzik */ 5482c6fd2807SJeff Garzik 5483c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 5484c6fd2807SJeff Garzik { 5485c6fd2807SJeff Garzik struct ata_queued_cmd *qc = NULL; 5486c6fd2807SJeff Garzik unsigned int i; 5487c6fd2807SJeff Garzik 5488c6fd2807SJeff Garzik /* no command while frozen */ 5489c6fd2807SJeff Garzik if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 5490c6fd2807SJeff Garzik return NULL; 5491c6fd2807SJeff Garzik 5492c6fd2807SJeff Garzik /* the last tag is reserved for internal command. */ 5493c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 5494c6fd2807SJeff Garzik if (!test_and_set_bit(i, &ap->qc_allocated)) { 5495c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, i); 5496c6fd2807SJeff Garzik break; 5497c6fd2807SJeff Garzik } 5498c6fd2807SJeff Garzik 5499c6fd2807SJeff Garzik if (qc) 5500c6fd2807SJeff Garzik qc->tag = i; 5501c6fd2807SJeff Garzik 5502c6fd2807SJeff Garzik return qc; 5503c6fd2807SJeff Garzik } 5504c6fd2807SJeff Garzik 5505c6fd2807SJeff Garzik /** 5506c6fd2807SJeff Garzik * ata_qc_new_init - Request an available ATA command, and initialize it 5507c6fd2807SJeff Garzik * @dev: Device from whom we request an available command structure 5508c6fd2807SJeff Garzik * 5509c6fd2807SJeff Garzik * LOCKING: 5510c6fd2807SJeff Garzik * None. 
5511c6fd2807SJeff Garzik */ 5512c6fd2807SJeff Garzik 5513c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 5514c6fd2807SJeff Garzik { 55159af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 5516c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 5517c6fd2807SJeff Garzik 5518c6fd2807SJeff Garzik qc = ata_qc_new(ap); 5519c6fd2807SJeff Garzik if (qc) { 5520c6fd2807SJeff Garzik qc->scsicmd = NULL; 5521c6fd2807SJeff Garzik qc->ap = ap; 5522c6fd2807SJeff Garzik qc->dev = dev; 5523c6fd2807SJeff Garzik 5524c6fd2807SJeff Garzik ata_qc_reinit(qc); 5525c6fd2807SJeff Garzik } 5526c6fd2807SJeff Garzik 5527c6fd2807SJeff Garzik return qc; 5528c6fd2807SJeff Garzik } 5529c6fd2807SJeff Garzik 5530c6fd2807SJeff Garzik /** 5531c6fd2807SJeff Garzik * ata_qc_free - free unused ata_queued_cmd 5532c6fd2807SJeff Garzik * @qc: Command to complete 5533c6fd2807SJeff Garzik * 5534c6fd2807SJeff Garzik * Designed to free unused ata_queued_cmd object 5535c6fd2807SJeff Garzik * in case something prevents using it. 
5536c6fd2807SJeff Garzik * 5537c6fd2807SJeff Garzik * LOCKING: 5538cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5539c6fd2807SJeff Garzik */ 5540c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc) 5541c6fd2807SJeff Garzik { 5542c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 5543c6fd2807SJeff Garzik unsigned int tag; 5544c6fd2807SJeff Garzik 5545c6fd2807SJeff Garzik WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5546c6fd2807SJeff Garzik 5547c6fd2807SJeff Garzik qc->flags = 0; 5548c6fd2807SJeff Garzik tag = qc->tag; 5549c6fd2807SJeff Garzik if (likely(ata_tag_valid(tag))) { 5550c6fd2807SJeff Garzik qc->tag = ATA_TAG_POISON; 5551c6fd2807SJeff Garzik clear_bit(tag, &ap->qc_allocated); 5552c6fd2807SJeff Garzik } 5553c6fd2807SJeff Garzik } 5554c6fd2807SJeff Garzik 5555c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc) 5556c6fd2807SJeff Garzik { 5557c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 55589af5c9c9STejun Heo struct ata_link *link = qc->dev->link; 5559c6fd2807SJeff Garzik 5560c6fd2807SJeff Garzik WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5561c6fd2807SJeff Garzik WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 5562c6fd2807SJeff Garzik 5563c6fd2807SJeff Garzik if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 5564c6fd2807SJeff Garzik ata_sg_clean(qc); 5565c6fd2807SJeff Garzik 5566c6fd2807SJeff Garzik /* command should be marked inactive atomically with qc completion */ 5567da917d69STejun Heo if (qc->tf.protocol == ATA_PROT_NCQ) { 55689af5c9c9STejun Heo link->sactive &= ~(1 << qc->tag); 5569da917d69STejun Heo if (!link->sactive) 5570da917d69STejun Heo ap->nr_active_links--; 5571da917d69STejun Heo } else { 55729af5c9c9STejun Heo link->active_tag = ATA_TAG_POISON; 5573da917d69STejun Heo ap->nr_active_links--; 5574da917d69STejun Heo } 5575da917d69STejun Heo 5576da917d69STejun Heo /* clear exclusive status */ 5577da917d69STejun Heo if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 5578da917d69STejun 
Heo ap->excl_link == link)) 5579da917d69STejun Heo ap->excl_link = NULL; 5580c6fd2807SJeff Garzik 5581c6fd2807SJeff Garzik /* atapi: mark qc as inactive to prevent the interrupt handler 5582c6fd2807SJeff Garzik * from completing the command twice later, before the error handler 5583c6fd2807SJeff Garzik * is called. (when rc != 0 and atapi request sense is needed) 5584c6fd2807SJeff Garzik */ 5585c6fd2807SJeff Garzik qc->flags &= ~ATA_QCFLAG_ACTIVE; 5586c6fd2807SJeff Garzik ap->qc_active &= ~(1 << qc->tag); 5587c6fd2807SJeff Garzik 5588c6fd2807SJeff Garzik /* call completion callback */ 5589c6fd2807SJeff Garzik qc->complete_fn(qc); 5590c6fd2807SJeff Garzik } 5591c6fd2807SJeff Garzik 559239599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc) 559339599a53STejun Heo { 559439599a53STejun Heo struct ata_port *ap = qc->ap; 559539599a53STejun Heo 559639599a53STejun Heo qc->result_tf.flags = qc->tf.flags; 55974742d54fSMark Lord ap->ops->tf_read(ap, &qc->result_tf); 559839599a53STejun Heo } 559939599a53STejun Heo 5600c6fd2807SJeff Garzik /** 5601c6fd2807SJeff Garzik * ata_qc_complete - Complete an active ATA command 5602c6fd2807SJeff Garzik * @qc: Command to complete 5603c6fd2807SJeff Garzik * @err_mask: ATA Status register contents 5604c6fd2807SJeff Garzik * 5605c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA 5606c6fd2807SJeff Garzik * command has completed, with either an ok or not-ok status. 5607c6fd2807SJeff Garzik * 5608c6fd2807SJeff Garzik * LOCKING: 5609cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5610c6fd2807SJeff Garzik */ 5611c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc) 5612c6fd2807SJeff Garzik { 5613c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 5614c6fd2807SJeff Garzik 5615c6fd2807SJeff Garzik /* XXX: New EH and old EH use different mechanisms to 5616c6fd2807SJeff Garzik * synchronize EH with regular execution path. 
5617c6fd2807SJeff Garzik * 5618c6fd2807SJeff Garzik * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 5619c6fd2807SJeff Garzik * Normal execution path is responsible for not accessing a 5620c6fd2807SJeff Garzik * failed qc. libata core enforces the rule by returning NULL 5621c6fd2807SJeff Garzik * from ata_qc_from_tag() for failed qcs. 5622c6fd2807SJeff Garzik * 5623c6fd2807SJeff Garzik * Old EH depends on ata_qc_complete() nullifying completion 5624c6fd2807SJeff Garzik * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 5625c6fd2807SJeff Garzik * not synchronize with interrupt handler. Only PIO task is 5626c6fd2807SJeff Garzik * taken care of. 5627c6fd2807SJeff Garzik */ 5628c6fd2807SJeff Garzik if (ap->ops->error_handler) { 56294dbfa39bSTejun Heo struct ata_device *dev = qc->dev; 56304dbfa39bSTejun Heo struct ata_eh_info *ehi = &dev->link->eh_info; 56314dbfa39bSTejun Heo 5632c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); 5633c6fd2807SJeff Garzik 5634c6fd2807SJeff Garzik if (unlikely(qc->err_mask)) 5635c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 5636c6fd2807SJeff Garzik 5637c6fd2807SJeff Garzik if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 5638c6fd2807SJeff Garzik if (!ata_tag_internal(qc->tag)) { 5639c6fd2807SJeff Garzik /* always fill result TF for failed qc */ 564039599a53STejun Heo fill_result_tf(qc); 5641c6fd2807SJeff Garzik ata_qc_schedule_eh(qc); 5642c6fd2807SJeff Garzik return; 5643c6fd2807SJeff Garzik } 5644c6fd2807SJeff Garzik } 5645c6fd2807SJeff Garzik 5646c6fd2807SJeff Garzik /* read result TF if requested */ 5647c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_RESULT_TF) 564839599a53STejun Heo fill_result_tf(qc); 5649c6fd2807SJeff Garzik 56504dbfa39bSTejun Heo /* Some commands need post-processing after successful 56514dbfa39bSTejun Heo * completion. 
56524dbfa39bSTejun Heo */ 56534dbfa39bSTejun Heo switch (qc->tf.command) { 56544dbfa39bSTejun Heo case ATA_CMD_SET_FEATURES: 56554dbfa39bSTejun Heo if (qc->tf.feature != SETFEATURES_WC_ON && 56564dbfa39bSTejun Heo qc->tf.feature != SETFEATURES_WC_OFF) 56574dbfa39bSTejun Heo break; 56584dbfa39bSTejun Heo /* fall through */ 56594dbfa39bSTejun Heo case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 56604dbfa39bSTejun Heo case ATA_CMD_SET_MULTI: /* multi_count changed */ 56614dbfa39bSTejun Heo /* revalidate device */ 56624dbfa39bSTejun Heo ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 56634dbfa39bSTejun Heo ata_port_schedule_eh(ap); 56644dbfa39bSTejun Heo break; 5665054a5fbaSTejun Heo 5666054a5fbaSTejun Heo case ATA_CMD_SLEEP: 5667054a5fbaSTejun Heo dev->flags |= ATA_DFLAG_SLEEPING; 5668054a5fbaSTejun Heo break; 56694dbfa39bSTejun Heo } 56704dbfa39bSTejun Heo 5671c6fd2807SJeff Garzik __ata_qc_complete(qc); 5672c6fd2807SJeff Garzik } else { 5673c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5674c6fd2807SJeff Garzik return; 5675c6fd2807SJeff Garzik 5676c6fd2807SJeff Garzik /* read result TF if failed or requested */ 5677c6fd2807SJeff Garzik if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 567839599a53STejun Heo fill_result_tf(qc); 5679c6fd2807SJeff Garzik 5680c6fd2807SJeff Garzik __ata_qc_complete(qc); 5681c6fd2807SJeff Garzik } 5682c6fd2807SJeff Garzik } 5683c6fd2807SJeff Garzik 5684c6fd2807SJeff Garzik /** 5685c6fd2807SJeff Garzik * ata_qc_complete_multiple - Complete multiple qcs successfully 5686c6fd2807SJeff Garzik * @ap: port in question 5687c6fd2807SJeff Garzik * @qc_active: new qc_active mask 5688c6fd2807SJeff Garzik * @finish_qc: LLDD callback invoked before completing a qc 5689c6fd2807SJeff Garzik * 5690c6fd2807SJeff Garzik * Complete in-flight commands. This functions is meant to be 5691c6fd2807SJeff Garzik * called from low-level driver's interrupt routine to complete 5692c6fd2807SJeff Garzik * requests normally. 
ap->qc_active and @qc_active is compared 5693c6fd2807SJeff Garzik * and commands are completed accordingly. 5694c6fd2807SJeff Garzik * 5695c6fd2807SJeff Garzik * LOCKING: 5696cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5697c6fd2807SJeff Garzik * 5698c6fd2807SJeff Garzik * RETURNS: 5699c6fd2807SJeff Garzik * Number of completed commands on success, -errno otherwise. 5700c6fd2807SJeff Garzik */ 5701c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, 5702c6fd2807SJeff Garzik void (*finish_qc)(struct ata_queued_cmd *)) 5703c6fd2807SJeff Garzik { 5704c6fd2807SJeff Garzik int nr_done = 0; 5705c6fd2807SJeff Garzik u32 done_mask; 5706c6fd2807SJeff Garzik int i; 5707c6fd2807SJeff Garzik 5708c6fd2807SJeff Garzik done_mask = ap->qc_active ^ qc_active; 5709c6fd2807SJeff Garzik 5710c6fd2807SJeff Garzik if (unlikely(done_mask & qc_active)) { 5711c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " 5712c6fd2807SJeff Garzik "(%08x->%08x)\n", ap->qc_active, qc_active); 5713c6fd2807SJeff Garzik return -EINVAL; 5714c6fd2807SJeff Garzik } 5715c6fd2807SJeff Garzik 5716c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_QUEUE; i++) { 5717c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 5718c6fd2807SJeff Garzik 5719c6fd2807SJeff Garzik if (!(done_mask & (1 << i))) 5720c6fd2807SJeff Garzik continue; 5721c6fd2807SJeff Garzik 5722c6fd2807SJeff Garzik if ((qc = ata_qc_from_tag(ap, i))) { 5723c6fd2807SJeff Garzik if (finish_qc) 5724c6fd2807SJeff Garzik finish_qc(qc); 5725c6fd2807SJeff Garzik ata_qc_complete(qc); 5726c6fd2807SJeff Garzik nr_done++; 5727c6fd2807SJeff Garzik } 5728c6fd2807SJeff Garzik } 5729c6fd2807SJeff Garzik 5730c6fd2807SJeff Garzik return nr_done; 5731c6fd2807SJeff Garzik } 5732c6fd2807SJeff Garzik 5733c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 5734c6fd2807SJeff Garzik { 5735c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 5736c6fd2807SJeff Garzik 5737c6fd2807SJeff 
Garzik switch (qc->tf.protocol) { 5738c6fd2807SJeff Garzik case ATA_PROT_NCQ: 5739c6fd2807SJeff Garzik case ATA_PROT_DMA: 5740c6fd2807SJeff Garzik case ATA_PROT_ATAPI_DMA: 5741c6fd2807SJeff Garzik return 1; 5742c6fd2807SJeff Garzik 5743c6fd2807SJeff Garzik case ATA_PROT_ATAPI: 5744c6fd2807SJeff Garzik case ATA_PROT_PIO: 5745c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) 5746c6fd2807SJeff Garzik return 1; 5747c6fd2807SJeff Garzik 5748c6fd2807SJeff Garzik /* fall through */ 5749c6fd2807SJeff Garzik 5750c6fd2807SJeff Garzik default: 5751c6fd2807SJeff Garzik return 0; 5752c6fd2807SJeff Garzik } 5753c6fd2807SJeff Garzik 5754c6fd2807SJeff Garzik /* never reached */ 5755c6fd2807SJeff Garzik } 5756c6fd2807SJeff Garzik 5757c6fd2807SJeff Garzik /** 5758c6fd2807SJeff Garzik * ata_qc_issue - issue taskfile to device 5759c6fd2807SJeff Garzik * @qc: command to issue to device 5760c6fd2807SJeff Garzik * 5761c6fd2807SJeff Garzik * Prepare an ATA command to submission to device. 5762c6fd2807SJeff Garzik * This includes mapping the data into a DMA-able 5763c6fd2807SJeff Garzik * area, filling in the S/G table, and finally 5764c6fd2807SJeff Garzik * writing the taskfile to hardware, starting the command. 5765c6fd2807SJeff Garzik * 5766c6fd2807SJeff Garzik * LOCKING: 5767cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5768c6fd2807SJeff Garzik */ 5769c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc) 5770c6fd2807SJeff Garzik { 5771c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 57729af5c9c9STejun Heo struct ata_link *link = qc->dev->link; 5773c6fd2807SJeff Garzik 5774c6fd2807SJeff Garzik /* Make sure only one non-NCQ command is outstanding. The 5775c6fd2807SJeff Garzik * check is skipped for old EH because it reuses active qc to 5776c6fd2807SJeff Garzik * request ATAPI sense. 
5777c6fd2807SJeff Garzik */ 57789af5c9c9STejun Heo WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5779c6fd2807SJeff Garzik 5780c6fd2807SJeff Garzik if (qc->tf.protocol == ATA_PROT_NCQ) { 57819af5c9c9STejun Heo WARN_ON(link->sactive & (1 << qc->tag)); 5782da917d69STejun Heo 5783da917d69STejun Heo if (!link->sactive) 5784da917d69STejun Heo ap->nr_active_links++; 57859af5c9c9STejun Heo link->sactive |= 1 << qc->tag; 5786c6fd2807SJeff Garzik } else { 57879af5c9c9STejun Heo WARN_ON(link->sactive); 5788da917d69STejun Heo 5789da917d69STejun Heo ap->nr_active_links++; 57909af5c9c9STejun Heo link->active_tag = qc->tag; 5791c6fd2807SJeff Garzik } 5792c6fd2807SJeff Garzik 5793c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_ACTIVE; 5794c6fd2807SJeff Garzik ap->qc_active |= 1 << qc->tag; 5795c6fd2807SJeff Garzik 5796c6fd2807SJeff Garzik if (ata_should_dma_map(qc)) { 5797c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SG) { 5798c6fd2807SJeff Garzik if (ata_sg_setup(qc)) 5799c6fd2807SJeff Garzik goto sg_err; 5800c6fd2807SJeff Garzik } else if (qc->flags & ATA_QCFLAG_SINGLE) { 5801c6fd2807SJeff Garzik if (ata_sg_setup_one(qc)) 5802c6fd2807SJeff Garzik goto sg_err; 5803c6fd2807SJeff Garzik } 5804c6fd2807SJeff Garzik } else { 5805c6fd2807SJeff Garzik qc->flags &= ~ATA_QCFLAG_DMAMAP; 5806c6fd2807SJeff Garzik } 5807c6fd2807SJeff Garzik 5808054a5fbaSTejun Heo /* if device is sleeping, schedule softreset and abort the link */ 5809054a5fbaSTejun Heo if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5810054a5fbaSTejun Heo link->eh_info.action |= ATA_EH_SOFTRESET; 5811054a5fbaSTejun Heo ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5812054a5fbaSTejun Heo ata_link_abort(link); 5813054a5fbaSTejun Heo return; 5814054a5fbaSTejun Heo } 5815054a5fbaSTejun Heo 5816c6fd2807SJeff Garzik ap->ops->qc_prep(qc); 5817c6fd2807SJeff Garzik 5818c6fd2807SJeff Garzik qc->err_mask |= ap->ops->qc_issue(qc); 5819c6fd2807SJeff Garzik if (unlikely(qc->err_mask)) 5820c6fd2807SJeff 
Garzik goto err; 5821c6fd2807SJeff Garzik return; 5822c6fd2807SJeff Garzik 5823c6fd2807SJeff Garzik sg_err: 5824c6fd2807SJeff Garzik qc->flags &= ~ATA_QCFLAG_DMAMAP; 5825c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_SYSTEM; 5826c6fd2807SJeff Garzik err: 5827c6fd2807SJeff Garzik ata_qc_complete(qc); 5828c6fd2807SJeff Garzik } 5829c6fd2807SJeff Garzik 5830c6fd2807SJeff Garzik /** 5831c6fd2807SJeff Garzik * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner 5832c6fd2807SJeff Garzik * @qc: command to issue to device 5833c6fd2807SJeff Garzik * 5834c6fd2807SJeff Garzik * Using various libata functions and hooks, this function 5835c6fd2807SJeff Garzik * starts an ATA command. ATA commands are grouped into 5836c6fd2807SJeff Garzik * classes called "protocols", and issuing each type of protocol 5837c6fd2807SJeff Garzik * is slightly different. 5838c6fd2807SJeff Garzik * 5839c6fd2807SJeff Garzik * May be used as the qc_issue() entry in ata_port_operations. 5840c6fd2807SJeff Garzik * 5841c6fd2807SJeff Garzik * LOCKING: 5842cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5843c6fd2807SJeff Garzik * 5844c6fd2807SJeff Garzik * RETURNS: 5845c6fd2807SJeff Garzik * Zero on success, AC_ERR_* mask on failure 5846c6fd2807SJeff Garzik */ 5847c6fd2807SJeff Garzik 5848c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) 5849c6fd2807SJeff Garzik { 5850c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 5851c6fd2807SJeff Garzik 5852c6fd2807SJeff Garzik /* Use polling pio if the LLD doesn't handle 5853c6fd2807SJeff Garzik * interrupt driven pio and atapi CDB interrupt. 
5854c6fd2807SJeff Garzik */ 5855c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_POLLING) { 5856c6fd2807SJeff Garzik switch (qc->tf.protocol) { 5857c6fd2807SJeff Garzik case ATA_PROT_PIO: 5858e3472cbeSAlbert Lee case ATA_PROT_NODATA: 5859c6fd2807SJeff Garzik case ATA_PROT_ATAPI: 5860c6fd2807SJeff Garzik case ATA_PROT_ATAPI_NODATA: 5861c6fd2807SJeff Garzik qc->tf.flags |= ATA_TFLAG_POLLING; 5862c6fd2807SJeff Garzik break; 5863c6fd2807SJeff Garzik case ATA_PROT_ATAPI_DMA: 5864c6fd2807SJeff Garzik if (qc->dev->flags & ATA_DFLAG_CDB_INTR) 5865c6fd2807SJeff Garzik /* see ata_dma_blacklisted() */ 5866c6fd2807SJeff Garzik BUG(); 5867c6fd2807SJeff Garzik break; 5868c6fd2807SJeff Garzik default: 5869c6fd2807SJeff Garzik break; 5870c6fd2807SJeff Garzik } 5871c6fd2807SJeff Garzik } 5872c6fd2807SJeff Garzik 5873c6fd2807SJeff Garzik /* select the device */ 5874c6fd2807SJeff Garzik ata_dev_select(ap, qc->dev->devno, 1, 0); 5875c6fd2807SJeff Garzik 5876c6fd2807SJeff Garzik /* start the command */ 5877c6fd2807SJeff Garzik switch (qc->tf.protocol) { 5878c6fd2807SJeff Garzik case ATA_PROT_NODATA: 5879c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_POLLING) 5880c6fd2807SJeff Garzik ata_qc_set_polling(qc); 5881c6fd2807SJeff Garzik 5882c6fd2807SJeff Garzik ata_tf_to_host(ap, &qc->tf); 5883c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_LAST; 5884c6fd2807SJeff Garzik 5885c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_POLLING) 5886c6fd2807SJeff Garzik ata_port_queue_task(ap, ata_pio_task, qc, 0); 5887c6fd2807SJeff Garzik 5888c6fd2807SJeff Garzik break; 5889c6fd2807SJeff Garzik 5890c6fd2807SJeff Garzik case ATA_PROT_DMA: 5891c6fd2807SJeff Garzik WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); 5892c6fd2807SJeff Garzik 5893c6fd2807SJeff Garzik ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 5894c6fd2807SJeff Garzik ap->ops->bmdma_setup(qc); /* set up bmdma */ 5895c6fd2807SJeff Garzik ap->ops->bmdma_start(qc); /* initiate bmdma */ 5896c6fd2807SJeff Garzik ap->hsm_task_state = 
HSM_ST_LAST; 5897c6fd2807SJeff Garzik break; 5898c6fd2807SJeff Garzik 5899c6fd2807SJeff Garzik case ATA_PROT_PIO: 5900c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_POLLING) 5901c6fd2807SJeff Garzik ata_qc_set_polling(qc); 5902c6fd2807SJeff Garzik 5903c6fd2807SJeff Garzik ata_tf_to_host(ap, &qc->tf); 5904c6fd2807SJeff Garzik 5905c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_WRITE) { 5906c6fd2807SJeff Garzik /* PIO data out protocol */ 5907c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_FIRST; 5908c6fd2807SJeff Garzik ata_port_queue_task(ap, ata_pio_task, qc, 0); 5909c6fd2807SJeff Garzik 5910c6fd2807SJeff Garzik /* always send first data block using 5911c6fd2807SJeff Garzik * the ata_pio_task() codepath. 5912c6fd2807SJeff Garzik */ 5913c6fd2807SJeff Garzik } else { 5914c6fd2807SJeff Garzik /* PIO data in protocol */ 5915c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST; 5916c6fd2807SJeff Garzik 5917c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_POLLING) 5918c6fd2807SJeff Garzik ata_port_queue_task(ap, ata_pio_task, qc, 0); 5919c6fd2807SJeff Garzik 5920c6fd2807SJeff Garzik /* if polling, ata_pio_task() handles the rest. 5921c6fd2807SJeff Garzik * otherwise, interrupt handler takes over from here. 
5922c6fd2807SJeff Garzik */ 5923c6fd2807SJeff Garzik } 5924c6fd2807SJeff Garzik 5925c6fd2807SJeff Garzik break; 5926c6fd2807SJeff Garzik 5927c6fd2807SJeff Garzik case ATA_PROT_ATAPI: 5928c6fd2807SJeff Garzik case ATA_PROT_ATAPI_NODATA: 5929c6fd2807SJeff Garzik if (qc->tf.flags & ATA_TFLAG_POLLING) 5930c6fd2807SJeff Garzik ata_qc_set_polling(qc); 5931c6fd2807SJeff Garzik 5932c6fd2807SJeff Garzik ata_tf_to_host(ap, &qc->tf); 5933c6fd2807SJeff Garzik 5934c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_FIRST; 5935c6fd2807SJeff Garzik 5936c6fd2807SJeff Garzik /* send cdb by polling if no cdb interrupt */ 5937c6fd2807SJeff Garzik if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 5938c6fd2807SJeff Garzik (qc->tf.flags & ATA_TFLAG_POLLING)) 5939c6fd2807SJeff Garzik ata_port_queue_task(ap, ata_pio_task, qc, 0); 5940c6fd2807SJeff Garzik break; 5941c6fd2807SJeff Garzik 5942c6fd2807SJeff Garzik case ATA_PROT_ATAPI_DMA: 5943c6fd2807SJeff Garzik WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); 5944c6fd2807SJeff Garzik 5945c6fd2807SJeff Garzik ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 5946c6fd2807SJeff Garzik ap->ops->bmdma_setup(qc); /* set up bmdma */ 5947c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_FIRST; 5948c6fd2807SJeff Garzik 5949c6fd2807SJeff Garzik /* send cdb by polling if no cdb interrupt */ 5950c6fd2807SJeff Garzik if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 5951c6fd2807SJeff Garzik ata_port_queue_task(ap, ata_pio_task, qc, 0); 5952c6fd2807SJeff Garzik break; 5953c6fd2807SJeff Garzik 5954c6fd2807SJeff Garzik default: 5955c6fd2807SJeff Garzik WARN_ON(1); 5956c6fd2807SJeff Garzik return AC_ERR_SYSTEM; 5957c6fd2807SJeff Garzik } 5958c6fd2807SJeff Garzik 5959c6fd2807SJeff Garzik return 0; 5960c6fd2807SJeff Garzik } 5961c6fd2807SJeff Garzik 5962c6fd2807SJeff Garzik /** 5963c6fd2807SJeff Garzik * ata_host_intr - Handle host interrupt for given (port, task) 5964c6fd2807SJeff Garzik * @ap: Port on which interrupt arrived (possibly...) 
5965c6fd2807SJeff Garzik * @qc: Taskfile currently active in engine 5966c6fd2807SJeff Garzik * 5967c6fd2807SJeff Garzik * Handle host interrupt for given queued command. Currently, 5968c6fd2807SJeff Garzik * only DMA interrupts are handled. All other commands are 5969c6fd2807SJeff Garzik * handled via polling with interrupts disabled (nIEN bit). 5970c6fd2807SJeff Garzik * 5971c6fd2807SJeff Garzik * LOCKING: 5972cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 5973c6fd2807SJeff Garzik * 5974c6fd2807SJeff Garzik * RETURNS: 5975c6fd2807SJeff Garzik * One if interrupt was handled, zero if not (shared irq). 5976c6fd2807SJeff Garzik */ 5977c6fd2807SJeff Garzik 5978c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap, 5979c6fd2807SJeff Garzik struct ata_queued_cmd *qc) 5980c6fd2807SJeff Garzik { 59819af5c9c9STejun Heo struct ata_eh_info *ehi = &ap->link.eh_info; 5982c6fd2807SJeff Garzik u8 status, host_stat = 0; 5983c6fd2807SJeff Garzik 5984c6fd2807SJeff Garzik VPRINTK("ata%u: protocol %d task_state %d\n", 598544877b4eSTejun Heo ap->print_id, qc->tf.protocol, ap->hsm_task_state); 5986c6fd2807SJeff Garzik 5987c6fd2807SJeff Garzik /* Check whether we are expecting interrupt in this state */ 5988c6fd2807SJeff Garzik switch (ap->hsm_task_state) { 5989c6fd2807SJeff Garzik case HSM_ST_FIRST: 5990c6fd2807SJeff Garzik /* Some pre-ATAPI-4 devices assert INTRQ 5991c6fd2807SJeff Garzik * at this state when ready to receive CDB. 5992c6fd2807SJeff Garzik */ 5993c6fd2807SJeff Garzik 5994c6fd2807SJeff Garzik /* Check the ATA_DFLAG_CDB_INTR flag is enough here. 5995c6fd2807SJeff Garzik * The flag was turned on only for atapi devices. 5996c6fd2807SJeff Garzik * No need to check is_atapi_taskfile(&qc->tf) again. 
5997c6fd2807SJeff Garzik */ 5998c6fd2807SJeff Garzik if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 5999c6fd2807SJeff Garzik goto idle_irq; 6000c6fd2807SJeff Garzik break; 6001c6fd2807SJeff Garzik case HSM_ST_LAST: 6002c6fd2807SJeff Garzik if (qc->tf.protocol == ATA_PROT_DMA || 6003c6fd2807SJeff Garzik qc->tf.protocol == ATA_PROT_ATAPI_DMA) { 6004c6fd2807SJeff Garzik /* check status of DMA engine */ 6005c6fd2807SJeff Garzik host_stat = ap->ops->bmdma_status(ap); 600644877b4eSTejun Heo VPRINTK("ata%u: host_stat 0x%X\n", 600744877b4eSTejun Heo ap->print_id, host_stat); 6008c6fd2807SJeff Garzik 6009c6fd2807SJeff Garzik /* if it's not our irq... */ 6010c6fd2807SJeff Garzik if (!(host_stat & ATA_DMA_INTR)) 6011c6fd2807SJeff Garzik goto idle_irq; 6012c6fd2807SJeff Garzik 6013c6fd2807SJeff Garzik /* before we do anything else, clear DMA-Start bit */ 6014c6fd2807SJeff Garzik ap->ops->bmdma_stop(qc); 6015c6fd2807SJeff Garzik 6016c6fd2807SJeff Garzik if (unlikely(host_stat & ATA_DMA_ERR)) { 6017c6fd2807SJeff Garzik /* error when transfering data to/from memory */ 6018c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HOST_BUS; 6019c6fd2807SJeff Garzik ap->hsm_task_state = HSM_ST_ERR; 6020c6fd2807SJeff Garzik } 6021c6fd2807SJeff Garzik } 6022c6fd2807SJeff Garzik break; 6023c6fd2807SJeff Garzik case HSM_ST: 6024c6fd2807SJeff Garzik break; 6025c6fd2807SJeff Garzik default: 6026c6fd2807SJeff Garzik goto idle_irq; 6027c6fd2807SJeff Garzik } 6028c6fd2807SJeff Garzik 6029c6fd2807SJeff Garzik /* check altstatus */ 6030c6fd2807SJeff Garzik status = ata_altstatus(ap); 6031c6fd2807SJeff Garzik if (status & ATA_BUSY) 6032c6fd2807SJeff Garzik goto idle_irq; 6033c6fd2807SJeff Garzik 6034c6fd2807SJeff Garzik /* check main status, clearing INTRQ */ 6035c6fd2807SJeff Garzik status = ata_chk_status(ap); 6036c6fd2807SJeff Garzik if (unlikely(status & ATA_BUSY)) 6037c6fd2807SJeff Garzik goto idle_irq; 6038c6fd2807SJeff Garzik 6039c6fd2807SJeff Garzik /* ack bmdma irq events */ 6040c6fd2807SJeff Garzik 
ap->ops->irq_clear(ap); 6041c6fd2807SJeff Garzik 6042c6fd2807SJeff Garzik ata_hsm_move(ap, qc, status, 0); 6043ea54763fSTejun Heo 6044ea54763fSTejun Heo if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || 6045ea54763fSTejun Heo qc->tf.protocol == ATA_PROT_ATAPI_DMA)) 6046ea54763fSTejun Heo ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); 6047ea54763fSTejun Heo 6048c6fd2807SJeff Garzik return 1; /* irq handled */ 6049c6fd2807SJeff Garzik 6050c6fd2807SJeff Garzik idle_irq: 6051c6fd2807SJeff Garzik ap->stats.idle_irq++; 6052c6fd2807SJeff Garzik 6053c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP 6054c6fd2807SJeff Garzik if ((ap->stats.idle_irq % 1000) == 0) { 60556d32d30fSJeff Garzik ata_chk_status(ap); 60566d32d30fSJeff Garzik ap->ops->irq_clear(ap); 6057c6fd2807SJeff Garzik ata_port_printk(ap, KERN_WARNING, "irq trap\n"); 6058c6fd2807SJeff Garzik return 1; 6059c6fd2807SJeff Garzik } 6060c6fd2807SJeff Garzik #endif 6061c6fd2807SJeff Garzik return 0; /* irq not handled */ 6062c6fd2807SJeff Garzik } 6063c6fd2807SJeff Garzik 6064c6fd2807SJeff Garzik /** 6065c6fd2807SJeff Garzik * ata_interrupt - Default ATA host interrupt handler 6066c6fd2807SJeff Garzik * @irq: irq line (unused) 6067cca3974eSJeff Garzik * @dev_instance: pointer to our ata_host information structure 6068c6fd2807SJeff Garzik * 6069c6fd2807SJeff Garzik * Default interrupt handler for PCI IDE devices. Calls 6070c6fd2807SJeff Garzik * ata_host_intr() for each port that is not disabled. 6071c6fd2807SJeff Garzik * 6072c6fd2807SJeff Garzik * LOCKING: 6073cca3974eSJeff Garzik * Obtains host lock during operation. 6074c6fd2807SJeff Garzik * 6075c6fd2807SJeff Garzik * RETURNS: 6076c6fd2807SJeff Garzik * IRQ_NONE or IRQ_HANDLED. 
6077c6fd2807SJeff Garzik */ 6078c6fd2807SJeff Garzik 60797d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance) 6080c6fd2807SJeff Garzik { 6081cca3974eSJeff Garzik struct ata_host *host = dev_instance; 6082c6fd2807SJeff Garzik unsigned int i; 6083c6fd2807SJeff Garzik unsigned int handled = 0; 6084c6fd2807SJeff Garzik unsigned long flags; 6085c6fd2807SJeff Garzik 6086c6fd2807SJeff Garzik /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 6087cca3974eSJeff Garzik spin_lock_irqsave(&host->lock, flags); 6088c6fd2807SJeff Garzik 6089cca3974eSJeff Garzik for (i = 0; i < host->n_ports; i++) { 6090c6fd2807SJeff Garzik struct ata_port *ap; 6091c6fd2807SJeff Garzik 6092cca3974eSJeff Garzik ap = host->ports[i]; 6093c6fd2807SJeff Garzik if (ap && 6094c6fd2807SJeff Garzik !(ap->flags & ATA_FLAG_DISABLED)) { 6095c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 6096c6fd2807SJeff Garzik 60979af5c9c9STejun Heo qc = ata_qc_from_tag(ap, ap->link.active_tag); 6098c6fd2807SJeff Garzik if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 6099c6fd2807SJeff Garzik (qc->flags & ATA_QCFLAG_ACTIVE)) 6100c6fd2807SJeff Garzik handled |= ata_host_intr(ap, qc); 6101c6fd2807SJeff Garzik } 6102c6fd2807SJeff Garzik } 6103c6fd2807SJeff Garzik 6104cca3974eSJeff Garzik spin_unlock_irqrestore(&host->lock, flags); 6105c6fd2807SJeff Garzik 6106c6fd2807SJeff Garzik return IRQ_RETVAL(handled); 6107c6fd2807SJeff Garzik } 6108c6fd2807SJeff Garzik 6109c6fd2807SJeff Garzik /** 6110c6fd2807SJeff Garzik * sata_scr_valid - test whether SCRs are accessible 6111936fd732STejun Heo * @link: ATA link to test SCR accessibility for 6112c6fd2807SJeff Garzik * 6113936fd732STejun Heo * Test whether SCRs are accessible for @link. 6114c6fd2807SJeff Garzik * 6115c6fd2807SJeff Garzik * LOCKING: 6116c6fd2807SJeff Garzik * None. 6117c6fd2807SJeff Garzik * 6118c6fd2807SJeff Garzik * RETURNS: 6119c6fd2807SJeff Garzik * 1 if SCRs are accessible, 0 otherwise. 
6120c6fd2807SJeff Garzik */ 6121936fd732STejun Heo int sata_scr_valid(struct ata_link *link) 6122c6fd2807SJeff Garzik { 6123936fd732STejun Heo struct ata_port *ap = link->ap; 6124936fd732STejun Heo 6125a16abc0bSTejun Heo return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 6126c6fd2807SJeff Garzik } 6127c6fd2807SJeff Garzik 6128c6fd2807SJeff Garzik /** 6129c6fd2807SJeff Garzik * sata_scr_read - read SCR register of the specified port 6130936fd732STejun Heo * @link: ATA link to read SCR for 6131c6fd2807SJeff Garzik * @reg: SCR to read 6132c6fd2807SJeff Garzik * @val: Place to store read value 6133c6fd2807SJeff Garzik * 6134936fd732STejun Heo * Read SCR register @reg of @link into *@val. This function is 6135633273a3STejun Heo * guaranteed to succeed if @link is ap->link, the cable type of 6136633273a3STejun Heo * the port is SATA and the port implements ->scr_read. 6137c6fd2807SJeff Garzik * 6138c6fd2807SJeff Garzik * LOCKING: 6139633273a3STejun Heo * None if @link is ap->link. Kernel thread context otherwise. 6140c6fd2807SJeff Garzik * 6141c6fd2807SJeff Garzik * RETURNS: 6142c6fd2807SJeff Garzik * 0 on success, negative errno on failure. 
6143c6fd2807SJeff Garzik */ 6144936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val) 6145c6fd2807SJeff Garzik { 6146633273a3STejun Heo if (ata_is_host_link(link)) { 6147936fd732STejun Heo struct ata_port *ap = link->ap; 6148936fd732STejun Heo 6149936fd732STejun Heo if (sata_scr_valid(link)) 6150da3dbb17STejun Heo return ap->ops->scr_read(ap, reg, val); 6151c6fd2807SJeff Garzik return -EOPNOTSUPP; 6152c6fd2807SJeff Garzik } 6153c6fd2807SJeff Garzik 6154633273a3STejun Heo return sata_pmp_scr_read(link, reg, val); 6155633273a3STejun Heo } 6156633273a3STejun Heo 6157c6fd2807SJeff Garzik /** 6158c6fd2807SJeff Garzik * sata_scr_write - write SCR register of the specified port 6159936fd732STejun Heo * @link: ATA link to write SCR for 6160c6fd2807SJeff Garzik * @reg: SCR to write 6161c6fd2807SJeff Garzik * @val: value to write 6162c6fd2807SJeff Garzik * 6163936fd732STejun Heo * Write @val to SCR register @reg of @link. This function is 6164633273a3STejun Heo * guaranteed to succeed if @link is ap->link, the cable type of 6165633273a3STejun Heo * the port is SATA and the port implements ->scr_read. 6166c6fd2807SJeff Garzik * 6167c6fd2807SJeff Garzik * LOCKING: 6168633273a3STejun Heo * None if @link is ap->link. Kernel thread context otherwise. 6169c6fd2807SJeff Garzik * 6170c6fd2807SJeff Garzik * RETURNS: 6171c6fd2807SJeff Garzik * 0 on success, negative errno on failure. 
6172c6fd2807SJeff Garzik */ 6173936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val) 6174c6fd2807SJeff Garzik { 6175633273a3STejun Heo if (ata_is_host_link(link)) { 6176936fd732STejun Heo struct ata_port *ap = link->ap; 6177936fd732STejun Heo 6178936fd732STejun Heo if (sata_scr_valid(link)) 6179da3dbb17STejun Heo return ap->ops->scr_write(ap, reg, val); 6180c6fd2807SJeff Garzik return -EOPNOTSUPP; 6181c6fd2807SJeff Garzik } 6182c6fd2807SJeff Garzik 6183633273a3STejun Heo return sata_pmp_scr_write(link, reg, val); 6184633273a3STejun Heo } 6185633273a3STejun Heo 6186c6fd2807SJeff Garzik /** 6187c6fd2807SJeff Garzik * sata_scr_write_flush - write SCR register of the specified port and flush 6188936fd732STejun Heo * @link: ATA link to write SCR for 6189c6fd2807SJeff Garzik * @reg: SCR to write 6190c6fd2807SJeff Garzik * @val: value to write 6191c6fd2807SJeff Garzik * 6192c6fd2807SJeff Garzik * This function is identical to sata_scr_write() except that this 6193c6fd2807SJeff Garzik * function performs flush after writing to the register. 6194c6fd2807SJeff Garzik * 6195c6fd2807SJeff Garzik * LOCKING: 6196633273a3STejun Heo * None if @link is ap->link. Kernel thread context otherwise. 6197c6fd2807SJeff Garzik * 6198c6fd2807SJeff Garzik * RETURNS: 6199c6fd2807SJeff Garzik * 0 on success, negative errno on failure. 
6200c6fd2807SJeff Garzik */ 6201936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 6202c6fd2807SJeff Garzik { 6203633273a3STejun Heo if (ata_is_host_link(link)) { 6204936fd732STejun Heo struct ata_port *ap = link->ap; 6205da3dbb17STejun Heo int rc; 6206da3dbb17STejun Heo 6207936fd732STejun Heo if (sata_scr_valid(link)) { 6208da3dbb17STejun Heo rc = ap->ops->scr_write(ap, reg, val); 6209da3dbb17STejun Heo if (rc == 0) 6210da3dbb17STejun Heo rc = ap->ops->scr_read(ap, reg, &val); 6211da3dbb17STejun Heo return rc; 6212c6fd2807SJeff Garzik } 6213c6fd2807SJeff Garzik return -EOPNOTSUPP; 6214c6fd2807SJeff Garzik } 6215c6fd2807SJeff Garzik 6216633273a3STejun Heo return sata_pmp_scr_write(link, reg, val); 6217633273a3STejun Heo } 6218633273a3STejun Heo 6219c6fd2807SJeff Garzik /** 6220936fd732STejun Heo * ata_link_online - test whether the given link is online 6221936fd732STejun Heo * @link: ATA link to test 6222c6fd2807SJeff Garzik * 6223936fd732STejun Heo * Test whether @link is online. Note that this function returns 6224936fd732STejun Heo * 0 if online status of @link cannot be obtained, so 6225936fd732STejun Heo * ata_link_online(link) != !ata_link_offline(link). 6226c6fd2807SJeff Garzik * 6227c6fd2807SJeff Garzik * LOCKING: 6228c6fd2807SJeff Garzik * None. 6229c6fd2807SJeff Garzik * 6230c6fd2807SJeff Garzik * RETURNS: 6231c6fd2807SJeff Garzik * 1 if the port online status is available and online. 
6232c6fd2807SJeff Garzik */ 6233936fd732STejun Heo int ata_link_online(struct ata_link *link) 6234c6fd2807SJeff Garzik { 6235c6fd2807SJeff Garzik u32 sstatus; 6236c6fd2807SJeff Garzik 6237936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 6238936fd732STejun Heo (sstatus & 0xf) == 0x3) 6239c6fd2807SJeff Garzik return 1; 6240c6fd2807SJeff Garzik return 0; 6241c6fd2807SJeff Garzik } 6242c6fd2807SJeff Garzik 6243c6fd2807SJeff Garzik /** 6244936fd732STejun Heo * ata_link_offline - test whether the given link is offline 6245936fd732STejun Heo * @link: ATA link to test 6246c6fd2807SJeff Garzik * 6247936fd732STejun Heo * Test whether @link is offline. Note that this function 6248936fd732STejun Heo * returns 0 if offline status of @link cannot be obtained, so 6249936fd732STejun Heo * ata_link_online(link) != !ata_link_offline(link). 6250c6fd2807SJeff Garzik * 6251c6fd2807SJeff Garzik * LOCKING: 6252c6fd2807SJeff Garzik * None. 6253c6fd2807SJeff Garzik * 6254c6fd2807SJeff Garzik * RETURNS: 6255c6fd2807SJeff Garzik * 1 if the port offline status is available and offline. 
6256c6fd2807SJeff Garzik */ 6257936fd732STejun Heo int ata_link_offline(struct ata_link *link) 6258c6fd2807SJeff Garzik { 6259c6fd2807SJeff Garzik u32 sstatus; 6260c6fd2807SJeff Garzik 6261936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 6262936fd732STejun Heo (sstatus & 0xf) != 0x3) 6263c6fd2807SJeff Garzik return 1; 6264c6fd2807SJeff Garzik return 0; 6265c6fd2807SJeff Garzik } 6266c6fd2807SJeff Garzik 6267c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev) 6268c6fd2807SJeff Garzik { 6269c6fd2807SJeff Garzik unsigned int err_mask; 6270c6fd2807SJeff Garzik u8 cmd; 6271c6fd2807SJeff Garzik 6272c6fd2807SJeff Garzik if (!ata_try_flush_cache(dev)) 6273c6fd2807SJeff Garzik return 0; 6274c6fd2807SJeff Garzik 62756fc49adbSTejun Heo if (dev->flags & ATA_DFLAG_FLUSH_EXT) 6276c6fd2807SJeff Garzik cmd = ATA_CMD_FLUSH_EXT; 6277c6fd2807SJeff Garzik else 6278c6fd2807SJeff Garzik cmd = ATA_CMD_FLUSH; 6279c6fd2807SJeff Garzik 62804f34337bSAlan Cox /* This is wrong. On a failed flush we get back the LBA of the lost 62814f34337bSAlan Cox sector and we should (assuming it wasn't aborted as unknown) issue 62824f34337bSAlan Cox a further flush command to continue the writeback until it 62834f34337bSAlan Cox does not error */ 6284c6fd2807SJeff Garzik err_mask = ata_do_simple_cmd(dev, cmd); 6285c6fd2807SJeff Garzik if (err_mask) { 6286c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); 6287c6fd2807SJeff Garzik return -EIO; 6288c6fd2807SJeff Garzik } 6289c6fd2807SJeff Garzik 6290c6fd2807SJeff Garzik return 0; 6291c6fd2807SJeff Garzik } 6292c6fd2807SJeff Garzik 62936ffa01d8STejun Heo #ifdef CONFIG_PM 6294cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 6295cca3974eSJeff Garzik unsigned int action, unsigned int ehi_flags, 6296cca3974eSJeff Garzik int wait) 6297c6fd2807SJeff Garzik { 6298c6fd2807SJeff Garzik unsigned long flags; 6299c6fd2807SJeff Garzik int i, rc; 6300c6fd2807SJeff 
Garzik 6301cca3974eSJeff Garzik for (i = 0; i < host->n_ports; i++) { 6302cca3974eSJeff Garzik struct ata_port *ap = host->ports[i]; 6303e3667ebfSTejun Heo struct ata_link *link; 6304c6fd2807SJeff Garzik 6305c6fd2807SJeff Garzik /* Previous resume operation might still be in 6306c6fd2807SJeff Garzik * progress. Wait for PM_PENDING to clear. 6307c6fd2807SJeff Garzik */ 6308c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_PM_PENDING) { 6309c6fd2807SJeff Garzik ata_port_wait_eh(ap); 6310c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 6311c6fd2807SJeff Garzik } 6312c6fd2807SJeff Garzik 6313c6fd2807SJeff Garzik /* request PM ops to EH */ 6314c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6315c6fd2807SJeff Garzik 6316c6fd2807SJeff Garzik ap->pm_mesg = mesg; 6317c6fd2807SJeff Garzik if (wait) { 6318c6fd2807SJeff Garzik rc = 0; 6319c6fd2807SJeff Garzik ap->pm_result = &rc; 6320c6fd2807SJeff Garzik } 6321c6fd2807SJeff Garzik 6322c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_PM_PENDING; 6323e3667ebfSTejun Heo __ata_port_for_each_link(link, ap) { 6324e3667ebfSTejun Heo link->eh_info.action |= action; 6325e3667ebfSTejun Heo link->eh_info.flags |= ehi_flags; 6326e3667ebfSTejun Heo } 6327c6fd2807SJeff Garzik 6328c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 6329c6fd2807SJeff Garzik 6330c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 6331c6fd2807SJeff Garzik 6332c6fd2807SJeff Garzik /* wait and check result */ 6333c6fd2807SJeff Garzik if (wait) { 6334c6fd2807SJeff Garzik ata_port_wait_eh(ap); 6335c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 6336c6fd2807SJeff Garzik if (rc) 6337c6fd2807SJeff Garzik return rc; 6338c6fd2807SJeff Garzik } 6339c6fd2807SJeff Garzik } 6340c6fd2807SJeff Garzik 6341c6fd2807SJeff Garzik return 0; 6342c6fd2807SJeff Garzik } 6343c6fd2807SJeff Garzik 6344c6fd2807SJeff Garzik /** 6345cca3974eSJeff Garzik * ata_host_suspend - suspend host 6346cca3974eSJeff Garzik * @host: host to suspend 
6347c6fd2807SJeff Garzik * @mesg: PM message 6348c6fd2807SJeff Garzik * 6349cca3974eSJeff Garzik * Suspend @host. Actual operation is performed by EH. This 6350c6fd2807SJeff Garzik * function requests EH to perform PM operations and waits for EH 6351c6fd2807SJeff Garzik * to finish. 6352c6fd2807SJeff Garzik * 6353c6fd2807SJeff Garzik * LOCKING: 6354c6fd2807SJeff Garzik * Kernel thread context (may sleep). 6355c6fd2807SJeff Garzik * 6356c6fd2807SJeff Garzik * RETURNS: 6357c6fd2807SJeff Garzik * 0 on success, -errno on failure. 6358c6fd2807SJeff Garzik */ 6359cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 6360c6fd2807SJeff Garzik { 63619666f400STejun Heo int rc; 6362c6fd2807SJeff Garzik 6363cca3974eSJeff Garzik rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 63649666f400STejun Heo if (rc == 0) 6365cca3974eSJeff Garzik host->dev->power.power_state = mesg; 6366c6fd2807SJeff Garzik return rc; 6367c6fd2807SJeff Garzik } 6368c6fd2807SJeff Garzik 6369c6fd2807SJeff Garzik /** 6370cca3974eSJeff Garzik * ata_host_resume - resume host 6371cca3974eSJeff Garzik * @host: host to resume 6372c6fd2807SJeff Garzik * 6373cca3974eSJeff Garzik * Resume @host. Actual operation is performed by EH. This 6374c6fd2807SJeff Garzik * function requests EH to perform PM operations and returns. 6375c6fd2807SJeff Garzik * Note that all resume operations are performed parallely. 6376c6fd2807SJeff Garzik * 6377c6fd2807SJeff Garzik * LOCKING: 6378c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
6379c6fd2807SJeff Garzik */ 6380cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host) 6381c6fd2807SJeff Garzik { 6382cca3974eSJeff Garzik ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, 6383c6fd2807SJeff Garzik ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 6384cca3974eSJeff Garzik host->dev->power.power_state = PMSG_ON; 6385c6fd2807SJeff Garzik } 63866ffa01d8STejun Heo #endif 6387c6fd2807SJeff Garzik 6388c6fd2807SJeff Garzik /** 6389c6fd2807SJeff Garzik * ata_port_start - Set port up for dma. 6390c6fd2807SJeff Garzik * @ap: Port to initialize 6391c6fd2807SJeff Garzik * 6392c6fd2807SJeff Garzik * Called just after data structures for each port are 6393c6fd2807SJeff Garzik * initialized. Allocates space for PRD table. 6394c6fd2807SJeff Garzik * 6395c6fd2807SJeff Garzik * May be used as the port_start() entry in ata_port_operations. 6396c6fd2807SJeff Garzik * 6397c6fd2807SJeff Garzik * LOCKING: 6398c6fd2807SJeff Garzik * Inherited from caller. 6399c6fd2807SJeff Garzik */ 6400c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap) 6401c6fd2807SJeff Garzik { 6402c6fd2807SJeff Garzik struct device *dev = ap->dev; 6403c6fd2807SJeff Garzik int rc; 6404c6fd2807SJeff Garzik 6405f0d36efdSTejun Heo ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, 6406f0d36efdSTejun Heo GFP_KERNEL); 6407c6fd2807SJeff Garzik if (!ap->prd) 6408c6fd2807SJeff Garzik return -ENOMEM; 6409c6fd2807SJeff Garzik 6410c6fd2807SJeff Garzik rc = ata_pad_alloc(ap, dev); 6411f0d36efdSTejun Heo if (rc) 6412c6fd2807SJeff Garzik return rc; 6413c6fd2807SJeff Garzik 6414f0d36efdSTejun Heo DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, 6415f0d36efdSTejun Heo (unsigned long long)ap->prd_dma); 6416c6fd2807SJeff Garzik return 0; 6417c6fd2807SJeff Garzik } 6418c6fd2807SJeff Garzik 6419c6fd2807SJeff Garzik /** 6420c6fd2807SJeff Garzik * ata_dev_init - Initialize an ata_device structure 6421c6fd2807SJeff Garzik * @dev: Device structure to initialize 6422c6fd2807SJeff Garzik * 
6423c6fd2807SJeff Garzik * Initialize @dev in preparation for probing. 6424c6fd2807SJeff Garzik * 6425c6fd2807SJeff Garzik * LOCKING: 6426c6fd2807SJeff Garzik * Inherited from caller. 6427c6fd2807SJeff Garzik */ 6428c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev) 6429c6fd2807SJeff Garzik { 64309af5c9c9STejun Heo struct ata_link *link = dev->link; 64319af5c9c9STejun Heo struct ata_port *ap = link->ap; 6432c6fd2807SJeff Garzik unsigned long flags; 6433c6fd2807SJeff Garzik 6434c6fd2807SJeff Garzik /* SATA spd limit is bound to the first device */ 64359af5c9c9STejun Heo link->sata_spd_limit = link->hw_sata_spd_limit; 64369af5c9c9STejun Heo link->sata_spd = 0; 6437c6fd2807SJeff Garzik 6438c6fd2807SJeff Garzik /* High bits of dev->flags are used to record warm plug 6439c6fd2807SJeff Garzik * requests which occur asynchronously. Synchronize using 6440cca3974eSJeff Garzik * host lock. 6441c6fd2807SJeff Garzik */ 6442c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6443c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_INIT_MASK; 64443dcc323fSTejun Heo dev->horkage = 0; 6445c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 6446c6fd2807SJeff Garzik 6447c6fd2807SJeff Garzik memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0, 6448c6fd2807SJeff Garzik sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET); 6449c6fd2807SJeff Garzik dev->pio_mask = UINT_MAX; 6450c6fd2807SJeff Garzik dev->mwdma_mask = UINT_MAX; 6451c6fd2807SJeff Garzik dev->udma_mask = UINT_MAX; 6452c6fd2807SJeff Garzik } 6453c6fd2807SJeff Garzik 6454c6fd2807SJeff Garzik /** 64554fb37a25STejun Heo * ata_link_init - Initialize an ata_link structure 64564fb37a25STejun Heo * @ap: ATA port link is attached to 64574fb37a25STejun Heo * @link: Link structure to initialize 64588989805dSTejun Heo * @pmp: Port multiplier port number 64594fb37a25STejun Heo * 64604fb37a25STejun Heo * Initialize @link. 
64614fb37a25STejun Heo * 64624fb37a25STejun Heo * LOCKING: 64634fb37a25STejun Heo * Kernel thread context (may sleep) 64644fb37a25STejun Heo */ 6465fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 64664fb37a25STejun Heo { 64674fb37a25STejun Heo int i; 64684fb37a25STejun Heo 64694fb37a25STejun Heo /* clear everything except for devices */ 64704fb37a25STejun Heo memset(link, 0, offsetof(struct ata_link, device[0])); 64714fb37a25STejun Heo 64724fb37a25STejun Heo link->ap = ap; 64738989805dSTejun Heo link->pmp = pmp; 64744fb37a25STejun Heo link->active_tag = ATA_TAG_POISON; 64754fb37a25STejun Heo link->hw_sata_spd_limit = UINT_MAX; 64764fb37a25STejun Heo 64774fb37a25STejun Heo /* can't use iterator, ap isn't initialized yet */ 64784fb37a25STejun Heo for (i = 0; i < ATA_MAX_DEVICES; i++) { 64794fb37a25STejun Heo struct ata_device *dev = &link->device[i]; 64804fb37a25STejun Heo 64814fb37a25STejun Heo dev->link = link; 64824fb37a25STejun Heo dev->devno = dev - link->device; 64834fb37a25STejun Heo ata_dev_init(dev); 64844fb37a25STejun Heo } 64854fb37a25STejun Heo } 64864fb37a25STejun Heo 64874fb37a25STejun Heo /** 64884fb37a25STejun Heo * sata_link_init_spd - Initialize link->sata_spd_limit 64894fb37a25STejun Heo * @link: Link to configure sata_spd_limit for 64904fb37a25STejun Heo * 64914fb37a25STejun Heo * Initialize @link->[hw_]sata_spd_limit to the currently 64924fb37a25STejun Heo * configured value. 64934fb37a25STejun Heo * 64944fb37a25STejun Heo * LOCKING: 64954fb37a25STejun Heo * Kernel thread context (may sleep). 64964fb37a25STejun Heo * 64974fb37a25STejun Heo * RETURNS: 64984fb37a25STejun Heo * 0 on success, -errno on failure. 
64994fb37a25STejun Heo */ 6500fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link) 65014fb37a25STejun Heo { 65024fb37a25STejun Heo u32 scontrol, spd; 65034fb37a25STejun Heo int rc; 65044fb37a25STejun Heo 65054fb37a25STejun Heo rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 65064fb37a25STejun Heo if (rc) 65074fb37a25STejun Heo return rc; 65084fb37a25STejun Heo 65094fb37a25STejun Heo spd = (scontrol >> 4) & 0xf; 65104fb37a25STejun Heo if (spd) 65114fb37a25STejun Heo link->hw_sata_spd_limit &= (1 << spd) - 1; 65124fb37a25STejun Heo 65134fb37a25STejun Heo link->sata_spd_limit = link->hw_sata_spd_limit; 65144fb37a25STejun Heo 65154fb37a25STejun Heo return 0; 65164fb37a25STejun Heo } 65174fb37a25STejun Heo 65184fb37a25STejun Heo /** 6519f3187195STejun Heo * ata_port_alloc - allocate and initialize basic ATA port resources 6520f3187195STejun Heo * @host: ATA host this allocated port belongs to 6521c6fd2807SJeff Garzik * 6522f3187195STejun Heo * Allocate and initialize basic ATA port resources. 6523f3187195STejun Heo * 6524f3187195STejun Heo * RETURNS: 6525f3187195STejun Heo * Allocate ATA port on success, NULL on failure. 6526c6fd2807SJeff Garzik * 6527c6fd2807SJeff Garzik * LOCKING: 6528f3187195STejun Heo * Inherited from calling layer (may sleep). 
6529c6fd2807SJeff Garzik */ 6530f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host) 6531c6fd2807SJeff Garzik { 6532f3187195STejun Heo struct ata_port *ap; 6533c6fd2807SJeff Garzik 6534f3187195STejun Heo DPRINTK("ENTER\n"); 6535f3187195STejun Heo 6536f3187195STejun Heo ap = kzalloc(sizeof(*ap), GFP_KERNEL); 6537f3187195STejun Heo if (!ap) 6538f3187195STejun Heo return NULL; 6539f3187195STejun Heo 6540f4d6d004STejun Heo ap->pflags |= ATA_PFLAG_INITIALIZING; 6541cca3974eSJeff Garzik ap->lock = &host->lock; 6542c6fd2807SJeff Garzik ap->flags = ATA_FLAG_DISABLED; 6543f3187195STejun Heo ap->print_id = -1; 6544c6fd2807SJeff Garzik ap->ctl = ATA_DEVCTL_OBS; 6545cca3974eSJeff Garzik ap->host = host; 6546f3187195STejun Heo ap->dev = host->dev; 6547c6fd2807SJeff Garzik ap->last_ctl = 0xFF; 6548c6fd2807SJeff Garzik 6549c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG) 6550c6fd2807SJeff Garzik /* turn on all debugging levels */ 6551c6fd2807SJeff Garzik ap->msg_enable = 0x00FF; 6552c6fd2807SJeff Garzik #elif defined(ATA_DEBUG) 6553c6fd2807SJeff Garzik ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 6554c6fd2807SJeff Garzik #else 6555c6fd2807SJeff Garzik ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 6556c6fd2807SJeff Garzik #endif 6557c6fd2807SJeff Garzik 655865f27f38SDavid Howells INIT_DELAYED_WORK(&ap->port_task, NULL); 655965f27f38SDavid Howells INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 656065f27f38SDavid Howells INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 6561c6fd2807SJeff Garzik INIT_LIST_HEAD(&ap->eh_done_q); 6562c6fd2807SJeff Garzik init_waitqueue_head(&ap->eh_wait_q); 65635ddf24c5STejun Heo init_timer_deferrable(&ap->fastdrain_timer); 65645ddf24c5STejun Heo ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 65655ddf24c5STejun Heo ap->fastdrain_timer.data = (unsigned long)ap; 6566c6fd2807SJeff Garzik 6567c6fd2807SJeff Garzik ap->cbl = ATA_CBL_NONE; 6568c6fd2807SJeff 
Garzik 65698989805dSTejun Heo ata_link_init(ap, &ap->link, 0); 6570c6fd2807SJeff Garzik 6571c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP 6572c6fd2807SJeff Garzik ap->stats.unhandled_irq = 1; 6573c6fd2807SJeff Garzik ap->stats.idle_irq = 1; 6574c6fd2807SJeff Garzik #endif 6575c6fd2807SJeff Garzik return ap; 6576c6fd2807SJeff Garzik } 6577c6fd2807SJeff Garzik 6578f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res) 6579f0d36efdSTejun Heo { 6580f0d36efdSTejun Heo struct ata_host *host = dev_get_drvdata(gendev); 6581f0d36efdSTejun Heo int i; 6582f0d36efdSTejun Heo 6583f0d36efdSTejun Heo for (i = 0; i < host->n_ports; i++) { 6584f0d36efdSTejun Heo struct ata_port *ap = host->ports[i]; 6585f0d36efdSTejun Heo 6586ecef7253STejun Heo if (!ap) 6587ecef7253STejun Heo continue; 6588ecef7253STejun Heo 6589ecef7253STejun Heo if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop) 6590f0d36efdSTejun Heo ap->ops->port_stop(ap); 6591f0d36efdSTejun Heo } 6592f0d36efdSTejun Heo 6593ecef7253STejun Heo if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop) 6594f0d36efdSTejun Heo host->ops->host_stop(host); 65951aa56ccaSTejun Heo 65961aa506e4STejun Heo for (i = 0; i < host->n_ports; i++) { 65971aa506e4STejun Heo struct ata_port *ap = host->ports[i]; 65981aa506e4STejun Heo 65994911487aSTejun Heo if (!ap) 66004911487aSTejun Heo continue; 66014911487aSTejun Heo 66024911487aSTejun Heo if (ap->scsi_host) 66031aa506e4STejun Heo scsi_host_put(ap->scsi_host); 66041aa506e4STejun Heo 6605633273a3STejun Heo kfree(ap->pmp_link); 66064911487aSTejun Heo kfree(ap); 66071aa506e4STejun Heo host->ports[i] = NULL; 66081aa506e4STejun Heo } 66091aa506e4STejun Heo 66101aa56ccaSTejun Heo dev_set_drvdata(gendev, NULL); 6611f0d36efdSTejun Heo } 6612f0d36efdSTejun Heo 6613c6fd2807SJeff Garzik /** 6614f3187195STejun Heo * ata_host_alloc - allocate and init basic ATA host resources 6615f3187195STejun Heo * @dev: generic device this host is associated with 6616f3187195STejun Heo 
* @max_ports: maximum number of ATA ports associated with this host 6617f3187195STejun Heo * 6618f3187195STejun Heo * Allocate and initialize basic ATA host resources. LLD calls 6619f3187195STejun Heo * this function to allocate a host, initializes it fully and 6620f3187195STejun Heo * attaches it using ata_host_register(). 6621f3187195STejun Heo * 6622f3187195STejun Heo * @max_ports ports are allocated and host->n_ports is 6623f3187195STejun Heo * initialized to @max_ports. The caller is allowed to decrease 6624f3187195STejun Heo * host->n_ports before calling ata_host_register(). The unused 6625f3187195STejun Heo * ports will be automatically freed on registration. 6626f3187195STejun Heo * 6627f3187195STejun Heo * RETURNS: 6628f3187195STejun Heo * Allocate ATA host on success, NULL on failure. 6629f3187195STejun Heo * 6630f3187195STejun Heo * LOCKING: 6631f3187195STejun Heo * Inherited from calling layer (may sleep). 6632f3187195STejun Heo */ 6633f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 6634f3187195STejun Heo { 6635f3187195STejun Heo struct ata_host *host; 6636f3187195STejun Heo size_t sz; 6637f3187195STejun Heo int i; 6638f3187195STejun Heo 6639f3187195STejun Heo DPRINTK("ENTER\n"); 6640f3187195STejun Heo 6641f3187195STejun Heo if (!devres_open_group(dev, NULL, GFP_KERNEL)) 6642f3187195STejun Heo return NULL; 6643f3187195STejun Heo 6644f3187195STejun Heo /* alloc a container for our list of ATA ports (buses) */ 6645f3187195STejun Heo sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 6646f3187195STejun Heo /* alloc a container for our list of ATA ports (buses) */ 6647f3187195STejun Heo host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 6648f3187195STejun Heo if (!host) 6649f3187195STejun Heo goto err_out; 6650f3187195STejun Heo 6651f3187195STejun Heo devres_add(dev, host); 6652f3187195STejun Heo dev_set_drvdata(dev, host); 6653f3187195STejun Heo 6654f3187195STejun Heo spin_lock_init(&host->lock); 
6655f3187195STejun Heo host->dev = dev; 6656f3187195STejun Heo host->n_ports = max_ports; 6657f3187195STejun Heo 6658f3187195STejun Heo /* allocate ports bound to this host */ 6659f3187195STejun Heo for (i = 0; i < max_ports; i++) { 6660f3187195STejun Heo struct ata_port *ap; 6661f3187195STejun Heo 6662f3187195STejun Heo ap = ata_port_alloc(host); 6663f3187195STejun Heo if (!ap) 6664f3187195STejun Heo goto err_out; 6665f3187195STejun Heo 6666f3187195STejun Heo ap->port_no = i; 6667f3187195STejun Heo host->ports[i] = ap; 6668f3187195STejun Heo } 6669f3187195STejun Heo 6670f3187195STejun Heo devres_remove_group(dev, NULL); 6671f3187195STejun Heo return host; 6672f3187195STejun Heo 6673f3187195STejun Heo err_out: 6674f3187195STejun Heo devres_release_group(dev, NULL); 6675f3187195STejun Heo return NULL; 6676f3187195STejun Heo } 6677f3187195STejun Heo 6678f3187195STejun Heo /** 6679f5cda257STejun Heo * ata_host_alloc_pinfo - alloc host and init with port_info array 6680f5cda257STejun Heo * @dev: generic device this host is associated with 6681f5cda257STejun Heo * @ppi: array of ATA port_info to initialize host with 6682f5cda257STejun Heo * @n_ports: number of ATA ports attached to this host 6683f5cda257STejun Heo * 6684f5cda257STejun Heo * Allocate ATA host and initialize with info from @ppi. If NULL 6685f5cda257STejun Heo * terminated, @ppi may contain fewer entries than @n_ports. The 6686f5cda257STejun Heo * last entry will be used for the remaining ports. 6687f5cda257STejun Heo * 6688f5cda257STejun Heo * RETURNS: 6689f5cda257STejun Heo * Allocate ATA host on success, NULL on failure. 6690f5cda257STejun Heo * 6691f5cda257STejun Heo * LOCKING: 6692f5cda257STejun Heo * Inherited from calling layer (may sleep). 
6693f5cda257STejun Heo */ 6694f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev, 6695f5cda257STejun Heo const struct ata_port_info * const * ppi, 6696f5cda257STejun Heo int n_ports) 6697f5cda257STejun Heo { 6698f5cda257STejun Heo const struct ata_port_info *pi; 6699f5cda257STejun Heo struct ata_host *host; 6700f5cda257STejun Heo int i, j; 6701f5cda257STejun Heo 6702f5cda257STejun Heo host = ata_host_alloc(dev, n_ports); 6703f5cda257STejun Heo if (!host) 6704f5cda257STejun Heo return NULL; 6705f5cda257STejun Heo 6706f5cda257STejun Heo for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 6707f5cda257STejun Heo struct ata_port *ap = host->ports[i]; 6708f5cda257STejun Heo 6709f5cda257STejun Heo if (ppi[j]) 6710f5cda257STejun Heo pi = ppi[j++]; 6711f5cda257STejun Heo 6712f5cda257STejun Heo ap->pio_mask = pi->pio_mask; 6713f5cda257STejun Heo ap->mwdma_mask = pi->mwdma_mask; 6714f5cda257STejun Heo ap->udma_mask = pi->udma_mask; 6715f5cda257STejun Heo ap->flags |= pi->flags; 67160c88758bSTejun Heo ap->link.flags |= pi->link_flags; 6717f5cda257STejun Heo ap->ops = pi->port_ops; 6718f5cda257STejun Heo 6719f5cda257STejun Heo if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 6720f5cda257STejun Heo host->ops = pi->port_ops; 6721f5cda257STejun Heo if (!host->private_data && pi->private_data) 6722f5cda257STejun Heo host->private_data = pi->private_data; 6723f5cda257STejun Heo } 6724f5cda257STejun Heo 6725f5cda257STejun Heo return host; 6726f5cda257STejun Heo } 6727f5cda257STejun Heo 6728f5cda257STejun Heo /** 6729ecef7253STejun Heo * ata_host_start - start and freeze ports of an ATA host 6730ecef7253STejun Heo * @host: ATA host to start ports for 6731ecef7253STejun Heo * 6732ecef7253STejun Heo * Start and then freeze ports of @host. Started status is 6733ecef7253STejun Heo * recorded in host->flags, so this function can be called 6734ecef7253STejun Heo * multiple times. Ports are guaranteed to get started only 6735f3187195STejun Heo * once. 
If host->ops isn't initialized yet, its set to the 6736f3187195STejun Heo * first non-dummy port ops. 6737ecef7253STejun Heo * 6738ecef7253STejun Heo * LOCKING: 6739ecef7253STejun Heo * Inherited from calling layer (may sleep). 6740ecef7253STejun Heo * 6741ecef7253STejun Heo * RETURNS: 6742ecef7253STejun Heo * 0 if all ports are started successfully, -errno otherwise. 6743ecef7253STejun Heo */ 6744ecef7253STejun Heo int ata_host_start(struct ata_host *host) 6745ecef7253STejun Heo { 6746ecef7253STejun Heo int i, rc; 6747ecef7253STejun Heo 6748ecef7253STejun Heo if (host->flags & ATA_HOST_STARTED) 6749ecef7253STejun Heo return 0; 6750ecef7253STejun Heo 6751ecef7253STejun Heo for (i = 0; i < host->n_ports; i++) { 6752ecef7253STejun Heo struct ata_port *ap = host->ports[i]; 6753ecef7253STejun Heo 6754f3187195STejun Heo if (!host->ops && !ata_port_is_dummy(ap)) 6755f3187195STejun Heo host->ops = ap->ops; 6756f3187195STejun Heo 6757ecef7253STejun Heo if (ap->ops->port_start) { 6758ecef7253STejun Heo rc = ap->ops->port_start(ap); 6759ecef7253STejun Heo if (rc) { 6760ecef7253STejun Heo ata_port_printk(ap, KERN_ERR, "failed to " 6761ecef7253STejun Heo "start port (errno=%d)\n", rc); 6762ecef7253STejun Heo goto err_out; 6763ecef7253STejun Heo } 6764ecef7253STejun Heo } 6765ecef7253STejun Heo 6766ecef7253STejun Heo ata_eh_freeze_port(ap); 6767ecef7253STejun Heo } 6768ecef7253STejun Heo 6769ecef7253STejun Heo host->flags |= ATA_HOST_STARTED; 6770ecef7253STejun Heo return 0; 6771ecef7253STejun Heo 6772ecef7253STejun Heo err_out: 6773ecef7253STejun Heo while (--i >= 0) { 6774ecef7253STejun Heo struct ata_port *ap = host->ports[i]; 6775ecef7253STejun Heo 6776ecef7253STejun Heo if (ap->ops->port_stop) 6777ecef7253STejun Heo ap->ops->port_stop(ap); 6778ecef7253STejun Heo } 6779ecef7253STejun Heo return rc; 6780ecef7253STejun Heo } 6781ecef7253STejun Heo 6782ecef7253STejun Heo /** 6783cca3974eSJeff Garzik * ata_sas_host_init - Initialize a host struct 6784cca3974eSJeff Garzik * 
@host: host to initialize 6785cca3974eSJeff Garzik * @dev: device host is attached to 6786cca3974eSJeff Garzik * @flags: host flags 6787c6fd2807SJeff Garzik * @ops: port_ops 6788c6fd2807SJeff Garzik * 6789c6fd2807SJeff Garzik * LOCKING: 6790c6fd2807SJeff Garzik * PCI/etc. bus probe sem. 6791c6fd2807SJeff Garzik * 6792c6fd2807SJeff Garzik */ 6793f3187195STejun Heo /* KILLME - the only user left is ipr */ 6794cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev, 6795cca3974eSJeff Garzik unsigned long flags, const struct ata_port_operations *ops) 6796c6fd2807SJeff Garzik { 6797cca3974eSJeff Garzik spin_lock_init(&host->lock); 6798cca3974eSJeff Garzik host->dev = dev; 6799cca3974eSJeff Garzik host->flags = flags; 6800cca3974eSJeff Garzik host->ops = ops; 6801c6fd2807SJeff Garzik } 6802c6fd2807SJeff Garzik 6803c6fd2807SJeff Garzik /** 6804f3187195STejun Heo * ata_host_register - register initialized ATA host 6805f3187195STejun Heo * @host: ATA host to register 6806f3187195STejun Heo * @sht: template for SCSI host 6807c6fd2807SJeff Garzik * 6808f3187195STejun Heo * Register initialized ATA host. @host is allocated using 6809f3187195STejun Heo * ata_host_alloc() and fully initialized by LLD. This function 6810f3187195STejun Heo * starts ports, registers @host with ATA and SCSI layers and 6811f3187195STejun Heo * probe registered devices. 6812c6fd2807SJeff Garzik * 6813c6fd2807SJeff Garzik * LOCKING: 6814f3187195STejun Heo * Inherited from calling layer (may sleep). 6815c6fd2807SJeff Garzik * 6816c6fd2807SJeff Garzik * RETURNS: 6817f3187195STejun Heo * 0 on success, -errno otherwise. 
6818c6fd2807SJeff Garzik */ 6819f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6820c6fd2807SJeff Garzik { 6821f3187195STejun Heo int i, rc; 6822c6fd2807SJeff Garzik 6823f3187195STejun Heo /* host must have been started */ 6824f3187195STejun Heo if (!(host->flags & ATA_HOST_STARTED)) { 6825f3187195STejun Heo dev_printk(KERN_ERR, host->dev, 6826f3187195STejun Heo "BUG: trying to register unstarted host\n"); 6827f3187195STejun Heo WARN_ON(1); 6828f3187195STejun Heo return -EINVAL; 682902f076aaSAlan Cox } 6830f0d36efdSTejun Heo 6831f3187195STejun Heo /* Blow away unused ports. This happens when LLD can't 6832f3187195STejun Heo * determine the exact number of ports to allocate at 6833f3187195STejun Heo * allocation time. 6834f3187195STejun Heo */ 6835f3187195STejun Heo for (i = host->n_ports; host->ports[i]; i++) 6836f3187195STejun Heo kfree(host->ports[i]); 6837f0d36efdSTejun Heo 6838f3187195STejun Heo /* give ports names and add SCSI hosts */ 6839f3187195STejun Heo for (i = 0; i < host->n_ports; i++) 6840f3187195STejun Heo host->ports[i]->print_id = ata_print_id++; 6841c6fd2807SJeff Garzik 6842f3187195STejun Heo rc = ata_scsi_add_hosts(host, sht); 6843ecef7253STejun Heo if (rc) 6844f3187195STejun Heo return rc; 6845ecef7253STejun Heo 6846fafbae87STejun Heo /* associate with ACPI nodes */ 6847fafbae87STejun Heo ata_acpi_associate(host); 6848fafbae87STejun Heo 6849f3187195STejun Heo /* set cable, sata_spd_limit and report */ 6850cca3974eSJeff Garzik for (i = 0; i < host->n_ports; i++) { 6851cca3974eSJeff Garzik struct ata_port *ap = host->ports[i]; 6852f3187195STejun Heo unsigned long xfer_mask; 6853f3187195STejun Heo 6854f3187195STejun Heo /* set SATA cable type if still unset */ 6855f3187195STejun Heo if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6856f3187195STejun Heo ap->cbl = ATA_CBL_SATA; 6857c6fd2807SJeff Garzik 6858c6fd2807SJeff Garzik /* init sata_spd_limit to the current value */ 68594fb37a25STejun 
Heo sata_link_init_spd(&ap->link); 6860c6fd2807SJeff Garzik 6861cbcdd875STejun Heo /* print per-port info to dmesg */ 6862f3187195STejun Heo xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6863f3187195STejun Heo ap->udma_mask); 6864f3187195STejun Heo 6865abf6e8edSTejun Heo if (!ata_port_is_dummy(ap)) { 6866cbcdd875STejun Heo ata_port_printk(ap, KERN_INFO, 6867cbcdd875STejun Heo "%cATA max %s %s\n", 6868a16abc0bSTejun Heo (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', 6869f3187195STejun Heo ata_mode_string(xfer_mask), 6870cbcdd875STejun Heo ap->link.eh_info.desc); 6871abf6e8edSTejun Heo ata_ehi_clear_desc(&ap->link.eh_info); 6872abf6e8edSTejun Heo } else 6873f3187195STejun Heo ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 6874c6fd2807SJeff Garzik } 6875c6fd2807SJeff Garzik 6876f3187195STejun Heo /* perform each probe synchronously */ 6877f3187195STejun Heo DPRINTK("probe begin\n"); 6878f3187195STejun Heo for (i = 0; i < host->n_ports; i++) { 6879f3187195STejun Heo struct ata_port *ap = host->ports[i]; 6880f3187195STejun Heo int rc; 6881f3187195STejun Heo 6882f3187195STejun Heo /* probe */ 6883c6fd2807SJeff Garzik if (ap->ops->error_handler) { 68849af5c9c9STejun Heo struct ata_eh_info *ehi = &ap->link.eh_info; 6885c6fd2807SJeff Garzik unsigned long flags; 6886c6fd2807SJeff Garzik 6887c6fd2807SJeff Garzik ata_port_probe(ap); 6888c6fd2807SJeff Garzik 6889c6fd2807SJeff Garzik /* kick EH for boot probing */ 6890c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6891c6fd2807SJeff Garzik 6892f58229f8STejun Heo ehi->probe_mask = 6893f58229f8STejun Heo (1 << ata_link_max_devices(&ap->link)) - 1; 6894c6fd2807SJeff Garzik ehi->action |= ATA_EH_SOFTRESET; 6895c6fd2807SJeff Garzik ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6896c6fd2807SJeff Garzik 6897f4d6d004STejun Heo ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6898c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_LOADING; 6899c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 6900c6fd2807SJeff Garzik 6901c6fd2807SJeff 
Garzik spin_unlock_irqrestore(ap->lock, flags); 6902c6fd2807SJeff Garzik 6903c6fd2807SJeff Garzik /* wait for EH to finish */ 6904c6fd2807SJeff Garzik ata_port_wait_eh(ap); 6905c6fd2807SJeff Garzik } else { 690644877b4eSTejun Heo DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6907c6fd2807SJeff Garzik rc = ata_bus_probe(ap); 690844877b4eSTejun Heo DPRINTK("ata%u: bus probe end\n", ap->print_id); 6909c6fd2807SJeff Garzik 6910c6fd2807SJeff Garzik if (rc) { 6911c6fd2807SJeff Garzik /* FIXME: do something useful here? 6912c6fd2807SJeff Garzik * Current libata behavior will 6913c6fd2807SJeff Garzik * tear down everything when 6914c6fd2807SJeff Garzik * the module is removed 6915c6fd2807SJeff Garzik * or the h/w is unplugged. 6916c6fd2807SJeff Garzik */ 6917c6fd2807SJeff Garzik } 6918c6fd2807SJeff Garzik } 6919c6fd2807SJeff Garzik } 6920c6fd2807SJeff Garzik 6921c6fd2807SJeff Garzik /* probes are done, now scan each port's disk(s) */ 6922c6fd2807SJeff Garzik DPRINTK("host probe begin\n"); 6923cca3974eSJeff Garzik for (i = 0; i < host->n_ports; i++) { 6924cca3974eSJeff Garzik struct ata_port *ap = host->ports[i]; 6925c6fd2807SJeff Garzik 69261ae46317STejun Heo ata_scsi_scan_host(ap, 1); 6927c6fd2807SJeff Garzik } 6928c6fd2807SJeff Garzik 6929f3187195STejun Heo return 0; 6930f3187195STejun Heo } 6931f3187195STejun Heo 6932f3187195STejun Heo /** 6933f5cda257STejun Heo * ata_host_activate - start host, request IRQ and register it 6934f5cda257STejun Heo * @host: target ATA host 6935f5cda257STejun Heo * @irq: IRQ to request 6936f5cda257STejun Heo * @irq_handler: irq_handler used when requesting IRQ 6937f5cda257STejun Heo * @irq_flags: irq_flags used when requesting IRQ 6938f5cda257STejun Heo * @sht: scsi_host_template to use when registering the host 6939f5cda257STejun Heo * 6940f5cda257STejun Heo * After allocating an ATA host and initializing it, most libata 6941f5cda257STejun Heo * LLDs perform three steps to activate the host - start host, 6942f5cda257STejun Heo * 
request IRQ and register it. This helper takes necessasry 6943f5cda257STejun Heo * arguments and performs the three steps in one go. 6944f5cda257STejun Heo * 6945f5cda257STejun Heo * LOCKING: 6946f5cda257STejun Heo * Inherited from calling layer (may sleep). 6947f5cda257STejun Heo * 6948f5cda257STejun Heo * RETURNS: 6949f5cda257STejun Heo * 0 on success, -errno otherwise. 6950f5cda257STejun Heo */ 6951f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq, 6952f5cda257STejun Heo irq_handler_t irq_handler, unsigned long irq_flags, 6953f5cda257STejun Heo struct scsi_host_template *sht) 6954f5cda257STejun Heo { 6955cbcdd875STejun Heo int i, rc; 6956f5cda257STejun Heo 6957f5cda257STejun Heo rc = ata_host_start(host); 6958f5cda257STejun Heo if (rc) 6959f5cda257STejun Heo return rc; 6960f5cda257STejun Heo 6961f5cda257STejun Heo rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6962f5cda257STejun Heo dev_driver_string(host->dev), host); 6963f5cda257STejun Heo if (rc) 6964f5cda257STejun Heo return rc; 6965f5cda257STejun Heo 6966cbcdd875STejun Heo for (i = 0; i < host->n_ports; i++) 6967cbcdd875STejun Heo ata_port_desc(host->ports[i], "irq %d", irq); 69684031826bSTejun Heo 6969f5cda257STejun Heo rc = ata_host_register(host, sht); 6970f5cda257STejun Heo /* if failed, just free the IRQ and leave ports alone */ 6971f5cda257STejun Heo if (rc) 6972f5cda257STejun Heo devm_free_irq(host->dev, irq, host); 6973f5cda257STejun Heo 6974f5cda257STejun Heo return rc; 6975f5cda257STejun Heo } 6976f5cda257STejun Heo 6977f5cda257STejun Heo /** 6978c6fd2807SJeff Garzik * ata_port_detach - Detach ATA port in prepration of device removal 6979c6fd2807SJeff Garzik * @ap: ATA port to be detached 6980c6fd2807SJeff Garzik * 6981c6fd2807SJeff Garzik * Detach all ATA devices and the associated SCSI devices of @ap; 6982c6fd2807SJeff Garzik * then, remove the associated SCSI host. @ap is guaranteed to 6983c6fd2807SJeff Garzik * be quiescent on return from this function. 
6984c6fd2807SJeff Garzik * 6985c6fd2807SJeff Garzik * LOCKING: 6986c6fd2807SJeff Garzik * Kernel thread context (may sleep). 6987c6fd2807SJeff Garzik */ 6988741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap) 6989c6fd2807SJeff Garzik { 6990c6fd2807SJeff Garzik unsigned long flags; 699141bda9c9STejun Heo struct ata_link *link; 6992f58229f8STejun Heo struct ata_device *dev; 6993c6fd2807SJeff Garzik 6994c6fd2807SJeff Garzik if (!ap->ops->error_handler) 6995c6fd2807SJeff Garzik goto skip_eh; 6996c6fd2807SJeff Garzik 6997c6fd2807SJeff Garzik /* tell EH we're leaving & flush EH */ 6998c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 6999c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_UNLOADING; 7000c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 7001c6fd2807SJeff Garzik 7002c6fd2807SJeff Garzik ata_port_wait_eh(ap); 7003c6fd2807SJeff Garzik 7004c6fd2807SJeff Garzik /* EH is now guaranteed to see UNLOADING, so no new device 7005c6fd2807SJeff Garzik * will be attached. Disable all existing devices. 7006c6fd2807SJeff Garzik */ 7007c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 7008c6fd2807SJeff Garzik 700941bda9c9STejun Heo ata_port_for_each_link(link, ap) { 701041bda9c9STejun Heo ata_link_for_each_dev(dev, link) 7011f58229f8STejun Heo ata_dev_disable(dev); 701241bda9c9STejun Heo } 7013c6fd2807SJeff Garzik 7014c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 7015c6fd2807SJeff Garzik 7016c6fd2807SJeff Garzik /* Final freeze & EH. All in-flight commands are aborted. EH 7017c6fd2807SJeff Garzik * will be skipped and retrials will be terminated with bad 7018c6fd2807SJeff Garzik * target. 
7019c6fd2807SJeff Garzik */ 7020c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 7021c6fd2807SJeff Garzik ata_port_freeze(ap); /* won't be thawed */ 7022c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 7023c6fd2807SJeff Garzik 7024c6fd2807SJeff Garzik ata_port_wait_eh(ap); 702545a66c1cSOleg Nesterov cancel_rearming_delayed_work(&ap->hotplug_task); 7026c6fd2807SJeff Garzik 7027c6fd2807SJeff Garzik skip_eh: 7028c6fd2807SJeff Garzik /* remove the associated SCSI host */ 7029cca3974eSJeff Garzik scsi_remove_host(ap->scsi_host); 7030c6fd2807SJeff Garzik } 7031c6fd2807SJeff Garzik 7032c6fd2807SJeff Garzik /** 70330529c159STejun Heo * ata_host_detach - Detach all ports of an ATA host 70340529c159STejun Heo * @host: Host to detach 70350529c159STejun Heo * 70360529c159STejun Heo * Detach all ports of @host. 70370529c159STejun Heo * 70380529c159STejun Heo * LOCKING: 70390529c159STejun Heo * Kernel thread context (may sleep). 70400529c159STejun Heo */ 70410529c159STejun Heo void ata_host_detach(struct ata_host *host) 70420529c159STejun Heo { 70430529c159STejun Heo int i; 70440529c159STejun Heo 70450529c159STejun Heo for (i = 0; i < host->n_ports; i++) 70460529c159STejun Heo ata_port_detach(host->ports[i]); 70470529c159STejun Heo } 70480529c159STejun Heo 7049c6fd2807SJeff Garzik /** 7050c6fd2807SJeff Garzik * ata_std_ports - initialize ioaddr with standard port offsets. 7051c6fd2807SJeff Garzik * @ioaddr: IO address structure to be initialized 7052c6fd2807SJeff Garzik * 7053c6fd2807SJeff Garzik * Utility function which initializes data_addr, error_addr, 7054c6fd2807SJeff Garzik * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, 7055c6fd2807SJeff Garzik * device_addr, status_addr, and command_addr to standard offsets 7056c6fd2807SJeff Garzik * relative to cmd_addr. 7057c6fd2807SJeff Garzik * 7058c6fd2807SJeff Garzik * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. 
7059c6fd2807SJeff Garzik */ 7060c6fd2807SJeff Garzik 7061c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr) 7062c6fd2807SJeff Garzik { 7063c6fd2807SJeff Garzik ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; 7064c6fd2807SJeff Garzik ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; 7065c6fd2807SJeff Garzik ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; 7066c6fd2807SJeff Garzik ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; 7067c6fd2807SJeff Garzik ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; 7068c6fd2807SJeff Garzik ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; 7069c6fd2807SJeff Garzik ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; 7070c6fd2807SJeff Garzik ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; 7071c6fd2807SJeff Garzik ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 7072c6fd2807SJeff Garzik ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 7073c6fd2807SJeff Garzik } 7074c6fd2807SJeff Garzik 7075c6fd2807SJeff Garzik 7076c6fd2807SJeff Garzik #ifdef CONFIG_PCI 7077c6fd2807SJeff Garzik 7078c6fd2807SJeff Garzik /** 7079c6fd2807SJeff Garzik * ata_pci_remove_one - PCI layer callback for device removal 7080c6fd2807SJeff Garzik * @pdev: PCI device that was removed 7081c6fd2807SJeff Garzik * 7082b878ca5dSTejun Heo * PCI layer indicates to libata via this hook that hot-unplug or 7083b878ca5dSTejun Heo * module unload event has occurred. Detach all ports. Resource 7084b878ca5dSTejun Heo * release is handled via devres. 7085c6fd2807SJeff Garzik * 7086c6fd2807SJeff Garzik * LOCKING: 7087c6fd2807SJeff Garzik * Inherited from PCI layer (may sleep). 
7088c6fd2807SJeff Garzik */ 7089c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev) 7090c6fd2807SJeff Garzik { 70912855568bSJeff Garzik struct device *dev = &pdev->dev; 7092cca3974eSJeff Garzik struct ata_host *host = dev_get_drvdata(dev); 7093c6fd2807SJeff Garzik 7094f0d36efdSTejun Heo ata_host_detach(host); 7095c6fd2807SJeff Garzik } 7096c6fd2807SJeff Garzik 7097c6fd2807SJeff Garzik /* move to PCI subsystem */ 7098c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 7099c6fd2807SJeff Garzik { 7100c6fd2807SJeff Garzik unsigned long tmp = 0; 7101c6fd2807SJeff Garzik 7102c6fd2807SJeff Garzik switch (bits->width) { 7103c6fd2807SJeff Garzik case 1: { 7104c6fd2807SJeff Garzik u8 tmp8 = 0; 7105c6fd2807SJeff Garzik pci_read_config_byte(pdev, bits->reg, &tmp8); 7106c6fd2807SJeff Garzik tmp = tmp8; 7107c6fd2807SJeff Garzik break; 7108c6fd2807SJeff Garzik } 7109c6fd2807SJeff Garzik case 2: { 7110c6fd2807SJeff Garzik u16 tmp16 = 0; 7111c6fd2807SJeff Garzik pci_read_config_word(pdev, bits->reg, &tmp16); 7112c6fd2807SJeff Garzik tmp = tmp16; 7113c6fd2807SJeff Garzik break; 7114c6fd2807SJeff Garzik } 7115c6fd2807SJeff Garzik case 4: { 7116c6fd2807SJeff Garzik u32 tmp32 = 0; 7117c6fd2807SJeff Garzik pci_read_config_dword(pdev, bits->reg, &tmp32); 7118c6fd2807SJeff Garzik tmp = tmp32; 7119c6fd2807SJeff Garzik break; 7120c6fd2807SJeff Garzik } 7121c6fd2807SJeff Garzik 7122c6fd2807SJeff Garzik default: 7123c6fd2807SJeff Garzik return -EINVAL; 7124c6fd2807SJeff Garzik } 7125c6fd2807SJeff Garzik 7126c6fd2807SJeff Garzik tmp &= bits->mask; 7127c6fd2807SJeff Garzik 7128c6fd2807SJeff Garzik return (tmp == bits->val) ? 
1 : 0; 7129c6fd2807SJeff Garzik } 7130c6fd2807SJeff Garzik 71316ffa01d8STejun Heo #ifdef CONFIG_PM 7132c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 7133c6fd2807SJeff Garzik { 7134c6fd2807SJeff Garzik pci_save_state(pdev); 7135c6fd2807SJeff Garzik pci_disable_device(pdev); 71364c90d971STejun Heo 71374c90d971STejun Heo if (mesg.event == PM_EVENT_SUSPEND) 7138c6fd2807SJeff Garzik pci_set_power_state(pdev, PCI_D3hot); 7139c6fd2807SJeff Garzik } 7140c6fd2807SJeff Garzik 7141553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev) 7142c6fd2807SJeff Garzik { 7143553c4aa6STejun Heo int rc; 7144553c4aa6STejun Heo 7145c6fd2807SJeff Garzik pci_set_power_state(pdev, PCI_D0); 7146c6fd2807SJeff Garzik pci_restore_state(pdev); 7147553c4aa6STejun Heo 7148f0d36efdSTejun Heo rc = pcim_enable_device(pdev); 7149553c4aa6STejun Heo if (rc) { 7150553c4aa6STejun Heo dev_printk(KERN_ERR, &pdev->dev, 7151553c4aa6STejun Heo "failed to enable device after resume (%d)\n", rc); 7152553c4aa6STejun Heo return rc; 7153553c4aa6STejun Heo } 7154553c4aa6STejun Heo 7155c6fd2807SJeff Garzik pci_set_master(pdev); 7156553c4aa6STejun Heo return 0; 7157c6fd2807SJeff Garzik } 7158c6fd2807SJeff Garzik 7159c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 7160c6fd2807SJeff Garzik { 7161cca3974eSJeff Garzik struct ata_host *host = dev_get_drvdata(&pdev->dev); 7162c6fd2807SJeff Garzik int rc = 0; 7163c6fd2807SJeff Garzik 7164cca3974eSJeff Garzik rc = ata_host_suspend(host, mesg); 7165c6fd2807SJeff Garzik if (rc) 7166c6fd2807SJeff Garzik return rc; 7167c6fd2807SJeff Garzik 7168c6fd2807SJeff Garzik ata_pci_device_do_suspend(pdev, mesg); 7169c6fd2807SJeff Garzik 7170c6fd2807SJeff Garzik return 0; 7171c6fd2807SJeff Garzik } 7172c6fd2807SJeff Garzik 7173c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev) 7174c6fd2807SJeff Garzik { 7175cca3974eSJeff Garzik struct ata_host *host = 
dev_get_drvdata(&pdev->dev); 7176553c4aa6STejun Heo int rc; 7177c6fd2807SJeff Garzik 7178553c4aa6STejun Heo rc = ata_pci_device_do_resume(pdev); 7179553c4aa6STejun Heo if (rc == 0) 7180cca3974eSJeff Garzik ata_host_resume(host); 7181553c4aa6STejun Heo return rc; 7182c6fd2807SJeff Garzik } 71836ffa01d8STejun Heo #endif /* CONFIG_PM */ 71846ffa01d8STejun Heo 7185c6fd2807SJeff Garzik #endif /* CONFIG_PCI */ 7186c6fd2807SJeff Garzik 7187c6fd2807SJeff Garzik 7188c6fd2807SJeff Garzik static int __init ata_init(void) 7189c6fd2807SJeff Garzik { 7190c6fd2807SJeff Garzik ata_probe_timeout *= HZ; 7191c6fd2807SJeff Garzik ata_wq = create_workqueue("ata"); 7192c6fd2807SJeff Garzik if (!ata_wq) 7193c6fd2807SJeff Garzik return -ENOMEM; 7194c6fd2807SJeff Garzik 7195c6fd2807SJeff Garzik ata_aux_wq = create_singlethread_workqueue("ata_aux"); 7196c6fd2807SJeff Garzik if (!ata_aux_wq) { 7197c6fd2807SJeff Garzik destroy_workqueue(ata_wq); 7198c6fd2807SJeff Garzik return -ENOMEM; 7199c6fd2807SJeff Garzik } 7200c6fd2807SJeff Garzik 7201c6fd2807SJeff Garzik printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 7202c6fd2807SJeff Garzik return 0; 7203c6fd2807SJeff Garzik } 7204c6fd2807SJeff Garzik 7205c6fd2807SJeff Garzik static void __exit ata_exit(void) 7206c6fd2807SJeff Garzik { 7207c6fd2807SJeff Garzik destroy_workqueue(ata_wq); 7208c6fd2807SJeff Garzik destroy_workqueue(ata_aux_wq); 7209c6fd2807SJeff Garzik } 7210c6fd2807SJeff Garzik 7211a4625085SBrian King subsys_initcall(ata_init); 7212c6fd2807SJeff Garzik module_exit(ata_exit); 7213c6fd2807SJeff Garzik 7214c6fd2807SJeff Garzik static unsigned long ratelimit_time; 7215c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock); 7216c6fd2807SJeff Garzik 7217c6fd2807SJeff Garzik int ata_ratelimit(void) 7218c6fd2807SJeff Garzik { 7219c6fd2807SJeff Garzik int rc; 7220c6fd2807SJeff Garzik unsigned long flags; 7221c6fd2807SJeff Garzik 7222c6fd2807SJeff Garzik spin_lock_irqsave(&ata_ratelimit_lock, flags); 7223c6fd2807SJeff 
Garzik 7224c6fd2807SJeff Garzik if (time_after(jiffies, ratelimit_time)) { 7225c6fd2807SJeff Garzik rc = 1; 7226c6fd2807SJeff Garzik ratelimit_time = jiffies + (HZ/5); 7227c6fd2807SJeff Garzik } else 7228c6fd2807SJeff Garzik rc = 0; 7229c6fd2807SJeff Garzik 7230c6fd2807SJeff Garzik spin_unlock_irqrestore(&ata_ratelimit_lock, flags); 7231c6fd2807SJeff Garzik 7232c6fd2807SJeff Garzik return rc; 7233c6fd2807SJeff Garzik } 7234c6fd2807SJeff Garzik 7235c6fd2807SJeff Garzik /** 7236c6fd2807SJeff Garzik * ata_wait_register - wait until register value changes 7237c6fd2807SJeff Garzik * @reg: IO-mapped register 7238c6fd2807SJeff Garzik * @mask: Mask to apply to read register value 7239c6fd2807SJeff Garzik * @val: Wait condition 7240c6fd2807SJeff Garzik * @interval_msec: polling interval in milliseconds 7241c6fd2807SJeff Garzik * @timeout_msec: timeout in milliseconds 7242c6fd2807SJeff Garzik * 7243c6fd2807SJeff Garzik * Waiting for some bits of register to change is a common 7244c6fd2807SJeff Garzik * operation for ATA controllers. This function reads 32bit LE 7245c6fd2807SJeff Garzik * IO-mapped register @reg and tests for the following condition. 7246c6fd2807SJeff Garzik * 7247c6fd2807SJeff Garzik * (*@reg & mask) != val 7248c6fd2807SJeff Garzik * 7249c6fd2807SJeff Garzik * If the condition is met, it returns; otherwise, the process is 7250c6fd2807SJeff Garzik * repeated after @interval_msec until timeout. 7251c6fd2807SJeff Garzik * 7252c6fd2807SJeff Garzik * LOCKING: 7253c6fd2807SJeff Garzik * Kernel thread context (may sleep) 7254c6fd2807SJeff Garzik * 7255c6fd2807SJeff Garzik * RETURNS: 7256c6fd2807SJeff Garzik * The final register value. 
7257c6fd2807SJeff Garzik */ 7258c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 7259c6fd2807SJeff Garzik unsigned long interval_msec, 7260c6fd2807SJeff Garzik unsigned long timeout_msec) 7261c6fd2807SJeff Garzik { 7262c6fd2807SJeff Garzik unsigned long timeout; 7263c6fd2807SJeff Garzik u32 tmp; 7264c6fd2807SJeff Garzik 7265c6fd2807SJeff Garzik tmp = ioread32(reg); 7266c6fd2807SJeff Garzik 7267c6fd2807SJeff Garzik /* Calculate timeout _after_ the first read to make sure 7268c6fd2807SJeff Garzik * preceding writes reach the controller before starting to 7269c6fd2807SJeff Garzik * eat away the timeout. 7270c6fd2807SJeff Garzik */ 7271c6fd2807SJeff Garzik timeout = jiffies + (timeout_msec * HZ) / 1000; 7272c6fd2807SJeff Garzik 7273c6fd2807SJeff Garzik while ((tmp & mask) == val && time_before(jiffies, timeout)) { 7274c6fd2807SJeff Garzik msleep(interval_msec); 7275c6fd2807SJeff Garzik tmp = ioread32(reg); 7276c6fd2807SJeff Garzik } 7277c6fd2807SJeff Garzik 7278c6fd2807SJeff Garzik return tmp; 7279c6fd2807SJeff Garzik } 7280c6fd2807SJeff Garzik 7281c6fd2807SJeff Garzik /* 7282c6fd2807SJeff Garzik * Dummy port_ops 7283c6fd2807SJeff Garzik */ 7284c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap) { } 7285c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap) { return 0; } 7286c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { } 7287c6fd2807SJeff Garzik 7288c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap) 7289c6fd2807SJeff Garzik { 7290c6fd2807SJeff Garzik return ATA_DRDY; 7291c6fd2807SJeff Garzik } 7292c6fd2807SJeff Garzik 7293c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 7294c6fd2807SJeff Garzik { 7295c6fd2807SJeff Garzik return AC_ERR_SYSTEM; 7296c6fd2807SJeff Garzik } 7297c6fd2807SJeff Garzik 7298c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = { 7299c6fd2807SJeff Garzik .check_status 
= ata_dummy_check_status, 7300c6fd2807SJeff Garzik .check_altstatus = ata_dummy_check_status, 7301c6fd2807SJeff Garzik .dev_select = ata_noop_dev_select, 7302c6fd2807SJeff Garzik .qc_prep = ata_noop_qc_prep, 7303c6fd2807SJeff Garzik .qc_issue = ata_dummy_qc_issue, 7304c6fd2807SJeff Garzik .freeze = ata_dummy_noret, 7305c6fd2807SJeff Garzik .thaw = ata_dummy_noret, 7306c6fd2807SJeff Garzik .error_handler = ata_dummy_noret, 7307c6fd2807SJeff Garzik .post_internal_cmd = ata_dummy_qc_noret, 7308c6fd2807SJeff Garzik .irq_clear = ata_dummy_noret, 7309c6fd2807SJeff Garzik .port_start = ata_dummy_ret0, 7310c6fd2807SJeff Garzik .port_stop = ata_dummy_noret, 7311c6fd2807SJeff Garzik }; 7312c6fd2807SJeff Garzik 731321b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = { 731421b0ad4fSTejun Heo .port_ops = &ata_dummy_port_ops, 731521b0ad4fSTejun Heo }; 731621b0ad4fSTejun Heo 7317c6fd2807SJeff Garzik /* 7318c6fd2807SJeff Garzik * libata is essentially a library of internal helper functions for 7319c6fd2807SJeff Garzik * low-level ATA host controller drivers. As such, the API/ABI is 7320c6fd2807SJeff Garzik * likely to change as new drivers are added and updated. 7321c6fd2807SJeff Garzik * Do not depend on ABI/API stability. 
7322c6fd2807SJeff Garzik */ 7323c6fd2807SJeff Garzik 7324c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 7325c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 7326c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long); 7327c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 732821b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info); 7329c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param); 7330c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports); 7331cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init); 7332f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc); 7333f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 7334ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start); 7335f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register); 7336f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate); 73370529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach); 7338c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init); 7339c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one); 7340c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move); 7341c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete); 7342c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7343c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 7344c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load); 7345c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read); 7346c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select); 7347c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select); 734843727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status); 7349c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7350c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7351c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status); 7352c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus); 7353c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command); 7354c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start); 7355d92e74d3SAlan Cox 
EXPORT_SYMBOL_GPL(ata_sff_port_start); 7356c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt); 735704351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode); 73580d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer); 73590d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); 736031cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer); 7361c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep); 7362d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); 7363c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7364c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup); 7365c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start); 7366c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 7367c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status); 7368c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop); 7369c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze); 7370c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw); 7371c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh); 7372c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); 7373c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 7374c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe); 737510305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable); 7376c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd); 7377936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce); 7378936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume); 7379c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset); 7380c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset); 7381c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset); 7382c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset); 7383c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset); 7384cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset); 7385c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset); 7386c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset); 7387c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify); 
7388c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair); 7389c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable); 7390c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit); 7391c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register); 7392c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep); 739388ff6eafSTejun Heo EXPORT_SYMBOL_GPL(ata_wait_after_reset); 7394d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready); 7395c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task); 7396c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 7397c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7398c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7399c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 7400c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 7401c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr); 7402c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid); 7403c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read); 7404c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write); 7405c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7406936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online); 7407936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline); 74086ffa01d8STejun Heo #ifdef CONFIG_PM 7409cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend); 7410cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume); 74116ffa01d8STejun Heo #endif /* CONFIG_PM */ 7412c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string); 7413c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string); 741410305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode); 7415c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7416c6fd2807SJeff Garzik 7417c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7418c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute); 7419c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge); 7420c6fd2807SJeff Garzik 7421c6fd2807SJeff Garzik #ifdef CONFIG_PCI 7422c6fd2807SJeff Garzik 
EXPORT_SYMBOL_GPL(pci_test_config_bits); 7423d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host); 74241626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma); 7425d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host); 7426c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one); 7427c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one); 74286ffa01d8STejun Heo #ifdef CONFIG_PM 7429c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 7430c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 7431c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 7432c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume); 74336ffa01d8STejun Heo #endif /* CONFIG_PM */ 7434c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter); 7435c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 7436c6fd2807SJeff Garzik #endif /* CONFIG_PCI */ 7437c6fd2807SJeff Garzik 743831f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch); 74393af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset); 74403af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset); 74413af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset); 74423af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh); 74433af9a77aSTejun Heo 7444b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7445b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7446b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7447cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc); 7448cbcdd875STejun Heo #ifdef CONFIG_PCI 7449cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 7450cbcdd875STejun Heo #endif /* CONFIG_PCI */ 7451c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout); 7452c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7453dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort); 7454c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort); 7455c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze); 
74567d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification); 7457c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7458c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7459c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7460c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7461c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh); 746283625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on); 7463a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify); 7464be0d18dfSAlan Cox 7465be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire); 7466be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire); 7467be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown); 7468be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata); 7469