Diffstat (limited to 'drivers/ata/libata-core.c')
-rw-r--r--   drivers/ata/libata-core.c | 1009
1 file changed, 668 insertions(+), 341 deletions(-)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 7f77c67d267..677c0c1b03b 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1,7 +1,7 @@  /*   *  libata-core.c - helper library for ATA   * - *  Maintained by:  Jeff Garzik <jgarzik@pobox.com> + *  Maintained by:  Tejun Heo <tj@kernel.org>   *    		    Please ALWAYS copy linux-ide@vger.kernel.org   *		    on emails.   * @@ -66,6 +66,8 @@  #include <asm/byteorder.h>  #include <linux/cdrom.h>  #include <linux/ratelimit.h> +#include <linux/pm_runtime.h> +#include <linux/platform_device.h>  #include "libata.h"  #include "libata-transport.h" @@ -79,6 +81,8 @@ const struct ata_port_operations ata_base_port_ops = {  	.prereset		= ata_std_prereset,  	.postreset		= ata_std_postreset,  	.error_handler		= ata_std_error_handler, +	.sched_eh		= ata_std_sched_eh, +	.end_eh			= ata_std_end_eh,  };  const struct ata_port_operations sata_port_ops = { @@ -94,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);  static void ata_dev_xfermask(struct ata_device *dev);  static unsigned long ata_dev_blacklisted(const struct ata_device *dev); -unsigned int ata_print_id = 1; +atomic_t ata_print_id = ATOMIC_INIT(0);  struct ata_force_param {  	const char	*name; @@ -335,8 +339,7 @@ void ata_force_cbl(struct ata_port *ap)  			continue;  		ap->cbl = fe->param.cbl; -		ata_port_printk(ap, KERN_NOTICE, -				"FORCE: cable set to %s\n", fe->param.name); +		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);  		return;  	}  } @@ -378,8 +381,7 @@ static void ata_force_link_limits(struct ata_link *link)  		/* only honor the first spd limit */  		if (!did_spd && fe->param.spd_limit) {  			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; -			ata_link_printk(link, KERN_NOTICE, -					"FORCE: PHY spd limit set to %s\n", +			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",  					fe->param.name);  			did_spd = true;  		} @@ -387,7 +389,7 @@ static void ata_force_link_limits(struct ata_link *link)  		/* let lflags stack */  		if (fe->param.lflags) {  			link->flags |= fe->param.lflags; -			ata_link_printk(link, KERN_NOTICE, +			ata_link_notice(link,  					"FORCE: link flag 0x%x forced -> 0x%x\n",  					fe->param.lflags, link->flags);  		} @@ -442,8 +444,8 @@ static void ata_force_xfermask(struct ata_device *dev)  			dev->pio_mask = pio_mask;  		} -		ata_dev_printk(dev, KERN_NOTICE, -			"FORCE: xfer_mask set to %s\n", fe->param.name); +		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n", +			       fe->param.name);  		return;  	}  } @@ -486,8 +488,8 @@ static void ata_force_horkage(struct ata_device *dev)  		dev->horkage |= fe->param.horkage_on;  		dev->horkage &= ~fe->param.horkage_off; -		ata_dev_printk(dev, KERN_NOTICE, -			"FORCE: horkage modified (%s)\n", fe->param.name); +		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n", +			       fe->param.name);  	}  } @@ -567,10 +569,10 @@ void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)  	fis[14] = 0;  	fis[15] = tf->ctl; -	fis[16] = 0; -	fis[17] = 0; -	fis[18] = 0; -	fis[19] = 0; +	fis[16] = tf->auxiliary & 0xff; +	fis[17] = (tf->auxiliary >> 8) & 0xff; +	fis[18] = (tf->auxiliary >> 16) & 0xff; +	fis[19] = (tf->auxiliary >> 24) & 0xff;  }  /** @@ -711,8 +713,8 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)  		sect = tf->lbal;  		if (!sect) { -			ata_dev_printk(dev, KERN_WARNING, "device reported " -				       "invalid CHS sector 0\n"); +			ata_dev_warn(dev, +				     "device 
reported invalid CHS sector 0\n");  			sect = 1; /* oh well */  		} @@ -773,7 +775,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,  		tf->lbam = (block >> 8) & 0xff;  		tf->lbal = block & 0xff; -		tf->device = 1 << 6; +		tf->device = ATA_LBA;  		if (tf->flags & ATA_TFLAG_FUA)  			tf->device |= 1 << 7;  	} else if (dev->flags & ATA_DFLAG_LBA) { @@ -1230,8 +1232,9 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);  	if (err_mask) { -		ata_dev_printk(dev, KERN_WARNING, "failed to read native " -			       "max address (err_mask=0x%x)\n", err_mask); +		ata_dev_warn(dev, +			     "failed to read native max address (err_mask=0x%x)\n", +			     err_mask);  		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))  			return -EACCES;  		return -EIO; @@ -1292,8 +1295,9 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)  	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);  	if (err_mask) { -		ata_dev_printk(dev, KERN_WARNING, "failed to set " -			       "max address (err_mask=0x%x)\n", err_mask); +		ata_dev_warn(dev, +			     "failed to set max address (err_mask=0x%x)\n", +			     err_mask);  		if (err_mask == AC_ERR_DEV &&  		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))  			return -EACCES; @@ -1336,8 +1340,8 @@ static int ata_hpa_resize(struct ata_device *dev)  		 * be unlocked, skip HPA resizing.  		 */  		if (rc == -EACCES || !unlock_hpa) { -			ata_dev_printk(dev, KERN_WARNING, "HPA support seems " -				       "broken, skipping HPA handling\n"); +			ata_dev_warn(dev, +				     "HPA support seems broken, skipping HPA handling\n");  			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;  			/* we can continue if device aborted the command */ @@ -1355,14 +1359,13 @@ static int ata_hpa_resize(struct ata_device *dev)  			return 0;  		if (native_sectors > sectors) -			ata_dev_printk(dev, KERN_INFO, +			ata_dev_info(dev,  				"HPA detected: current %llu, native %llu\n",  				(unsigned long long)sectors,  				(unsigned long long)native_sectors);  		else if (native_sectors < sectors) -			ata_dev_printk(dev, KERN_WARNING, -				"native sectors (%llu) is smaller than " -				"sectors (%llu)\n", +			ata_dev_warn(dev, +				"native sectors (%llu) is smaller than sectors (%llu)\n",  				(unsigned long long)native_sectors,  				(unsigned long long)sectors);  		return 0; @@ -1372,10 +1375,10 @@ static int ata_hpa_resize(struct ata_device *dev)  	rc = ata_set_max_sectors(dev, native_sectors);  	if (rc == -EACCES) {  		/* if device aborted the command, skip HPA resizing */ -		ata_dev_printk(dev, KERN_WARNING, "device aborted resize " -			       "(%llu -> %llu), skipping HPA handling\n", -			       (unsigned long long)sectors, -			       (unsigned long long)native_sectors); +		ata_dev_warn(dev, +			     "device aborted resize (%llu -> %llu), skipping HPA handling\n", +			     (unsigned long long)sectors, +			     (unsigned long long)native_sectors);  		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;  		return 0;  	} else if (rc) @@ -1384,14 +1387,14 @@ static int ata_hpa_resize(struct ata_device *dev)  	/* re-read IDENTIFY data */  	rc = ata_dev_reread_id(dev, 0);  	if (rc) { -		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY " -			       "data after HPA resizing\n"); +		ata_dev_err(dev, +			    "failed to re-read IDENTIFY data after HPA resizing\n");  		return rc;  	}  	if (print_info) {  		u64 new_sectors = ata_id_n_sectors(dev->id); -		ata_dev_printk(dev, 
KERN_INFO, +		ata_dev_info(dev,  			"HPA unlocked: %llu -> %llu, native %llu\n",  			(unsigned long long)sectors,  			(unsigned long long)new_sectors, @@ -1521,7 +1524,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)   *	@dev: Device to which the command is sent   *	@tf: Taskfile registers for the command and the result   *	@cdb: CDB for packet command - *	@dma_dir: Data tranfer direction of the command + *	@dma_dir: Data transfer direction of the command   *	@sgl: sg list for the data buffer of the command   *	@n_elem: Number of sg entries   *	@timeout: Timeout in msecs (0 for default) @@ -1599,6 +1602,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,  	qc->tf = *tf;  	if (cdb)  		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); + +	/* some SATA bridges need us to indicate data xfer direction */ +	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && +	    dma_dir == DMA_FROM_DEVICE) +		qc->tf.feature |= ATAPI_DMADIR; +  	qc->flags |= ATA_QCFLAG_RESULT_TF;  	qc->dma_dir = dma_dir;  	if (dma_dir != DMA_NONE) { @@ -1655,8 +1664,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,  				ata_qc_complete(qc);  			if (ata_msg_warn(ap)) -				ata_dev_printk(dev, KERN_WARNING, -					"qc timeout (cmd 0x%x)\n", command); +				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", +					     command);  		}  		spin_unlock_irqrestore(ap->lock, flags); @@ -1703,7 +1712,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,   *	@dev: Device to which the command is sent   *	@tf: Taskfile registers for the command and the result   *	@cdb: CDB for packet command - *	@dma_dir: Data tranfer direction of the command + *	@dma_dir: Data transfer direction of the command   *	@buf: Data buffer of the command   *	@buflen: Length of data buffer   *	@timeout: Timeout in msecs (0 for default) @@ -1870,7 +1879,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,  	int rc;  	if (ata_msg_ctl(ap)) -		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); +		ata_dev_dbg(dev, "%s: ENTER\n", __func__);  retry:  	ata_tf_init(dev, &tf); @@ -1909,14 +1918,13 @@ retry:  	if (err_mask) {  		if (err_mask & AC_ERR_NODEV_HINT) { -			ata_dev_printk(dev, KERN_DEBUG, -				       "NODEV after polling detection\n"); +			ata_dev_dbg(dev, "NODEV after polling detection\n");  			return -ENOENT;  		}  		if (is_semb) { -			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on " -				       "device w/ SEMB sig, disabled\n"); +			ata_dev_info(dev, +		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");  			/* SEMB is not supported yet */  			*p_class = ATA_DEV_SEMB_UNSUP;  			return 0; @@ -1942,8 +1950,8 @@ retry:  			 * both flavors of IDENTIFYs which happens  			 * sometimes with phantom devices.  			 
*/ -			ata_dev_printk(dev, KERN_DEBUG, -				       "both IDENTIFYs aborted, assuming NODEV\n"); +			ata_dev_dbg(dev, +				    "both IDENTIFYs aborted, assuming NODEV\n");  			return -ENOENT;  		} @@ -1953,9 +1961,9 @@ retry:  	}  	if (dev->horkage & ATA_HORKAGE_DUMP_ID) { -		ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, " -			       "class=%d may_fallback=%d tried_spinup=%d\n", -			       class, may_fallback, tried_spinup); +		ata_dev_dbg(dev, "dumping IDENTIFY data, " +			    "class=%d may_fallback=%d tried_spinup=%d\n", +			    class, may_fallback, tried_spinup);  		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,  			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);  	} @@ -1974,6 +1982,12 @@ retry:  	if (class == ATA_DEV_ATA) {  		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))  			goto err_out; +		if (ap->host->flags & ATA_HOST_IGNORE_ATA && +							ata_id_is_ata(id)) { +			ata_dev_dbg(dev, +				"host indicates ignore ATA devices, ignored\n"); +			return -ENOENT; +		}  	} else {  		if (ata_id_is_ata(id))  			goto err_out; @@ -2034,8 +2048,8 @@ retry:   err_out:  	if (ata_msg_warn(ap)) -		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " -			       "(%s, err_mask=0x%x)\n", reason, err_mask); +		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", +			     reason, err_mask);  	return rc;  } @@ -2065,9 +2079,8 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)  	 * guaranteed by setting sata_spd_limit to target_limit above.  	 */  	if (plink->sata_spd > target) { -		ata_dev_printk(dev, KERN_INFO, -			       "applying link speed limit horkage to %s\n", -			       sata_spd_string(target)); +		ata_dev_info(dev, "applying link speed limit horkage to %s\n", +			     sata_spd_string(target));  		return -EAGAIN;  	}  	return 0; @@ -2110,8 +2123,9 @@ static int ata_dev_config_ncq(struct ata_device *dev,  		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,  			SATA_FPDMA_AA);  		if (err_mask) { -			ata_dev_printk(dev, KERN_ERR, "failed to enable AA" -				"(error_mask=0x%x)\n", err_mask); +			ata_dev_err(dev, +				    "failed to enable AA (error_mask=0x%x)\n", +				    err_mask);  			if (err_mask != AC_ERR_DEV) {  				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;  				return -EIO; @@ -2125,6 +2139,29 @@ static int ata_dev_config_ncq(struct ata_device *dev,  	else  		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,  			ddepth, aa_desc); + +	if ((ap->flags & ATA_FLAG_FPDMA_AUX) && +	    ata_id_has_ncq_send_and_recv(dev->id)) { +		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, +					     0, ap->sector_buf, 1); +		if (err_mask) { +			ata_dev_dbg(dev, +				    "failed to get NCQ Send/Recv Log Emask 0x%x\n", +				    err_mask); +		} else { +			u8 *cmds = dev->ncq_send_recv_cmds; + +			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; +			memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); + +			if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { +				ata_dev_dbg(dev, "disabling queued TRIM support\n"); +				cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= +					~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; +			} +		} +	} +  	return 0;  } @@ -2148,37 +2185,35 @@ int ata_dev_configure(struct ata_device *dev)  	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;  	const u16 *id = dev->id;  	unsigned long xfer_mask; +	unsigned int err_mask;  	char revbuf[7];		/* XYZ-99\0 */  	char fwrevbuf[ATA_ID_FW_REV_LEN+1];  	char modelbuf[ATA_ID_PROD_LEN+1];  	int rc;  	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { -		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", -			   
    __func__); +		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);  		return 0;  	}  	if (ata_msg_probe(ap)) -		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); +		ata_dev_dbg(dev, "%s: ENTER\n", __func__);  	/* set horkage */  	dev->horkage |= ata_dev_blacklisted(dev);  	ata_force_horkage(dev);  	if (dev->horkage & ATA_HORKAGE_DISABLE) { -		ata_dev_printk(dev, KERN_INFO, -			       "unsupported device, disabling\n"); +		ata_dev_info(dev, "unsupported device, disabling\n");  		ata_dev_disable(dev);  		return 0;  	}  	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&  	    dev->class == ATA_DEV_ATAPI) { -		ata_dev_printk(dev, KERN_WARNING, -			"WARNING: ATAPI is %s, device ignored.\n", -			atapi_enabled ? "not supported with this driver" -				      : "disabled"); +		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", +			     atapi_enabled ? "not supported with this driver" +			     : "disabled");  		ata_dev_disable(dev);  		return 0;  	} @@ -2187,6 +2222,16 @@ int ata_dev_configure(struct ata_device *dev)  	if (rc)  		return rc; +	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ +	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && +	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) +		dev->horkage |= ATA_HORKAGE_NOLPM; + +	if (dev->horkage & ATA_HORKAGE_NOLPM) { +		ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); +		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; +	} +  	/* let ACPI work its magic */  	rc = ata_acpi_on_devcfg(dev);  	if (rc) @@ -2199,12 +2244,12 @@ int ata_dev_configure(struct ata_device *dev)  	/* print device capabilities */  	if (ata_msg_probe(ap)) -		ata_dev_printk(dev, KERN_DEBUG, -			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " -			       "85:%04x 86:%04x 87:%04x 88:%04x\n", -			       __func__, -			       id[49], id[82], id[83], id[84], -			       id[85], id[86], id[87], id[88]); +		ata_dev_dbg(dev, +			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " +			    "85:%04x 86:%04x 87:%04x 88:%04x\n", +			    __func__, +			    id[49], id[82], id[83], id[84], +			    id[85], id[86], id[87], id[88]);  	/* initialize to-be-configured parameters */  	dev->flags &= ~ATA_DFLAG_CFG_MASK; @@ -2238,17 +2283,15 @@ int ata_dev_configure(struct ata_device *dev)  		if (ata_id_is_cfa(id)) {  			/* CPRM may make this media unusable */  			if (id[ATA_ID_CFA_KEY_MGMT] & 1) -				ata_dev_printk(dev, KERN_WARNING, -					       "supports DRM functions and may " -					       "not be fully accessable.\n"); +				ata_dev_warn(dev, +	"supports DRM functions and may not be fully accessible\n");  			snprintf(revbuf, 7, "CFA");  		} else {  			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));  			/* Warn the user if the device has TPM extensions */  			if (ata_id_has_tpm(id)) -				ata_dev_printk(dev, KERN_WARNING, -					       "supports DRM functions and may " -					       "not be fully accessable.\n"); +				ata_dev_warn(dev, +	"supports DRM functions and may not be fully accessible\n");  		}  		dev->n_sectors = ata_id_n_sectors(id); @@ -2285,12 +2328,11 @@ int ata_dev_configure(struct ata_device *dev)  			/* print device info to dmesg */  			if (ata_msg_drv(ap) && print_info) { -				ata_dev_printk(dev, KERN_INFO, -					"%s: %s, %s, max %s\n", -					revbuf, modelbuf, fwrevbuf, -					ata_mode_string(xfer_mask)); -				ata_dev_printk(dev, KERN_INFO, -					"%Lu sectors, multi %u: %s %s\n", +				ata_dev_info(dev, "%s: %s, %s, max %s\n", +					     revbuf, modelbuf, fwrevbuf, +					     ata_mode_string(xfer_mask)); +				
ata_dev_info(dev, +					     "%llu sectors, multi %u: %s %s\n",  					(unsigned long long)dev->n_sectors,  					dev->multi_count, lba_desc, ncq_desc);  			} @@ -2311,18 +2353,41 @@ int ata_dev_configure(struct ata_device *dev)  			/* print device info to dmesg */  			if (ata_msg_drv(ap) && print_info) { -				ata_dev_printk(dev, KERN_INFO, -					"%s: %s, %s, max %s\n", -					revbuf,	modelbuf, fwrevbuf, -					ata_mode_string(xfer_mask)); -				ata_dev_printk(dev, KERN_INFO, -					"%Lu sectors, multi %u, CHS %u/%u/%u\n", -					(unsigned long long)dev->n_sectors, -					dev->multi_count, dev->cylinders, -					dev->heads, dev->sectors); +				ata_dev_info(dev, "%s: %s, %s, max %s\n", +					     revbuf,	modelbuf, fwrevbuf, +					     ata_mode_string(xfer_mask)); +				ata_dev_info(dev, +					     "%llu sectors, multi %u, CHS %u/%u/%u\n", +					     (unsigned long long)dev->n_sectors, +					     dev->multi_count, dev->cylinders, +					     dev->heads, dev->sectors);  			}  		} +		/* Check and mark DevSlp capability. Get DevSlp timing variables +		 * from SATA Settings page of Identify Device Data Log. +		 */ +		if (ata_id_has_devslp(dev->id)) { +			u8 *sata_setting = ap->sector_buf; +			int i, j; + +			dev->flags |= ATA_DFLAG_DEVSLP; +			err_mask = ata_read_log_page(dev, +						     ATA_LOG_SATA_ID_DEV_DATA, +						     ATA_LOG_SATA_SETTINGS, +						     sata_setting, +						     1); +			if (err_mask) +				ata_dev_dbg(dev, +					    "failed to get Identify Device Data, Emask 0x%x\n", +					    err_mask); +			else +				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { +					j = ATA_LOG_DEVSLP_OFFSET + i; +					dev->devslp_timing[i] = sata_setting[j]; +				} +		} +  		dev->cdb_len = 16;  	} @@ -2336,8 +2401,7 @@ int ata_dev_configure(struct ata_device *dev)  		rc = atapi_cdb_len(id);  		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {  			if (ata_msg_warn(ap)) -				ata_dev_printk(dev, KERN_WARNING, -					       "unsupported CDB len\n"); +				ata_dev_warn(dev, "unsupported CDB len\n");  			rc = -EINVAL;  			goto err_out_nosup;  		} @@ -2352,15 +2416,13 @@ int ata_dev_configure(struct ata_device *dev)  		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&  		    (!sata_pmp_attached(ap) ||  		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { -			unsigned int err_mask; -  			/* issue SET feature command to turn this on */  			err_mask = ata_dev_set_feature(dev,  					SETFEATURES_SATA_ENABLE, SATA_AN);  			if (err_mask) -				ata_dev_printk(dev, KERN_ERR, -					"failed to enable ATAPI AN " -					"(err_mask=0x%x)\n", err_mask); +				ata_dev_err(dev, +					    "failed to enable ATAPI AN (err_mask=0x%x)\n", +					    err_mask);  			else {  				dev->flags |= ATA_DFLAG_AN;  				atapi_an_string = ", ATAPI AN"; @@ -2372,19 +2434,24 @@ int ata_dev_configure(struct ata_device *dev)  			cdb_intr_string = ", CDB intr";  		} -		if (atapi_dmadir || atapi_id_dmadir(dev->id)) { +		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {  			dev->flags |= ATA_DFLAG_DMADIR;  			dma_dir_string = ", DMADIR";  		} +		if (ata_id_has_da(dev->id)) { +			dev->flags |= ATA_DFLAG_DA; +			zpodd_init(dev); +		} +  		/* print device info to dmesg */  		if (ata_msg_drv(ap) && print_info) -			ata_dev_printk(dev, KERN_INFO, -				       "ATAPI: %s, %s, max %s%s%s%s\n", -				       modelbuf, fwrevbuf, -				       ata_mode_string(xfer_mask), -				       cdb_intr_string, atapi_an_string, -				       dma_dir_string); +			ata_dev_info(dev, +				     "ATAPI: %s, %s, max %s%s%s%s\n", +				     modelbuf, 
fwrevbuf, +				     ata_mode_string(xfer_mask), +				     cdb_intr_string, atapi_an_string, +				     dma_dir_string);  	}  	/* determine max_sectors */ @@ -2396,8 +2463,7 @@ int ata_dev_configure(struct ata_device *dev)  	   200 sectors */  	if (ata_dev_knobble(dev)) {  		if (ata_msg_drv(ap) && print_info) -			ata_dev_printk(dev, KERN_INFO, -				       "applying bridge limits\n"); +			ata_dev_info(dev, "applying bridge limits\n");  		dev->udma_mask &= ATA_UDMA5;  		dev->max_sectors = ATA_MAX_SECTORS;  	} @@ -2412,6 +2478,9 @@ int ata_dev_configure(struct ata_device *dev)  		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,  					 dev->max_sectors); +	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) +		dev->max_sectors = ATA_MAX_SECTORS_LBA48; +  	if (ap->ops->dev_config)  		ap->ops->dev_config(dev); @@ -2423,26 +2492,23 @@ int ata_dev_configure(struct ata_device *dev)  		   bugs */  		if (print_info) { -			ata_dev_printk(dev, KERN_WARNING, +			ata_dev_warn(dev,  "Drive reports diagnostics failure. This may indicate a drive\n"); -			ata_dev_printk(dev, KERN_WARNING, +			ata_dev_warn(dev,  "fault or invalid emulation. Contact drive vendor for information.\n");  		}  	}  	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { -		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires " -			       "firmware update to be fully functional.\n"); -		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor " -			       "or visit http://ata.wiki.kernel.org.\n"); +		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); +		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");  	}  	return 0;  err_out_nosup:  	if (ata_msg_probe(ap)) -		ata_dev_printk(dev, KERN_DEBUG, -			       "%s: EXIT, err\n", __func__); +		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);  	return rc;  } @@ -2543,6 +2609,7 @@ int ata_bus_probe(struct ata_port *ap)  		 * bus as we may be talking too fast.  		 */  		dev->pio_mode = XFER_PIO_0; +		dev->dma_mode = 0xff;  		/* If the controller has a pio mode setup function  		 * then use it to set the chipset to rights. 
Don't @@ -2663,13 +2730,11 @@ static void sata_print_link_status(struct ata_link *link)  	if (ata_phys_link_online(link)) {  		tmp = (sstatus >> 4) & 0xf; -		ata_link_printk(link, KERN_INFO, -				"SATA link up %s (SStatus %X SControl %X)\n", -				sata_spd_string(tmp), sstatus, scontrol); +		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", +			      sata_spd_string(tmp), sstatus, scontrol);  	} else { -		ata_link_printk(link, KERN_INFO, -				"SATA link down (SStatus %X SControl %X)\n", -				sstatus, scontrol); +		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", +			      sstatus, scontrol);  	}  } @@ -2758,8 +2823,8 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)  	link->sata_spd_limit = mask; -	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", -			sata_spd_string(fls(mask))); +	ata_link_warn(link, "limiting SATA link speed to %s\n", +		      sata_spd_string(fls(mask)));  	return 0;  } @@ -2927,6 +2992,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)  	if (xfer_mode == t->mode)  		return t; + +	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", +			__func__, xfer_mode); +  	return NULL;  } @@ -2954,7 +3023,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,  	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */  		memset(&p, 0, sizeof(p)); -		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { +		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {  			if (speed <= XFER_PIO_2)  				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];  			else if ((speed <= XFER_PIO_4) || @@ -3136,8 +3205,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)  			snprintf(buf, sizeof(buf), "%s",  				 ata_mode_string(xfer_mask)); -		ata_dev_printk(dev, KERN_WARNING, -			       "limiting speed to %s\n", buf); +		ata_dev_warn(dev, "limiting speed to %s\n", buf);  	}  	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, @@ -3164,9 +3232,9 @@ static int ata_dev_set_mode(struct ata_device *dev)  		dev_err_whine = " (SET_XFERMODE skipped)";  	else {  		if (nosetxfer) -			ata_dev_printk(dev, KERN_WARNING, -				       "NOSETXFER but PATA detected - can't " -				       "skip SETXFER, might malfunction\n"); +			ata_dev_warn(dev, +				     "NOSETXFER but PATA detected - can't " +				     "skip SETXFER, might malfunction\n");  		err_mask = ata_dev_set_xfermode(dev);  	} @@ -3216,15 +3284,14 @@ static int ata_dev_set_mode(struct ata_device *dev)  	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",  		dev->xfer_shift, (int)dev->xfer_mode); -	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n", -		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), -		       dev_err_whine); +	ata_dev_info(dev, "configured for %s%s\n", +		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), +		     dev_err_whine);  	return 0;   fail: -	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " -		       "(err_mask=0x%x)\n", err_mask); +	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);  	return -EIO;  } @@ -3266,10 +3333,10 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)  		ata_force_xfermask(dev);  		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); -		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);  		if (libata_dma_mask & mode_mask) -			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); +			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, +						     dev->udma_mask);  		else  			dma_mask = 0; @@ -3286,7 
+3353,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)  	/* step 2: always set host PIO timings */  	ata_for_each_dev(dev, link, ENABLED) {  		if (dev->pio_mode == 0xff) { -			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); +			ata_dev_warn(dev, "no PIO support\n");  			rc = -EINVAL;  			goto out;  		} @@ -3404,7 +3471,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,  		if (!warned && time_after(now, start + 5 * HZ) &&  		    (deadline - now > 3 * HZ)) { -			ata_link_printk(link, KERN_WARNING, +			ata_link_warn(link,  				"link is slow to respond, please be patient "  				"(ready=%d)\n", tmp);  			warned = 1; @@ -3552,16 +3619,14 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,  	} while ((scontrol & 0xf0f) != 0x300 && --tries);  	if ((scontrol & 0xf0f) != 0x300) { -		ata_link_printk(link, KERN_ERR, -				"failed to resume link (SControl %X)\n", -				scontrol); +		ata_link_warn(link, "failed to resume link (SControl %X)\n", +			     scontrol);  		return 0;  	}  	if (tries < ATA_LINK_RESUME_TRIES) -		ata_link_printk(link, KERN_WARNING, -				"link resume succeeded after %d retries\n", -				ATA_LINK_RESUME_TRIES - tries); +		ata_link_warn(link, "link resume succeeded after %d retries\n", +			      ATA_LINK_RESUME_TRIES - tries);  	if ((rc = sata_link_debounce(link, params, deadline)))  		return rc; @@ -3606,7 +3671,7 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,  	switch (policy) {  	case ATA_LPM_MAX_POWER:  		/* disable all LPM transitions */ -		scontrol |= (0x3 << 8); +		scontrol |= (0x7 << 8);  		/* initiate transition to active state */  		if (spm_wakeup) {  			scontrol |= (0x4 << 12); @@ -3616,11 +3681,17 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,  	case ATA_LPM_MED_POWER:  		/* allow LPM to PARTIAL */  		scontrol &= ~(0x1 << 8); -		scontrol |= (0x2 << 8); +		scontrol |= (0x6 << 8);  		break;  	case ATA_LPM_MIN_POWER: -		/* no restrictions on LPM transitions */ -		scontrol &= ~(0x3 << 8); +		if (ata_link_nr_enabled(link) > 0) +			/* no restrictions on LPM transitions */ +			scontrol &= ~(0x7 << 8); +		else { +			/* empty port, power off */ +			scontrol &= ~0xf; +			scontrol |= (0x1 << 2); +		}  		break;  	default:  		WARN_ON(1); @@ -3672,8 +3743,9 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)  		rc = sata_link_resume(link, timing, deadline);  		/* whine about phy resume failure but proceed */  		if (rc && rc != -EOPNOTSUPP) -			ata_link_printk(link, KERN_WARNING, "failed to resume " -					"link for reset (errno=%d)\n", rc); +			ata_link_warn(link, +				      "failed to resume link for reset (errno=%d)\n", +				      rc);  	}  	/* no point in trying softreset on offline link */ @@ -3789,8 +3861,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,  		/* online is set iff link is online && reset succeeded */  		if (online)  			*online = false; -		ata_link_printk(link, KERN_ERR, -				"COMRESET failed (errno=%d)\n", rc); +		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);  	}  	DPRINTK("EXIT, rc=%d\n", rc);  	return rc; @@ -3874,8 +3945,8 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,  	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];  	if (dev->class != new_class) { -		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", -			       dev->class, new_class); +		ata_dev_info(dev, "class mismatch %d != %d\n", +			     dev->class, new_class);  		
return 0;  	} @@ -3885,14 +3956,14 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,  	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));  	if (strcmp(model[0], model[1])) { -		ata_dev_printk(dev, KERN_INFO, "model number mismatch " -			       "'%s' != '%s'\n", model[0], model[1]); +		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", +			     model[0], model[1]);  		return 0;  	}  	if (strcmp(serial[0], serial[1])) { -		ata_dev_printk(dev, KERN_INFO, "serial number mismatch " -			       "'%s' != '%s'\n", serial[0], serial[1]); +		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", +			     serial[0], serial[1]);  		return 0;  	} @@ -3962,8 +4033,8 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,  	    new_class != ATA_DEV_ATA &&  	    new_class != ATA_DEV_ATAPI &&  	    new_class != ATA_DEV_SEMB) { -		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", -			       dev->class, new_class); +		ata_dev_info(dev, "class mismatch %u != %u\n", +			     dev->class, new_class);  		rc = -ENODEV;  		goto fail;  	} @@ -3984,9 +4055,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,  		return 0;  	/* n_sectors has changed */ -	ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n", -		       (unsigned long long)n_sectors, -		       (unsigned long long)dev->n_sectors); +	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", +		     (unsigned long long)n_sectors, +		     (unsigned long long)dev->n_sectors);  	/*  	 * Something could have caused HPA to be unlocked @@ -3995,9 +4066,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,  	 */  	if (dev->n_native_sectors == n_native_sectors &&  	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { -		ata_dev_printk(dev, KERN_WARNING, -			       "new n_sectors matches native, probably " -			       "late HPA unlock, n_sectors updated\n"); +		ata_dev_warn(dev, +			     "new n_sectors matches native, probably " +			     "late HPA unlock, n_sectors updated\n");  		/* use the larger n_sectors */  		return 0;  	} @@ -4011,9 +4082,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,  	if (dev->n_native_sectors == n_native_sectors &&  	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&  	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { -		ata_dev_printk(dev, KERN_WARNING, -			       "old n_sectors matches native, probably " -			       "late HPA lock, will try to unlock HPA\n"); +		ata_dev_warn(dev, +			     "old n_sectors matches native, probably " +			     "late HPA lock, will try to unlock HPA\n");  		/* try unlocking HPA */  		dev->flags |= ATA_DFLAG_UNLOCK_HPA;  		rc = -EIO; @@ -4024,7 +4095,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,  	dev->n_native_sectors = n_native_sectors;  	dev->n_sectors = n_sectors;   fail: -	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); +	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);  	return rc;  } @@ -4064,12 +4135,15 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },  	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },  	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA }, +	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },  	/* Odd clown on sil3726/4726 PMPs */  	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },  	/* Weird ATAPI devices */  	{ "TORiSAN DVD-ROM DRD-N216", NULL,	
ATA_HORKAGE_MAX_SEC_128 },  	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA }, +	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 }, +	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },  	/* Devices we expect to fail diagnostics */ @@ -4099,6 +4173,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |  						ATA_HORKAGE_FIRMWARE_WARN }, +	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ +	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA }, +	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA }, +  	/* Blacklist entries taken from Silicon Image 3124/3132  	   Windows driver .inf file - also several Linux problem reports */  	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, }, @@ -4129,15 +4207,44 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {  	/* Devices that do not need bridging limits applied */  	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, }, +	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },  	/* Devices which aren't very happy with higher link speeds */  	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, }, +	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },  	/*  	 * Devices which choke on SETXFER.  Applies only if both the  	 * device and controller are SATA.  	 */ -	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER }, +	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER }, +	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER }, +	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER }, +	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER }, +	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER }, + +	/* devices that don't properly handle queued TRIM commands */ +	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, }, +	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, }, +	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, }, +	{ "Crucial_CT???M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, }, + +	/* +	 * Some WD SATA-I drives spin up and down erratically when the link +	 * is put into the slumber mode.  We don't have full list of the +	 * affected devices.  Disable LPM if the device matches one of the +	 * known prefixes and is SATA-1.  As a side effect LPM partial is +	 * lost too. 
+	 * +	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 +	 */ +	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM }, +	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM }, +	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM }, +	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM }, +	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM }, +	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM }, +	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },  	/* End Marker */  	{ } @@ -4209,7 +4316,7 @@ static int glob_match (const char *text, const char *pattern)  		return 0;  /* End of both strings: match */  	return 1;  /* No match */  } -  +  static unsigned long ata_dev_blacklisted(const struct ata_device *dev)  {  	unsigned char model_num[ATA_ID_PROD_LEN + 1]; @@ -4350,15 +4457,15 @@ static void ata_dev_xfermask(struct ata_device *dev)  	if (ata_dma_blacklisted(dev)) {  		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); -		ata_dev_printk(dev, KERN_WARNING, -			       "device is on DMA blacklist, disabling DMA\n"); +		ata_dev_warn(dev, +			     "device is on DMA blacklist, disabling DMA\n");  	}  	if ((host->flags & ATA_HOST_SIMPLEX) &&  	    host->simplex_claimed && host->simplex_claimed != ap) {  		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); -		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " -			       "other device, disabling DMA\n"); +		ata_dev_warn(dev, +			     "simplex DMA is claimed by other device, disabling DMA\n");  	}  	if (ap->flags & ATA_FLAG_NO_IORDY) @@ -4378,8 +4485,8 @@ static void ata_dev_xfermask(struct ata_device *dev)  	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))  		/* UDMA/44 or higher would be available */  		if (cable_is_40wire(ap)) { -			ata_dev_printk(dev, KERN_WARNING, -				 "limited to UDMA/33 due to 40-wire cable\n"); +			ata_dev_warn(dev, +				     "limited to UDMA/33 due to 40-wire cable\n");  			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);  		} @@ -4467,6 +4574,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)  	DPRINTK("EXIT, err_mask=%x\n", err_mask);  	return err_mask;  } +EXPORT_SYMBOL_GPL(ata_dev_set_feature);  /**   *	ata_dev_init_params - Issue INIT DEV PARAMS command @@ -4679,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)   *	ata_qc_new - Request an available ATA command, for queueing   *	@ap: target port   * + *	Some ATA host controllers may implement a queue depth which is less + *	than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond + *	the hardware limitation. + *   *	LOCKING:   *	None.   */ @@ -4686,21 +4798,27 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)  static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)  {  	struct ata_queued_cmd *qc = NULL; -	unsigned int i; +	unsigned int max_queue = ap->host->n_tags; +	unsigned int i, tag;  	/* no command while frozen */  	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))  		return NULL; -	/* the last tag is reserved for internal command. */ -	for (i = 0; i < ATA_MAX_QUEUE - 1; i++) -		if (!test_and_set_bit(i, &ap->qc_allocated)) { -			qc = __ata_qc_from_tag(ap, i); +	for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { +		tag = tag < max_queue ? tag : 0; + +		/* the last tag is reserved for internal command. 
*/ +		if (tag == ATA_TAG_INTERNAL) +			continue; + +		if (!test_and_set_bit(tag, &ap->qc_allocated)) { +			qc = __ata_qc_from_tag(ap, tag); +			qc->tag = tag; +			ap->last_tag = tag;  			break;  		} - -	if (qc) -		qc->tag = i; +	}  	return qc;  } @@ -4807,9 +4925,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)  {  	struct ata_device *dev = qc->dev; -	if (ata_tag_internal(qc->tag)) -		return; -  	if (ata_is_nodata(qc->tf.protocol))  		return; @@ -4858,14 +4973,23 @@ void ata_qc_complete(struct ata_queued_cmd *qc)  		if (unlikely(qc->err_mask))  			qc->flags |= ATA_QCFLAG_FAILED; -		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { -			/* always fill result TF for failed qc */ +		/* +		 * Finish internal commands without any further processing +		 * and always with the result TF filled. +		 */ +		if (unlikely(ata_tag_internal(qc->tag))) {  			fill_result_tf(qc); +			__ata_qc_complete(qc); +			return; +		} -			if (!ata_tag_internal(qc->tag)) -				ata_qc_schedule_eh(qc); -			else -				__ata_qc_complete(qc); +		/* +		 * Non-internal qc has failed.  Fill the result TF and +		 * summon EH. +		 */ +		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { +			fill_result_tf(qc); +			ata_qc_schedule_eh(qc);  			return;  		} @@ -4940,8 +5064,8 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)  	done_mask = ap->qc_active ^ qc_active;  	if (unlikely(done_mask & qc_active)) { -		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " -				"(%08x->%08x)\n", ap->qc_active, qc_active); +		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", +			     ap->qc_active, qc_active);  		return -EINVAL;  	} @@ -5240,112 +5364,210 @@ bool ata_link_offline(struct ata_link *link)  }  #ifdef CONFIG_PM -static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, -			       unsigned int action, unsigned int ehi_flags, -			       int wait) +static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, +				unsigned int action, unsigned int ehi_flags, +				bool async)  { +	struct ata_link *link;  	unsigned long flags; -	int i, rc; -	for (i = 0; i < host->n_ports; i++) { -		struct ata_port *ap = host->ports[i]; -		struct ata_link *link; +	/* Previous resume operation might still be in +	 * progress.  Wait for PM_PENDING to clear. +	 */ +	if (ap->pflags & ATA_PFLAG_PM_PENDING) { +		ata_port_wait_eh(ap); +		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); +	} -		/* Previous resume operation might still be in -		 * progress.  Wait for PM_PENDING to clear. -		 */ -		if (ap->pflags & ATA_PFLAG_PM_PENDING) { -			ata_port_wait_eh(ap); -			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); -		} +	/* request PM ops to EH */ +	spin_lock_irqsave(ap->lock, flags); -		/* request PM ops to EH */ -		spin_lock_irqsave(ap->lock, flags); +	ap->pm_mesg = mesg; +	ap->pflags |= ATA_PFLAG_PM_PENDING; +	ata_for_each_link(link, ap, HOST_FIRST) { +		link->eh_info.action |= action; +		link->eh_info.flags |= ehi_flags; +	} -		ap->pm_mesg = mesg; -		if (wait) { -			rc = 0; -			ap->pm_result = &rc; -		} +	ata_port_schedule_eh(ap); -		ap->pflags |= ATA_PFLAG_PM_PENDING; -		ata_for_each_link(link, ap, HOST_FIRST) { -			link->eh_info.action |= action; -			link->eh_info.flags |= ehi_flags; -		} +	spin_unlock_irqrestore(ap->lock, flags); -		ata_port_schedule_eh(ap); +	if (!async) { +		ata_port_wait_eh(ap); +		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); +	} +} -		spin_unlock_irqrestore(ap->lock, flags); +/* + * On some hardware, device fails to respond after spun down for suspend.  
As + * the device won't be used before being resumed, we don't need to touch the + * device.  Ask EH to skip the usual stuff and proceed directly to suspend. + * + * http://thread.gmane.org/gmane.linux.ide/46764 + */ +static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET +						 | ATA_EHI_NO_AUTOPSY +						 | ATA_EHI_NO_RECOVERY; -		/* wait and check result */ -		if (wait) { -			ata_port_wait_eh(ap); -			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); -			if (rc) -				return rc; -		} +static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) +{ +	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); +} + +static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) +{ +	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); +} + +static int ata_port_pm_suspend(struct device *dev) +{ +	struct ata_port *ap = to_ata_port(dev); + +	if (pm_runtime_suspended(dev)) +		return 0; + +	ata_port_suspend(ap, PMSG_SUSPEND); +	return 0; +} + +static int ata_port_pm_freeze(struct device *dev) +{ +	struct ata_port *ap = to_ata_port(dev); + +	if (pm_runtime_suspended(dev)) +		return 0; + +	ata_port_suspend(ap, PMSG_FREEZE); +	return 0; +} + +static int ata_port_pm_poweroff(struct device *dev) +{ +	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); +	return 0; +} + +static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY +						| ATA_EHI_QUIET; + +static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) +{ +	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); +} + +static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) +{ +	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); +} + +static int ata_port_pm_resume(struct device *dev) +{ +	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); +	pm_runtime_disable(dev); +	pm_runtime_set_active(dev); +	pm_runtime_enable(dev); +	return 0; +} + +/* + * For ODDs, the upper layer will poll for media change every few seconds, + * which will make it enter and leave suspend state every few seconds. And + * as each suspend will cause a hard/soft reset, the gain of runtime suspend + * is very little and the ODD may malfunction after constantly being reset. + * So the idle callback here will not proceed to suspend if a non-ZPODD capable + * ODD is attached to the port. + */ +static int ata_port_runtime_idle(struct device *dev) +{ +	struct ata_port *ap = to_ata_port(dev); +	struct ata_link *link; +	struct ata_device *adev; + +	ata_for_each_link(link, ap, HOST_FIRST) { +		ata_for_each_dev(adev, link, ENABLED) +			if (adev->class == ATA_DEV_ATAPI && +			    !zpodd_dev_enabled(adev)) +				return -EBUSY;  	}  	return 0;  } +static int ata_port_runtime_suspend(struct device *dev) +{ +	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); +	return 0; +} + +static int ata_port_runtime_resume(struct device *dev) +{ +	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); +	return 0; +} + +static const struct dev_pm_ops ata_port_pm_ops = { +	.suspend = ata_port_pm_suspend, +	.resume = ata_port_pm_resume, +	.freeze = ata_port_pm_freeze, +	.thaw = ata_port_pm_resume, +	.poweroff = ata_port_pm_poweroff, +	.restore = ata_port_pm_resume, + +	.runtime_suspend = ata_port_runtime_suspend, +	.runtime_resume = ata_port_runtime_resume, +	.runtime_idle = ata_port_runtime_idle, +}; + +/* sas ports don't participate in pm runtime management of ata_ports, + * and need to resume ata devices at the domain level, not the per-port + * level. 
sas suspend/resume is async to allow parallel port recovery + * since sas has multiple ata_port instances per Scsi_Host. + */ +void ata_sas_port_suspend(struct ata_port *ap) +{ +	ata_port_suspend_async(ap, PMSG_SUSPEND); +} +EXPORT_SYMBOL_GPL(ata_sas_port_suspend); + +void ata_sas_port_resume(struct ata_port *ap) +{ +	ata_port_resume_async(ap, PMSG_RESUME); +} +EXPORT_SYMBOL_GPL(ata_sas_port_resume); +  /**   *	ata_host_suspend - suspend host   *	@host: host to suspend   *	@mesg: PM message   * - *	Suspend @host.  Actual operation is performed by EH.  This - *	function requests EH to perform PM operations and waits for EH - *	to finish. - * - *	LOCKING: - *	Kernel thread context (may sleep). - * - *	RETURNS: - *	0 on success, -errno on failure. + *	Suspend @host.  Actual operation is performed by port suspend.   */  int ata_host_suspend(struct ata_host *host, pm_message_t mesg)  { -	unsigned int ehi_flags = ATA_EHI_QUIET; -	int rc; - -	/* -	 * On some hardware, device fails to respond after spun down -	 * for suspend.  As the device won't be used before being -	 * resumed, we don't need to touch the device.  Ask EH to skip -	 * the usual stuff and proceed directly to suspend. -	 * -	 * http://thread.gmane.org/gmane.linux.ide/46764 -	 */ -	if (mesg.event == PM_EVENT_SUSPEND) -		ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY; - -	rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1); -	if (rc == 0) -		host->dev->power.power_state = mesg; -	return rc; +	host->dev->power.power_state = mesg; +	return 0;  }  /**   *	ata_host_resume - resume host   *	@host: host to resume   * - *	Resume @host.  Actual operation is performed by EH.  This - *	function requests EH to perform PM operations and returns. - *	Note that all resume operations are performed parallely. - * - *	LOCKING: - *	Kernel thread context (may sleep). + *	Resume @host.  Actual operation is performed by port resume.   */  void ata_host_resume(struct ata_host *host)  { -	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, -			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);  	host->dev->power.power_state = PMSG_ON;  }  #endif +struct device_type ata_port_type = { +	.name = "ata_port", +#ifdef CONFIG_PM +	.pm = &ata_port_pm_ops, +#endif +}; +  /**   *	ata_dev_init - Initialize an ata_device structure   *	@dev: Device structure to initialize @@ -5472,10 +5694,11 @@ struct ata_port *ata_port_alloc(struct ata_host *host)  	ap = kzalloc(sizeof(*ap), GFP_KERNEL);  	if (!ap)  		return NULL; -	 -	ap->pflags |= ATA_PFLAG_INITIALIZING; + +	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;  	ap->lock = &host->lock;  	ap->print_id = -1; +	ap->local_port_no = -1;  	ap->host = host;  	ap->dev = host->dev; @@ -5833,9 +6056,9 @@ int ata_host_start(struct ata_host *host)  			rc = ap->ops->port_start(ap);  			if (rc) {  				if (rc != -ENODEV) -					dev_printk(KERN_ERR, host->dev, -						"failed to start port %d " -						"(errno=%d)\n", i, rc); +					dev_err(host->dev, +						"failed to start port %d (errno=%d)\n", +						i, rc);  				goto err_out;  			}  		} @@ -5859,31 +6082,59 @@ int ata_host_start(struct ata_host *host)  }  /** - *	ata_sas_host_init - Initialize a host struct + *	ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)   *	@host:	host to initialize   *	@dev:	device host is attached to - *	@flags:	host flags   *	@ops:	port_ops   * - *	LOCKING: - *	PCI/etc. bus probe sem. 
- *   */ -/* KILLME - the only user left is ipr */  void ata_host_init(struct ata_host *host, struct device *dev, -		   unsigned long flags, struct ata_port_operations *ops) +		   struct ata_port_operations *ops)  {  	spin_lock_init(&host->lock);  	mutex_init(&host->eh_mutex); +	host->n_tags = ATA_MAX_QUEUE - 1;  	host->dev = dev; -	host->flags = flags;  	host->ops = ops;  } +void __ata_port_probe(struct ata_port *ap) +{ +	struct ata_eh_info *ehi = &ap->link.eh_info; +	unsigned long flags; + +	/* kick EH for boot probing */ +	spin_lock_irqsave(ap->lock, flags); + +	ehi->probe_mask |= ATA_ALL_DEVICES; +	ehi->action |= ATA_EH_RESET; +	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; + +	ap->pflags &= ~ATA_PFLAG_INITIALIZING; +	ap->pflags |= ATA_PFLAG_LOADING; +	ata_port_schedule_eh(ap); + +	spin_unlock_irqrestore(ap->lock, flags); +} + +int ata_port_probe(struct ata_port *ap) +{ +	int rc = 0; + +	if (ap->ops->error_handler) { +		__ata_port_probe(ap); +		ata_port_wait_eh(ap); +	} else { +		DPRINTK("ata%u: bus probe begin\n", ap->print_id); +		rc = ata_bus_probe(ap); +		DPRINTK("ata%u: bus probe end\n", ap->print_id); +	} +	return rc; +} +  static void async_port_probe(void *data, async_cookie_t cookie)  { -	int rc;  	struct ata_port *ap = data;  	/* @@ -5896,47 +6147,14 @@ static void async_port_probe(void *data, async_cookie_t cookie)  	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)  		async_synchronize_cookie(cookie); -	/* probe */ -	if (ap->ops->error_handler) { -		struct ata_eh_info *ehi = &ap->link.eh_info; -		unsigned long flags; - -		/* kick EH for boot probing */ -		spin_lock_irqsave(ap->lock, flags); - -		ehi->probe_mask |= ATA_ALL_DEVICES; -		ehi->action |= ATA_EH_RESET; -		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; - -		ap->pflags &= ~ATA_PFLAG_INITIALIZING; -		ap->pflags |= ATA_PFLAG_LOADING; -		ata_port_schedule_eh(ap); - -		spin_unlock_irqrestore(ap->lock, flags); - -		/* wait for EH to finish */ -		ata_port_wait_eh(ap); -	} else { -		DPRINTK("ata%u: bus probe begin\n", ap->print_id); -		rc = ata_bus_probe(ap); -		DPRINTK("ata%u: bus probe end\n", ap->print_id); - -		if (rc) { -			/* FIXME: do something useful here? -			 * Current libata behavior will -			 * tear down everything when -			 * the module is removed -			 * or the h/w is unplugged. 
-			 */ -		} -	} +	(void)ata_port_probe(ap);  	/* in order to keep device order, we need to synchronize at this point */  	async_synchronize_cookie(cookie);  	ata_scsi_scan_host(ap, 1); -  } +  /**   *	ata_host_register - register initialized ATA host   *	@host: ATA host to register @@ -5957,10 +6175,11 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)  {  	int i, rc; +	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); +  	/* host must have been started */  	if (!(host->flags & ATA_HOST_STARTED)) { -		dev_printk(KERN_ERR, host->dev, -			   "BUG: trying to register unstarted host\n"); +		dev_err(host->dev, "BUG: trying to register unstarted host\n");  		WARN_ON(1);  		return -EINVAL;  	} @@ -5973,10 +6192,11 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)  		kfree(host->ports[i]);  	/* give ports names and add SCSI hosts */ -	for (i = 0; i < host->n_ports; i++) -		host->ports[i]->print_id = ata_print_id++; +	for (i = 0; i < host->n_ports; i++) { +		host->ports[i]->print_id = atomic_inc_return(&ata_print_id); +		host->ports[i]->local_port_no = i + 1; +	} -	  	/* Create associated sysfs transport objects  */  	for (i = 0; i < host->n_ports; i++) {  		rc = ata_tport_add(host->dev,host->ports[i]); @@ -5989,9 +6209,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)  	if (rc)  		goto err_tadd; -	/* associate with ACPI nodes */ -	ata_acpi_associate(host); -  	/* set cable, sata_spd_limit and report */  	for (i = 0; i < host->n_ports; i++) {  		struct ata_port *ap = host->ports[i]; @@ -6011,14 +6228,13 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)  					      ap->udma_mask);  		if (!ata_port_is_dummy(ap)) { -			ata_port_printk(ap, KERN_INFO, -					"%cATA max %s %s\n", -					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', -					ata_mode_string(xfer_mask), -					ap->link.eh_info.desc); +			ata_port_info(ap, "%cATA max %s %s\n", +				      (ap->flags & ATA_FLAG_SATA) ? 
'S' : 'P', +				      ata_mode_string(xfer_mask), +				      ap->link.eh_info.desc);  			ata_ehi_clear_desc(&ap->link.eh_info);  		} else -			ata_port_printk(ap, KERN_INFO, "DUMMY\n"); +			ata_port_info(ap, "DUMMY\n");  	}  	/* perform each probe asynchronously */ @@ -6106,6 +6322,8 @@ int ata_host_activate(struct ata_host *host, int irq,  static void ata_port_detach(struct ata_port *ap)  {  	unsigned long flags; +	struct ata_link *link; +	struct ata_device *dev;  	if (!ap->ops->error_handler)  		goto skip_eh; @@ -6122,18 +6340,24 @@ static void ata_port_detach(struct ata_port *ap)  	/* it better be dead now */  	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); -	cancel_rearming_delayed_work(&ap->hotplug_task); +	cancel_delayed_work_sync(&ap->hotplug_task);   skip_eh: +	/* clean up zpodd on port removal */ +	ata_for_each_link(link, ap, HOST_FIRST) { +		ata_for_each_dev(dev, link, ALL) { +			if (zpodd_dev_enabled(dev)) +				zpodd_exit(dev); +		} +	}  	if (ap->pmp_link) {  		int i;  		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)  			ata_tlink_delete(&ap->pmp_link[i]);  	} -	ata_tport_delete(ap); -  	/* remove the associated SCSI host */  	scsi_remove_host(ap->scsi_host); +	ata_tport_delete(ap);  }  /** @@ -6171,8 +6395,7 @@ void ata_host_detach(struct ata_host *host)   */  void ata_pci_remove_one(struct pci_dev *pdev)  { -	struct device *dev = &pdev->dev; -	struct ata_host *host = dev_get_drvdata(dev); +	struct ata_host *host = pci_get_drvdata(pdev);  	ata_host_detach(host);  } @@ -6230,8 +6453,8 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)  	rc = pcim_enable_device(pdev);  	if (rc) { -		dev_printk(KERN_ERR, &pdev->dev, -			   "failed to enable device after resume (%d)\n", rc); +		dev_err(&pdev->dev, +			"failed to enable device after resume (%d)\n", rc);  		return rc;  	} @@ -6241,7 +6464,7 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)  int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)  { -	struct ata_host *host = dev_get_drvdata(&pdev->dev); +	struct ata_host *host = pci_get_drvdata(pdev);  	int rc = 0;  	rc = ata_host_suspend(host, mesg); @@ -6255,7 +6478,7 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)  int ata_pci_device_resume(struct pci_dev *pdev)  { -	struct ata_host *host = dev_get_drvdata(&pdev->dev); +	struct ata_host *host = pci_get_drvdata(pdev);  	int rc;  	rc = ata_pci_device_do_resume(pdev); @@ -6267,6 +6490,26 @@ int ata_pci_device_resume(struct pci_dev *pdev)  #endif /* CONFIG_PCI */ +/** + *	ata_platform_remove_one - Platform layer callback for device removal + *	@pdev: Platform device that was removed + * + *	Platform layer indicates to libata via this hook that hot-unplug or + *	module unload event has occurred.  Detach all ports.  Resource + *	release is handled via devres. + * + *	LOCKING: + *	Inherited from platform layer (may sleep). 
+ */ +int ata_platform_remove_one(struct platform_device *pdev) +{ +	struct ata_host *host = platform_get_drvdata(pdev); + +	ata_host_detach(host); + +	return 0; +} +  static int __init ata_parse_force_one(char **cur,  				      struct ata_force_ent *force_ent,  				      const char **reason) @@ -6325,6 +6568,9 @@ static int __init ata_parse_force_one(char **cur,  		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },  		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },  		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, +		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE }, +		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR }, +		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },  	};  	char *start = *cur, *p = *cur;  	char *id, *val, *endp; @@ -6464,7 +6710,7 @@ static int __init ata_init(void)  		ata_sff_exit();  		rc = -ENOMEM;  		goto err_out; -	}		 +	}  	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");  	return 0; @@ -6581,6 +6827,8 @@ struct ata_port_operations ata_dummy_port_ops = {  	.qc_prep		= ata_noop_qc_prep,  	.qc_issue		= ata_dummy_qc_issue,  	.error_handler		= ata_dummy_error_handler, +	.sched_eh		= ata_std_sched_eh, +	.end_eh			= ata_std_end_eh,  };  const struct ata_port_info ata_dummy_port_info = { @@ -6588,6 +6836,82 @@ const struct ata_port_info ata_dummy_port_info = {  };  /* + * Utility print functions + */ +int ata_port_printk(const struct ata_port *ap, const char *level, +		    const char *fmt, ...) +{ +	struct va_format vaf; +	va_list args; +	int r; + +	va_start(args, fmt); + +	vaf.fmt = fmt; +	vaf.va = &args; + +	r = printk("%sata%u: %pV", level, ap->print_id, &vaf); + +	va_end(args); + +	return r; +} +EXPORT_SYMBOL(ata_port_printk); + +int ata_link_printk(const struct ata_link *link, const char *level, +		    const char *fmt, ...) +{ +	struct va_format vaf; +	va_list args; +	int r; + +	va_start(args, fmt); + +	vaf.fmt = fmt; +	vaf.va = &args; + +	if (sata_pmp_attached(link->ap) || link->ap->slave_link) +		r = printk("%sata%u.%02u: %pV", +			   level, link->ap->print_id, link->pmp, &vaf); +	else +		r = printk("%sata%u: %pV", +			   level, link->ap->print_id, &vaf); + +	va_end(args); + +	return r; +} +EXPORT_SYMBOL(ata_link_printk); + +int ata_dev_printk(const struct ata_device *dev, const char *level, +		    const char *fmt, ...) +{ +	struct va_format vaf; +	va_list args; +	int r; + +	va_start(args, fmt); + +	vaf.fmt = fmt; +	vaf.va = &args; + +	r = printk("%sata%u.%02u: %pV", +		   level, dev->link->ap->print_id, dev->link->pmp + dev->devno, +		   &vaf); + +	va_end(args); + +	return r; +} +EXPORT_SYMBOL(ata_dev_printk); + +void ata_print_version(const struct device *dev, const char *version) +{ +	dev_printk(KERN_DEBUG, dev, "version %s\n", version); +} +EXPORT_SYMBOL(ata_print_version); + +/*   * libata is essentially a library of internal helper functions for   * low-level ATA host controller drivers.  As such, the API/ABI is   * likely to change as new drivers are added and updated. 
@@ -6647,6 +6971,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
 EXPORT_SYMBOL_GPL(sata_scr_valid);
 EXPORT_SYMBOL_GPL(sata_scr_read);
 EXPORT_SYMBOL_GPL(sata_scr_write);
@@ -6679,6 +7004,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_resume);
 #endif /* CONFIG_PM */
 #endif /* CONFIG_PCI */
 
+EXPORT_SYMBOL_GPL(ata_platform_remove_one);
+
 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
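
A note on the reworked tag allocator visible in this diff: ata_qc_new() now searches round-robin, starting just after the last tag it handed out, wrapping at the host's advertised queue depth (host->n_tags, clamped to ATA_MAX_QUEUE - 1) and skipping the tag reserved for internal commands. The following is a minimal single-threaded userspace sketch of that loop, not libata code: port_model, qc_new_tag and the allocated bitmask are hypothetical stand-ins for struct ata_port, ata_qc_new() and ap->qc_allocated (the kernel uses test_and_set_bit() and hands back a struct ata_queued_cmd rather than a bare tag).

#include <stdio.h>

#define ATA_MAX_QUEUE		32
#define ATA_TAG_INTERNAL	(ATA_MAX_QUEUE - 1)

struct port_model {
	unsigned long allocated;	/* stand-in for ap->qc_allocated: bit N set => tag N busy */
	unsigned int last_tag;		/* last tag handed out */
	unsigned int max_queue;		/* stand-in for ap->host->n_tags */
};

/* Round-robin tag search modelled on the reworked ata_qc_new(). */
static int qc_new_tag(struct port_model *ap)
{
	unsigned int i, tag;

	for (i = 0, tag = ap->last_tag + 1; i < ap->max_queue; i++, tag++) {
		tag = tag < ap->max_queue ? tag : 0;	/* wrap around */

		/* the last tag is reserved for the internal command */
		if (tag == ATA_TAG_INTERNAL)
			continue;

		/* plain bitmask test-and-set; no locking in this model */
		if (!(ap->allocated & (1UL << tag))) {
			ap->allocated |= 1UL << tag;
			ap->last_tag = tag;
			return (int)tag;
		}
	}
	return -1;	/* every usable tag is busy */
}

int main(void)
{
	struct port_model ap = { .allocated = 0, .last_tag = 0, .max_queue = 31 };

	for (int n = 0; n < 5; n++)
		printf("allocated tag %d\n", qc_new_tag(&ap));
	return 0;
}

Starting the scan at last_tag + 1 spreads commands across the whole tag space instead of always reusing the lowest free tag, and bounding the scan by max_queue keeps allocations within the queue depth the controller actually advertises.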