diff options
Diffstat (limited to 'drivers/misc')
123 files changed, 19835 insertions, 1505 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 8dacd4c9ee8..ee9402324a2 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -54,6 +54,7 @@ config AD525X_DPOT_SPI  config ATMEL_PWM  	tristate "Atmel AT32/AT91 PWM support"  	depends on HAVE_CLK +	depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45  	help  	  This option enables device driver support for the PWM channels  	  on certain Atmel processors.  Pulse Width Modulation is used for @@ -200,7 +201,7 @@ config ICS932S401  config ATMEL_SSC  	tristate "Device driver for Atmel SSC peripheral" -	depends on HAS_IOMEM +	depends on HAS_IOMEM && (AVR32 || ARCH_AT91 || COMPILE_TEST)  	---help---  	  This option enables device driver support for Atmel Synchronized  	  Serial Communication peripheral (SSC). @@ -235,7 +236,7 @@ config SGI_XP  config CS5535_MFGPT  	tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" -	depends on PCI && X86 && MFD_CS5535 +	depends on MFD_CS5535  	default n  	help  	  This driver provides access to MFGPT functionality for other @@ -300,8 +301,8 @@ config SGI_GRU_DEBUG  	depends on SGI_GRU  	default n  	---help--- -	This option enables addition debugging code for the SGI GRU driver. If -	you are unsure, say N. +	This option enables additional debugging code for the SGI GRU driver. +	If you are unsure, say N.  config APDS9802ALS  	tristate "Medfield Avago APDS9802 ALS Sensor module" @@ -381,19 +382,6 @@ config HMC6352  	  This driver provides support for the Honeywell HMC6352 compass,  	  providing configuration and heading data via sysfs. -config EP93XX_PWM -	tristate "EP93xx PWM support" -	depends on ARCH_EP93XX -	help -	  This option enables device driver support for the PWM channels -	  on the Cirrus EP93xx processors.  The EP9307 chip only has one -	  PWM channel all the others have two, the second channel is an -	  alternate function of the EGPIO14 pin.  
A sysfs interface is -	  provided to control the PWM channels. - -	  To compile this driver as a module, choose M here: the module will -	  be called ep93xx_pwm. -  config DS1682  	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"  	depends on I2C @@ -481,7 +469,7 @@ config BMP085_SPI  config PCH_PHUB  	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"  	select GENERIC_NET_UTILS -	depends on PCI +	depends on PCI && (X86_32 || COMPILE_TEST)  	help  	  This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of  	  Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded @@ -528,6 +516,15 @@ config SRAM  	  the genalloc API. It is supposed to be used for small on-chip SRAM  	  areas found on many SoCs. +config VEXPRESS_SYSCFG +	bool "Versatile Express System Configuration driver" +	depends on VEXPRESS_CONFIG +	default y +	help +	  ARM Ltd. Versatile Express uses specialised platform configuration +	  bus. System Configuration interface is one of the possible means +	  of generating transactions on this bus. 
+  source "drivers/misc/c2port/Kconfig"  source "drivers/misc/eeprom/Kconfig"  source "drivers/misc/cb710/Kconfig" @@ -537,4 +534,7 @@ source "drivers/misc/carma/Kconfig"  source "drivers/misc/altera-stapl/Kconfig"  source "drivers/misc/mei/Kconfig"  source "drivers/misc/vmw_vmci/Kconfig" +source "drivers/misc/mic/Kconfig" +source "drivers/misc/genwqe/Kconfig" +source "drivers/misc/echo/Kconfig"  endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index c235d5b6831..d59ce1261b3 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_APDS9802ALS)	+= apds9802als.o  obj-$(CONFIG_ISL29003)		+= isl29003.o  obj-$(CONFIG_ISL29020)		+= isl29020.o  obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o -obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o  obj-$(CONFIG_DS1682)		+= ds1682.o  obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o  obj-$(CONFIG_C2PORT)		+= c2port/ @@ -53,3 +52,7 @@ obj-$(CONFIG_INTEL_MEI)		+= mei/  obj-$(CONFIG_VMWARE_VMCI)	+= vmw_vmci/  obj-$(CONFIG_LATTICE_ECP3_CONFIG)	+= lattice-ecp3-config.o  obj-$(CONFIG_SRAM)		+= sram.o +obj-y				+= mic/ +obj-$(CONFIG_GENWQE)		+= genwqe/ +obj-$(CONFIG_ECHO)		+= echo/ +obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index 0daadcf1ed7..a43053daad0 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c @@ -72,7 +72,6 @@  #include <linux/module.h>  #include <linux/device.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/delay.h>  #include <linux/slab.h> @@ -641,7 +640,7 @@ static const struct attribute_group ad525x_group_commands = {  	.attrs = ad525x_attributes_commands,  }; -int ad_dpot_add_files(struct device *dev, +static int ad_dpot_add_files(struct device *dev,  		unsigned features, unsigned rdac)  {  	int err = sysfs_create_file(&dev->kobj, @@ -666,7 +665,7 @@ int ad_dpot_add_files(struct device *dev,  	return err;  } -inline void ad_dpot_remove_files(struct device *dev, 
+static inline void ad_dpot_remove_files(struct device *dev,  		unsigned features, unsigned rdac)  {  	sysfs_remove_file(&dev->kobj, diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c index 0c6e037153d..c6cc3dc8ae1 100644 --- a/drivers/misc/apds9802als.c +++ b/drivers/misc/apds9802als.c @@ -22,7 +22,6 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/i2c.h>  #include <linux/err.h> diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c index 1256a4bf1c0..c72e96b523e 100644 --- a/drivers/misc/arm-charlcd.c +++ b/drivers/misc/arm-charlcd.c @@ -11,6 +11,7 @@  #include <linux/module.h>  #include <linux/interrupt.h>  #include <linux/platform_device.h> +#include <linux/of.h>  #include <linux/completion.h>  #include <linux/delay.h>  #include <linux/io.h> @@ -297,7 +298,7 @@ static int __init charlcd_probe(struct platform_device *pdev)  	lcd->irq = platform_get_irq(pdev, 0);  	/* If no IRQ is supplied, we'll survive without it */  	if (lcd->irq >= 0) { -		if (request_irq(lcd->irq, charlcd_interrupt, IRQF_DISABLED, +		if (request_irq(lcd->irq, charlcd_interrupt, 0,  				DRIVERNAME, lcd)) {  			ret = -EIO;  			goto out_no_irq; @@ -366,11 +367,17 @@ static const struct dev_pm_ops charlcd_pm_ops = {  	.resume = charlcd_resume,  }; +static const struct of_device_id charlcd_match[] = { +	{ .compatible = "arm,versatile-lcd", }, +	{} +}; +  static struct platform_driver charlcd_driver = {  	.driver = {  		.name = DRIVERNAME,  		.owner = THIS_MODULE,  		.pm = &charlcd_pm_ops, +		.of_match_table = of_match_ptr(charlcd_match),  	},  	.remove = __exit_p(charlcd_remove),  }; diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index 5be808406ed..22de1372764 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -150,6 +150,12 @@ static int ssc_probe(struct platform_device *pdev)  		return -ENODEV;  	ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; +	if 
(pdev->dev.of_node) { +		struct device_node *np = pdev->dev.of_node; +		ssc->clk_from_rk_pin = +			of_property_read_bool(np, "atmel,clk-from-rk-pin"); +	} +  	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	ssc->regs = devm_ioremap_resource(&pdev->dev, regs);  	if (IS_ERR(ssc->regs)) diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c index 494d0500bda..a6dc56e1bc5 100644 --- a/drivers/misc/atmel_pwm.c +++ b/drivers/misc/atmel_pwm.c @@ -90,8 +90,10 @@ int pwm_channel_alloc(int index, struct pwm_channel *ch)  	unsigned long	flags;  	int		status = 0; -	/* insist on PWM init, with this signal pinned out */ -	if (!pwm || !(pwm->mask & 1 << index)) +	if (!pwm) +		return -EPROBE_DEFER; + +	if (!(pwm->mask & 1 << index))  		return -ENODEV;  	if (index < 0 || index >= PWM_NCHAN || !ch) diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c index 057580e026c..48ea33d15a7 100644 --- a/drivers/misc/bh1780gli.c +++ b/drivers/misc/bh1780gli.c @@ -23,6 +23,7 @@  #include <linux/platform_device.h>  #include <linux/delay.h>  #include <linux/module.h> +#include <linux/of.h>  #define BH1780_REG_CONTROL	0x80  #define BH1780_REG_PARTID	0x8A @@ -244,6 +245,15 @@ static const struct i2c_device_id bh1780_id[] = {  	{ },  }; +#ifdef CONFIG_OF +static const struct of_device_id of_bh1780_match[] = { +	{ .compatible = "rohm,bh1780gli", }, +	{}, +}; + +MODULE_DEVICE_TABLE(of, of_bh1780_match); +#endif +  static struct i2c_driver bh1780_driver = {  	.probe		= bh1780_probe,  	.remove		= bh1780_remove, @@ -251,6 +261,7 @@ static struct i2c_driver bh1780_driver = {  	.driver = {  		.name = "bh1780",  		.pm	= &bh1780_pm, +		.of_match_table = of_match_ptr(of_bh1780_match),  	},  }; diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c index 3abfcecf842..a7c16295b81 100644 --- a/drivers/misc/bmp085-i2c.c +++ b/drivers/misc/bmp085-i2c.c @@ -49,7 +49,7 @@ static int bmp085_i2c_probe(struct i2c_client *client,  		return err;  	} -	return 
bmp085_probe(&client->dev, regmap); +	return bmp085_probe(&client->dev, regmap, client->irq);  }  static int bmp085_i2c_remove(struct i2c_client *client) diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c index d6a52659cf2..864ecac3237 100644 --- a/drivers/misc/bmp085-spi.c +++ b/drivers/misc/bmp085-spi.c @@ -41,7 +41,7 @@ static int bmp085_spi_probe(struct spi_device *client)  		return err;  	} -	return bmp085_probe(&client->dev, regmap); +	return bmp085_probe(&client->dev, regmap, client->irq);  }  static int bmp085_spi_remove(struct spi_device *client) diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c index 849e2fed4da..9b313f7810f 100644 --- a/drivers/misc/bmp085.c +++ b/drivers/misc/bmp085.c @@ -47,11 +47,12 @@  #include <linux/module.h>  #include <linux/device.h> -#include <linux/init.h>  #include <linux/slab.h> -#include <linux/delay.h>  #include <linux/of.h>  #include "bmp085.h" +#include <linux/interrupt.h> +#include <linux/completion.h> +#include <linux/gpio.h>  #define BMP085_CHIP_ID			0x55  #define BMP085_CALIBRATION_DATA_START	0xAA @@ -84,8 +85,19 @@ struct bmp085_data {  	unsigned long last_temp_measurement;  	u8	chip_id;  	s32	b6; /* calculated temperature correction coefficient */ +	int	irq; +	struct	completion done;  }; +static irqreturn_t bmp085_eoc_isr(int irq, void *devid) +{ +	struct bmp085_data *data = devid; + +	complete(&data->done); + +	return IRQ_HANDLED; +} +  static s32 bmp085_read_calibration_data(struct bmp085_data *data)  {  	u16 tmp[BMP085_CALIBRATION_DATA_LENGTH]; @@ -116,6 +128,9 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)  	s32 status;  	mutex_lock(&data->lock); + +	init_completion(&data->done); +  	status = regmap_write(data->regmap, BMP085_CTRL_REG,  			      BMP085_TEMP_MEASUREMENT);  	if (status < 0) { @@ -123,7 +138,8 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)  			"Error while requesting temperature measurement.\n");  		goto exit;  	} -	
msleep(BMP085_TEMP_CONVERSION_TIME); +	wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( +					    BMP085_TEMP_CONVERSION_TIME));  	status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,  				 &tmp, sizeof(tmp)); @@ -147,6 +163,9 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)  	s32 status;  	mutex_lock(&data->lock); + +	init_completion(&data->done); +  	status = regmap_write(data->regmap, BMP085_CTRL_REG,  			BMP085_PRESSURE_MEASUREMENT +  			(data->oversampling_setting << 6)); @@ -157,8 +176,8 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)  	}  	/* wait for the end of conversion */ -	msleep(2+(3 << data->oversampling_setting)); - +	wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( +					2+(3 << data->oversampling_setting)));  	/* copy data into a u32 (4 bytes), but skip the first byte. */  	status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,  				 ((u8 *)&tmp)+1, 3); @@ -374,7 +393,7 @@ int bmp085_detect(struct device *dev)  }  EXPORT_SYMBOL_GPL(bmp085_detect); -static void __init bmp085_get_of_properties(struct bmp085_data *data) +static void bmp085_get_of_properties(struct bmp085_data *data)  {  #ifdef CONFIG_OF  	struct device_node *np = data->dev->of_node; @@ -420,7 +439,7 @@ struct regmap_config bmp085_regmap_config = {  };  EXPORT_SYMBOL_GPL(bmp085_regmap_config); -int bmp085_probe(struct device *dev, struct regmap *regmap) +int bmp085_probe(struct device *dev, struct regmap *regmap, int irq)  {  	struct bmp085_data *data;  	int err = 0; @@ -434,6 +453,15 @@ int bmp085_probe(struct device *dev, struct regmap *regmap)  	dev_set_drvdata(dev, data);  	data->dev = dev;  	data->regmap = regmap; +	data->irq = irq; + +	if (data->irq > 0) { +		err = devm_request_irq(dev, data->irq, bmp085_eoc_isr, +					      IRQF_TRIGGER_RISING, "bmp085", +					      data); +		if (err < 0) +			goto exit_free; +	}  	/* Initialize the BMP085 chip */  	err = bmp085_init_client(data); 
diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h index 2b8f615bca9..8b8e3b1f5ca 100644 --- a/drivers/misc/bmp085.h +++ b/drivers/misc/bmp085.h @@ -26,7 +26,7 @@  extern struct regmap_config bmp085_regmap_config; -int bmp085_probe(struct device *dev, struct regmap *regmap); +int bmp085_probe(struct device *dev, struct regmap *regmap, int irq);  int bmp085_remove(struct device *dev);  int bmp085_detect(struct device *dev); diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c index c6bd7e84de2..7be89832db1 100644 --- a/drivers/misc/carma/carma-fpga-program.c +++ b/drivers/misc/carma/carma-fpga-program.c @@ -10,6 +10,8 @@   */  #include <linux/dma-mapping.h> +#include <linux/of_address.h> +#include <linux/of_irq.h>  #include <linux/of_platform.h>  #include <linux/completion.h>  #include <linux/miscdevice.h> diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 7b56563f8b7..14d90eae605 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c @@ -88,6 +88,8 @@   * interrupt source to the GPIO pin. Tada, we hid the interrupt. 
:)   */ +#include <linux/of_address.h> +#include <linux/of_irq.h>  #include <linux/of_platform.h>  #include <linux/dma-mapping.h>  #include <linux/miscdevice.h> @@ -99,7 +101,6 @@  #include <linux/kernel.h>  #include <linux/module.h>  #include <linux/poll.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/kref.h>  #include <linux/io.h> @@ -631,8 +632,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)  	struct dma_async_tx_descriptor *tx;  	dma_cookie_t cookie;  	dma_addr_t dst, src; -	unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | -				  DMA_COMPL_SKIP_SRC_UNMAP; +	unsigned long dma_flags = 0;  	dst_sg = buf->vb.sglist;  	dst_nents = buf->vb.sglen; diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index 2e50f811ff5..fb397e7d1cc 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c @@ -176,7 +176,7 @@ static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)  {  	struct cb710_chip *chip = pci_get_drvdata(pdev); -	free_irq(pdev->irq, chip); +	devm_free_irq(&pdev->dev, pdev->irq, chip);  	pci_save_state(pdev);  	pci_disable_device(pdev);  	if (state.event & PM_EVENT_SLEEP) diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c index 154b02e5094..b909fb30232 100644 --- a/drivers/misc/ds1682.c +++ b/drivers/misc/ds1682.c @@ -32,7 +32,6 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/i2c.h>  #include <linux/string.h>  #include <linux/list.h> @@ -86,7 +85,6 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,  {  	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);  	struct i2c_client *client = to_i2c_client(dev); -	char *endp;  	u64 val;  	__le32 val_le;  	int rc; @@ -94,8 +92,8 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,  	dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);  	/* Decode input */ -	val = simple_strtoull(buf, &endp, 0); -	if (buf == 
endp) { +	rc = kstrtoull(buf, 0, &val); +	if (rc < 0) {  		dev_dbg(dev, "input string not a number\n");  		return -EINVAL;  	} diff --git a/drivers/misc/echo/Kconfig b/drivers/misc/echo/Kconfig new file mode 100644 index 00000000000..f1d41ea9cd4 --- /dev/null +++ b/drivers/misc/echo/Kconfig @@ -0,0 +1,9 @@ +config ECHO +	tristate "Line Echo Canceller support" +	default n +	---help--- +	  This driver provides line echo cancelling support for mISDN and +	  Zaptel drivers. + +	  To compile this driver as a module, choose M here. The module +	  will be called echo. diff --git a/drivers/misc/echo/Makefile b/drivers/misc/echo/Makefile new file mode 100644 index 00000000000..7d4caac12a8 --- /dev/null +++ b/drivers/misc/echo/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ECHO) += echo.o diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c new file mode 100644 index 00000000000..9597e9523ca --- /dev/null +++ b/drivers/misc/echo/echo.c @@ -0,0 +1,674 @@ +/* + * SpanDSP - a series of DSP components for telephony + * + * echo.c - A line echo canceller.  This code is being developed + *          against and partially complies with G168. + * + * Written by Steve Underwood <steveu@coppice.org> + *         and David Rowe <david_at_rowetel_dot_com> + * + * Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe + * + * Based on a bit from here, a bit from there, eye of toad, ear of + * bat, 15 years of failed attempts by David and a few fried brain + * cells. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*! \file */ + +/* Implementation Notes +   David Rowe +   April 2007 + +   This code started life as Steve's NLMS algorithm with a tap +   rotation algorithm to handle divergence during double talk.  I +   added a Geigel Double Talk Detector (DTD) [2] and performed some +   G168 tests.  However I had trouble meeting the G168 requirements, +   especially for double talk - there were always cases where my DTD +   failed, for example where near end speech was under the 6dB +   threshold required for declaring double talk. + +   So I tried a two path algorithm [1], which has so far given better +   results.  The original tap rotation/Geigel algorithm is available +   in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit. +   It's probably possible to make it work if some one wants to put some +   serious work into it. + +   At present no special treatment is provided for tones, which +   generally cause NLMS algorithms to diverge.  Initial runs of a +   subset of the G168 tests for tones (e.g ./echo_test 6) show the +   current algorithm is passing OK, which is kind of surprising.  The +   full set of tests needs to be performed to confirm this result. + +   One other interesting change is that I have managed to get the NLMS +   code to work with 16 bit coefficients, rather than the original 32 +   bit coefficents.  This reduces the MIPs and storage required. +   I evaulated the 16 bit port using g168_tests.sh and listening tests +   on 4 real-world samples. + +   I also attempted the implementation of a block based NLMS update +   [2] but although this passes g168_tests.sh it didn't converge well +   on the real-world samples.  I have no idea why, perhaps a scaling +   problem.  
The block based code is also available in SVN +   http://svn.rowetel.com/software/oslec/tags/before_16bit.  If this +   code can be debugged, it will lead to further reduction in MIPS, as +   the block update code maps nicely onto DSP instruction sets (it's a +   dot product) compared to the current sample-by-sample update. + +   Steve also has some nice notes on echo cancellers in echo.h + +   References: + +   [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo +       Path Models", IEEE Transactions on communications, COM-25, +       No. 6, June +       1977. +       http://www.rowetel.com/images/echo/dual_path_paper.pdf + +   [2] The classic, very useful paper that tells you how to +       actually build a real world echo canceller: +	 Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice +	 Echo Canceller with a TMS320020, +	 http://www.rowetel.com/images/echo/spra129.pdf + +   [3] I have written a series of blog posts on this work, here is +       Part 1: http://www.rowetel.com/blog/?p=18 + +   [4] The source code http://svn.rowetel.com/software/oslec/ + +   [5] A nice reference on LMS filters: +	 http://en.wikipedia.org/wiki/Least_mean_squares_filter + +   Credits: + +   Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan +   Muthukrishnan for their suggestions and email discussions.  Thanks +   also to those people who collected echo samples for me such as +   Mark, Pawel, and Pavel. 
+*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "echo.h" + +#define MIN_TX_POWER_FOR_ADAPTION	64 +#define MIN_RX_POWER_FOR_ADAPTION	64 +#define DTD_HANGOVER			600	/* 600 samples, or 75ms     */ +#define DC_LOG2BETA			3	/* log2() of DC filter Beta */ + +/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */ + +#ifdef __bfin__ +static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) +{ +	int i; +	int offset1; +	int offset2; +	int factor; +	int exp; +	int16_t *phist; +	int n; + +	if (shift > 0) +		factor = clean << shift; +	else +		factor = clean >> -shift; + +	/* Update the FIR taps */ + +	offset2 = ec->curr_pos; +	offset1 = ec->taps - offset2; +	phist = &ec->fir_state_bg.history[offset2]; + +	/* st: and en: help us locate the assembler in echo.s */ + +	/* asm("st:"); */ +	n = ec->taps; +	for (i = 0; i < n; i++) { +		exp = *phist++ * factor; +		ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); +	} +	/* asm("en:"); */ + +	/* Note the asm for the inner loop above generated by Blackfin gcc +	   4.1.1 is pretty good (note even parallel instructions used): + +	   R0 = W [P0++] (X); +	   R0 *= R2; +	   R0 = R0 + R3 (NS) || +	   R1 = W [P1] (X) || +	   nop; +	   R0 >>>= 15; +	   R0 = R0 + R1; +	   W [P1++] = R0; + +	   A block based update algorithm would be much faster but the +	   above can't be improved on much.  Every instruction saved in +	   the loop above is 2 MIPs/ch!  The for loop above is where the +	   Blackfin spends most of it's time - about 17 MIPs/ch measured +	   with speedtest.c with 256 taps (32ms).  Write-back and +	   Write-through cache gave about the same performance. +	 */ +} + +/* +   IDEAS for further optimisation of lms_adapt_bg(): + +   1/ The rounding is quite costly.  Could we keep as 32 bit coeffs +   then make filter pluck the MS 16-bits of the coeffs when filtering? 
+   However this would lower potential optimisation of filter, as I +   think the dual-MAC architecture requires packed 16 bit coeffs. + +   2/ Block based update would be more efficient, as per comments above, +   could use dual MAC architecture. + +   3/ Look for same sample Blackfin LMS code, see if we can get dual-MAC +   packing. + +   4/ Execute the whole e/c in a block of say 20ms rather than sample +   by sample.  Processing a few samples every ms is inefficient. +*/ + +#else +static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) +{ +	int i; + +	int offset1; +	int offset2; +	int factor; +	int exp; + +	if (shift > 0) +		factor = clean << shift; +	else +		factor = clean >> -shift; + +	/* Update the FIR taps */ + +	offset2 = ec->curr_pos; +	offset1 = ec->taps - offset2; + +	for (i = ec->taps - 1; i >= offset1; i--) { +		exp = (ec->fir_state_bg.history[i - offset1] * factor); +		ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); +	} +	for (; i >= 0; i--) { +		exp = (ec->fir_state_bg.history[i + offset2] * factor); +		ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); +	} +} +#endif + +static inline int top_bit(unsigned int bits) +{ +	if (bits == 0) +		return -1; +	else +		return (int)fls((int32_t) bits) - 1; +} + +struct oslec_state *oslec_create(int len, int adaption_mode) +{ +	struct oslec_state *ec; +	int i; +	const int16_t *history; + +	ec = kzalloc(sizeof(*ec), GFP_KERNEL); +	if (!ec) +		return NULL; + +	ec->taps = len; +	ec->log2taps = top_bit(len); +	ec->curr_pos = ec->taps - 1; + +	ec->fir_taps16[0] = +	    kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); +	if (!ec->fir_taps16[0]) +		goto error_oom_0; + +	ec->fir_taps16[1] = +	    kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); +	if (!ec->fir_taps16[1]) +		goto error_oom_1; + +	history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps); +	if (!history) +		goto error_state; +	history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps); +	if 
(!history) +		goto error_state_bg; + +	for (i = 0; i < 5; i++) +		ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; + +	ec->cng_level = 1000; +	oslec_adaption_mode(ec, adaption_mode); + +	ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); +	if (!ec->snapshot) +		goto error_snap; + +	ec->cond_met = 0; +	ec->pstates = 0; +	ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; +	ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; +	ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; +	ec->lbgn = ec->lbgn_acc = 0; +	ec->lbgn_upper = 200; +	ec->lbgn_upper_acc = ec->lbgn_upper << 13; + +	return ec; + +error_snap: +	fir16_free(&ec->fir_state_bg); +error_state_bg: +	fir16_free(&ec->fir_state); +error_state: +	kfree(ec->fir_taps16[1]); +error_oom_1: +	kfree(ec->fir_taps16[0]); +error_oom_0: +	kfree(ec); +	return NULL; +} +EXPORT_SYMBOL_GPL(oslec_create); + +void oslec_free(struct oslec_state *ec) +{ +	int i; + +	fir16_free(&ec->fir_state); +	fir16_free(&ec->fir_state_bg); +	for (i = 0; i < 2; i++) +		kfree(ec->fir_taps16[i]); +	kfree(ec->snapshot); +	kfree(ec); +} +EXPORT_SYMBOL_GPL(oslec_free); + +void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode) +{ +	ec->adaption_mode = adaption_mode; +} +EXPORT_SYMBOL_GPL(oslec_adaption_mode); + +void oslec_flush(struct oslec_state *ec) +{ +	int i; + +	ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; +	ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; +	ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; + +	ec->lbgn = ec->lbgn_acc = 0; +	ec->lbgn_upper = 200; +	ec->lbgn_upper_acc = ec->lbgn_upper << 13; + +	ec->nonupdate_dwell = 0; + +	fir16_flush(&ec->fir_state); +	fir16_flush(&ec->fir_state_bg); +	ec->fir_state.curr_pos = ec->taps - 1; +	ec->fir_state_bg.curr_pos = ec->taps - 1; +	for (i = 0; i < 2; i++) +		memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t)); + +	ec->curr_pos = ec->taps - 1; +	ec->pstates = 0; +} +EXPORT_SYMBOL_GPL(oslec_flush); + +void oslec_snapshot(struct 
oslec_state *ec) +{ +	memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t)); +} +EXPORT_SYMBOL_GPL(oslec_snapshot); + +/* Dual Path Echo Canceller */ + +int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) +{ +	int32_t echo_value; +	int clean_bg; +	int tmp; +	int tmp1; + +	/* +	 * Input scaling was found be required to prevent problems when tx +	 * starts clipping.  Another possible way to handle this would be the +	 * filter coefficent scaling. +	 */ + +	ec->tx = tx; +	ec->rx = rx; +	tx >>= 1; +	rx >>= 1; + +	/* +	 * Filter DC, 3dB point is 160Hz (I think), note 32 bit precision +	 * required otherwise values do not track down to 0. Zero at DC, Pole +	 * at (1-Beta) on real axis.  Some chip sets (like Si labs) don't +	 * need this, but something like a $10 X100P card does.  Any DC really +	 * slows down convergence. +	 * +	 * Note: removes some low frequency from the signal, this reduces the +	 * speech quality when listening to samples through headphones but may +	 * not be obvious through a telephone handset. +	 * +	 * Note that the 3dB frequency in radians is approx Beta, e.g. for Beta +	 * = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz. +	 */ + +	if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) { +		tmp = rx << 15; + +		/* +		 * Make sure the gain of the HPF is 1.0. This can still +		 * saturate a little under impulse conditions, and it might +		 * roll to 32768 and need clipping on sustained peak level +		 * signals. However, the scale of such clipping is small, and +		 * the error due to any saturation should not markedly affect +		 * the downstream processing. +		 */ +		tmp -= (tmp >> 4); + +		ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2; + +		/* +		 * hard limit filter to prevent clipping.  
Note that at this +		 * stage rx should be limited to +/- 16383 due to right shift +		 * above +		 */ +		tmp1 = ec->rx_1 >> 15; +		if (tmp1 > 16383) +			tmp1 = 16383; +		if (tmp1 < -16383) +			tmp1 = -16383; +		rx = tmp1; +		ec->rx_2 = tmp; +	} + +	/* Block average of power in the filter states.  Used for +	   adaption power calculation. */ + +	{ +		int new, old; + +		/* efficient "out with the old and in with the new" algorithm so +		   we don't have to recalculate over the whole block of +		   samples. */ +		new = (int)tx * (int)tx; +		old = (int)ec->fir_state.history[ec->fir_state.curr_pos] * +		    (int)ec->fir_state.history[ec->fir_state.curr_pos]; +		ec->pstates += +		    ((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps; +		if (ec->pstates < 0) +			ec->pstates = 0; +	} + +	/* Calculate short term average levels using simple single pole IIRs */ + +	ec->ltxacc += abs(tx) - ec->ltx; +	ec->ltx = (ec->ltxacc + (1 << 4)) >> 5; +	ec->lrxacc += abs(rx) - ec->lrx; +	ec->lrx = (ec->lrxacc + (1 << 4)) >> 5; + +	/* Foreground filter */ + +	ec->fir_state.coeffs = ec->fir_taps16[0]; +	echo_value = fir16(&ec->fir_state, tx); +	ec->clean = rx - echo_value; +	ec->lcleanacc += abs(ec->clean) - ec->lclean; +	ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5; + +	/* Background filter */ + +	echo_value = fir16(&ec->fir_state_bg, tx); +	clean_bg = rx - echo_value; +	ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg; +	ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5; + +	/* Background Filter adaption */ + +	/* Almost always adap bg filter, just simple DT and energy +	   detection to minimise adaption in cases of strong double talk. +	   However this is not critical for the dual path algorithm. +	 */ +	ec->factor = 0; +	ec->shift = 0; +	if ((ec->nonupdate_dwell == 0)) { +		int p, logp, shift; + +		/* Determine: + +		   f = Beta * clean_bg_rx/P ------ (1) + +		   where P is the total power in the filter states. 
+ +		   The Boffins have shown that if we obey (1) we converge +		   quickly and avoid instability. + +		   The correct factor f must be in Q30, as this is the fixed +		   point format required by the lms_adapt_bg() function, +		   therefore the scaled version of (1) is: + +		   (2^30) * f  = (2^30) * Beta * clean_bg_rx/P +		   factor      = (2^30) * Beta * clean_bg_rx/P     ----- (2) + +		   We have chosen Beta = 0.25 by experiment, so: + +		   factor      = (2^30) * (2^-2) * clean_bg_rx/P + +		   (30 - 2 - log2(P)) +		   factor      = clean_bg_rx 2                     ----- (3) + +		   To avoid a divide we approximate log2(P) as top_bit(P), +		   which returns the position of the highest non-zero bit in +		   P.  This approximation introduces an error as large as a +		   factor of 2, but the algorithm seems to handle it OK. + +		   Come to think of it a divide may not be a big deal on a +		   modern DSP, so its probably worth checking out the cycles +		   for a divide versus a top_bit() implementation. +		 */ + +		p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates; +		logp = top_bit(p) + ec->log2taps; +		shift = 30 - 2 - logp; +		ec->shift = shift; + +		lms_adapt_bg(ec, clean_bg, shift); +	} + +	/* very simple DTD to make sure we dont try and adapt with strong +	   near end speech */ + +	ec->adapt = 0; +	if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx)) +		ec->nonupdate_dwell = DTD_HANGOVER; +	if (ec->nonupdate_dwell) +		ec->nonupdate_dwell--; + +	/* Transfer logic */ + +	/* These conditions are from the dual path paper [1], I messed with +	   them a bit to improve performance. 
*/ + +	if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) && +	    (ec->nonupdate_dwell == 0) && +	    /* (ec->Lclean_bg < 0.875*ec->Lclean) */ +	    (8 * ec->lclean_bg < 7 * ec->lclean) && +	    /* (ec->Lclean_bg < 0.125*ec->Ltx) */ +	    (8 * ec->lclean_bg < ec->ltx)) { +		if (ec->cond_met == 6) { +			/* +			 * BG filter has had better results for 6 consecutive +			 * samples +			 */ +			ec->adapt = 1; +			memcpy(ec->fir_taps16[0], ec->fir_taps16[1], +			       ec->taps * sizeof(int16_t)); +		} else +			ec->cond_met++; +	} else +		ec->cond_met = 0; + +	/* Non-Linear Processing */ + +	ec->clean_nlp = ec->clean; +	if (ec->adaption_mode & ECHO_CAN_USE_NLP) { +		/* +		 * Non-linear processor - a fancy way to say "zap small +		 * signals, to avoid residual echo due to (uLaw/ALaw) +		 * non-linearity in the channel.". +		 */ + +		if ((16 * ec->lclean < ec->ltx)) { +			/* +			 * Our e/c has improved echo by at least 24 dB (each +			 * factor of 2 is 6dB, so 2*2*2*2=16 is the same as +			 * 6+6+6+6=24dB) +			 */ +			if (ec->adaption_mode & ECHO_CAN_USE_CNG) { +				ec->cng_level = ec->lbgn; + +				/* +				 * Very elementary comfort noise generation. +				 * Just random numbers rolled off very vaguely +				 * Hoth-like.  DR: This noise doesn't sound +				 * quite right to me - I suspect there are some +				 * overflow issues in the filtering as it's too +				 * "crackly". +				 * TODO: debug this, maybe just play noise at +				 * high level or look at spectrum. 
+				 */ + +				ec->cng_rndnum = +				    1664525U * ec->cng_rndnum + 1013904223U; +				ec->cng_filter = +				    ((ec->cng_rndnum & 0xFFFF) - 32768 + +				     5 * ec->cng_filter) >> 3; +				ec->clean_nlp = +				    (ec->cng_filter * ec->cng_level * 8) >> 14; + +			} else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) { +				/* This sounds much better than CNG */ +				if (ec->clean_nlp > ec->lbgn) +					ec->clean_nlp = ec->lbgn; +				if (ec->clean_nlp < -ec->lbgn) +					ec->clean_nlp = -ec->lbgn; +			} else { +				/* +				 * just mute the residual, doesn't sound very +				 * good, used mainly in G168 tests +				 */ +				ec->clean_nlp = 0; +			} +		} else { +			/* +			 * Background noise estimator.  I tried a few +			 * algorithms here without much luck.  This very simple +			 * one seems to work best, we just average the level +			 * using a slow (1 sec time const) filter if the +			 * current level is less than a (experimentally +			 * derived) constant.  This means we dont include high +			 * level signals like near end speech.  When combined +			 * with CNG or especially CLIP seems to work OK. +			 */ +			if (ec->lclean < 40) { +				ec->lbgn_acc += abs(ec->clean) - ec->lbgn; +				ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12; +			} +		} +	} + +	/* Roll around the taps buffer */ +	if (ec->curr_pos <= 0) +		ec->curr_pos = ec->taps; +	ec->curr_pos--; + +	if (ec->adaption_mode & ECHO_CAN_DISABLE) +		ec->clean_nlp = rx; + +	/* Output scaled back up again to match input scaling */ + +	return (int16_t) ec->clean_nlp << 1; +} +EXPORT_SYMBOL_GPL(oslec_update); + +/* This function is separated from the echo canceller is it is usually called +   as part of the tx process.  See rx HP (DC blocking) filter above, it's +   the same design. + +   Some soft phones send speech signals with a lot of low frequency +   energy, e.g. down to 20Hz.  This can make the hybrid non-linear +   which causes the echo canceller to fall over.  
This filter can help
+   by removing any low frequency before it gets to the tx port of the
+   hybrid.
+
+   It can also help by removing any DC in the tx signal.  DC is bad
+   for LMS algorithms.
+
+   This is one of the classic DC removal filters, adjusted to provide
+   sufficient bass rolloff to meet the above requirement to protect hybrids
+   from things that upset them. The difference between successive samples
+   produces a lousy HPF, and then a suitably placed pole flattens things out.
+   The final result is a nicely rolled off bass end. The filtering is
+   implemented with extended fractional precision, which noise shapes things,
+   giving very clean DC removal.
+*/
+
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx)
+{
+	int tmp;
+	int tmp1;
+
+	if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
+		tmp = tx << 15;
+
+		/*
+		 * Make sure the gain of the HPF is 1.0. The first can still
+		 * saturate a little under impulse conditions, and it might
+		 * roll to 32768 and need clipping on sustained peak level
+		 * signals. However, the scale of such clipping is small, and
+		 * the error due to any saturation should not markedly affect
+		 * the downstream processing.
+		 */
+		tmp -= (tmp >> 4);
+
+		ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
+		tmp1 = ec->tx_1 >> 15;
+		if (tmp1 > 32767)
+			tmp1 = 32767;
+		if (tmp1 < -32767)
+			tmp1 = -32767;
+		tx = tmp1;
+		ec->tx_2 = tmp;
+	}
+
+	return tx;
+}
+EXPORT_SYMBOL_GPL(oslec_hpf_tx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Rowe");
+MODULE_DESCRIPTION("Open Source Line Echo Canceller");
+MODULE_VERSION("0.3.0");
diff --git a/drivers/misc/echo/echo.h b/drivers/misc/echo/echo.h
new file mode 100644
index 00000000000..9b08c63e636
--- /dev/null
+++ b/drivers/misc/echo/echo.h
@@ -0,0 +1,187 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * echo.c - A line echo canceller.  This code is being developed
+ *          against and partially complies with G168. 
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *         and David Rowe <david_at_rowetel_dot_com>
+ *
+ * Copyright (C) 2001 Steve Underwood and 2007 David Rowe
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ECHO_H
+#define __ECHO_H
+
+/*
+Line echo cancellation for voice
+
+What does it do?
+
+This module aims to provide G.168-2002 compliant echo cancellation, to remove
+electrical echoes (e.g. from 2-4 wire hybrids) from voice calls.
+
+How does it work?
+
+The heart of the echo canceller is a FIR filter. This is adapted to match the
+echo impulse response of the telephone line. It must be long enough to
+adequately cover the duration of that impulse response. The signal transmitted
+to the telephone line is passed through the FIR filter. Once the FIR is
+properly adapted, the resulting output is an estimate of the echo signal
+received from the line. This is subtracted from the received signal. The result
+is an estimate of the signal which originated at the far end of the line, free
+from echoes of our own transmitted signal.
+
+The least mean squares (LMS) algorithm is attributed to Widrow and Hoff, and
+was introduced in 1960. It is the commonest form of filter adaption used in
+things like modem line equalisers and line echo cancellers. There it works very
+well.  
However, it only works well for signals of constant amplitude. It works +very poorly for things like speech echo cancellation, where the signal level +varies widely.  This is quite easy to fix. If the signal level is normalised - +similar to applying AGC - LMS can work as well for a signal of varying +amplitude as it does for a modem signal. This normalised least mean squares +(NLMS) algorithm is the commonest one used for speech echo cancellation. Many +other algorithms exist - e.g. RLS (essentially the same as Kalman filtering), +FAP, etc. Some perform significantly better than NLMS.  However, factors such +as computational complexity and patents favour the use of NLMS. + +A simple refinement to NLMS can improve its performance with speech. NLMS tends +to adapt best to the strongest parts of a signal. If the signal is white noise, +the NLMS algorithm works very well. However, speech has more low frequency than +high frequency content. Pre-whitening (i.e. filtering the signal to flatten its +spectrum) the echo signal improves the adapt rate for speech, and ensures the +final residual signal is not heavily biased towards high frequencies. A very +low complexity filter is adequate for this, so pre-whitening adds little to the +compute requirements of the echo canceller. + +An FIR filter adapted using pre-whitened NLMS performs well, provided certain +conditions are met: + +    - The transmitted signal has poor self-correlation. +    - There is no signal being generated within the environment being +      cancelled. + +The difficulty is that neither of these can be guaranteed. + +If the adaption is performed while transmitting noise (or something fairly +noise like, such as voice) the adaption works very well. If the adaption is +performed while transmitting something highly correlative (typically narrow +band energy such as signalling tones or DTMF), the adaption can go seriously +wrong. 
The reason is there is only one solution for the adaption on a near
+random signal - the impulse response of the line. For a repetitive signal,
+there are any number of solutions which converge the adaption, and nothing
+guides the adaption to choose the generalised one. Allowing an untrained
+canceller to converge on this kind of narrowband energy is probably a good thing,
+since at least it cancels the tones. Allowing a well converged canceller to
+continue converging on such energy is just a way to ruin its generalised
+adaption. A narrowband detector is needed, so adaption can be suspended at
+appropriate times.
+
+The adaption process is based on trying to eliminate the received signal. When
+there is any signal from within the environment being cancelled it may upset
+the adaption process. Similarly, if the signal we are transmitting is small,
+noise may dominate and disturb the adaption process. If we can ensure that the
+adaption is only performed when we are transmitting a significant signal level,
+and the environment is not, things will be OK. Clearly, it is easy to tell when
+we are sending a significant signal. Telling, if the environment is generating
+a significant signal, and doing it with sufficient speed that the adaption will
+not have diverged too much before we stop it, is a little harder.
+
+The key problem in detecting when the environment is sourcing significant
+energy is that we must do this very quickly. Given a reasonably long sample of
+the received signal, there are a number of strategies which may be used to
+assess whether that signal contains a strong far end component. However, by the
+time that assessment is complete the far end signal will have already caused
+major mis-convergence in the adaption process. An assessment algorithm is
+needed which produces a fairly accurate result from a very short burst of far
+end energy.
+
+How do I use it?
+
+The echo canceller processes both the transmit and receive streams sample by
+sample. 
The processing function is not declared inline. Unfortunately, +cancellation requires many operations per sample, so the call overhead is only +a minor burden. +*/ + +#include "fir.h" +#include "oslec.h" + +/* +    G.168 echo canceller descriptor. This defines the working state for a line +    echo canceller. +*/ +struct oslec_state { +	int16_t tx; +	int16_t rx; +	int16_t clean; +	int16_t clean_nlp; + +	int nonupdate_dwell; +	int curr_pos; +	int taps; +	int log2taps; +	int adaption_mode; + +	int cond_met; +	int32_t pstates; +	int16_t adapt; +	int32_t factor; +	int16_t shift; + +	/* Average levels and averaging filter states */ +	int ltxacc; +	int lrxacc; +	int lcleanacc; +	int lclean_bgacc; +	int ltx; +	int lrx; +	int lclean; +	int lclean_bg; +	int lbgn; +	int lbgn_acc; +	int lbgn_upper; +	int lbgn_upper_acc; + +	/* foreground and background filter states */ +	struct fir16_state_t fir_state; +	struct fir16_state_t fir_state_bg; +	int16_t *fir_taps16[2]; + +	/* DC blocking filter states */ +	int tx_1; +	int tx_2; +	int rx_1; +	int rx_2; + +	/* optional High Pass Filter states */ +	int32_t xvtx[5]; +	int32_t yvtx[5]; +	int32_t xvrx[5]; +	int32_t yvrx[5]; + +	/* Parameters for the optional Hoth noise generator */ +	int cng_level; +	int cng_rndnum; +	int cng_filter; + +	/* snapshot sample of coeffs used for development */ +	int16_t *snapshot; +}; + +#endif /* __ECHO_H */ diff --git a/drivers/misc/echo/fir.h b/drivers/misc/echo/fir.h new file mode 100644 index 00000000000..7b9fabf1fea --- /dev/null +++ b/drivers/misc/echo/fir.h @@ -0,0 +1,216 @@ +/* + * SpanDSP - a series of DSP components for telephony + * + * fir.h - General telephony FIR routines + * + * Written by Steve Underwood <steveu@coppice.org> + * + * Copyright (C) 2002 Steve Underwood + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#if !defined(_FIR_H_) +#define _FIR_H_ + +/* +   Blackfin NOTES & IDEAS: + +   A simple dot product function is used to implement the filter.  This performs +   just one MAC/cycle which is inefficient but was easy to implement as a first +   pass.  The current Blackfin code also uses an unrolled form of the filter +   history to avoid 0 length hardware loop issues.  This is wasteful of +   memory. + +   Ideas for improvement: + +   1/ Rewrite filter for dual MAC inner loop.  The issue here is handling +   history sample offsets that are 16 bit aligned - the dual MAC needs +   32 bit aligmnent.  There are some good examples in libbfdsp. + +   2/ Use the hardware circular buffer facility tohalve memory usage. + +   3/ Consider using internal memory. + +   Using less memory might also improve speed as cache misses will be +   reduced. A drop in MIPs and memory approaching 50% should be +   possible. + +   The foreground and background filters currenlty use a total of +   about 10 MIPs/ch as measured with speedtest.c on a 256 TAP echo +   can. +*/ + +/* + * 16 bit integer FIR descriptor. This defines the working state for a single + * instance of an FIR filter using 16 bit integer coefficients. + */ +struct fir16_state_t { +	int taps; +	int curr_pos; +	const int16_t *coeffs; +	int16_t *history; +}; + +/* + * 32 bit integer FIR descriptor. This defines the working state for a single + * instance of an FIR filter using 32 bit integer coefficients, and filtering + * 16 bit integer data. 
+ */ +struct fir32_state_t { +	int taps; +	int curr_pos; +	const int32_t *coeffs; +	int16_t *history; +}; + +/* + * Floating point FIR descriptor. This defines the working state for a single + * instance of an FIR filter using floating point coefficients and data. + */ +struct fir_float_state_t { +	int taps; +	int curr_pos; +	const float *coeffs; +	float *history; +}; + +static inline const int16_t *fir16_create(struct fir16_state_t *fir, +					      const int16_t *coeffs, int taps) +{ +	fir->taps = taps; +	fir->curr_pos = taps - 1; +	fir->coeffs = coeffs; +#if defined(__bfin__) +	fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL); +#else +	fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); +#endif +	return fir->history; +} + +static inline void fir16_flush(struct fir16_state_t *fir) +{ +#if defined(__bfin__) +	memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t)); +#else +	memset(fir->history, 0, fir->taps * sizeof(int16_t)); +#endif +} + +static inline void fir16_free(struct fir16_state_t *fir) +{ +	kfree(fir->history); +} + +#ifdef __bfin__ +static inline int32_t dot_asm(short *x, short *y, int len) +{ +	int dot; + +	len--; + +	__asm__("I0 = %1;\n\t" +		"I1 = %2;\n\t" +		"A0 = 0;\n\t" +		"R0.L = W[I0++] || R1.L = W[I1++];\n\t" +		"LOOP dot%= LC0 = %3;\n\t" +		"LOOP_BEGIN dot%=;\n\t" +		"A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t" +		"LOOP_END dot%=;\n\t" +		"A0 += R0.L*R1.L (IS);\n\t" +		"R0 = A0;\n\t" +		"%0 = R0;\n\t" +		: "=&d"(dot) +		: "a"(x), "a"(y), "a"(len) +		: "I0", "I1", "A1", "A0", "R0", "R1" +	); + +	return dot; +} +#endif + +static inline int16_t fir16(struct fir16_state_t *fir, int16_t sample) +{ +	int32_t y; +#if defined(__bfin__) +	fir->history[fir->curr_pos] = sample; +	fir->history[fir->curr_pos + fir->taps] = sample; +	y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos], +		    fir->taps); +#else +	int i; +	int offset1; +	int offset2; + +	fir->history[fir->curr_pos] = sample; + +	
offset2 = fir->curr_pos; +	offset1 = fir->taps - offset2; +	y = 0; +	for (i = fir->taps - 1; i >= offset1; i--) +		y += fir->coeffs[i] * fir->history[i - offset1]; +	for (; i >= 0; i--) +		y += fir->coeffs[i] * fir->history[i + offset2]; +#endif +	if (fir->curr_pos <= 0) +		fir->curr_pos = fir->taps; +	fir->curr_pos--; +	return (int16_t) (y >> 15); +} + +static inline const int16_t *fir32_create(struct fir32_state_t *fir, +					      const int32_t *coeffs, int taps) +{ +	fir->taps = taps; +	fir->curr_pos = taps - 1; +	fir->coeffs = coeffs; +	fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); +	return fir->history; +} + +static inline void fir32_flush(struct fir32_state_t *fir) +{ +	memset(fir->history, 0, fir->taps * sizeof(int16_t)); +} + +static inline void fir32_free(struct fir32_state_t *fir) +{ +	kfree(fir->history); +} + +static inline int16_t fir32(struct fir32_state_t *fir, int16_t sample) +{ +	int i; +	int32_t y; +	int offset1; +	int offset2; + +	fir->history[fir->curr_pos] = sample; +	offset2 = fir->curr_pos; +	offset1 = fir->taps - offset2; +	y = 0; +	for (i = fir->taps - 1; i >= offset1; i--) +		y += fir->coeffs[i] * fir->history[i - offset1]; +	for (; i >= 0; i--) +		y += fir->coeffs[i] * fir->history[i + offset2]; +	if (fir->curr_pos <= 0) +		fir->curr_pos = fir->taps; +	fir->curr_pos--; +	return (int16_t) (y >> 15); +} + +#endif diff --git a/drivers/misc/echo/oslec.h b/drivers/misc/echo/oslec.h new file mode 100644 index 00000000000..f4175360ce2 --- /dev/null +++ b/drivers/misc/echo/oslec.h @@ -0,0 +1,94 @@ +/* + *  OSLEC - A line echo canceller.  This code is being developed + *          against and partially complies with G168. Using code from SpanDSP + * + * Written by Steve Underwood <steveu@coppice.org> + *         and David Rowe <david_at_rowetel_dot_com> + * + * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe + * + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __OSLEC_H +#define __OSLEC_H + +/* Mask bits for the adaption mode */ +#define ECHO_CAN_USE_ADAPTION	0x01 +#define ECHO_CAN_USE_NLP	0x02 +#define ECHO_CAN_USE_CNG	0x04 +#define ECHO_CAN_USE_CLIP	0x08 +#define ECHO_CAN_USE_TX_HPF	0x10 +#define ECHO_CAN_USE_RX_HPF	0x20 +#define ECHO_CAN_DISABLE	0x40 + +/** + * oslec_state: G.168 echo canceller descriptor. + * + * This defines the working state for a line echo canceller. + */ +struct oslec_state; + +/** + * oslec_create - Create a voice echo canceller context. + * @len: The length of the canceller, in samples. + * @return: The new canceller context, or NULL if the canceller could not be + * created. + */ +struct oslec_state *oslec_create(int len, int adaption_mode); + +/** + * oslec_free - Free a voice echo canceller context. + * @ec: The echo canceller context. + */ +void oslec_free(struct oslec_state *ec); + +/** + * oslec_flush - Flush (reinitialise) a voice echo canceller context. + * @ec: The echo canceller context. + */ +void oslec_flush(struct oslec_state *ec); + +/** + * oslec_adaption_mode - set the adaption mode of a voice echo canceller context. + * @ec The echo canceller context. + * @adaption_mode: The mode. 
+ */
+void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
+
+void oslec_snapshot(struct oslec_state *ec);
+
+/**
+ * oslec_update: Process a sample through a voice echo canceller.
+ * @ec: The echo canceller context.
+ * @tx: The transmitted audio sample.
+ * @rx: The received audio sample.
+ *
+ * The return value is the clean (echo cancelled) received sample.
+ */
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
+
+/**
+ * oslec_hpf_tx: Process to high pass filter the tx signal.
+ * @ec: The echo canceller context.
+ * @tx: The transmitted audio sample.
+ *
+ * The return value is the HP filtered transmit sample, send this to your D/A.
+ */
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
+
+#endif /* __OSLEC_H */
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 04f2e1fa9dd..9536852fd4c 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -96,4 +96,17 @@ config EEPROM_DIGSY_MTC_CFG
 	  If unsure, say N.
 
+config EEPROM_SUNXI_SID
+	tristate "Allwinner sunxi security ID support"
+	depends on ARCH_SUNXI && SYSFS
+	help
+	  This is a driver for the 'security ID' available on various Allwinner
+	  devices.
+
+	  Due to the potential risks involved with changing e-fuses,
+	  this driver is read-only.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called sunxi_sid. 
+  endmenu diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile index fc1e81d2926..9507aec95e9 100644 --- a/drivers/misc/eeprom/Makefile +++ b/drivers/misc/eeprom/Makefile @@ -4,4 +4,5 @@ obj-$(CONFIG_EEPROM_LEGACY)	+= eeprom.o  obj-$(CONFIG_EEPROM_MAX6875)	+= max6875.o  obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o  obj-$(CONFIG_EEPROM_93XX46)	+= eeprom_93xx46.o +obj-$(CONFIG_EEPROM_SUNXI_SID)	+= sunxi_sid.o  obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 5d4fd69d04c..d87f77f790d 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -22,7 +22,7 @@  #include <linux/jiffies.h>  #include <linux/of.h>  #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h>  /*   * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable. @@ -428,6 +428,9 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,  {  	struct at24_data *at24; +	if (unlikely(off >= attr->size)) +		return -EFBIG; +  	at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));  	return at24_write(at24, buf, off, count);  } diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 840b3594a5a..634f72929e1 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -10,7 +10,6 @@   */  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/slab.h>  #include <linux/delay.h> @@ -462,10 +461,17 @@ static int at25_remove(struct spi_device *spi)  /*-------------------------------------------------------------------------*/ +static const struct of_device_id at25_of_match[] = { +	{ .compatible = "atmel,at25", }, +	{ } +}; +MODULE_DEVICE_TABLE(of, at25_of_match); +  static struct spi_driver at25_driver = {  	.driver = {  		.name		= "at25",  		.owner		= THIS_MODULE, +		.of_match_table = at25_of_match,  	},  	.probe		= at25_probe,  	.remove		= 
at25_remove, diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index c169e07654c..33f8673d23a 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -3,7 +3,7 @@   *                           Philip Edelbrock <phil@netroedge.com>   * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>   * Copyright (C) 2003 IBM Corp. - * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org> + * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by @@ -17,7 +17,6 @@   */  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/slab.h>  #include <linux/jiffies.h> diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 94cfc121257..9ebeacdb8ec 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -11,7 +11,6 @@  #include <linux/delay.h>  #include <linux/device.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/module.h>  #include <linux/mutex.h>  #include <linux/slab.h> @@ -202,7 +201,7 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,  	edev = dev_get_drvdata(dev);  	if (unlikely(off >= edev->bin.size)) -		return 0; +		return -EFBIG;  	if ((off + count) > edev->bin.size)  		count = edev->bin.size - off;  	if (unlikely(!count)) @@ -378,7 +377,6 @@ static int eeprom_93xx46_remove(struct spi_device *spi)  		device_remove_file(&spi->dev, &dev_attr_erase);  	sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin); -	spi_set_drvdata(spi, NULL);  	kfree(edev);  	return 0;  } diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index e36157d5d3a..580ff9df552 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -27,7 +27,6 @@   */  #include <linux/kernel.h> -#include <linux/init.h>  #include 
<linux/module.h>  #include <linux/slab.h>  #include <linux/i2c.h> diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c new file mode 100644 index 00000000000..3f2b625b203 --- /dev/null +++ b/drivers/misc/eeprom/sunxi_sid.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl> + * http://www.linux-sunxi.org + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * This driver exposes the Allwinner security ID, efuses exported in byte- + * sized chunks. + */ + +#include <linux/compiler.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/stat.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#define DRV_NAME "sunxi-sid" + +struct sunxi_sid_data { +	void __iomem *reg_base; +	unsigned int keysize; +}; + +/* We read the entire key, due to a 32 bit read alignment requirement. Since we + * want to return the requested byte, this results in somewhat slower code and + * uses 4 times more reads as needed but keeps code simpler. Since the SID is + * only very rarely probed, this is not really an issue. 
+ */ +static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data, +			      const unsigned int offset) +{ +	u32 sid_key; + +	if (offset >= sid_data->keysize) +		return 0; + +	sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4)); +	sid_key >>= (offset % 4) * 8; + +	return sid_key; /* Only return the last byte */ +} + +static ssize_t sid_read(struct file *fd, struct kobject *kobj, +			struct bin_attribute *attr, char *buf, +			loff_t pos, size_t size) +{ +	struct platform_device *pdev; +	struct sunxi_sid_data *sid_data; +	int i; + +	pdev = to_platform_device(kobj_to_dev(kobj)); +	sid_data = platform_get_drvdata(pdev); + +	if (pos < 0 || pos >= sid_data->keysize) +		return 0; +	if (size > sid_data->keysize - pos) +		size = sid_data->keysize - pos; + +	for (i = 0; i < size; i++) +		buf[i] = sunxi_sid_read_byte(sid_data, pos + i); + +	return i; +} + +static struct bin_attribute sid_bin_attr = { +	.attr = { .name = "eeprom", .mode = S_IRUGO, }, +	.read = sid_read, +}; + +static int sunxi_sid_remove(struct platform_device *pdev) +{ +	device_remove_bin_file(&pdev->dev, &sid_bin_attr); +	dev_dbg(&pdev->dev, "driver unloaded\n"); + +	return 0; +} + +static const struct of_device_id sunxi_sid_of_match[] = { +	{ .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16}, +	{ .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512}, +	{/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); + +static int sunxi_sid_probe(struct platform_device *pdev) +{ +	struct sunxi_sid_data *sid_data; +	struct resource *res; +	const struct of_device_id *of_dev_id; +	u8 *entropy; +	unsigned int i; + +	sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data), +				GFP_KERNEL); +	if (!sid_data) +		return -ENOMEM; + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(sid_data->reg_base)) +		return PTR_ERR(sid_data->reg_base); + +	of_dev_id = 
of_match_device(sunxi_sid_of_match, &pdev->dev); +	if (!of_dev_id) +		return -ENODEV; +	sid_data->keysize = (int)of_dev_id->data; + +	platform_set_drvdata(pdev, sid_data); + +	sid_bin_attr.size = sid_data->keysize; +	if (device_create_bin_file(&pdev->dev, &sid_bin_attr)) +		return -ENODEV; + +	entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL); +	for (i = 0; i < sid_data->keysize; i++) +		entropy[i] = sunxi_sid_read_byte(sid_data, i); +	add_device_randomness(entropy, sid_data->keysize); +	kfree(entropy); + +	dev_dbg(&pdev->dev, "loaded\n"); + +	return 0; +} + +static struct platform_driver sunxi_sid_driver = { +	.probe = sunxi_sid_probe, +	.remove = sunxi_sid_remove, +	.driver = { +		.name = DRV_NAME, +		.owner = THIS_MODULE, +		.of_match_table = sunxi_sid_of_match, +	}, +}; +module_platform_driver(sunxi_sid_driver); + +MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); +MODULE_DESCRIPTION("Allwinner sunxi security id driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 0e8df41aaf1..2cf2bbc0b92 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)  {  	char name[ENCLOSURE_NAME_SIZE]; +	/* +	 * In odd circumstances, like multipath devices, something else may +	 * already have removed the links, so check for this condition first. 
+	 */ +	if (!cdev->dev->kobj.sd) +		return; +  	enclosure_link_name(cdev, name);  	sysfs_remove_link(&cdev->dev->kobj, name);  	sysfs_remove_link(&cdev->cdev.kobj, "device"); diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c deleted file mode 100644 index cdb67a9c195..00000000000 --- a/drivers/misc/ep93xx_pwm.c +++ /dev/null @@ -1,286 +0,0 @@ -/* - *  Simple PWM driver for EP93XX - * - *	(c) Copyright 2009  Matthieu Crapet <mcrapet@gmail.com> - *	(c) Copyright 2009  H Hartley Sweeten <hsweeten@visionengravers.com> - * - *	This program is free software; you can redistribute it and/or - *	modify it under the terms of the GNU General Public License - *	as published by the Free Software Foundation; either version - *	2 of the License, or (at your option) any later version. - * - *  EP9307 has only one channel: - *    - PWMOUT - * - *  EP9301/02/12/15 have two channels: - *    - PWMOUT - *    - PWMOUT1 (alternate function for EGPIO14) - */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/io.h> - -#include <mach/platform.h> - -#define EP93XX_PWMx_TERM_COUNT	0x00 -#define EP93XX_PWMx_DUTY_CYCLE	0x04 -#define EP93XX_PWMx_ENABLE	0x08 -#define EP93XX_PWMx_INVERT	0x0C - -#define EP93XX_PWM_MAX_COUNT	0xFFFF - -struct ep93xx_pwm { -	void __iomem	*mmio_base; -	struct clk	*clk; -	u32		duty_percent; -}; - -/* - * /sys/devices/platform/ep93xx-pwm.N - *   /min_freq      read-only   minimum pwm output frequency - *   /max_req       read-only   maximum pwm output frequency - *   /freq          read-write  pwm output frequency (0 = disable output) - *   /duty_percent  read-write  pwm duty cycle percent (1..99) - *   /invert        read-write  invert pwm output - */ - -static ssize_t ep93xx_pwm_get_min_freq(struct device *dev, -		struct device_attribute *attr, char *buf) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = 
platform_get_drvdata(pdev); -	unsigned long rate = clk_get_rate(pwm->clk); - -	return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1)); -} - -static ssize_t ep93xx_pwm_get_max_freq(struct device *dev, -		struct device_attribute *attr, char *buf) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); -	unsigned long rate = clk_get_rate(pwm->clk); - -	return sprintf(buf, "%ld\n", rate / 2); -} - -static ssize_t ep93xx_pwm_get_freq(struct device *dev, -		struct device_attribute *attr, char *buf) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - -	if (readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1) { -		unsigned long rate = clk_get_rate(pwm->clk); -		u16 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); - -		return sprintf(buf, "%ld\n", rate / (term + 1)); -	} else { -		return sprintf(buf, "disabled\n"); -	} -} - -static ssize_t ep93xx_pwm_set_freq(struct device *dev, -		struct device_attribute *attr, const char *buf, size_t count) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); -	long val; -	int err; - -	err = kstrtol(buf, 10, &val); -	if (err) -		return -EINVAL; - -	if (val == 0) { -		writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE); -	} else if (val <= (clk_get_rate(pwm->clk) / 2)) { -		u32 term, duty; - -		val = (clk_get_rate(pwm->clk) / val) - 1; -		if (val > EP93XX_PWM_MAX_COUNT) -			val = EP93XX_PWM_MAX_COUNT; -		if (val < 1) -			val = 1; - -		term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); -		duty = ((val + 1) * pwm->duty_percent / 100) - 1; - -		/* If pwm is running, order is important */ -		if (val > term) { -			writel(val, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); -			writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); -		} else { -			writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); -			writel(val, pwm->mmio_base + 
EP93XX_PWMx_TERM_COUNT); -		} - -		if (!readl(pwm->mmio_base + EP93XX_PWMx_ENABLE) & 0x1) -			writel(0x1, pwm->mmio_base + EP93XX_PWMx_ENABLE); -	} else { -		return -EINVAL; -	} - -	return count; -} - -static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev, -		struct device_attribute *attr, char *buf) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - -	return sprintf(buf, "%d\n", pwm->duty_percent); -} - -static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev, -		struct device_attribute *attr, const char *buf, size_t count) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); -	long val; -	int err; - -	err = kstrtol(buf, 10, &val); -	if (err) -		return -EINVAL; - -	if (val > 0 && val < 100) { -		u32 term = readl(pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); -		u32 duty = ((term + 1) * val / 100) - 1; - -		writel(duty, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); -		pwm->duty_percent = val; -		return count; -	} - -	return -EINVAL; -} - -static ssize_t ep93xx_pwm_get_invert(struct device *dev, -		struct device_attribute *attr, char *buf) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); -	int inverted = readl(pwm->mmio_base + EP93XX_PWMx_INVERT) & 0x1; - -	return sprintf(buf, "%d\n", inverted); -} - -static ssize_t ep93xx_pwm_set_invert(struct device *dev, -		struct device_attribute *attr, const char *buf, size_t count) -{ -	struct platform_device *pdev = to_platform_device(dev); -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); -	long val; -	int err; - -	err = kstrtol(buf, 10, &val); -	if (err) -		return -EINVAL; - -	if (val == 0) -		writel(0x0, pwm->mmio_base + EP93XX_PWMx_INVERT); -	else if (val == 1) -		writel(0x1, pwm->mmio_base + EP93XX_PWMx_INVERT); -	else -		return -EINVAL; - -	return count; -} - -static DEVICE_ATTR(min_freq, S_IRUGO, 
ep93xx_pwm_get_min_freq, NULL); -static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); -static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO, -		   ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); -static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO, -		   ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); -static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO, -		   ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); - -static struct attribute *ep93xx_pwm_attrs[] = { -	&dev_attr_min_freq.attr, -	&dev_attr_max_freq.attr, -	&dev_attr_freq.attr, -	&dev_attr_duty_percent.attr, -	&dev_attr_invert.attr, -	NULL -}; - -static const struct attribute_group ep93xx_pwm_sysfs_files = { -	.attrs	= ep93xx_pwm_attrs, -}; - -static int ep93xx_pwm_probe(struct platform_device *pdev) -{ -	struct ep93xx_pwm *pwm; -	struct resource *res; -	int ret; - -	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); -	if (!pwm) -		return -ENOMEM; - -	pwm->clk = devm_clk_get(&pdev->dev, "pwm_clk"); -	if (IS_ERR(pwm->clk)) -		return PTR_ERR(pwm->clk); - -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	pwm->mmio_base = devm_ioremap_resource(&pdev->dev, res); -	if (IS_ERR(pwm->mmio_base)) -		return PTR_ERR(pwm->mmio_base); - -	ret = ep93xx_pwm_acquire_gpio(pdev); -	if (ret) -		return ret; - -	ret = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); -	if (ret) { -		ep93xx_pwm_release_gpio(pdev); -		return ret; -	} - -	pwm->duty_percent = 50; - -	/* disable pwm at startup. Avoids zero value. 
*/ -	writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE); -	writel(EP93XX_PWM_MAX_COUNT, pwm->mmio_base + EP93XX_PWMx_TERM_COUNT); -	writel(EP93XX_PWM_MAX_COUNT/2, pwm->mmio_base + EP93XX_PWMx_DUTY_CYCLE); - -	clk_enable(pwm->clk); - -	platform_set_drvdata(pdev, pwm); -	return 0; -} - -static int ep93xx_pwm_remove(struct platform_device *pdev) -{ -	struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - -	writel(0x0, pwm->mmio_base + EP93XX_PWMx_ENABLE); -	clk_disable(pwm->clk); -	sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); -	ep93xx_pwm_release_gpio(pdev); - -	return 0; -} - -static struct platform_driver ep93xx_pwm_driver = { -	.driver		= { -		.name	= "ep93xx-pwm", -		.owner	= THIS_MODULE, -	}, -	.probe		= ep93xx_pwm_probe, -	.remove		= ep93xx_pwm_remove, -}; -module_platform_driver(ep93xx_pwm_driver); - -MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, " -	      "H Hartley Sweeten <hsweeten@visionengravers.com>"); -MODULE_DESCRIPTION("EP93xx PWM driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:ep93xx-pwm"); diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c index a725c79c35f..71d2793b372 100644 --- a/drivers/misc/fsa9480.c +++ b/drivers/misc/fsa9480.c @@ -396,7 +396,7 @@ static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw)  				IRQF_TRIGGER_FALLING | IRQF_ONESHOT,  				"fsa9480 micro USB", usbsw);  		if (ret) { -			dev_err(&client->dev, "failed to reqeust IRQ\n"); +			dev_err(&client->dev, "failed to request IRQ\n");  			return ret;  		} diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig new file mode 100644 index 00000000000..6069d8cd79d --- /dev/null +++ b/drivers/misc/genwqe/Kconfig @@ -0,0 +1,13 @@ +# +# IBM Accelerator Family 'GenWQE' +# + +menuconfig GENWQE +	tristate "GenWQE PCIe Accelerator" +	depends on PCI && 64BIT +	select CRC_ITU_T +	default n +	help +	  Enables PCIe card driver for IBM GenWQE accelerators. +	  The user-space interface is described in +	  include/linux/genwqe/genwqe_card.h. 
diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile new file mode 100644 index 00000000000..98a2b4f0b18 --- /dev/null +++ b/drivers/misc/genwqe/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for GenWQE driver +# + +obj-$(CONFIG_GENWQE) := genwqe_card.o +genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \ +	card_debugfs.o card_utils.o diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c new file mode 100644 index 00000000000..74d51c9bb85 --- /dev/null +++ b/drivers/misc/genwqe/card_base.c @@ -0,0 +1,1205 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Module initialization and PCIe setup. Card health monitoring and + * recovery functionality. Character device creation and deletion are + * controlled from here. 
+ */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/err.h> +#include <linux/aer.h> +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/device.h> +#include <linux/log2.h> +#include <linux/genwqe/genwqe_card.h> + +#include "card_base.h" +#include "card_ddcb.h" + +MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>"); +MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>"); +MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>"); +MODULE_AUTHOR("Michal Jung <mijung@de.ibm.com>"); + +MODULE_DESCRIPTION("GenWQE Card"); +MODULE_VERSION(DRV_VERS_STRING); +MODULE_LICENSE("GPL"); + +static char genwqe_driver_name[] = GENWQE_DEVNAME; +static struct class *class_genwqe; +static struct dentry *debugfs_genwqe; +static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX]; + +/* PCI structure for identifying device by PCI vendor and device ID */ +static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = { +	{ .vendor      = PCI_VENDOR_ID_IBM, +	  .device      = PCI_DEVICE_GENWQE, +	  .subvendor   = PCI_SUBVENDOR_ID_IBM, +	  .subdevice   = PCI_SUBSYSTEM_ID_GENWQE5, +	  .class       = (PCI_CLASSCODE_GENWQE5 << 8), +	  .class_mask  = ~0, +	  .driver_data = 0 }, + +	/* Initial SR-IOV bring-up image */ +	{ .vendor      = PCI_VENDOR_ID_IBM, +	  .device      = PCI_DEVICE_GENWQE, +	  .subvendor   = PCI_SUBVENDOR_ID_IBM_SRIOV, +	  .subdevice   = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, +	  .class       = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), +	  .class_mask  = ~0, +	  .driver_data = 0 }, + +	{ .vendor      = PCI_VENDOR_ID_IBM,  /* VF Vendor ID */ +	  .device      = 0x0000,  /* VF Device ID */ +	  .subvendor   = PCI_SUBVENDOR_ID_IBM_SRIOV, +	  .subdevice   = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, +	  .class       = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), +	  .class_mask  = ~0, +	  .driver_data = 0 }, + +	/* 
Fixed up image */ +	{ .vendor      = PCI_VENDOR_ID_IBM, +	  .device      = PCI_DEVICE_GENWQE, +	  .subvendor   = PCI_SUBVENDOR_ID_IBM_SRIOV, +	  .subdevice   = PCI_SUBSYSTEM_ID_GENWQE5, +	  .class       = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), +	  .class_mask  = ~0, +	  .driver_data = 0 }, + +	{ .vendor      = PCI_VENDOR_ID_IBM,  /* VF Vendor ID */ +	  .device      = 0x0000,  /* VF Device ID */ +	  .subvendor   = PCI_SUBVENDOR_ID_IBM_SRIOV, +	  .subdevice   = PCI_SUBSYSTEM_ID_GENWQE5, +	  .class       = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), +	  .class_mask  = ~0, +	  .driver_data = 0 }, + +	/* Even one more ... */ +	{ .vendor      = PCI_VENDOR_ID_IBM, +	  .device      = PCI_DEVICE_GENWQE, +	  .subvendor   = PCI_SUBVENDOR_ID_IBM, +	  .subdevice   = PCI_SUBSYSTEM_ID_GENWQE5_NEW, +	  .class       = (PCI_CLASSCODE_GENWQE5 << 8), +	  .class_mask  = ~0, +	  .driver_data = 0 }, + +	{ 0, }			/* 0 terminated list. */ +}; + +MODULE_DEVICE_TABLE(pci, genwqe_device_table); + +/** + * genwqe_dev_alloc() - Create and prepare a new card descriptor + * + * Return: Pointer to card descriptor, or ERR_PTR(err) on error + */ +static struct genwqe_dev *genwqe_dev_alloc(void) +{ +	unsigned int i = 0, j; +	struct genwqe_dev *cd; + +	for (i = 0; i < GENWQE_CARD_NO_MAX; i++) { +		if (genwqe_devices[i] == NULL) +			break; +	} +	if (i >= GENWQE_CARD_NO_MAX) +		return ERR_PTR(-ENODEV); + +	cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL); +	if (!cd) +		return ERR_PTR(-ENOMEM); + +	cd->card_idx = i; +	cd->class_genwqe = class_genwqe; +	cd->debugfs_genwqe = debugfs_genwqe; + +	init_waitqueue_head(&cd->queue_waitq); + +	spin_lock_init(&cd->file_lock); +	INIT_LIST_HEAD(&cd->file_list); + +	cd->card_state = GENWQE_CARD_UNUSED; +	spin_lock_init(&cd->print_lock); + +	cd->ddcb_software_timeout = genwqe_ddcb_software_timeout; +	cd->kill_timeout = genwqe_kill_timeout; + +	for (j = 0; j < GENWQE_MAX_VFS; j++) +		cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec; + +	genwqe_devices[i] = cd; +	return 
cd;
}

/**
 * genwqe_dev_free() - Undo genwqe_dev_alloc()
 *
 * Releases the slot in the global genwqe_devices[] array and frees
 * the descriptor. Tolerates a NULL pointer.
 */
static void genwqe_dev_free(struct genwqe_dev *cd)
{
	if (!cd)
		return;

	genwqe_devices[cd->card_idx] = NULL;
	kfree(cd);
}

/**
 * genwqe_bus_reset() - Card recovery
 *
 * pci_reset_function() will recover the device and ensure that the
 * registers are accessible again when it completes with success. If
 * not, the card will stay dead and registers will be inaccessible
 * still.
 */
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
	int bars, rc = 0;
	struct pci_dev *pci_dev = cd->pci_dev;
	void __iomem *mmio;

	/* error-injection hook used by the debug interface */
	if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
		return -EIO;

	/* unmap BAR0 and drop the regions before resetting the function */
	mmio = cd->mmio;
	cd->mmio = NULL;
	pci_iounmap(pci_dev, mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);

	/*
	 * Firmware/BIOS might change memory mapping during bus reset.
	 * Settings like enable bus-mastering, ... are backed up and
	 * restored by the pci_reset_function().
	 */
	dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
	rc = pci_reset_function(pci_dev);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed reset func (rc %d)\n", __func__, rc);
		return rc;
	}
	dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);

	/*
	 * Here is the right spot to clear the register read
	 * failure. pci_bus_reset() does this job in real systems.
	 */
	cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
			    GENWQE_INJECT_GFIR_FATAL |
			    GENWQE_INJECT_GFIR_INFO);

	/* re-acquire the regions and re-map BAR0 */
	rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, rc);
		return -EIO;
	}

	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

/*
 * Hardware circumvention section.
 * Certain bitstreams in our test-lab
 * had different kinds of problems. Here is where we adjust those
 * bitstreams to function well with this version of our device driver.
 *
 * These circumventions are applied to the physical function only.
 * The magical numbers below are identifying development/manufacturing
 * versions of the bitstream used on the card.
 *
 * Turn off error reporting for old/manufacturing images.
 */

bool genwqe_need_err_masking(struct genwqe_dev *cd)
{
	return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
}

static void genwqe_tweak_hardware(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	/* Mask FIRs for development images */
	if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
	    ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
		dev_warn(&pci_dev->dev,
			 "FIRs masked due to bitstream %016llx.%016llx\n",
			 cd->slu_unitcfg, cd->app_unitcfg);

		__genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
				0xFFFFFFFFFFFFFFFFull);

		__genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
				0x0000000000000000ull);
	}
}

/**
 * genwqe_recovery_on_fatal_gfir_required() - Version dependent actions
 *
 * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
 * be ignored. This is e.g. true for the bitstream we gave to the card
 * manufacturer, but also for some old bitstreams we released to our
 * test-lab.
 */
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
{
	return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
}

int genwqe_flash_readback_fails(struct genwqe_dev *cd)
{
	return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
}

/**
 * genwqe_T_psec() - Calculate PF/VF timeout register content
 *
 * Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
 * driver cannot understand new codes and is therefore always a
 * problem.
 * Better is to measure out the value or put the
 * speed/frequency directly into a register which is always a valid
 * value for old as well as for new software.
 */
/* T = 1/f */
static int genwqe_T_psec(struct genwqe_dev *cd)
{
	u16 speed;	/* 1/f -> 250,  200,  166,  175 */
	static const int T[] = { 4000, 5000, 6000, 5714 };

	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
	if (speed >= ARRAY_SIZE(T))
		return -1;	/* illegal value */

	return T[speed];
}

/**
 * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
 *
 * Do this _after_ card_reset() is called. Otherwise the values will
 * vanish. The settings need to be done when the queues are inactive.
 *
 * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
 * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
 *
 * NOTE(review): genwqe_T_psec() can return -1 for an illegal speed
 * code, which here is assigned to an unsigned "T" and then used as a
 * divisor — TODO confirm illegal speed codes cannot occur at this
 * point.
 */
static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
{
	u32 T = genwqe_T_psec(cd);
	u64 x;

	if (genwqe_pf_jobtimeout_msec == 0)
		return false;

	/* PF: large value needed, flash update 2sec per block */
	x = ilog2(genwqe_pf_jobtimeout_msec *
		  16000000000uL/(T * 15)) - 10;

	genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
			  0xff00 | (x & 0xff), 0);
	return true;
}

/**
 * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
 *
 * Programs one timeout register per VF; VFs with a zero per-VF
 * timeout are left untouched.
 */
static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;
	unsigned int vf;
	u32 T = genwqe_T_psec(cd);
	u64 x;

	for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) {

		if (cd->vf_jobtimeout_msec[vf] == 0)
			continue;

		x = ilog2(cd->vf_jobtimeout_msec[vf] *
			  16000000000uL/(T * 15)) - 10;

		genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
				  0xff00 | (x & 0xff), vf + 1);
	}
	return true;
}

/* Allocate the first-failure-data-capture buffers for each debug unit. */
static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
{
	unsigned int type, e = 0;

	for (type = 0; type < GENWQE_DBG_UNITS; type++) {
		switch (type) {
		case GENWQE_DBG_UNIT0:
			e = genwqe_ffdc_buff_size(cd, 0);
			break;
		case GENWQE_DBG_UNIT1:
			e = genwqe_ffdc_buff_size(cd, 1);
			break;
		case GENWQE_DBG_UNIT2:
			e = genwqe_ffdc_buff_size(cd, 2);
			break;
		case GENWQE_DBG_REGS:
			e = GENWQE_FFDC_REGS;
			break;
		}

		/* currently support only the debug units mentioned here */
		cd->ffdc[type].entries = e;
		/* NOTE(review): kmalloc_array() would guard e * sizeof()
		   against overflow — TODO consider */
		cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg),
					      GFP_KERNEL);
		/*
		 * regs == NULL is ok, the using code treats this as no regs,
		 * Printing warning is ok in this case.
		 */
	}
	return 0;
}

/* Free what genwqe_ffdc_buffs_alloc() allocated; idempotent. */
static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd)
{
	unsigned int type;

	for (type = 0; type < GENWQE_DBG_UNITS; type++) {
		kfree(cd->ffdc[type].regs);
		cd->ffdc[type].regs = NULL;
	}
}

/**
 * genwqe_read_ids() - Read SLU/APP unit configuration registers
 *
 * Caches slu_unitcfg/app_unitcfg in the card descriptor, validates
 * the SLU architecture level and determines whether this instance has
 * privileged (PF) register access.
 *
 * Return: 0 on success, -EIO/-ENOENT on bad or incompatible IDs
 */
static int genwqe_read_ids(struct genwqe_dev *cd)
{
	int err = 0;
	int slu_id;
	struct pci_dev *pci_dev = cd->pci_dev;

	cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
	if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) {
		dev_err(&pci_dev->dev,
			"err: SLUID=%016llx\n", cd->slu_unitcfg);
		err = -EIO;
		goto out_err;
	}

	slu_id = genwqe_get_slu_id(cd);
	if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) {
		dev_err(&pci_dev->dev,
			"err: incompatible SLU Architecture %u\n", slu_id);
		err = -ENOENT;
		goto out_err;
	}

	cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
	if (cd->app_unitcfg == IO_ILLEGAL_VALUE) {
		dev_err(&pci_dev->dev,
			"err: APPID=%016llx\n", cd->app_unitcfg);
		err = -EIO;
		goto out_err;
	}
	genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name));

	/*
	 * Is access to all registers possible? If we are a VF the
	 * answer is obvious. If we run fully virtualized, we need to
	 * check if we can access all registers. If we do not have
	 * full access we will cause an UR and some informational FIRs
	 * in the PF, but that should not harm.
	 */
	if (pci_dev->is_virtfn)
		cd->is_privileged = 0;
	else
		cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
				     != IO_ILLEGAL_VALUE);

 out_err:
	return err;
}

/**
 * genwqe_start() - Bring the card into an operable state
 *
 * Reads the unit IDs, captures FFDC data on the PF, recovers a card
 * found in fatal state, sets up the service layer and finally creates
 * the character device.
 *
 * Return: 0 on success, negative errno otherwise.
 *
 * NOTE(review): the common error exit returns -EIO and discards the
 * more specific code in "err" — TODO confirm callers do not need it.
 */
static int genwqe_start(struct genwqe_dev *cd)
{
	int err;
	struct pci_dev *pci_dev = cd->pci_dev;

	err = genwqe_read_ids(cd);
	if (err)
		return err;

	if (genwqe_is_privileged(cd)) {
		/* do this after the tweaks. alloc fail is acceptable */
		genwqe_ffdc_buffs_alloc(cd);
		genwqe_stop_traps(cd);

		/* Collect registers e.g. FIRs, UNITIDs, traces ... */
		genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
				      cd->ffdc[GENWQE_DBG_REGS].entries, 0);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
				      cd->ffdc[GENWQE_DBG_UNIT0].regs,
				      cd->ffdc[GENWQE_DBG_UNIT0].entries);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
				      cd->ffdc[GENWQE_DBG_UNIT1].regs,
				      cd->ffdc[GENWQE_DBG_UNIT1].entries);

		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
				      cd->ffdc[GENWQE_DBG_UNIT2].regs,
				      cd->ffdc[GENWQE_DBG_UNIT2].entries);

		genwqe_start_traps(cd);

		if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
			dev_warn(&pci_dev->dev,
				 "[%s] chip reload/recovery!\n", __func__);

			/*
			 * Stealth Mode: Reload chip on either hot
			 * reset or PERST.
			 */
			cd->softreset = 0x7Cull;
			__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
				       cd->softreset);

			err = genwqe_bus_reset(cd);
			if (err != 0) {
				dev_err(&pci_dev->dev,
					"[%s] err: bus reset failed!\n",
					__func__);
				goto out;
			}

			/*
			 * Re-read the IDs because
			 * it could happen that the bitstream load
			 * failed!
			 */
			err = genwqe_read_ids(cd);
			if (err)
				goto out;
		}
	}

	err = genwqe_setup_service_layer(cd);  /* does a reset to the card */
	if (err != 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: could not setup servicelayer!\n", __func__);
		err = -ENODEV;
		goto out;
	}

	if (genwqe_is_privileged(cd)) {	 /* code is running _after_ reset */
		genwqe_tweak_hardware(cd);

		genwqe_setup_pf_jtimer(cd);
		genwqe_setup_vf_jtimer(cd);
	}

	err = genwqe_device_create(cd);
	if (err < 0) {
		dev_err(&pci_dev->dev,
			"err: chdev init failed! (err=%d)\n", err);
		goto out_release_service_layer;
	}
	return 0;

 out_release_service_layer:
	genwqe_release_service_layer(cd);
 out:
	if (genwqe_is_privileged(cd))
		genwqe_ffdc_buffs_free(cd);
	return -EIO;
}

/**
 * genwqe_stop() - Stop card operation
 *
 * Recovery notes:
 *   As long as genwqe_thread runs we might access registers during
 *   error data capture. Same is with the genwqe_health_thread.
 *   When genwqe_bus_reset() fails this function might called two times:
 *   first by the genwqe_health_thread() and later by genwqe_remove() to
 *   unbind the device. We must be able to survive that.
 *
 * This function must be robust enough to be called twice.
 */
static int genwqe_stop(struct genwqe_dev *cd)
{
	genwqe_finish_queue(cd);	    /* no register access */
	genwqe_device_remove(cd);	    /* device removed, procs killed */
	genwqe_release_service_layer(cd);   /* here genwqe_thread is stopped */

	if (genwqe_is_privileged(cd)) {
		pci_disable_sriov(cd->pci_dev);	/* access pci config space */
		genwqe_ffdc_buffs_free(cd);
	}

	return 0;
}

/**
 * genwqe_recover_card() - Try to recover the card if it is possible
 *
 * If fatal_err is set no register access is possible anymore. It is
 * likely that genwqe_start fails in that situation. Proper error
 * handling is required in this case.
 *
 * genwqe_bus_reset() will cause the pci code to call genwqe_remove()
 * and later genwqe_probe() for all virtual functions.
 */
static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	genwqe_stop(cd);

	/*
	 * Make sure chip is not reloaded to maintain FFDC. Write SLU
	 * Reset Register, CPLDReset field to 0.
	 */
	if (!fatal_err) {
		cd->softreset = 0x70ull;
		__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
	}

	rc = genwqe_bus_reset(cd);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: card recovery impossible!\n", __func__);
		return rc;
	}

	rc = genwqe_start(cd);
	if (rc < 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed to launch device!\n", __func__);
		return rc;
	}
	return 0;
}

/*
 * Wait condition for the health thread: a fatal GFIR bit is set and
 * this bitstream version requires recovery on fatal GFIRs. Always
 * stores the current GFIR value in *gfir for the caller.
 */
static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
{
	*gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	return (*gfir & GFIR_ERR_TRIGGER) &&
		genwqe_recovery_on_fatal_gfir_required(cd);
}

/**
 * genwqe_fir_checking() - Check the fault isolation registers of the card
 *
 * If this code works ok, can be tried out with help of the genwqe_poke tool:
 *   sudo ./tools/genwqe_poke 0x8 0xfefefefefef
 *
 * Now the relevant FIRs/sFIRs should be printed out and the driver should
 * invoke recovery (devices are removed and re-added).
 */
static u64 genwqe_fir_checking(struct genwqe_dev *cd)
{
	int j, iterations = 0;
	u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
	u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
	struct pci_dev *pci_dev = cd->pci_dev;

 healthMonitor:
	iterations++;
	if (iterations > 16) {
		dev_err(&pci_dev->dev, "* exit looping after %d times\n",
			iterations);
		goto fatal_error;
	}

	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	if (gfir != 0x0)
		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
				    IO_SLC_CFGREG_GFIR, gfir);
	if (gfir == IO_ILLEGAL_VALUE)
		goto fatal_error;

	/*
	 * Returning early when no GFIR bit is on prevents a continuous
	 * printout e.g. for the following bug:
	 *   FIR set without a 2ndary FIR/FIR cannot be cleared
	 * Comment out the following if to get the prints:
	 */
	if (gfir == 0)
		return 0;

	gfir_masked = gfir & GFIR_ERR_TRIGGER;  /* fatal errors */

	for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */

		/* read the primary FIR (pfir) */
		fir_addr = (uid << 24) + 0x08;
		fir = __genwqe_readq(cd, fir_addr);
		if (fir == 0x0)
			continue;  /* no error in this unit */

		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
		if (fir == IO_ILLEGAL_VALUE)
			goto fatal_error;

		/* read primary FEC */
		fec_addr = (uid << 24) + 0x18;
		fec = __genwqe_readq(cd, fec_addr);

		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
		if (fec == IO_ILLEGAL_VALUE)
			goto fatal_error;

		for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {

			/* secondary fir empty, skip it */
			if ((fir & mask) == 0x0)
				continue;

			sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
			sfir = __genwqe_readq(cd, sfir_addr);

			if (sfir == IO_ILLEGAL_VALUE)
				goto fatal_error;
			dev_err(&pci_dev->dev,
				"* 0x%08x 0x%016llx\n", sfir_addr, sfir);

			sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
			sfec = __genwqe_readq(cd, sfec_addr);

			if (sfec == IO_ILLEGAL_VALUE)
				goto fatal_error;
			dev_err(&pci_dev->dev,
				"* 0x%08x 0x%016llx\n", sfec_addr, sfec);

			gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
			if (gfir == IO_ILLEGAL_VALUE)
				goto fatal_error;

			/* gfir turned on during routine! get out and
			   start over. */
			if ((gfir_masked == 0x0) &&
			    (gfir & GFIR_ERR_TRIGGER)) {
				goto healthMonitor;
			}

			/* do not clear if we entered with a fatal gfir */
			if (gfir_masked == 0x0) {

				/* NEW clear by mask the logged bits */
				sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
				__genwqe_writeq(cd, sfir_addr, sfir);

				dev_dbg(&pci_dev->dev,
					"[HM] Clearing  2ndary FIR 0x%08x "
					"with 0x%016llx\n", sfir_addr, sfir);

				/*
				 * note, these cannot be error-Firs
				 * since gfir_masked is 0 after sfir
				 * was read. Also, it is safe to do
				 * this write if sfir=0. Still need to
				 * clear the primary. This just means
				 * there is no secondary FIR.
				 */

				/* clear by mask the logged bit. */
				fir_clr_addr = (uid << 24) + 0x10;
				__genwqe_writeq(cd, fir_clr_addr, mask);

				dev_dbg(&pci_dev->dev,
					"[HM] Clearing primary FIR 0x%08x "
					"with 0x%016llx\n", fir_clr_addr,
					mask);
			}
		}
	}
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	if (gfir == IO_ILLEGAL_VALUE)
		goto fatal_error;

	if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
		/*
		 * Check once more that it didn't go on after all the
		 * FIRS were cleared.
		 */
		dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
			iterations);
		goto healthMonitor;
	}
	return gfir_masked;

 fatal_error:
	return IO_ILLEGAL_VALUE;
}

/**
 * genwqe_health_thread() - Health checking thread
 *
 * This thread is only started for the PF of the card.
 *
 * This thread monitors the health of the card. A critical situation
 * is when we read registers which contain -1 (IO_ILLEGAL_VALUE).
In + * this case we need to be recovered from outside. Writing to + * registers will very likely not work either. + * + * This thread must only exit if kthread_should_stop() becomes true. + * + * Condition for the health-thread to trigger: + *   a) when a kthread_stop() request comes in or + *   b) a critical GFIR occured + * + * Informational GFIRs are checked and potentially printed in + * health_check_interval seconds. + */ +static int genwqe_health_thread(void *data) +{ +	int rc, should_stop = 0; +	struct genwqe_dev *cd = data; +	struct pci_dev *pci_dev = cd->pci_dev; +	u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg; + +	while (!kthread_should_stop()) { +		rc = wait_event_interruptible_timeout(cd->health_waitq, +			 (genwqe_health_check_cond(cd, &gfir) || +			  (should_stop = kthread_should_stop())), +				genwqe_health_check_interval * HZ); + +		if (should_stop) +			break; + +		if (gfir == IO_ILLEGAL_VALUE) { +			dev_err(&pci_dev->dev, +				"[%s] GFIR=%016llx\n", __func__, gfir); +			goto fatal_error; +		} + +		slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); +		if (slu_unitcfg == IO_ILLEGAL_VALUE) { +			dev_err(&pci_dev->dev, +				"[%s] SLU_UNITCFG=%016llx\n", +				__func__, slu_unitcfg); +			goto fatal_error; +		} + +		app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); +		if (app_unitcfg == IO_ILLEGAL_VALUE) { +			dev_err(&pci_dev->dev, +				"[%s] APP_UNITCFG=%016llx\n", +				__func__, app_unitcfg); +			goto fatal_error; +		} + +		gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); +		if (gfir == IO_ILLEGAL_VALUE) { +			dev_err(&pci_dev->dev, +				"[%s] %s: GFIR=%016llx\n", __func__, +				(gfir & GFIR_ERR_TRIGGER) ? "err" : "info", +				gfir); +			goto fatal_error; +		} + +		gfir_masked = genwqe_fir_checking(cd); +		if (gfir_masked == IO_ILLEGAL_VALUE) +			goto fatal_error; + +		/* +		 * GFIR ErrorTrigger bits set => reset the card! +		 * Never do this for old/manufacturing images! 
+		 */ +		if ((gfir_masked) && !cd->skip_recovery && +		    genwqe_recovery_on_fatal_gfir_required(cd)) { + +			cd->card_state = GENWQE_CARD_FATAL_ERROR; + +			rc = genwqe_recover_card(cd, 0); +			if (rc < 0) { +				/* FIXME Card is unusable and needs unbind! */ +				goto fatal_error; +			} +		} + +		cd->last_gfir = gfir; +		cond_resched(); +	} + +	return 0; + + fatal_error: +	dev_err(&pci_dev->dev, +		"[%s] card unusable. Please trigger unbind!\n", __func__); + +	/* Bring down logical devices to inform user space via udev remove. */ +	cd->card_state = GENWQE_CARD_FATAL_ERROR; +	genwqe_stop(cd); + +	/* genwqe_bus_reset failed(). Now wait for genwqe_remove(). */ +	while (!kthread_should_stop()) +		cond_resched(); + +	return -EIO; +} + +static int genwqe_health_check_start(struct genwqe_dev *cd) +{ +	int rc; + +	if (genwqe_health_check_interval <= 0) +		return 0;	/* valid for disabling the service */ + +	/* moved before request_irq() */ +	/* init_waitqueue_head(&cd->health_waitq); */ + +	cd->health_thread = kthread_run(genwqe_health_thread, cd, +					GENWQE_DEVNAME "%d_health", +					cd->card_idx); +	if (IS_ERR(cd->health_thread)) { +		rc = PTR_ERR(cd->health_thread); +		cd->health_thread = NULL; +		return rc; +	} +	return 0; +} + +static int genwqe_health_thread_running(struct genwqe_dev *cd) +{ +	return cd->health_thread != NULL; +} + +static int genwqe_health_check_stop(struct genwqe_dev *cd) +{ +	int rc; + +	if (!genwqe_health_thread_running(cd)) +		return -EIO; + +	rc = kthread_stop(cd->health_thread); +	cd->health_thread = NULL; +	return 0; +} + +/** + * genwqe_pci_setup() - Allocate PCIe related resources for our card + */ +static int genwqe_pci_setup(struct genwqe_dev *cd) +{ +	int err, bars; +	struct pci_dev *pci_dev = cd->pci_dev; + +	bars = pci_select_bars(pci_dev, IORESOURCE_MEM); +	err = pci_enable_device_mem(pci_dev); +	if (err) { +		dev_err(&pci_dev->dev, +			"err: failed to enable pci memory (err=%d)\n", err); +		goto err_out; +	} + +	/* Reserve PCI 
I/O and memory resources */ +	err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); +	if (err) { +		dev_err(&pci_dev->dev, +			"[%s] err: request bars failed (%d)\n", __func__, err); +		err = -EIO; +		goto err_disable_device; +	} + +	/* check for 64-bit DMA address supported (DAC) */ +	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { +		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); +		if (err) { +			dev_err(&pci_dev->dev, +				"err: DMA64 consistent mask error\n"); +			err = -EIO; +			goto out_release_resources; +		} +	/* check for 32-bit DMA address supported (SAC) */ +	} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { +		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); +		if (err) { +			dev_err(&pci_dev->dev, +				"err: DMA32 consistent mask error\n"); +			err = -EIO; +			goto out_release_resources; +		} +	} else { +		dev_err(&pci_dev->dev, +			"err: neither DMA32 nor DMA64 supported\n"); +		err = -EIO; +		goto out_release_resources; +	} + +	pci_set_master(pci_dev); +	pci_enable_pcie_error_reporting(pci_dev); + +	/* request complete BAR-0 space (length = 0) */ +	cd->mmio_len = pci_resource_len(pci_dev, 0); +	cd->mmio = pci_iomap(pci_dev, 0, 0); +	if (cd->mmio == NULL) { +		dev_err(&pci_dev->dev, +			"[%s] err: mapping BAR0 failed\n", __func__); +		err = -ENOMEM; +		goto out_release_resources; +	} + +	cd->num_vfs = pci_sriov_get_totalvfs(pci_dev); + +	err = genwqe_read_ids(cd); +	if (err) +		goto out_iounmap; + +	return 0; + + out_iounmap: +	pci_iounmap(pci_dev, cd->mmio); + out_release_resources: +	pci_release_selected_regions(pci_dev, bars); + err_disable_device: +	pci_disable_device(pci_dev); + err_out: +	return err; +} + +/** + * genwqe_pci_remove() - Free PCIe related resources for our card + */ +static void genwqe_pci_remove(struct genwqe_dev *cd) +{ +	int bars; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (cd->mmio) +		pci_iounmap(pci_dev, cd->mmio); + +	bars = pci_select_bars(pci_dev, 
IORESOURCE_MEM); +	pci_release_selected_regions(pci_dev, bars); +	pci_disable_device(pci_dev); +} + +/** + * genwqe_probe() - Device initialization + * @pdev:	PCI device information struct + * + * Callable for multiple cards. This function is called on bind. + * + * Return: 0 if succeeded, < 0 when failed + */ +static int genwqe_probe(struct pci_dev *pci_dev, +			const struct pci_device_id *id) +{ +	int err; +	struct genwqe_dev *cd; + +	genwqe_init_crc32(); + +	cd = genwqe_dev_alloc(); +	if (IS_ERR(cd)) { +		dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n", +			(int)PTR_ERR(cd)); +		return PTR_ERR(cd); +	} + +	dev_set_drvdata(&pci_dev->dev, cd); +	cd->pci_dev = pci_dev; + +	err = genwqe_pci_setup(cd); +	if (err < 0) { +		dev_err(&pci_dev->dev, +			"err: problems with PCI setup (err=%d)\n", err); +		goto out_free_dev; +	} + +	err = genwqe_start(cd); +	if (err < 0) { +		dev_err(&pci_dev->dev, +			"err: cannot start card services! (err=%d)\n", err); +		goto out_pci_remove; +	} + +	if (genwqe_is_privileged(cd)) { +		err = genwqe_health_check_start(cd); +		if (err < 0) { +			dev_err(&pci_dev->dev, +				"err: cannot start health checking! " +				"(err=%d)\n", err); +			goto out_stop_services; +		} +	} +	return 0; + + out_stop_services: +	genwqe_stop(cd); + out_pci_remove: +	genwqe_pci_remove(cd); + out_free_dev: +	genwqe_dev_free(cd); +	return err; +} + +/** + * genwqe_remove() - Called when device is removed (hot-plugable) + * + * Or when driver is unloaded respecitively when unbind is done. + */ +static void genwqe_remove(struct pci_dev *pci_dev) +{ +	struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev); + +	genwqe_health_check_stop(cd); + +	/* +	 * genwqe_stop() must survive if it is called twice +	 * sequentially. This happens when the health thread calls it +	 * and fails on genwqe_bus_reset(). 
+	 */ +	genwqe_stop(cd); +	genwqe_pci_remove(cd); +	genwqe_dev_free(cd); +} + +/* + * genwqe_err_error_detected() - Error detection callback + * + * This callback is called by the PCI subsystem whenever a PCI bus + * error is detected. + */ +static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev, +						 enum pci_channel_state state) +{ +	struct genwqe_dev *cd; + +	dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state); + +	if (pci_dev == NULL) +		return PCI_ERS_RESULT_NEED_RESET; + +	cd = dev_get_drvdata(&pci_dev->dev); +	if (cd == NULL) +		return PCI_ERS_RESULT_NEED_RESET; + +	switch (state) { +	case pci_channel_io_normal: +		return PCI_ERS_RESULT_CAN_RECOVER; +	case pci_channel_io_frozen: +		return PCI_ERS_RESULT_NEED_RESET; +	case pci_channel_io_perm_failure: +		return PCI_ERS_RESULT_DISCONNECT; +	} + +	return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev) +{ +	return PCI_ERS_RESULT_NONE; +} + +static void genwqe_err_resume(struct pci_dev *dev) +{ +} + +static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) +{ +	struct genwqe_dev *cd = dev_get_drvdata(&dev->dev); + +	if (numvfs > 0) { +		genwqe_setup_vf_jtimer(cd); +		pci_enable_sriov(dev, numvfs); +		return numvfs; +	} +	if (numvfs == 0) { +		pci_disable_sriov(dev); +		return 0; +	} +	return 0; +} + +static struct pci_error_handlers genwqe_err_handler = { +	.error_detected = genwqe_err_error_detected, +	.mmio_enabled	= genwqe_err_result_none, +	.link_reset	= genwqe_err_result_none, +	.slot_reset	= genwqe_err_result_none, +	.resume		= genwqe_err_resume, +}; + +static struct pci_driver genwqe_driver = { +	.name	  = genwqe_driver_name, +	.id_table = genwqe_device_table, +	.probe	  = genwqe_probe, +	.remove	  = genwqe_remove, +	.sriov_configure = genwqe_sriov_configure, +	.err_handler = &genwqe_err_handler, +}; + +/** + * genwqe_init_module() - Driver registration and initialization + */ +static int __init 
genwqe_init_module(void) +{ +	int rc; + +	class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME); +	if (IS_ERR(class_genwqe)) { +		pr_err("[%s] create class failed\n", __func__); +		return -ENOMEM; +	} + +	debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL); +	if (!debugfs_genwqe) { +		rc = -ENOMEM; +		goto err_out; +	} + +	rc = pci_register_driver(&genwqe_driver); +	if (rc != 0) { +		pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc); +		goto err_out0; +	} + +	return rc; + + err_out0: +	debugfs_remove(debugfs_genwqe); + err_out: +	class_destroy(class_genwqe); +	return rc; +} + +/** + * genwqe_exit_module() - Driver exit + */ +static void __exit genwqe_exit_module(void) +{ +	pci_unregister_driver(&genwqe_driver); +	debugfs_remove(debugfs_genwqe); +	class_destroy(class_genwqe); +} + +module_init(genwqe_init_module); +module_exit(genwqe_exit_module); diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h new file mode 100644 index 00000000000..0e608a28860 --- /dev/null +++ b/drivers/misc/genwqe/card_base.h @@ -0,0 +1,577 @@ +#ifndef __CARD_BASE_H__ +#define __CARD_BASE_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Interfaces within the GenWQE module. Defines genwqe_card and + * ddcb_queue as well as ddcb_requ. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/cdev.h> +#include <linux/stringify.h> +#include <linux/pci.h> +#include <linux/semaphore.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/version.h> +#include <linux/debugfs.h> +#include <linux/slab.h> + +#include <linux/genwqe/genwqe_card.h> +#include "genwqe_driver.h" + +#define GENWQE_MSI_IRQS			4  /* Just one supported, no MSIx */ +#define GENWQE_FLAG_MSI_ENABLED		(1 << 0) + +#define GENWQE_MAX_VFS			15 /* maximum 15 VFs are possible */ +#define GENWQE_MAX_FUNCS		16 /* 1 PF and 15 VFs */ +#define GENWQE_CARD_NO_MAX		(16 * GENWQE_MAX_FUNCS) + +/* Compile parameters, some of them appear in debugfs for later adjustment */ +#define genwqe_ddcb_max			32 /* DDCBs on the work-queue */ +#define genwqe_polling_enabled		0  /* in case of irqs not working */ +#define genwqe_ddcb_software_timeout	10 /* timeout per DDCB in seconds */ +#define genwqe_kill_timeout		8  /* time until process gets killed */ +#define genwqe_vf_jobtimeout_msec	250  /* 250 msec */ +#define genwqe_pf_jobtimeout_msec	8000 /* 8 sec should be ok */ +#define genwqe_health_check_interval	4 /* <= 0: disabled */ + +/* Sysfs attribute groups used when we create the genwqe device */ +extern const struct attribute_group *genwqe_attribute_groups[]; + +/* + * Config space for Genwqe5 A7: + * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00 + * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00 + * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04] + * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00 + */ +#define PCI_DEVICE_GENWQE		0x044b /* Genwqe DeviceID */ + +#define PCI_SUBSYSTEM_ID_GENWQE5	0x035f /* Genwqe A5 Subsystem-ID */ +#define PCI_SUBSYSTEM_ID_GENWQE5_NEW	0x044b /* Genwqe A5 Subsystem-ID */ +#define PCI_CLASSCODE_GENWQE5		0x1200 /* UNKNOWN */ + +#define PCI_SUBVENDOR_ID_IBM_SRIOV	0x0000 +#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV	0x0000 /* Genwqe A5 Subsystem-ID */ +#define 
PCI_CLASSCODE_GENWQE5_SRIOV	0x1200 /* UNKNOWN */ + +#define	GENWQE_SLU_ARCH_REQ		2 /* Required SLU architecture level */ + +/** + * struct genwqe_reg - Genwqe data dump functionality + */ +struct genwqe_reg { +	u32 addr; +	u32 idx; +	u64 val; +}; + +/* + * enum genwqe_dbg_type - Specify chip unit to dump/debug + */ +enum genwqe_dbg_type { +	GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */ +	GENWQE_DBG_UNIT1 = 1, +	GENWQE_DBG_UNIT2 = 2, +	GENWQE_DBG_UNIT3 = 3, +	GENWQE_DBG_UNIT4 = 4, +	GENWQE_DBG_UNIT5 = 5, +	GENWQE_DBG_UNIT6 = 6, +	GENWQE_DBG_UNIT7 = 7, +	GENWQE_DBG_REGS  = 8, +	GENWQE_DBG_DMA   = 9, +	GENWQE_DBG_UNITS = 10, /* max number of possible debug units  */ +}; + +/* Software error injection to simulate card failures */ +#define GENWQE_INJECT_HARDWARE_FAILURE	0x00000001 /* injects -1 reg reads */ +#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */ +#define GENWQE_INJECT_GFIR_FATAL	0x00000004 /* GFIR = 0x0000ffff */ +#define GENWQE_INJECT_GFIR_INFO		0x00000008 /* GFIR = 0xffff0000 */ + +/* + * Genwqe card description and management data. + * + * Error-handling in case of card malfunction + * ------------------------------------------ + * + * If the card is detected to be defective the outside environment + * will cause the PCI layer to call deinit (the cleanup function for + * probe). This is the same effect like doing a unbind/bind operation + * on the card. + * + * The genwqe card driver implements a health checking thread which + * verifies the card function. If this detects a problem the cards + * device is being shutdown and restarted again, along with a reset of + * the card and queue. + * + * All functions accessing the card device return either -EIO or -ENODEV + * code to indicate the malfunction to the user. The user has to close + * the file descriptor and open a new one, once the card becomes + * available again. 
+ * + * If the open file descriptor is setup to receive SIGIO, the signal is + * genereated for the application which has to provide a handler to + * react on it. If the application does not close the open + * file descriptor a SIGKILL is send to enforce freeing the cards + * resources. + * + * I did not find a different way to prevent kernel problems due to + * reference counters for the cards character devices getting out of + * sync. The character device deallocation does not block, even if + * there is still an open file descriptor pending. If this pending + * descriptor is closed, the data structures used by the character + * device is reinstantiated, which will lead to the reference counter + * dropping below the allowed values. + * + * Card recovery + * ------------- + * + * To test the internal driver recovery the following command can be used: + *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject' + */ + + +/** + * struct dma_mapping_type - Mapping type definition + * + * To avoid memcpying data arround we use user memory directly. To do + * this we need to pin/swap-in the memory and request a DMA address + * for it. 
+ */ +enum dma_mapping_type { +	GENWQE_MAPPING_RAW = 0,		/* contignous memory buffer */ +	GENWQE_MAPPING_SGL_TEMP,	/* sglist dynamically used */ +	GENWQE_MAPPING_SGL_PINNED,	/* sglist used with pinning */ +}; + +/** + * struct dma_mapping - Information about memory mappings done by the driver + */ +struct dma_mapping { +	enum dma_mapping_type type; + +	void *u_vaddr;			/* user-space vaddr/non-aligned */ +	void *k_vaddr;			/* kernel-space vaddr/non-aligned */ +	dma_addr_t dma_addr;		/* physical DMA address */ + +	struct page **page_list;	/* list of pages used by user buff */ +	dma_addr_t *dma_list;		/* list of dma addresses per page */ +	unsigned int nr_pages;		/* number of pages */ +	unsigned int size;		/* size in bytes */ + +	struct list_head card_list;	/* list of usr_maps for card */ +	struct list_head pin_list;	/* list of pinned memory for dev */ +}; + +static inline void genwqe_mapping_init(struct dma_mapping *m, +				       enum dma_mapping_type type) +{ +	memset(m, 0, sizeof(*m)); +	m->type = type; +} + +/** + * struct ddcb_queue - DDCB queue data + * @ddcb_max:          Number of DDCBs on the queue + * @ddcb_next:         Next free DDCB + * @ddcb_act:          Next DDCB supposed to finish + * @ddcb_seq:          Sequence number of last DDCB + * @ddcbs_in_flight:   Currently enqueued DDCBs + * @ddcbs_completed:   Number of already completed DDCBs + * @busy:              Number of -EBUSY returns + * @ddcb_daddr:        DMA address of first DDCB in the queue + * @ddcb_vaddr:        Kernel virtual address of first DDCB in the queue + * @ddcb_req:          Associated requests (one per DDCB) + * @ddcb_waitqs:       Associated wait queues (one per DDCB) + * @ddcb_lock:         Lock to protect queuing operations + * @ddcb_waitq:        Wait on next DDCB finishing + */ + +struct ddcb_queue { +	int ddcb_max;			/* amount of DDCBs  */ +	int ddcb_next;			/* next available DDCB num */ +	int ddcb_act;			/* DDCB to be processed */ +	u16 ddcb_seq;			/* slc seq num */ +	
unsigned int ddcbs_in_flight;	/* number of ddcbs in processing */ +	unsigned int ddcbs_completed; +	unsigned int ddcbs_max_in_flight; +	unsigned int busy;		/* how many times -EBUSY? */ + +	dma_addr_t ddcb_daddr;		/* DMA address */ +	struct ddcb *ddcb_vaddr;	/* kernel virtual addr for DDCBs */ +	struct ddcb_requ **ddcb_req;	/* ddcb processing parameter */ +	wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */ + +	spinlock_t ddcb_lock;		/* exclusive access to queue */ +	wait_queue_head_t ddcb_waitq;	/* wait for ddcb processing */ + +	/* registers or the respective queue to be used */ +	u32 IO_QUEUE_CONFIG; +	u32 IO_QUEUE_STATUS; +	u32 IO_QUEUE_SEGMENT; +	u32 IO_QUEUE_INITSQN; +	u32 IO_QUEUE_WRAP; +	u32 IO_QUEUE_OFFSET; +	u32 IO_QUEUE_WTIME; +	u32 IO_QUEUE_ERRCNTS; +	u32 IO_QUEUE_LRW; +}; + +/* + * GFIR, SLU_UNITCFG, APP_UNITCFG + *   8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC. + */ +#define GENWQE_FFDC_REGS	(3 + (8 * (2 + 2 * 64))) + +struct genwqe_ffdc { +	unsigned int entries; +	struct genwqe_reg *regs; +}; + +/** + * struct genwqe_dev - GenWQE device information + * @card_state:       Card operation state, see above + * @ffdc:             First Failure Data Capture buffers for each unit + * @card_thread:      Working thread to operate the DDCB queue + * @card_waitq:       Wait queue used in card_thread + * @queue:            DDCB queue + * @health_thread:    Card monitoring thread (only for PFs) + * @health_waitq:     Wait queue used in health_thread + * @pci_dev:          Associated PCI device (function) + * @mmio:             Base address of 64-bit register space + * @mmio_len:         Length of register area + * @file_lock:        Lock to protect access to file_list + * @file_list:        List of all processes with open GenWQE file descriptors + * + * This struct contains all information needed to communicate with a + * GenWQE card. It is initialized when a GenWQE device is found and + * destroyed when it goes away. 
It holds data to maintain the queue as + * well as data needed to feed the user interfaces. + */ +struct genwqe_dev { +	enum genwqe_card_state card_state; +	spinlock_t print_lock; + +	int card_idx;			/* card index 0..CARD_NO_MAX-1 */ +	u64 flags;			/* general flags */ + +	/* FFDC data gathering */ +	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS]; + +	/* DDCB workqueue */ +	struct task_struct *card_thread; +	wait_queue_head_t queue_waitq; +	struct ddcb_queue queue;	/* genwqe DDCB queue */ +	unsigned int irqs_processed; + +	/* Card health checking thread */ +	struct task_struct *health_thread; +	wait_queue_head_t health_waitq; + +	/* char device */ +	dev_t  devnum_genwqe;		/* major/minor num card */ +	struct class *class_genwqe;	/* reference to class object */ +	struct device *dev;		/* for device creation */ +	struct cdev cdev_genwqe;	/* char device for card */ + +	struct dentry *debugfs_root;	/* debugfs card root directory */ +	struct dentry *debugfs_genwqe;	/* debugfs driver root directory */ + +	/* pci resources */ +	struct pci_dev *pci_dev;	/* PCI device */ +	void __iomem *mmio;		/* BAR-0 MMIO start */ +	unsigned long mmio_len; +	u16 num_vfs; +	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS]; +	int is_privileged;		/* access to all regs possible */ + +	/* config regs which we need often */ +	u64 slu_unitcfg; +	u64 app_unitcfg; +	u64 softreset; +	u64 err_inject; +	u64 last_gfir; +	char app_name[5]; + +	spinlock_t file_lock;		/* lock for open files */ +	struct list_head file_list;	/* list of open files */ + +	/* debugfs parameters */ +	int ddcb_software_timeout;	/* wait until DDCB times out */ +	int skip_recovery;		/* circumvention if recovery fails */ +	int kill_timeout;		/* wait after sending SIGKILL */ +}; + +/** + * enum genwqe_requ_state - State of a DDCB execution request + */ +enum genwqe_requ_state { +	GENWQE_REQU_NEW      = 0, +	GENWQE_REQU_ENQUEUED = 1, +	GENWQE_REQU_TAPPED   = 2, +	GENWQE_REQU_FINISHED = 3, +	GENWQE_REQU_STATE_MAX, +}; + +/** + * struct genwqe_sgl - 
Scatter gather list describing user-space memory + * @sgl:            scatter gather list needs to be 128 byte aligned + * @sgl_dma_addr:   dma address of sgl + * @sgl_size:       size of area used for sgl + * @user_addr:      user-space address of memory area + * @user_size:      size of user-space memory area + * @page:           buffer for partial pages if needed + * @page_dma_addr:  dma address partial pages + */ +struct genwqe_sgl { +	dma_addr_t sgl_dma_addr; +	struct sg_entry *sgl; +	size_t sgl_size;	/* size of sgl */ + +	void __user *user_addr; /* user-space base-address */ +	size_t user_size;       /* size of memory area */ + +	unsigned long nr_pages; +	unsigned long fpage_offs; +	size_t fpage_size; +	size_t lpage_size; + +	void *fpage; +	dma_addr_t fpage_dma_addr; + +	void *lpage; +	dma_addr_t lpage_dma_addr; +}; + +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, +			  void __user *user_addr, size_t user_size); + +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, +		     dma_addr_t *dma_list); + +int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl); + +/** + * struct ddcb_requ - Kernel internal representation of the DDCB request + * @cmd:          User space representation of the DDCB execution request + */ +struct ddcb_requ { +	/* kernel specific content */ +	enum genwqe_requ_state req_state; /* request status */ +	int num;			  /* ddcb_no for this request */ +	struct ddcb_queue *queue;	  /* associated queue */ + +	struct dma_mapping  dma_mappings[DDCB_FIXUPS]; +	struct genwqe_sgl sgls[DDCB_FIXUPS]; + +	/* kernel/user shared content */ +	struct genwqe_ddcb_cmd cmd;	/* ddcb_no for this request */ +	struct genwqe_debug_data debug_data; +}; + +/** + * struct genwqe_file - Information for open GenWQE devices + */ +struct genwqe_file { +	struct genwqe_dev *cd; +	struct genwqe_driver *client; +	struct file *filp; + +	struct fasync_struct *async_queue; +	struct task_struct *owner; +	struct list_head 
list;		/* entry in list of open files */ + +	spinlock_t map_lock;		/* lock for dma_mappings */ +	struct list_head map_list;	/* list of dma_mappings */ + +	spinlock_t pin_lock;		/* lock for pinned memory */ +	struct list_head pin_list;	/* list of pinned memory */ +}; + +int  genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */ +int  genwqe_finish_queue(struct genwqe_dev *cd); +int  genwqe_release_service_layer(struct genwqe_dev *cd); + +/** + * genwqe_get_slu_id() - Read Service Layer Unit Id + * Return: 0x00: Development code + *         0x01: SLC1 (old) + *         0x02: SLC2 (sept2012) + *         0x03: SLC2 (feb2013, generic driver) + */ +static inline int genwqe_get_slu_id(struct genwqe_dev *cd) +{ +	return (int)((cd->slu_unitcfg >> 32) & 0xff); +} + +int  genwqe_ddcbs_in_flight(struct genwqe_dev *cd); + +u8   genwqe_card_type(struct genwqe_dev *cd); +int  genwqe_card_reset(struct genwqe_dev *cd); +int  genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count); +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd); + +int  genwqe_device_create(struct genwqe_dev *cd); +int  genwqe_device_remove(struct genwqe_dev *cd); + +/* debugfs */ +int  genwqe_init_debugfs(struct genwqe_dev *cd); +void genqwe_exit_debugfs(struct genwqe_dev *cd); + +int  genwqe_read_softreset(struct genwqe_dev *cd); + +/* Hardware Circumventions */ +int  genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd); +int  genwqe_flash_readback_fails(struct genwqe_dev *cd); + +/** + * genwqe_write_vreg() - Write register in VF window + * @cd:    genwqe device + * @reg:   register address + * @val:   value to write + * @func:  0: PF, 1: VF0, ..., 15: VF14 + */ +int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func); + +/** + * genwqe_read_vreg() - Read register in VF window + * @cd:    genwqe device + * @reg:   register address + * @func:  0: PF, 1: VF0, ..., 15: VF14 + * + * Return: content of the register + */ +u64 genwqe_read_vreg(struct 
genwqe_dev *cd, u32 reg, int func); + +/* FFDC Buffer Management */ +int  genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id); +int  genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id, +			   struct genwqe_reg *regs, unsigned int max_regs); +int  genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, +			   unsigned int max_regs, int all); +int  genwqe_ffdc_dump_dma(struct genwqe_dev *cd, +			  struct genwqe_reg *regs, unsigned int max_regs); + +int  genwqe_init_debug_data(struct genwqe_dev *cd, +			    struct genwqe_debug_data *d); + +void genwqe_init_crc32(void); +int  genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len); + +/* Memory allocation/deallocation; dma address handling */ +int  genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, +		      void *uaddr, unsigned long size, +		      struct ddcb_requ *req); + +int  genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, +			struct ddcb_requ *req); + +static inline bool dma_mapping_used(struct dma_mapping *m) +{ +	if (!m) +		return 0; +	return m->size != 0; +} + +/** + * __genwqe_execute_ddcb() - Execute DDCB request with addr translation + * + * This function will do the address translation changes to the DDCBs + * according to the definitions required by the ATS field. It looks up + * the memory allocation buffer or does vmap/vunmap for the respective + * user-space buffers, inclusive page pinning and scatter gather list + * buildup and teardown. + */ +int  __genwqe_execute_ddcb(struct genwqe_dev *cd, +			   struct genwqe_ddcb_cmd *cmd); + +/** + * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation + * + * This version will not do address translation or any modifcation of + * the DDCB data. It is used e.g. for the MoveFlash DDCB which is + * entirely prepared by the driver itself. That means the appropriate + * DMA addresses are already in the DDCB and do not need any + * modification. 
+ */ +int  __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, +			       struct genwqe_ddcb_cmd *cmd); + +int  __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); +int  __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); +int  __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); + +/* register access */ +int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val); +u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs); +int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val); +u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs); + +void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, +				 dma_addr_t *dma_handle); +void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, +			      void *vaddr, dma_addr_t dma_handle); + +/* Base clock frequency in MHz */ +int  genwqe_base_clock_frequency(struct genwqe_dev *cd); + +/* Before FFDC is captured the traps should be stopped. */ +void genwqe_stop_traps(struct genwqe_dev *cd); +void genwqe_start_traps(struct genwqe_dev *cd); + +/* Hardware circumvention */ +bool genwqe_need_err_masking(struct genwqe_dev *cd); + +/** + * genwqe_is_privileged() - Determine operation mode for PCI function + * + * On Intel with SRIOV support we see: + *   PF: is_physfn = 1 is_virtfn = 0 + *   VF: is_physfn = 0 is_virtfn = 1 + * + * On Systems with no SRIOV support _and_ virtualized systems we get: + *       is_physfn = 0 is_virtfn = 0 + * + * Other vendors have individual pci device ids to distinguish between + * virtual function drivers and physical function drivers. GenWQE + * unfortunately has just on pci device id for both, VFs and PF. + * + * The following code is used to distinguish if the card is running in + * privileged mode, either as true PF or in a virtualized system with + * full register access e.g. currently on PowerPC. 
+ * + * if (pci_dev->is_virtfn) + *          cd->is_privileged = 0; + *  else + *          cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) + *				 != IO_ILLEGAL_VALUE); + */ +static inline int genwqe_is_privileged(struct genwqe_dev *cd) +{ +	return cd->is_privileged; +} + +#endif	/* __CARD_BASE_H__ */ diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c new file mode 100644 index 00000000000..c8046db2d5a --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.c @@ -0,0 +1,1380 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Device Driver Control Block (DDCB) queue support. Definition of + * interrupt handlers for queue support as well as triggering the + * health monitor code in case of problems. The current hardware uses + * an MSI interrupt which is shared between error handling and + * functional code. + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/crc-itu-t.h> + +#include "card_base.h" +#include "card_ddcb.h" + +/* + * N: next DDCB, this is where the next DDCB will be put. 
+ * A: active DDCB, this is where the code will look for the next completion. + * x: DDCB is enqueued, we are waiting for its completion. + + * Situation (1): Empty queue + *  +---+---+---+---+---+---+---+---+ + *  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + *  |   |   |   |   |   |   |   |   | + *  +---+---+---+---+---+---+---+---+ + *           A/N + *  enqueued_ddcbs = A - N = 2 - 2 = 0 + * + * Situation (2): Wrapped, N > A + *  +---+---+---+---+---+---+---+---+ + *  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + *  |   |   | x | x |   |   |   |   | + *  +---+---+---+---+---+---+---+---+ + *            A       N + *  enqueued_ddcbs = N - A = 4 - 2 = 2 + * + * Situation (3): Queue wrapped, A > N + *  +---+---+---+---+---+---+---+---+ + *  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + *  | x | x |   |   | x | x | x | x | + *  +---+---+---+---+---+---+---+---+ + *            N       A + *  enqueued_ddcbs = queue_max  - (A - N) = 8 - (4 - 2) = 6 + * + * Situation (4a): Queue full N > A + *  +---+---+---+---+---+---+---+---+ + *  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + *  | x | x | x | x | x | x | x |   | + *  +---+---+---+---+---+---+---+---+ + *    A                           N + * + *  enqueued_ddcbs = N - A = 7 - 0 = 7 + * + * Situation (4a): Queue full A > N + *  +---+---+---+---+---+---+---+---+ + *  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + *  | x | x | x |   | x | x | x | x | + *  +---+---+---+---+---+---+---+---+ + *                N   A + *  enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7 + */ + +static int queue_empty(struct ddcb_queue *queue) +{ +	return queue->ddcb_next == queue->ddcb_act; +} + +static int queue_enqueued_ddcbs(struct ddcb_queue *queue) +{ +	if (queue->ddcb_next >= queue->ddcb_act) +		return queue->ddcb_next - queue->ddcb_act; + +	return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); +} + +static int queue_free_ddcbs(struct ddcb_queue *queue) +{ +	int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; + +	if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must 
never ever happen! */ +		return 0; +	} +	return free_ddcbs; +} + +/* + * Use of the PRIV field in the DDCB for queue debugging: + * + * (1) Trying to get rid of a DDCB which saw a timeout: + *     pddcb->priv[6] = 0xcc;   # cleared + * + * (2) Append a DDCB via NEXT bit: + *     pddcb->priv[7] = 0xaa;	# appended + * + * (3) DDCB needed tapping: + *     pddcb->priv[7] = 0xbb;   # tapped + * + * (4) DDCB marked as correctly finished: + *     pddcb->priv[6] = 0xff;	# finished + */ + +static inline void ddcb_mark_tapped(struct ddcb *pddcb) +{ +	pddcb->priv[7] = 0xbb;  /* tapped */ +} + +static inline void ddcb_mark_appended(struct ddcb *pddcb) +{ +	pddcb->priv[7] = 0xaa;	/* appended */ +} + +static inline void ddcb_mark_cleared(struct ddcb *pddcb) +{ +	pddcb->priv[6] = 0xcc; /* cleared */ +} + +static inline void ddcb_mark_finished(struct ddcb *pddcb) +{ +	pddcb->priv[6] = 0xff;	/* finished */ +} + +static inline void ddcb_mark_unused(struct ddcb *pddcb) +{ +	pddcb->priv_64 = cpu_to_be64(0); /* not tapped */ +} + +/** + * genwqe_crc16() - Generate 16-bit crc as required for DDCBs + * @buff:       pointer to data buffer + * @len:        length of data for calculation + * @init:       initial crc (0xffff at start) + * + * Polynomial = x^16 + x^12 + x^5 + 1   (0x1021) + * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff + *          should result in a crc16 of 0x89c3 + * + * Return: crc16 checksum in big endian format ! 
+ */ +static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init) +{ +	return crc_itu_t(init, buff, len); +} + +static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ +	int i; +	struct ddcb *pddcb; +	unsigned long flags; +	struct pci_dev *pci_dev = cd->pci_dev; + +	spin_lock_irqsave(&cd->print_lock, flags); + +	dev_info(&pci_dev->dev, +		 "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n", +		 cd->card_idx, queue->ddcb_act, queue->ddcb_next); + +	pddcb = queue->ddcb_vaddr; +	for (i = 0; i < queue->ddcb_max; i++) { +		dev_err(&pci_dev->dev, +			"  %c %-3d: RETC=%03x SEQ=%04x " +			"HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n", +			i == queue->ddcb_act ? '>' : ' ', +			i, +			be16_to_cpu(pddcb->retc_16), +			be16_to_cpu(pddcb->seqnum_16), +			pddcb->hsi, +			pddcb->shi, +			be64_to_cpu(pddcb->priv_64), +			pddcb->cmd); +		pddcb++; +	} +	spin_unlock_irqrestore(&cd->print_lock, flags); +} + +struct genwqe_ddcb_cmd *ddcb_requ_alloc(void) +{ +	struct ddcb_requ *req; + +	req = kzalloc(sizeof(*req), GFP_ATOMIC); +	if (!req) +		return NULL; + +	return &req->cmd; +} + +void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd) +{ +	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); +	kfree(req); +} + +static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req) +{ +	return req->req_state; +} + +static inline void ddcb_requ_set_state(struct ddcb_requ *req, +				       enum genwqe_requ_state new_state) +{ +	req->req_state = new_state; +} + +static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req) +{ +	return req->cmd.ddata_addr != 0x0; +} + +/** + * ddcb_requ_finished() - Returns the hardware state of the associated DDCB + * @cd:          pointer to genwqe device descriptor + * @req:         DDCB work request + * + * Status of ddcb_requ mirrors this hardware state, but is copied in + * the ddcb_requ on interrupt/polling function. 
The lowlevel code + * should check the hardware state directly, the higher level code + * should check the copy. + * + * This function will also return true if the state of the queue is + * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the + * shutdown case. + */ +static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req) +{ +	return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) || +		(cd->card_state != GENWQE_CARD_USED); +} + +/** + * enqueue_ddcb() - Enqueue a DDCB + * @cd:         pointer to genwqe device descriptor + * @queue:	queue this operation should be done on + * @ddcb_no:    pointer to ddcb number being tapped + * + * Start execution of DDCB by tapping or append to queue via NEXT + * bit. This is done by an atomic 'compare and swap' instruction and + * checking SHI and HSI of the previous DDCB. + * + * This function must only be called with ddcb_lock held. + * + * Return: 1 if new DDCB is appended to previous + *         2 if DDCB queue is tapped via register/simulation + */ +#define RET_DDCB_APPENDED 1 +#define RET_DDCB_TAPPED   2 + +static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, +			struct ddcb *pddcb, int ddcb_no) +{ +	unsigned int try; +	int prev_no; +	struct ddcb *prev_ddcb; +	__be32 old, new, icrc_hsi_shi; +	u64 num; + +	/* +	 * For performance checks a Dispatch Timestamp can be put into +	 * DDCB It is supposed to use the SLU's free running counter, +	 * but this requires PCIe cycles. +	 */ +	ddcb_mark_unused(pddcb); + +	/* check previous DDCB if already fetched */ +	prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1; +	prev_ddcb = &queue->ddcb_vaddr[prev_no]; + +	/* +	 * It might have happened that the HSI.FETCHED bit is +	 * set. Retry in this case. Therefore I expect maximum 2 times +	 * trying. 
+	 */ +	ddcb_mark_appended(pddcb); +	for (try = 0; try < 2; try++) { +		old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */ + +		/* try to append via NEXT bit if prev DDCB is not completed */ +		if ((old & DDCB_COMPLETED_BE32) != 0x00000000) +			break; + +		new = (old | DDCB_NEXT_BE32); + +		wmb(); +		icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); + +		if (icrc_hsi_shi == old) +			return RET_DDCB_APPENDED; /* appended to queue */ +	} + +	/* Queue must be re-started by updating QUEUE_OFFSET */ +	ddcb_mark_tapped(pddcb); +	num = (u64)ddcb_no << 8; + +	wmb(); +	__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ + +	return RET_DDCB_TAPPED; +} + +/** + * copy_ddcb_results() - Copy output state from real DDCB to request + * + * Copy DDCB ASV to request struct. There is no endian + * conversion made, since data structure in ASV is still + * unknown here. + * + * This is needed by: + *   - genwqe_purge_ddcb() + *   - genwqe_check_ddcb_queue() + */ +static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no) +{ +	struct ddcb_queue *queue = req->queue; +	struct ddcb *pddcb = &queue->ddcb_vaddr[req->num]; + +	memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH); + +	/* copy status flags of the variant part */ +	req->cmd.vcrc     = be16_to_cpu(pddcb->vcrc_16); +	req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64); +	req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64); + +	req->cmd.attn     = be16_to_cpu(pddcb->attn_16); +	req->cmd.progress = be32_to_cpu(pddcb->progress_32); +	req->cmd.retc     = be16_to_cpu(pddcb->retc_16); + +	if (ddcb_requ_collect_debug_data(req)) { +		int prev_no = (ddcb_no == 0) ? 
+			queue->ddcb_max - 1 : ddcb_no - 1; +		struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no]; + +		memcpy(&req->debug_data.ddcb_finished, pddcb, +		       sizeof(req->debug_data.ddcb_finished)); +		memcpy(&req->debug_data.ddcb_prev, prev_pddcb, +		       sizeof(req->debug_data.ddcb_prev)); +	} +} + +/** + * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work equests. + * @cd:         pointer to genwqe device descriptor + * + * Return: Number of DDCBs which were finished + */ +static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, +				   struct ddcb_queue *queue) +{ +	unsigned long flags; +	int ddcbs_finished = 0; +	struct pci_dev *pci_dev = cd->pci_dev; + +	spin_lock_irqsave(&queue->ddcb_lock, flags); + +	/* FIXME avoid soft locking CPU */ +	while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) { + +		struct ddcb *pddcb; +		struct ddcb_requ *req; +		u16 vcrc, vcrc_16, retc_16; + +		pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; + +		if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == +		    0x00000000) +			goto go_home; /* not completed, continue waiting */ + +		/* Note: DDCB could be purged */ + +		req = queue->ddcb_req[queue->ddcb_act]; +		if (req == NULL) { +			/* this occurs if DDCB is purged, not an error */ +			/* Move active DDCB further; Nothing to do anymore. */ +			goto pick_next_one; +		} + +		/* +		 * HSI=0x44 (fetched and completed), but RETC is +		 * 0x101, or even worse 0x000. +		 * +		 * In case of seeing the queue in inconsistent state +		 * we read the errcnts and the queue status to provide +		 * a trigger for our PCIe analyzer stop capturing. 
+		 */ +		retc_16 = be16_to_cpu(pddcb->retc_16); +		if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) { +			u64 errcnts, status; +			u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr; + +			errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS); +			status  = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); + +			dev_err(&pci_dev->dev, +				"[%s] SEQN=%04x HSI=%02x RETC=%03x " +				" Q_ERRCNTS=%016llx Q_STATUS=%016llx\n" +				" DDCB_DMA_ADDR=%016llx\n", +				__func__, be16_to_cpu(pddcb->seqnum_16), +				pddcb->hsi, retc_16, errcnts, status, +				queue->ddcb_daddr + ddcb_offs); +		} + +		copy_ddcb_results(req, queue->ddcb_act); +		queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */ + +		dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num); +		genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + +		ddcb_mark_finished(pddcb); + +		/* calculate CRC_16 to see if VCRC is correct */ +		vcrc = genwqe_crc16(pddcb->asv, +				   VCRC_LENGTH(req->cmd.asv_length), +				   0xffff); +		vcrc_16 = be16_to_cpu(pddcb->vcrc_16); +		if (vcrc != vcrc_16) { +			printk_ratelimited(KERN_ERR +				"%s %s: err: wrong VCRC pre=%02x vcrc_len=%d " +				"bytes vcrc_data=%04x is not vcrc_card=%04x\n", +				GENWQE_DEVNAME, dev_name(&pci_dev->dev), +				pddcb->pre, VCRC_LENGTH(req->cmd.asv_length), +				vcrc, vcrc_16); +		} + +		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); +		queue->ddcbs_completed++; +		queue->ddcbs_in_flight--; + +		/* wake up process waiting for this DDCB */ +		wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); + +pick_next_one: +		queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max; +		ddcbs_finished++; +	} + + go_home: +	spin_unlock_irqrestore(&queue->ddcb_lock, flags); +	return ddcbs_finished; +} + +/** + * __genwqe_wait_ddcb(): Waits until DDCB is completed + * @cd:         pointer to genwqe device descriptor + * @req:        pointer to requsted DDCB parameters + * + * The Service Layer will update the RETC in DDCB when processing is + * pending or 
done. + * + * Return: > 0 remaining jiffies, DDCB completed + *           -ETIMEDOUT	when timeout + *           -ERESTARTSYS when ^C + *           -EINVAL when unknown error condition + * + * When an error is returned the called needs to ensure that + * purge_ddcb() is being called to get the &req removed from the + * queue. + */ +int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ +	int rc; +	unsigned int ddcb_no; +	struct ddcb_queue *queue; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (req == NULL) +		return -EINVAL; + +	queue = req->queue; +	if (queue == NULL) +		return -EINVAL; + +	ddcb_no = req->num; +	if (ddcb_no >= queue->ddcb_max) +		return -EINVAL; + +	rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no], +				ddcb_requ_finished(cd, req), +				genwqe_ddcb_software_timeout * HZ); + +	/* +	 * We need to distinguish 3 cases here: +	 *   1. rc == 0              timeout occured +	 *   2. rc == -ERESTARTSYS   signal received +	 *   3. rc > 0               remaining jiffies condition is true +	 */ +	if (rc == 0) { +		struct ddcb_queue *queue = req->queue; +		struct ddcb *pddcb; + +		/* +		 * Timeout may be caused by long task switching time. +		 * When timeout happens, check if the request has +		 * meanwhile completed. 
+		 */ +		genwqe_check_ddcb_queue(cd, req->queue); +		if (ddcb_requ_finished(cd, req)) +			return rc; + +		dev_err(&pci_dev->dev, +			"[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n", +			__func__, req->num, rc,	ddcb_requ_get_state(req), +			req); +		dev_err(&pci_dev->dev, +			"[%s]      IO_QUEUE_STATUS=0x%016llx\n", __func__, +			__genwqe_readq(cd, queue->IO_QUEUE_STATUS)); + +		pddcb = &queue->ddcb_vaddr[req->num]; +		genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + +		print_ddcb_info(cd, req->queue); +		return -ETIMEDOUT; + +	} else if (rc == -ERESTARTSYS) { +		return rc; +		/* +		 * EINTR:       Stops the application +		 * ERESTARTSYS: Restartable systemcall; called again +		 */ + +	} else if (rc < 0) { +		dev_err(&pci_dev->dev, +			"[%s] err: DDCB#%d unknown result (rc=%d) %d!\n", +			__func__, req->num, rc, ddcb_requ_get_state(req)); +		return -EINVAL; +	} + +	/* Severe error occured. Driver is forced to stop operation */ +	if (cd->card_state != GENWQE_CARD_USED) { +		dev_err(&pci_dev->dev, +			"[%s] err: DDCB#%d forced to stop (rc=%d)\n", +			__func__, req->num, rc); +		return -EIO; +	} +	return rc; +} + +/** + * get_next_ddcb() - Get next available DDCB + * @cd:         pointer to genwqe device descriptor + * + * DDCB's content is completely cleared but presets for PRE and + * SEQNUM. This function must only be called when ddcb_lock is held. + * + * Return: NULL if no empty DDCB available otherwise ptr to next DDCB. 
+ */ +static struct ddcb *get_next_ddcb(struct genwqe_dev *cd, +				  struct ddcb_queue *queue, +				  int *num) +{ +	u64 *pu64; +	struct ddcb *pddcb; + +	if (queue_free_ddcbs(queue) == 0) /* queue is  full */ +		return NULL; + +	/* find new ddcb */ +	pddcb = &queue->ddcb_vaddr[queue->ddcb_next]; + +	/* if it is not completed, we are not allowed to use it */ +	/* barrier(); */ +	if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000) +		return NULL; + +	*num = queue->ddcb_next;	/* internal DDCB number */ +	queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max; + +	/* clear important DDCB fields */ +	pu64 = (u64 *)pddcb; +	pu64[0] = 0ULL;		/* offs 0x00 (ICRC,HSI,SHI,...) */ +	pu64[1] = 0ULL;		/* offs 0x01 (ACFUNC,CMD...) */ + +	/* destroy previous results in ASV */ +	pu64[0x80/8] = 0ULL;	/* offs 0x80 (ASV + 0) */ +	pu64[0x88/8] = 0ULL;	/* offs 0x88 (ASV + 0x08) */ +	pu64[0x90/8] = 0ULL;	/* offs 0x90 (ASV + 0x10) */ +	pu64[0x98/8] = 0ULL;	/* offs 0x98 (ASV + 0x18) */ +	pu64[0xd0/8] = 0ULL;	/* offs 0xd0 (RETC,ATTN...) */ + +	pddcb->pre = DDCB_PRESET_PRE; /* 128 */ +	pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++); +	return pddcb; +} + +/** + * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue + * @cd:         genwqe device descriptor + * @req:        DDCB request + * + * This will fail when the request was already FETCHED. In this case + * we need to wait until it is finished. Else the DDCB can be + * reused. This function also ensures that the request data structure + * is removed from ddcb_req[]. + * + * Do not forget to call this function when genwqe_wait_ddcb() fails, + * such that the request gets really removed from ddcb_req[]. 
+ * + * Return: 0 success + */ +int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ +	struct ddcb *pddcb = NULL; +	unsigned int t; +	unsigned long flags; +	struct ddcb_queue *queue = req->queue; +	struct pci_dev *pci_dev = cd->pci_dev; +	u64 queue_status; +	__be32 icrc_hsi_shi = 0x0000; +	__be32 old, new; + +	/* unsigned long flags; */ +	if (genwqe_ddcb_software_timeout <= 0) { +		dev_err(&pci_dev->dev, +			"[%s] err: software timeout is not set!\n", __func__); +		return -EFAULT; +	} + +	pddcb = &queue->ddcb_vaddr[req->num]; + +	for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) { + +		spin_lock_irqsave(&queue->ddcb_lock, flags); + +		/* Check if req was meanwhile finished */ +		if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) +			goto go_home; + +		/* try to set PURGE bit if FETCHED/COMPLETED are not set */ +		old = pddcb->icrc_hsi_shi_32;	/* read SHI/HSI in BE32 */ +		if ((old & DDCB_FETCHED_BE32) == 0x00000000) { + +			new = (old | DDCB_PURGE_BE32); +			icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32, +					       old, new); +			if (icrc_hsi_shi == old) +				goto finish_ddcb; +		} + +		/* normal finish with HSI bit */ +		barrier(); +		icrc_hsi_shi = pddcb->icrc_hsi_shi_32; +		if (icrc_hsi_shi & DDCB_COMPLETED_BE32) +			goto finish_ddcb; + +		spin_unlock_irqrestore(&queue->ddcb_lock, flags); + +		/* +		 * Here the check_ddcb() function will most likely +		 * discover this DDCB to be finished some point in +		 * time. It will mark the req finished and free it up +		 * in the list. +		 */ + +		copy_ddcb_results(req, req->num); /* for the failing case */ +		msleep(100); /* sleep for 1/10 second and try again */ +		continue; + +finish_ddcb: +		copy_ddcb_results(req, req->num); +		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); +		queue->ddcbs_in_flight--; +		queue->ddcb_req[req->num] = NULL; /* delete from array */ +		ddcb_mark_cleared(pddcb); + +		/* Move active DDCB further; Nothing to do here anymore. 
*/ + +		/* +		 * We need to ensure that there is at least one free +		 * DDCB in the queue. To do that, we must update +		 * ddcb_act only if the COMPLETED bit is set for the +		 * DDCB we are working on else we treat that DDCB even +		 * if we PURGED it as occupied (hardware is supposed +		 * to set the COMPLETED bit yet!). +		 */ +		icrc_hsi_shi = pddcb->icrc_hsi_shi_32; +		if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) && +		    (queue->ddcb_act == req->num)) { +			queue->ddcb_act = ((queue->ddcb_act + 1) % +					   queue->ddcb_max); +		} +go_home: +		spin_unlock_irqrestore(&queue->ddcb_lock, flags); +		return 0; +	} + +	/* +	 * If the card is dead and the queue is forced to stop, we +	 * might see this in the queue status register. +	 */ +	queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); + +	dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num); +	genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + +	dev_err(&pci_dev->dev, +		"[%s] err: DDCB#%d not purged and not completed " +		"after %d seconds QSTAT=%016llx!!\n", +		__func__, req->num, genwqe_ddcb_software_timeout, +		queue_status); + +	print_ddcb_info(cd, req->queue); + +	return -EFAULT; +} + +int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d) +{ +	int len; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (d == NULL) { +		dev_err(&pci_dev->dev, +			"[%s] err: invalid memory for debug data!\n", +			__func__); +		return -EFAULT; +	} + +	len  = sizeof(d->driver_version); +	snprintf(d->driver_version, len, "%s", DRV_VERS_STRING); +	d->slu_unitcfg = cd->slu_unitcfg; +	d->app_unitcfg = cd->app_unitcfg; +	return 0; +} + +/** + * __genwqe_enqueue_ddcb() - Enqueue a DDCB + * @cd:          pointer to genwqe device descriptor + * @req:         pointer to DDCB execution request + * + * Return: 0 if enqueuing succeeded + *         -EIO if card is unusable/PCIe problems + *         -EBUSY if enqueuing failed + */ +int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ +	
struct ddcb *pddcb; +	unsigned long flags; +	struct ddcb_queue *queue; +	struct pci_dev *pci_dev = cd->pci_dev; +	u16 icrc; + +	if (cd->card_state != GENWQE_CARD_USED) { +		printk_ratelimited(KERN_ERR +			"%s %s: [%s] Card is unusable/PCIe problem Req#%d\n", +			GENWQE_DEVNAME, dev_name(&pci_dev->dev), +			__func__, req->num); +		return -EIO; +	} + +	queue = req->queue = &cd->queue; + +	/* FIXME circumvention to improve performance when no irq is +	 * there. +	 */ +	if (genwqe_polling_enabled) +		genwqe_check_ddcb_queue(cd, queue); + +	/* +	 * It must be ensured to process all DDCBs in successive +	 * order. Use a lock here in order to prevent nested DDCB +	 * enqueuing. +	 */ +	spin_lock_irqsave(&queue->ddcb_lock, flags); + +	pddcb = get_next_ddcb(cd, queue, &req->num);	/* get ptr and num */ +	if (pddcb == NULL) { +		spin_unlock_irqrestore(&queue->ddcb_lock, flags); +		queue->busy++; +		return -EBUSY; +	} + +	if (queue->ddcb_req[req->num] != NULL) { +		spin_unlock_irqrestore(&queue->ddcb_lock, flags); + +		dev_err(&pci_dev->dev, +			"[%s] picked DDCB %d with req=%p still in use!!\n", +			__func__, req->num, req); +		return -EFAULT; +	} +	ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED); +	queue->ddcb_req[req->num] = req; + +	pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts); +	pddcb->cmd = req->cmd.cmd; +	pddcb->acfunc = req->cmd.acfunc;	/* functional unit */ + +	/* +	 * We know that we can get retc 0x104 with CRC error, do not +	 * stop the queue in those cases for this command. XDIR = 1 +	 * does not work for old SLU versions. +	 * +	 * Last bitstream with the old XDIR behavior had SLU_ID +	 * 0x34199. 
+	 */ +	if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull) +		pddcb->xdir = 0x1; +	else +		pddcb->xdir = 0x0; + + +	pddcb->psp = (((req->cmd.asiv_length / 8) << 4) | +		      ((req->cmd.asv_length  / 8))); +	pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts); + +	/* +	 * If copying the whole DDCB_ASIV_LENGTH is impacting +	 * performance we need to change it to +	 * req->cmd.asiv_length. But simulation benefits from some +	 * non-architectured bits behind the architectured content. +	 * +	 * How much data is copied depends on the availability of the +	 * ATS field, which was introduced late. If the ATS field is +	 * supported ASIV is 8 bytes shorter than it used to be. Since +	 * the ATS field is copied too, the code should do exactly +	 * what it did before, but I wanted to make copying of the ATS +	 * field very explicit. +	 */ +	if (genwqe_get_slu_id(cd) <= 0x2) { +		memcpy(&pddcb->__asiv[0],	/* destination */ +		       &req->cmd.__asiv[0],	/* source */ +		       DDCB_ASIV_LENGTH);	/* req->cmd.asiv_length */ +	} else { +		pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats); +		memcpy(&pddcb->n.asiv[0],	/* destination */ +			&req->cmd.asiv[0],	/* source */ +			DDCB_ASIV_LENGTH_ATS);	/* req->cmd.asiv_length */ +	} + +	pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */ + +	/* +	 * Calculate CRC_16 for corresponding range PSP(7:4). Include +	 * empty 4 bytes prior to the data. +	 */ +	icrc = genwqe_crc16((const u8 *)pddcb, +			   ICRC_LENGTH(req->cmd.asiv_length), 0xffff); +	pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16); + +	/* enable DDCB completion irq */ +	if (!genwqe_polling_enabled) +		pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32; + +	dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num); +	genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + +	if (ddcb_requ_collect_debug_data(req)) { +		/* use the kernel copy of debug data. 
copying back to +		   user buffer happens later */ + +		genwqe_init_debug_data(cd, &req->debug_data); +		memcpy(&req->debug_data.ddcb_before, pddcb, +		       sizeof(req->debug_data.ddcb_before)); +	} + +	enqueue_ddcb(cd, queue, pddcb, req->num); +	queue->ddcbs_in_flight++; + +	if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight) +		queue->ddcbs_max_in_flight = queue->ddcbs_in_flight; + +	ddcb_requ_set_state(req, GENWQE_REQU_TAPPED); +	spin_unlock_irqrestore(&queue->ddcb_lock, flags); +	wake_up_interruptible(&cd->queue_waitq); + +	return 0; +} + +/** + * __genwqe_execute_raw_ddcb() - Setup and execute DDCB + * @cd:         pointer to genwqe device descriptor + * @req:        user provided DDCB request + */ +int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, +			     struct genwqe_ddcb_cmd *cmd) +{ +	int rc = 0; +	struct pci_dev *pci_dev = cd->pci_dev; +	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + +	if (cmd->asiv_length > DDCB_ASIV_LENGTH) { +		dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n", +			__func__, cmd->asiv_length); +		return -EINVAL; +	} +	if (cmd->asv_length > DDCB_ASV_LENGTH) { +		dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n", +			__func__, cmd->asiv_length); +		return -EINVAL; +	} +	rc = __genwqe_enqueue_ddcb(cd, req); +	if (rc != 0) +		return rc; + +	rc = __genwqe_wait_ddcb(cd, req); +	if (rc < 0)		/* error or signal interrupt */ +		goto err_exit; + +	if (ddcb_requ_collect_debug_data(req)) { +		if (copy_to_user((struct genwqe_debug_data __user *) +				 (unsigned long)cmd->ddata_addr, +				 &req->debug_data, +				 sizeof(struct genwqe_debug_data))) +			return -EFAULT; +	} + +	/* +	 * Higher values than 0x102 indicate completion with faults, +	 * lower values than 0x102 indicate processing faults. Note +	 * that DDCB might have been purged. E.g. Cntl+C. +	 */ +	if (cmd->retc != DDCB_RETC_COMPLETE) { +		/* This might happen e.g. flash read, and needs to be +		   handled by the upper layer code. 
*/ +		rc = -EBADMSG;	/* not processed/error retc */ +	} + +	return rc; + + err_exit: +	__genwqe_purge_ddcb(cd, req); + +	if (ddcb_requ_collect_debug_data(req)) { +		if (copy_to_user((struct genwqe_debug_data __user *) +				 (unsigned long)cmd->ddata_addr, +				 &req->debug_data, +				 sizeof(struct genwqe_debug_data))) +			return -EFAULT; +	} +	return rc; +} + +/** + * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished + * + * We use this as condition for our wait-queue code. + */ +static int genwqe_next_ddcb_ready(struct genwqe_dev *cd) +{ +	unsigned long flags; +	struct ddcb *pddcb; +	struct ddcb_queue *queue = &cd->queue; + +	spin_lock_irqsave(&queue->ddcb_lock, flags); + +	if (queue_empty(queue)) { /* emtpy queue */ +		spin_unlock_irqrestore(&queue->ddcb_lock, flags); +		return 0; +	} + +	pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; +	if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */ +		spin_unlock_irqrestore(&queue->ddcb_lock, flags); +		return 1; +	} + +	spin_unlock_irqrestore(&queue->ddcb_lock, flags); +	return 0; +} + +/** + * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight + * + * Keep track on the number of DDCBs which ware currently in the + * queue. This is needed for statistics as well as conditon if we want + * to wait or better do polling in case of no interrupts available. 
+ */ +int genwqe_ddcbs_in_flight(struct genwqe_dev *cd) +{ +	unsigned long flags; +	int ddcbs_in_flight = 0; +	struct ddcb_queue *queue = &cd->queue; + +	spin_lock_irqsave(&queue->ddcb_lock, flags); +	ddcbs_in_flight += queue->ddcbs_in_flight; +	spin_unlock_irqrestore(&queue->ddcb_lock, flags); + +	return ddcbs_in_flight; +} + +static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ +	int rc, i; +	struct ddcb *pddcb; +	u64 val64; +	unsigned int queue_size; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (genwqe_ddcb_max < 2) +		return -EINVAL; + +	queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE); + +	queue->ddcbs_in_flight = 0;  /* statistics */ +	queue->ddcbs_max_in_flight = 0; +	queue->ddcbs_completed = 0; +	queue->busy = 0; + +	queue->ddcb_seq	  = 0x100; /* start sequence number */ +	queue->ddcb_max	  = genwqe_ddcb_max; /* module parameter */ +	queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size, +						&queue->ddcb_daddr); +	if (queue->ddcb_vaddr == NULL) { +		dev_err(&pci_dev->dev, +			"[%s] **err: could not allocate DDCB **\n", __func__); +		return -ENOMEM; +	} +	memset(queue->ddcb_vaddr, 0, queue_size); + +	queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) * +				  queue->ddcb_max, GFP_KERNEL); +	if (!queue->ddcb_req) { +		rc = -ENOMEM; +		goto free_ddcbs; +	} + +	queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) * +				     queue->ddcb_max, GFP_KERNEL); +	if (!queue->ddcb_waitqs) { +		rc = -ENOMEM; +		goto free_requs; +	} + +	for (i = 0; i < queue->ddcb_max; i++) { +		pddcb = &queue->ddcb_vaddr[i];		     /* DDCBs */ +		pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32; +		pddcb->retc_16 = cpu_to_be16(0xfff); + +		queue->ddcb_req[i] = NULL;		     /* requests */ +		init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */ +	} + +	queue->ddcb_act  = 0; +	queue->ddcb_next = 0;	/* queue is empty */ + +	spin_lock_init(&queue->ddcb_lock); +	init_waitqueue_head(&queue->ddcb_waitq); + +	val64 = 
((u64)(queue->ddcb_max - 1) <<  8); /* lastptr */ +	__genwqe_writeq(cd, queue->IO_QUEUE_CONFIG,  0x07);  /* iCRC/vCRC */ +	__genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr); +	__genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq); +	__genwqe_writeq(cd, queue->IO_QUEUE_WRAP,    val64); +	return 0; + + free_requs: +	kfree(queue->ddcb_req); +	queue->ddcb_req = NULL; + free_ddcbs: +	__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, +				queue->ddcb_daddr); +	queue->ddcb_vaddr = NULL; +	queue->ddcb_daddr = 0ull; +	return -ENODEV; + +} + +static int ddcb_queue_initialized(struct ddcb_queue *queue) +{ +	return queue->ddcb_vaddr != NULL; +} + +static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ +	unsigned int queue_size; + +	queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE); + +	kfree(queue->ddcb_req); +	queue->ddcb_req = NULL; + +	if (queue->ddcb_vaddr) { +		__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, +					queue->ddcb_daddr); +		queue->ddcb_vaddr = NULL; +		queue->ddcb_daddr = 0ull; +	} +} + +static irqreturn_t genwqe_pf_isr(int irq, void *dev_id) +{ +	u64 gfir; +	struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; +	struct pci_dev *pci_dev = cd->pci_dev; + +	/* +	 * In case of fatal FIR error the queue is stopped, such that +	 * we can safely check it without risking anything. +	 */ +	cd->irqs_processed++; +	wake_up_interruptible(&cd->queue_waitq); + +	/* +	 * Checking for errors before kicking the queue might be +	 * safer, but slower for the good-case ... See above. +	 */ +	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); +	if ((gfir & GFIR_ERR_TRIGGER) != 0x0) { + +		wake_up_interruptible(&cd->health_waitq); + +		/* +		 * By default GFIRs causes recovery actions. This +		 * count is just for debug when recovery is masked. 
+		 */ +		printk_ratelimited(KERN_ERR +				   "%s %s: [%s] GFIR=%016llx\n", +				   GENWQE_DEVNAME, dev_name(&pci_dev->dev), +				   __func__, gfir); +	} + +	return IRQ_HANDLED; +} + +static irqreturn_t genwqe_vf_isr(int irq, void *dev_id) +{ +	struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; + +	cd->irqs_processed++; +	wake_up_interruptible(&cd->queue_waitq); + +	return IRQ_HANDLED; +} + +/** + * genwqe_card_thread() - Work thread for the DDCB queue + * + * The idea is to check if there are DDCBs in processing. If there are + * some finished DDCBs, we process them and wakeup the + * requestors. Otherwise we give other processes time using + * cond_resched(). + */ +static int genwqe_card_thread(void *data) +{ +	int should_stop = 0, rc = 0; +	struct genwqe_dev *cd = (struct genwqe_dev *)data; + +	while (!kthread_should_stop()) { + +		genwqe_check_ddcb_queue(cd, &cd->queue); + +		if (genwqe_polling_enabled) { +			rc = wait_event_interruptible_timeout( +				cd->queue_waitq, +				genwqe_ddcbs_in_flight(cd) || +				(should_stop = kthread_should_stop()), 1); +		} else { +			rc = wait_event_interruptible_timeout( +				cd->queue_waitq, +				genwqe_next_ddcb_ready(cd) || +				(should_stop = kthread_should_stop()), HZ); +		} +		if (should_stop) +			break; + +		/* +		 * Avoid soft lockups on heavy loads; we do not want +		 * to disable our interrupts. +		 */ +		cond_resched(); +	} +	return 0; +} + +/** + * genwqe_setup_service_layer() - Setup DDCB queue + * @cd:         pointer to genwqe device descriptor + * + * Allocate DDCBs. Configure Service Layer Controller (SLC). 
+ * + * Return: 0 success + */ +int genwqe_setup_service_layer(struct genwqe_dev *cd) +{ +	int rc; +	struct ddcb_queue *queue; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (genwqe_is_privileged(cd)) { +		rc = genwqe_card_reset(cd); +		if (rc < 0) { +			dev_err(&pci_dev->dev, +				"[%s] err: reset failed.\n", __func__); +			return rc; +		} +		genwqe_read_softreset(cd); +	} + +	queue = &cd->queue; +	queue->IO_QUEUE_CONFIG  = IO_SLC_QUEUE_CONFIG; +	queue->IO_QUEUE_STATUS  = IO_SLC_QUEUE_STATUS; +	queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT; +	queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN; +	queue->IO_QUEUE_OFFSET  = IO_SLC_QUEUE_OFFSET; +	queue->IO_QUEUE_WRAP    = IO_SLC_QUEUE_WRAP; +	queue->IO_QUEUE_WTIME   = IO_SLC_QUEUE_WTIME; +	queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS; +	queue->IO_QUEUE_LRW     = IO_SLC_QUEUE_LRW; + +	rc = setup_ddcb_queue(cd, queue); +	if (rc != 0) { +		rc = -ENODEV; +		goto err_out; +	} + +	init_waitqueue_head(&cd->queue_waitq); +	cd->card_thread = kthread_run(genwqe_card_thread, cd, +				      GENWQE_DEVNAME "%d_thread", +				      cd->card_idx); +	if (IS_ERR(cd->card_thread)) { +		rc = PTR_ERR(cd->card_thread); +		cd->card_thread = NULL; +		goto stop_free_queue; +	} + +	rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS); +	if (rc > 0) +		rc = genwqe_set_interrupt_capability(cd, rc); +	if (rc != 0) { +		rc = -ENODEV; +		goto stop_kthread; +	} + +	/* +	 * We must have all wait-queues initialized when we enable the +	 * interrupts. Otherwise we might crash if we get an early +	 * irq. 
+	 */ +	init_waitqueue_head(&cd->health_waitq); + +	if (genwqe_is_privileged(cd)) { +		rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED, +				 GENWQE_DEVNAME, cd); +	} else { +		rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED, +				 GENWQE_DEVNAME, cd); +	} +	if (rc < 0) { +		dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq); +		goto stop_irq_cap; +	} + +	cd->card_state = GENWQE_CARD_USED; +	return 0; + + stop_irq_cap: +	genwqe_reset_interrupt_capability(cd); + stop_kthread: +	kthread_stop(cd->card_thread); +	cd->card_thread = NULL; + stop_free_queue: +	free_ddcb_queue(cd, queue); + err_out: +	return rc; +} + +/** + * queue_wake_up_all() - Handles fatal error case + * + * The PCI device got unusable and we have to stop all pending + * requests as fast as we can. The code after this must purge the + * DDCBs in question and ensure that all mappings are freed. + */ +static int queue_wake_up_all(struct genwqe_dev *cd) +{ +	unsigned int i; +	unsigned long flags; +	struct ddcb_queue *queue = &cd->queue; + +	spin_lock_irqsave(&queue->ddcb_lock, flags); + +	for (i = 0; i < queue->ddcb_max; i++) +		wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); + +	spin_unlock_irqrestore(&queue->ddcb_lock, flags); + +	return 0; +} + +/** + * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces + * + * Relies on the pre-condition that there are no users of the card + * device anymore e.g. with open file-descriptors. + * + * This function must be robust enough to be called twice. + */ +int genwqe_finish_queue(struct genwqe_dev *cd) +{ +	int i, rc = 0, in_flight; +	int waitmax = genwqe_ddcb_software_timeout; +	struct pci_dev *pci_dev = cd->pci_dev; +	struct ddcb_queue *queue = &cd->queue; + +	if (!ddcb_queue_initialized(queue)) +		return 0; + +	/* Do not wipe out the error state. 
*/ +	if (cd->card_state == GENWQE_CARD_USED) +		cd->card_state = GENWQE_CARD_UNUSED; + +	/* Wake up all requests in the DDCB queue such that they +	   should be removed nicely. */ +	queue_wake_up_all(cd); + +	/* We must wait to get rid of the DDCBs in flight */ +	for (i = 0; i < waitmax; i++) { +		in_flight = genwqe_ddcbs_in_flight(cd); + +		if (in_flight == 0) +			break; + +		dev_dbg(&pci_dev->dev, +			"  DEBUG [%d/%d] waiting for queue to get empty: " +			"%d requests!\n", i, waitmax, in_flight); + +		/* +		 * Severe severe error situation: The card itself has +		 * 16 DDCB queues, each queue has e.g. 32 entries, +		 * each DDCB has a hardware timeout of currently 250 +		 * msec but the PFs have a hardware timeout of 8 sec +		 * ... so I take something large. +		 */ +		msleep(1000); +	} +	if (i == waitmax) { +		dev_err(&pci_dev->dev, "  [%s] err: queue is not empty!!\n", +			__func__); +		rc = -EIO; +	} +	return rc; +} + +/** + * genwqe_release_service_layer() - Shutdown DDCB queue + * @cd:       genwqe device descriptor + * + * This function must be robust enough to be called twice. + */ +int genwqe_release_service_layer(struct genwqe_dev *cd) +{ +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (!ddcb_queue_initialized(&cd->queue)) +		return 1; + +	free_irq(pci_dev->irq, cd); +	genwqe_reset_interrupt_capability(cd); + +	if (cd->card_thread != NULL) { +		kthread_stop(cd->card_thread); +		cd->card_thread = NULL; +	} + +	free_ddcb_queue(cd, &cd->queue); +	return 0; +} diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h new file mode 100644 index 00000000000..c4f26720753 --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.h @@ -0,0 +1,188 @@ +#ifndef __CARD_DDCB_H__ +#define __CARD_DDCB_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 
2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> + +#include "genwqe_driver.h" +#include "card_base.h" + +/** + * struct ddcb - Device Driver Control Block DDCB + * @hsi:        Hardware software interlock + * @shi:        Software hardware interlock. Hsi and shi are used to interlock + *              software and hardware activities. We are using a compare and + *              swap operation to ensure that there are no races when + *              activating new DDCBs on the queue, or when we need to + *              purge a DDCB from a running queue. + * @acfunc:     Accelerator function addresses a unit within the chip + * @cmd:        Command to work on + * @cmdopts_16: Options for the command + * @asiv:       Input data + * @asv:        Output data + * + * The DDCB data format is big endian. Multiple consecutive DDCBs form + * a DDCB queue. 
+ */ +#define ASIV_LENGTH		104 /* Old specification without ATS field */ +#define ASIV_LENGTH_ATS		96  /* New specification with ATS field */ +#define ASV_LENGTH		64 + +struct ddcb { +	union { +		__be32 icrc_hsi_shi_32;	/* iCRC, Hardware/SW interlock */ +		struct { +			__be16	icrc_16; +			u8	hsi; +			u8	shi; +		}; +	}; +	u8  pre;		/* Preamble */ +	u8  xdir;		/* Execution Directives */ +	__be16 seqnum_16;	/* Sequence Number */ + +	u8  acfunc;		/* Accelerator Function.. */ +	u8  cmd;		/* Command. */ +	__be16 cmdopts_16;	/* Command Options */ +	u8  sur;		/* Status Update Rate */ +	u8  psp;		/* Protection Section Pointer */ +	__be16 rsvd_0e_16;	/* Reserved invariant */ + +	__be64 fwiv_64;		/* Firmware Invariant. */ + +	union { +		struct { +			__be64 ats_64;  /* Address Translation Spec */ +			u8     asiv[ASIV_LENGTH_ATS]; /* New ASIV */ +		} n; +		u8  __asiv[ASIV_LENGTH];	/* obsolete */ +	}; +	u8     asv[ASV_LENGTH];	/* Appl Spec Variant */ + +	__be16 rsvd_c0_16;	/* Reserved Variant */ +	__be16 vcrc_16;		/* Variant CRC */ +	__be32 rsvd_32;		/* Reserved unprotected */ + +	__be64 deque_ts_64;	/* Deque Time Stamp. */ + +	__be16 retc_16;		/* Return Code */ +	__be16 attn_16;		/* Attention/Extended Error Codes */ +	__be32 progress_32;	/* Progress indicator. */ + +	__be64 cmplt_ts_64;	/* Completion Time Stamp. */ + +	/* The following layout matches the new service layer format */ +	__be32 ibdc_32;		/* Inbound Data Count  (* 256) */ +	__be32 obdc_32;		/* Outbound Data Count (* 256) */ + +	__be64 rsvd_SLH_64;	/* Reserved for hardware */ +	union {			/* private data for driver */ +		u8	priv[8]; +		__be64	priv_64; +	}; +	__be64 disp_ts_64;	/* Dispatch TimeStamp */ +} __attribute__((__packed__)); + +/* CRC polynomials for DDCB */ +#define CRC16_POLYNOMIAL	0x1021 + +/* + * SHI: Software to Hardware Interlock + *   This 1 byte field is written by software to interlock the + *   movement of one queue entry to another with the hardware in the + *   chip. 
+ */ +#define DDCB_SHI_INTR		0x04 /* Bit 2 */ +#define DDCB_SHI_PURGE		0x02 /* Bit 1 */ +#define DDCB_SHI_NEXT		0x01 /* Bit 0 */ + +/* + * HSI: Hardware to Software interlock + * This 1 byte field is written by hardware to interlock the movement + * of one queue entry to another with the software in the chip. + */ +#define DDCB_HSI_COMPLETED	0x40 /* Bit 6 */ +#define DDCB_HSI_FETCHED	0x04 /* Bit 2 */ + +/* + * Accessing HSI/SHI is done 32-bit wide + *   Normally 16-bit access would work too, but on some platforms the + *   16 compare and swap operation is not supported. Therefore + *   switching to 32-bit such that those platforms will work too. + * + *                                         iCRC HSI/SHI + */ +#define DDCB_INTR_BE32		cpu_to_be32(0x00000004) +#define DDCB_PURGE_BE32		cpu_to_be32(0x00000002) +#define DDCB_NEXT_BE32		cpu_to_be32(0x00000001) +#define DDCB_COMPLETED_BE32	cpu_to_be32(0x00004000) +#define DDCB_FETCHED_BE32	cpu_to_be32(0x00000400) + +/* Definitions of DDCB presets */ +#define DDCB_PRESET_PRE		0x80 +#define ICRC_LENGTH(n)		((n) + 8 + 8 + 8)  /* used ASIV + hdr fields */ +#define VCRC_LENGTH(n)		((n))		   /* used ASV */ + +/* + * Genwqe Scatter Gather list + *   Each element has up to 8 entries. + *   The chaining element is element 0 cause of prefetching needs. + */ + +/* + * 0b0110 Chained descriptor. The descriptor is describing the next + * descriptor list. + */ +#define SG_CHAINED		(0x6) + +/* + * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty + * condition. + */ +#define SG_DATA			(0x2) + +/* + * 0b0000 Early terminator. This is the last entry on the list + * regardless of the length indicated. + */ +#define SG_END_LIST		(0x0) + +/** + * struct sg_entry - Scatter gather list + * @target_addr:       Either a dma addr of memory to work on or a + *                     dma addr of a subsequent sglist block. + * @len:               Length of the data block. + * @flags:             See above. 
+ * + * Depending on the command the GenWQE card can use a scatter gather + * list to describe the memory it works on. Always 8 sg_entry's form + * a block. + */ +struct sg_entry { +	__be64 target_addr; +	__be32 len; +	__be32 flags; +}; + +#endif /* __CARD_DDCB_H__ */ diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c new file mode 100644 index 00000000000..0a33ade6410 --- /dev/null +++ b/drivers/misc/genwqe/card_debugfs.c @@ -0,0 +1,499 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Debugfs interfaces for the GenWQE card. Help to debug potential + * problems. Dump internal chip state for debugging and failure + * determination. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> + +#include "card_base.h" +#include "card_ddcb.h" + +#define GENWQE_DEBUGFS_RO(_name, _showfn)				\ +	static int genwqe_debugfs_##_name##_open(struct inode *inode,	\ +						 struct file *file)	\ +	{								\ +		return single_open(file, _showfn, inode->i_private);	\ +	}								\ +	static const struct file_operations genwqe_##_name##_fops = {	\ +		.open = genwqe_debugfs_##_name##_open,			\ +		.read = seq_read,					\ +		.llseek = seq_lseek,					\ +		.release = single_release,				\ +	} + +static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs, +			  int entries) +{ +	unsigned int i; +	u32 v_hi, v_lo; + +	for (i = 0; i < entries; i++) { +		v_hi = (regs[i].val >> 32) & 0xffffffff; +		v_lo = (regs[i].val)       & 0xffffffff; + +		seq_printf(s, "  0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n", +			   regs[i].addr, regs[i].idx, v_hi, v_lo); +	} +} + +static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ +	struct genwqe_dev *cd = s->private; +	int entries; +	struct genwqe_reg *regs; + +	entries = genwqe_ffdc_buff_size(cd, uid); +	if (entries < 0) +		return -EINVAL; + +	if (entries == 0) +		return 0; + +	regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL); +	if (regs == NULL) +		return -ENOMEM; + +	genwqe_stop_traps(cd); /* halt the traps while dumping data */ +	genwqe_ffdc_buff_read(cd, uid, regs, entries); +	genwqe_start_traps(cd); + +	dbg_uidn_show(s, regs, entries); +	kfree(regs); +	return 0; +} + +static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused) +{ +	return curr_dbg_uidn_show(s, unused, 0); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show); + +static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused) +{ +	return curr_dbg_uidn_show(s, unused, 1); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show); + +static int 
genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused) +{ +	return curr_dbg_uidn_show(s, unused, 2); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show); + +static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ +	struct genwqe_dev *cd = s->private; + +	dbg_uidn_show(s, cd->ffdc[uid].regs,  cd->ffdc[uid].entries); +	return 0; +} + +static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused) +{ +	return prev_dbg_uidn_show(s, unused, 0); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show); + +static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused) +{ +	return prev_dbg_uidn_show(s, unused, 1); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show); + +static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused) +{ +	return prev_dbg_uidn_show(s, unused, 2); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show); + +static int genwqe_curr_regs_show(struct seq_file *s, void *unused) +{ +	struct genwqe_dev *cd = s->private; +	unsigned int i; +	struct genwqe_reg *regs; + +	regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL); +	if (regs == NULL) +		return -ENOMEM; + +	genwqe_stop_traps(cd); +	genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1); +	genwqe_start_traps(cd); + +	for (i = 0; i < GENWQE_FFDC_REGS; i++) { +		if (regs[i].addr == 0xffffffff) +			break;  /* invalid entries */ + +		if (regs[i].val == 0x0ull) +			continue;  /* do not print 0x0 FIRs */ + +		seq_printf(s, "  0x%08x 0x%016llx\n", +			   regs[i].addr, regs[i].val); +	} +	return 0; +} + +GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show); + +static int genwqe_prev_regs_show(struct seq_file *s, void *unused) +{ +	struct genwqe_dev *cd = s->private; +	unsigned int i; +	struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs; + +	if (regs == NULL) +		return -EINVAL; + +	for (i = 0; i < GENWQE_FFDC_REGS; i++) { +		if (regs[i].addr == 0xffffffff) +			break;  /* invalid entries */ + +		if 
(regs[i].val == 0x0ull) +			continue;  /* do not print 0x0 FIRs */ + +		seq_printf(s, "  0x%08x 0x%016llx\n", +			   regs[i].addr, regs[i].val); +	} +	return 0; +} + +GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show); + +static int genwqe_jtimer_show(struct seq_file *s, void *unused) +{ +	struct genwqe_dev *cd = s->private; +	unsigned int vf_num; +	u64 jtimer; + +	jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0); +	seq_printf(s, "  PF   0x%016llx %d msec\n", jtimer, +		   genwqe_pf_jobtimeout_msec); + +	for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { +		jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, +					  vf_num + 1); +		seq_printf(s, "  VF%-2d 0x%016llx %d msec\n", vf_num, jtimer, +			   cd->vf_jobtimeout_msec[vf_num]); +	} +	return 0; +} + +GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show); + +static int genwqe_queue_working_time_show(struct seq_file *s, void *unused) +{ +	struct genwqe_dev *cd = s->private; +	unsigned int vf_num; +	u64 t; + +	t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0); +	seq_printf(s, "  PF   0x%016llx\n", t); + +	for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { +		t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1); +		seq_printf(s, "  VF%-2d 0x%016llx\n", vf_num, t); +	} +	return 0; +} + +GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show); + +static int genwqe_ddcb_info_show(struct seq_file *s, void *unused) +{ +	struct genwqe_dev *cd = s->private; +	unsigned int i; +	struct ddcb_queue *queue; +	struct ddcb *pddcb; + +	queue = &cd->queue; +	seq_puts(s, "DDCB QUEUE:\n"); +	seq_printf(s, "  ddcb_max:            %d\n" +		   "  ddcb_daddr:          %016llx - %016llx\n" +		   "  ddcb_vaddr:          %016llx\n" +		   "  ddcbs_in_flight:     %u\n" +		   "  ddcbs_max_in_flight: %u\n" +		   "  ddcbs_completed:     %u\n" +		   "  busy:                %u\n" +		   "  irqs_processed:      %u\n", +		   queue->ddcb_max, (long long)queue->ddcb_daddr, +		   (long long)queue->ddcb_daddr + +		   
(queue->ddcb_max * DDCB_LENGTH), +		   (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, +		   queue->ddcbs_max_in_flight, queue->ddcbs_completed, +		   queue->busy, cd->irqs_processed); + +	/* Hardware State */ +	seq_printf(s, "  0x%08x 0x%016llx IO_QUEUE_CONFIG\n" +		   "  0x%08x 0x%016llx IO_QUEUE_STATUS\n" +		   "  0x%08x 0x%016llx IO_QUEUE_SEGMENT\n" +		   "  0x%08x 0x%016llx IO_QUEUE_INITSQN\n" +		   "  0x%08x 0x%016llx IO_QUEUE_WRAP\n" +		   "  0x%08x 0x%016llx IO_QUEUE_OFFSET\n" +		   "  0x%08x 0x%016llx IO_QUEUE_WTIME\n" +		   "  0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n" +		   "  0x%08x 0x%016llx IO_QUEUE_LRW\n", +		   queue->IO_QUEUE_CONFIG, +		   __genwqe_readq(cd, queue->IO_QUEUE_CONFIG), +		   queue->IO_QUEUE_STATUS, +		   __genwqe_readq(cd, queue->IO_QUEUE_STATUS), +		   queue->IO_QUEUE_SEGMENT, +		   __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT), +		   queue->IO_QUEUE_INITSQN, +		   __genwqe_readq(cd, queue->IO_QUEUE_INITSQN), +		   queue->IO_QUEUE_WRAP, +		   __genwqe_readq(cd, queue->IO_QUEUE_WRAP), +		   queue->IO_QUEUE_OFFSET, +		   __genwqe_readq(cd, queue->IO_QUEUE_OFFSET), +		   queue->IO_QUEUE_WTIME, +		   __genwqe_readq(cd, queue->IO_QUEUE_WTIME), +		   queue->IO_QUEUE_ERRCNTS, +		   __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS), +		   queue->IO_QUEUE_LRW, +		   __genwqe_readq(cd, queue->IO_QUEUE_LRW)); + +	seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n", +		   queue->ddcb_act, queue->ddcb_next); + +	pddcb = queue->ddcb_vaddr; +	for (i = 0; i < queue->ddcb_max; i++) { +		seq_printf(s, "  %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ", +			   i, be16_to_cpu(pddcb->retc_16), +			   be16_to_cpu(pddcb->seqnum_16), +			   pddcb->hsi, pddcb->shi); +		seq_printf(s, "PRIV=%06llx CMD=%02x\n", +			   be64_to_cpu(pddcb->priv_64), pddcb->cmd); +		pddcb++; +	} +	return 0; +} + +GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show); + +static int genwqe_info_show(struct seq_file *s, void *unused) +{ +	struct genwqe_dev *cd = s->private; +	u16 val16, type; 
+	u64 app_id, slu_id, bitstream = -1; +	struct pci_dev *pci_dev = cd->pci_dev; + +	slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); +	app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + +	if (genwqe_is_privileged(cd)) +		bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM); + +	val16 = (u16)(slu_id & 0x0fLLU); +	type  = (u16)((slu_id >> 20) & 0xffLLU); + +	seq_printf(s, "%s driver version: %s\n" +		   "    Device Name/Type: %s %s CardIdx: %d\n" +		   "    SLU/APP Config  : 0x%016llx/0x%016llx\n" +		   "    Build Date      : %u/%x/%u\n" +		   "    Base Clock      : %u MHz\n" +		   "    Arch/SVN Release: %u/%llx\n" +		   "    Bitstream       : %llx\n", +		   GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev), +		   genwqe_is_privileged(cd) ? +		   "Physical" : "Virtual or no SR-IOV", +		   cd->card_idx, slu_id, app_id, +		   (u16)((slu_id >> 12) & 0x0fLLU),	   /* month */ +		   (u16)((slu_id >>  4) & 0xffLLU),	   /* day */ +		   (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */ +		   genwqe_base_clock_frequency(cd), +		   (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40, +		   bitstream); + +	return 0; +} + +GENWQE_DEBUGFS_RO(info, genwqe_info_show); + +int genwqe_init_debugfs(struct genwqe_dev *cd) +{ +	struct dentry *root; +	struct dentry *file; +	int ret; +	char card_name[64]; +	char name[64]; +	unsigned int i; + +	sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx); + +	root = debugfs_create_dir(card_name, cd->debugfs_genwqe); +	if (!root) { +		ret = -ENOMEM; +		goto err0; +	} + +	/* non privileged interfaces are done here */ +	file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd, +				   &genwqe_ddcb_info_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("info", S_IRUGO, root, cd, +				   &genwqe_info_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_x64("err_inject", 0666, root, &cd->err_inject); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = 
debugfs_create_u32("ddcb_software_timeout", 0666, root, +				  &cd->ddcb_software_timeout); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_u32("kill_timeout", 0666, root, +				  &cd->kill_timeout); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	/* privileged interfaces follow here */ +	if (!genwqe_is_privileged(cd)) { +		cd->debugfs_root = root; +		return 0; +	} + +	file = debugfs_create_file("curr_regs", S_IRUGO, root, cd, +				   &genwqe_curr_regs_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd, +				   &genwqe_curr_dbg_uid0_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd, +				   &genwqe_curr_dbg_uid1_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd, +				   &genwqe_curr_dbg_uid2_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("prev_regs", S_IRUGO, root, cd, +				   &genwqe_prev_regs_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd, +				   &genwqe_prev_dbg_uid0_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd, +				   &genwqe_prev_dbg_uid1_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd, +				   &genwqe_prev_dbg_uid2_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	for (i = 0; i <  GENWQE_MAX_VFS; i++) { +		sprintf(name, "vf%u_jobtimeout_msec", i); + +		file = debugfs_create_u32(name, 0666, root, +					  &cd->vf_jobtimeout_msec[i]); +		if (!file) { +			ret = -ENOMEM; +			goto err1; +		} +	} + +	file = debugfs_create_file("jobtimer", S_IRUGO, root, cd, +				   &genwqe_jtimer_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; 
+	} + +	file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd, +				   &genwqe_queue_working_time_fops); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	file = debugfs_create_u32("skip_recovery", 0666, root, +				  &cd->skip_recovery); +	if (!file) { +		ret = -ENOMEM; +		goto err1; +	} + +	cd->debugfs_root = root; +	return 0; +err1: +	debugfs_remove_recursive(root); +err0: +	return ret; +} + +void genqwe_exit_debugfs(struct genwqe_dev *cd) +{ +	debugfs_remove_recursive(cd->debugfs_root); +} diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c new file mode 100644 index 00000000000..1d2f163a190 --- /dev/null +++ b/drivers/misc/genwqe/card_dev.c @@ -0,0 +1,1403 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Character device representation of the GenWQE device. This allows + * user-space applications to communicate with the card. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/atomic.h> + +#include "card_base.h" +#include "card_ddcb.h" + +static int genwqe_open_files(struct genwqe_dev *cd) +{ +	int rc; +	unsigned long flags; + +	spin_lock_irqsave(&cd->file_lock, flags); +	rc = list_empty(&cd->file_list); +	spin_unlock_irqrestore(&cd->file_lock, flags); +	return !rc; +} + +static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ +	unsigned long flags; + +	cfile->owner = current; +	spin_lock_irqsave(&cd->file_lock, flags); +	list_add(&cfile->list, &cd->file_list); +	spin_unlock_irqrestore(&cd->file_lock, flags); +} + +static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ +	unsigned long flags; + +	spin_lock_irqsave(&cd->file_lock, flags); +	list_del(&cfile->list); +	spin_unlock_irqrestore(&cd->file_lock, flags); + +	return 0; +} + +static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ +	unsigned long flags; + +	spin_lock_irqsave(&cfile->pin_lock, flags); +	list_add(&m->pin_list, &cfile->pin_list); +	spin_unlock_irqrestore(&cfile->pin_lock, flags); +} + +static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ +	unsigned long flags; + +	spin_lock_irqsave(&cfile->pin_lock, flags); +	list_del(&m->pin_list); +	spin_unlock_irqrestore(&cfile->pin_lock, flags); + +	return 0; +} + +/** + * genwqe_search_pin() - Search for the mapping for a userspace address + * @cfile:	Descriptor of opened file + * @u_addr:	User virtual address + * @size:	Size of buffer + * @dma_addr:	DMA address to be updated + * + * Return: Pointer to the corresponding mapping	NULL if not found + */ +static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile, +					    unsigned long u_addr, +					    unsigned int size, +	
				    void **virt_addr) +{ +	unsigned long flags; +	struct dma_mapping *m; + +	spin_lock_irqsave(&cfile->pin_lock, flags); + +	list_for_each_entry(m, &cfile->pin_list, pin_list) { +		if ((((u64)m->u_vaddr) <= (u_addr)) && +		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) { + +			if (virt_addr) +				*virt_addr = m->k_vaddr + +					(u_addr - (u64)m->u_vaddr); + +			spin_unlock_irqrestore(&cfile->pin_lock, flags); +			return m; +		} +	} +	spin_unlock_irqrestore(&cfile->pin_lock, flags); +	return NULL; +} + +static void __genwqe_add_mapping(struct genwqe_file *cfile, +			      struct dma_mapping *dma_map) +{ +	unsigned long flags; + +	spin_lock_irqsave(&cfile->map_lock, flags); +	list_add(&dma_map->card_list, &cfile->map_list); +	spin_unlock_irqrestore(&cfile->map_lock, flags); +} + +static void __genwqe_del_mapping(struct genwqe_file *cfile, +			      struct dma_mapping *dma_map) +{ +	unsigned long flags; + +	spin_lock_irqsave(&cfile->map_lock, flags); +	list_del(&dma_map->card_list); +	spin_unlock_irqrestore(&cfile->map_lock, flags); +} + + +/** + * __genwqe_search_mapping() - Search for the mapping for a userspace address + * @cfile:	descriptor of opened file + * @u_addr:	user virtual address + * @size:	size of buffer + * @dma_addr:	DMA address to be updated + * Return: Pointer to the corresponding mapping	NULL if not found + */ +static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile, +						   unsigned long u_addr, +						   unsigned int size, +						   dma_addr_t *dma_addr, +						   void **virt_addr) +{ +	unsigned long flags; +	struct dma_mapping *m; +	struct pci_dev *pci_dev = cfile->cd->pci_dev; + +	spin_lock_irqsave(&cfile->map_lock, flags); +	list_for_each_entry(m, &cfile->map_list, card_list) { + +		if ((((u64)m->u_vaddr) <= (u_addr)) && +		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) { + +			/* match found: current is as expected and +			   addr is in range */ +			if (dma_addr) +				*dma_addr = m->dma_addr + +					
(u_addr - (u64)m->u_vaddr); + +			if (virt_addr) +				*virt_addr = m->k_vaddr + +					(u_addr - (u64)m->u_vaddr); + +			spin_unlock_irqrestore(&cfile->map_lock, flags); +			return m; +		} +	} +	spin_unlock_irqrestore(&cfile->map_lock, flags); + +	dev_err(&pci_dev->dev, +		"[%s] Entry not found: u_addr=%lx, size=%x\n", +		__func__, u_addr, size); + +	return NULL; +} + +static void genwqe_remove_mappings(struct genwqe_file *cfile) +{ +	int i = 0; +	struct list_head *node, *next; +	struct dma_mapping *dma_map; +	struct genwqe_dev *cd = cfile->cd; +	struct pci_dev *pci_dev = cfile->cd->pci_dev; + +	list_for_each_safe(node, next, &cfile->map_list) { +		dma_map = list_entry(node, struct dma_mapping, card_list); + +		list_del_init(&dma_map->card_list); + +		/* +		 * This is really a bug, because those things should +		 * have been already tidied up. +		 * +		 * GENWQE_MAPPING_RAW should have been removed via munmap(). +		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code. +		 */ +		dev_err(&pci_dev->dev, +			"[%s] %d. 
cleanup mapping: u_vaddr=%p " +			"u_kaddr=%016lx dma_addr=%lx\n", __func__, i++, +			dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr, +			(unsigned long)dma_map->dma_addr); + +		if (dma_map->type == GENWQE_MAPPING_RAW) { +			/* we allocated this dynamically */ +			__genwqe_free_consistent(cd, dma_map->size, +						dma_map->k_vaddr, +						dma_map->dma_addr); +			kfree(dma_map); +		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) { +			/* we use dma_map statically from the request */ +			genwqe_user_vunmap(cd, dma_map, NULL); +		} +	} +} + +static void genwqe_remove_pinnings(struct genwqe_file *cfile) +{ +	struct list_head *node, *next; +	struct dma_mapping *dma_map; +	struct genwqe_dev *cd = cfile->cd; + +	list_for_each_safe(node, next, &cfile->pin_list) { +		dma_map = list_entry(node, struct dma_mapping, pin_list); + +		/* +		 * This is not a bug, because a killed process might +		 * not call the unpin ioctl, which is supposed to free +		 * the resources. +		 * +		 * Pinnings are dynamically allocated and need to be +		 * deleted. +		 */ +		list_del_init(&dma_map->pin_list); +		genwqe_user_vunmap(cd, dma_map, NULL); +		kfree(dma_map); +	} +} + +/** + * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files + * + * E.g. 
genwqe_send_signal(cd, SIGIO); + */ +static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig) +{ +	unsigned int files = 0; +	unsigned long flags; +	struct genwqe_file *cfile; + +	spin_lock_irqsave(&cd->file_lock, flags); +	list_for_each_entry(cfile, &cd->file_list, list) { +		if (cfile->async_queue) +			kill_fasync(&cfile->async_queue, sig, POLL_HUP); +		files++; +	} +	spin_unlock_irqrestore(&cd->file_lock, flags); +	return files; +} + +static int genwqe_force_sig(struct genwqe_dev *cd, int sig) +{ +	unsigned int files = 0; +	unsigned long flags; +	struct genwqe_file *cfile; + +	spin_lock_irqsave(&cd->file_lock, flags); +	list_for_each_entry(cfile, &cd->file_list, list) { +		force_sig(sig, cfile->owner); +		files++; +	} +	spin_unlock_irqrestore(&cd->file_lock, flags); +	return files; +} + +/** + * genwqe_open() - file open + * @inode:      file system information + * @filp:	file handle + * + * This function is executed whenever an application calls + * open("/dev/genwqe",..). + * + * Return: 0 if successful or <0 if errors + */ +static int genwqe_open(struct inode *inode, struct file *filp) +{ +	struct genwqe_dev *cd; +	struct genwqe_file *cfile; +	struct pci_dev *pci_dev; + +	cfile = kzalloc(sizeof(*cfile), GFP_KERNEL); +	if (cfile == NULL) +		return -ENOMEM; + +	cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe); +	pci_dev = cd->pci_dev; +	cfile->cd = cd; +	cfile->filp = filp; +	cfile->client = NULL; + +	spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */ +	INIT_LIST_HEAD(&cfile->map_list); + +	spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */ +	INIT_LIST_HEAD(&cfile->pin_list); + +	filp->private_data = cfile; + +	genwqe_add_file(cd, cfile); +	return 0; +} + +/** + * genwqe_fasync() - Setup process to receive SIGIO. 
+ * @fd:        file descriptor + * @filp:      file handle + * @mode:      file mode + * + * Sending a signal is working as following: + * + * if (cdev->async_queue) + *         kill_fasync(&cdev->async_queue, SIGIO, POLL_IN); + * + * Some devices also implement asynchronous notification to indicate + * when the device can be written; in this case, of course, + * kill_fasync must be called with a mode of POLL_OUT. + */ +static int genwqe_fasync(int fd, struct file *filp, int mode) +{ +	struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data; +	return fasync_helper(fd, filp, mode, &cdev->async_queue); +} + + +/** + * genwqe_release() - file close + * @inode:      file system information + * @filp:       file handle + * + * This function is executed whenever an application calls 'close(fd_genwqe)' + * + * Return: always 0 + */ +static int genwqe_release(struct inode *inode, struct file *filp) +{ +	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; +	struct genwqe_dev *cd = cfile->cd; + +	/* there must be no entries in these lists! */ +	genwqe_remove_mappings(cfile); +	genwqe_remove_pinnings(cfile); + +	/* remove this filp from the asynchronously notified filp's */ +	genwqe_fasync(-1, filp, 0); + +	/* +	 * For this to work we must not release cd when this cfile is +	 * not yet released, otherwise the list entry is invalid, +	 * because the list itself gets reinstantiated! +	 */ +	genwqe_del_file(cd, cfile); +	kfree(cfile); +	return 0; +} + +static void genwqe_vma_open(struct vm_area_struct *vma) +{ +	/* nothing ... */ +} + +/** + * genwqe_vma_close() - Called each time when vma is unmapped + * + * Free memory which got allocated by GenWQE mmap(). 
+ */
+static void genwqe_vma_close(struct vm_area_struct *vma)
+{
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct inode *inode = vma->vm_file->f_dentry->d_inode;
+	struct dma_mapping *dma_map;
+	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
+					    cdev_genwqe);
+	struct pci_dev *pci_dev = cd->pci_dev;
+	dma_addr_t d_addr = 0;
+	struct genwqe_file *cfile = vma->vm_private_data;
+
+	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
+					 &d_addr, NULL);
+	if (dma_map == NULL) {
+		dev_err(&pci_dev->dev,
+			"  [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
+			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
+			vsize);
+		return;
+	}
+	__genwqe_del_mapping(cfile, dma_map);
+	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
+				 dma_map->dma_addr);
+	kfree(dma_map);
+}
+
+static struct vm_operations_struct genwqe_vma_ops = {
+	.open   = genwqe_vma_open,
+	.close  = genwqe_vma_close,
+};
+
+/**
+ * genwqe_mmap() - Provide contiguous buffers to userspace
+ *
+ * We use mmap() to allocate contiguous buffers used for DMA
+ * transfers. After the buffer is allocated we remap it to user-space
+ * and remember a reference to our dma_mapping data structure, where
+ * we store the associated DMA address and allocated size.
+ *
+ * When we receive a DDCB execution request with the ATS bits set to
+ * plain buffer, we look up our dma_mapping list to find the
+ * corresponding DMA address for the associated user-space address.
+ */
+static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int rc;
+	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
+	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
+	struct genwqe_dev *cd = cfile->cd;
+	struct dma_mapping *dma_map;
+
+	if (vsize == 0)
+		return -EINVAL;
+
+	if (get_order(vsize) > MAX_ORDER)
+		return -ENOMEM;
+
+	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
+	if (dma_map == NULL)
+		return -ENOMEM;
+
+	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
+	dma_map->u_vaddr = (void *)vma->vm_start;
+	dma_map->size = vsize;
+	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
+	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
+						     &dma_map->dma_addr);
+	if (dma_map->k_vaddr == NULL) {
+		rc = -ENOMEM;
+		goto free_dma_map;
+	}
+
+	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
+		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;
+
+	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
+	rc = remap_pfn_range(vma,
+			     vma->vm_start,
+			     pfn,
+			     vsize,
+			     vma->vm_page_prot);
+	if (rc != 0) {
+		rc = -EFAULT;
+		goto free_dma_mem;
+	}
+
+	vma->vm_private_data = cfile;
+	vma->vm_ops = &genwqe_vma_ops;
+	__genwqe_add_mapping(cfile, dma_map);
+
+	return 0;
+
+ free_dma_mem:
+	__genwqe_free_consistent(cd, dma_map->size,
+				dma_map->k_vaddr,
+				dma_map->dma_addr);
+ free_dma_map:
+	kfree(dma_map);
+	return rc;
+}
+
+/**
+ * do_flash_update() - Execute flash update (write image or CVPD)
+ * @cfile:     file handle linked to the genwqe device
+ * @load:      details about image load
+ *
+ * Return: 0 if successful
+ */
+
+#define	FLASH_BLOCK	0x40000	/* we use 256k blocks */
+
+static int do_flash_update(struct genwqe_file *cfile,
+			   struct genwqe_bitstream *load)
+{
+	int rc = 0;
+	int blocks_to_flash;
+	dma_addr_t dma_addr;
+	u64 flash = 0;
+	size_t tocopy = 0;
+	u8 __user *buf;
+	u8 *xbuf;
+	u32 crc;
+	u8 cmdopts;
+	struct genwqe_dev *cd = cfile->cd;
+	struct 
pci_dev *pci_dev = cd->pci_dev; + +	if ((load->size & 0x3) != 0) +		return -EINVAL; + +	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) +		return -EINVAL; + +	/* FIXME Bits have changed for new service layer! */ +	switch ((char)load->partition) { +	case '0': +		cmdopts = 0x14; +		break;		/* download/erase_first/part_0 */ +	case '1': +		cmdopts = 0x1C; +		break;		/* download/erase_first/part_1 */ +	case 'v': +		cmdopts = 0x0C; +		break;		/* download/erase_first/vpd */ +	default: +		return -EINVAL; +	} + +	buf = (u8 __user *)load->data_addr; +	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); +	if (xbuf == NULL) +		return -ENOMEM; + +	blocks_to_flash = load->size / FLASH_BLOCK; +	while (load->size) { +		struct genwqe_ddcb_cmd *req; + +		/* +		 * We must be 4 byte aligned. Buffer must be 0 appened +		 * to have defined values when calculating CRC. +		 */ +		tocopy = min_t(size_t, load->size, FLASH_BLOCK); + +		rc = copy_from_user(xbuf, buf, tocopy); +		if (rc) { +			rc = -EFAULT; +			goto free_buffer; +		} +		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff); + +		dev_dbg(&pci_dev->dev, +			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n", +			__func__, (unsigned long)dma_addr, crc, tocopy, +			blocks_to_flash); + +		/* prepare DDCB for SLU process */ +		req = ddcb_requ_alloc(); +		if (req == NULL) { +			rc = -ENOMEM; +			goto free_buffer; +		} + +		req->cmd = SLCMD_MOVE_FLASH; +		req->cmdopts = cmdopts; + +		/* prepare invariant values */ +		if (genwqe_get_slu_id(cd) <= 0x2) { +			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr); +			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy); +			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash); +			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0); +			req->__asiv[24]	       = load->uid; +			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc); + +			/* for simulation only */ +			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id); +			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id); +			req->asiv_length = 32; /* 
bytes included in crc calc */ +		} else {	/* setup DDCB for ATS architecture */ +			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr); +			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy); +			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */ +			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash); +			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24); +			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc); + +			/* for simulation only */ +			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id); +			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id); + +			/* Rd only */ +			req->ats = 0x4ULL << 44; +			req->asiv_length = 40; /* bytes included in crc calc */ +		} +		req->asv_length  = 8; + +		/* For Genwqe5 we get back the calculated CRC */ +		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */ + +		rc = __genwqe_execute_raw_ddcb(cd, req); + +		load->retc = req->retc; +		load->attn = req->attn; +		load->progress = req->progress; + +		if (rc < 0) { +			ddcb_requ_free(req); +			goto free_buffer; +		} + +		if (req->retc != DDCB_RETC_COMPLETE) { +			rc = -EIO; +			ddcb_requ_free(req); +			goto free_buffer; +		} + +		load->size  -= tocopy; +		flash += tocopy; +		buf += tocopy; +		blocks_to_flash--; +		ddcb_requ_free(req); +	} + + free_buffer: +	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); +	return rc; +} + +static int do_flash_read(struct genwqe_file *cfile, +			 struct genwqe_bitstream *load) +{ +	int rc, blocks_to_flash; +	dma_addr_t dma_addr; +	u64 flash = 0; +	size_t tocopy = 0; +	u8 __user *buf; +	u8 *xbuf; +	u8 cmdopts; +	struct genwqe_dev *cd = cfile->cd; +	struct pci_dev *pci_dev = cd->pci_dev; +	struct genwqe_ddcb_cmd *cmd; + +	if ((load->size & 0x3) != 0) +		return -EINVAL; + +	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) +		return -EINVAL; + +	/* FIXME Bits have changed for new service layer! 
*/ +	switch ((char)load->partition) { +	case '0': +		cmdopts = 0x12; +		break;		/* upload/part_0 */ +	case '1': +		cmdopts = 0x1A; +		break;		/* upload/part_1 */ +	case 'v': +		cmdopts = 0x0A; +		break;		/* upload/vpd */ +	default: +		return -EINVAL; +	} + +	buf = (u8 __user *)load->data_addr; +	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); +	if (xbuf == NULL) +		return -ENOMEM; + +	blocks_to_flash = load->size / FLASH_BLOCK; +	while (load->size) { +		/* +		 * We must be 4 byte aligned. Buffer must be 0 appened +		 * to have defined values when calculating CRC. +		 */ +		tocopy = min_t(size_t, load->size, FLASH_BLOCK); + +		dev_dbg(&pci_dev->dev, +			"[%s] DMA: %lx SZ: %ld %d\n", +			__func__, (unsigned long)dma_addr, tocopy, +			blocks_to_flash); + +		/* prepare DDCB for SLU process */ +		cmd = ddcb_requ_alloc(); +		if (cmd == NULL) { +			rc = -ENOMEM; +			goto free_buffer; +		} +		cmd->cmd = SLCMD_MOVE_FLASH; +		cmd->cmdopts = cmdopts; + +		/* prepare invariant values */ +		if (genwqe_get_slu_id(cd) <= 0x2) { +			*(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr); +			*(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy); +			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash); +			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0); +			cmd->__asiv[24] = load->uid; +			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */; +			cmd->asiv_length = 32; /* bytes included in crc calc */ +		} else {	/* setup DDCB for ATS architecture */ +			*(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr); +			*(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy); +			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */ +			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash); +			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24); +			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */ + +			/* rd/wr */ +			cmd->ats = 0x5ULL << 44; +			cmd->asiv_length = 40; /* bytes included in crc calc */ +		} +		cmd->asv_length  = 8; + +		/* we only get back the calculated CRC */ +		*(u64 
*)&cmd->asv[0] = 0ULL;	/* 0x80 */ + +		rc = __genwqe_execute_raw_ddcb(cd, cmd); + +		load->retc = cmd->retc; +		load->attn = cmd->attn; +		load->progress = cmd->progress; + +		if ((rc < 0) && (rc != -EBADMSG)) { +			ddcb_requ_free(cmd); +			goto free_buffer; +		} + +		rc = copy_to_user(buf, xbuf, tocopy); +		if (rc) { +			rc = -EFAULT; +			ddcb_requ_free(cmd); +			goto free_buffer; +		} + +		/* We know that we can get retc 0x104 with CRC err */ +		if (((cmd->retc == DDCB_RETC_FAULT) && +		     (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */ +		    ((cmd->retc == DDCB_RETC_COMPLETE) && +		     (cmd->attn != 0x00))) {  /* Everything was fine */ +			rc = -EIO; +			ddcb_requ_free(cmd); +			goto free_buffer; +		} + +		load->size  -= tocopy; +		flash += tocopy; +		buf += tocopy; +		blocks_to_flash--; +		ddcb_requ_free(cmd); +	} +	rc = 0; + + free_buffer: +	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); +	return rc; +} + +static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ +	int rc; +	struct genwqe_dev *cd = cfile->cd; +	struct pci_dev *pci_dev = cfile->cd->pci_dev; +	struct dma_mapping *dma_map; +	unsigned long map_addr; +	unsigned long map_size; + +	if ((m->addr == 0x0) || (m->size == 0)) +		return -EINVAL; + +	map_addr = (m->addr & PAGE_MASK); +	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + +	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); +	if (dma_map == NULL) +		return -ENOMEM; + +	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED); +	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL); +	if (rc != 0) { +		dev_err(&pci_dev->dev, +			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc); +		kfree(dma_map); +		return rc; +	} + +	genwqe_add_pin(cfile, dma_map); +	return 0; +} + +static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ +	struct genwqe_dev *cd = cfile->cd; +	struct dma_mapping *dma_map; +	unsigned long map_addr; +	unsigned long map_size; 
+ +	if (m->addr == 0x0) +		return -EINVAL; + +	map_addr = (m->addr & PAGE_MASK); +	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + +	dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL); +	if (dma_map == NULL) +		return -ENOENT; + +	genwqe_del_pin(cfile, dma_map); +	genwqe_user_vunmap(cd, dma_map, NULL); +	kfree(dma_map); +	return 0; +} + +/** + * ddcb_cmd_cleanup() - Remove dynamically created fixup entries + * + * Only if there are any. Pinnings are not removed. + */ +static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req) +{ +	unsigned int i; +	struct dma_mapping *dma_map; +	struct genwqe_dev *cd = cfile->cd; + +	for (i = 0; i < DDCB_FIXUPS; i++) { +		dma_map = &req->dma_mappings[i]; + +		if (dma_mapping_used(dma_map)) { +			__genwqe_del_mapping(cfile, dma_map); +			genwqe_user_vunmap(cd, dma_map, req); +		} +		if (req->sgls[i].sgl != NULL) +			genwqe_free_sync_sgl(cd, &req->sgls[i]); +	} +	return 0; +} + +/** + * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references + * + * Before the DDCB gets executed we need to handle the fixups. We + * replace the user-space addresses with DMA addresses or do + * additional setup work e.g. generating a scatter-gather list which + * is used to describe the memory referred to in the fixup. 
+ */ +static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) +{ +	int rc; +	unsigned int asiv_offs, i; +	struct genwqe_dev *cd = cfile->cd; +	struct genwqe_ddcb_cmd *cmd = &req->cmd; +	struct dma_mapping *m; +	const char *type = "UNKNOWN"; + +	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58; +	     i++, asiv_offs += 0x08) { + +		u64 u_addr; +		dma_addr_t d_addr; +		u32 u_size = 0; +		u64 ats_flags; + +		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs); + +		switch (ats_flags) { + +		case ATS_TYPE_DATA: +			break;	/* nothing to do here */ + +		case ATS_TYPE_FLAT_RDWR: +		case ATS_TYPE_FLAT_RD: { +			u_addr = be64_to_cpu(*((__be64 *)&cmd-> +					       asiv[asiv_offs])); +			u_size = be32_to_cpu(*((__be32 *)&cmd-> +					       asiv[asiv_offs + 0x08])); + +			/* +			 * No data available. Ignore u_addr in this +			 * case and set addr to 0. Hardware must not +			 * fetch the buffer. +			 */ +			if (u_size == 0x0) { +				*((__be64 *)&cmd->asiv[asiv_offs]) = +					cpu_to_be64(0x0); +				break; +			} + +			m = __genwqe_search_mapping(cfile, u_addr, u_size, +						   &d_addr, NULL); +			if (m == NULL) { +				rc = -EFAULT; +				goto err_out; +			} + +			*((__be64 *)&cmd->asiv[asiv_offs]) = +				cpu_to_be64(d_addr); +			break; +		} + +		case ATS_TYPE_SGL_RDWR: +		case ATS_TYPE_SGL_RD: { +			int page_offs; + +			u_addr = be64_to_cpu(*((__be64 *) +					       &cmd->asiv[asiv_offs])); +			u_size = be32_to_cpu(*((__be32 *) +					       &cmd->asiv[asiv_offs + 0x08])); + +			/* +			 * No data available. Ignore u_addr in this +			 * case and set addr to 0. Hardware must not +			 * fetch the empty sgl. 
+			 */
+			if (u_size == 0x0) {
+				*((__be64 *)&cmd->asiv[asiv_offs]) =
+					cpu_to_be64(0x0);
+				break;
+			}
+
+			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
+			if (m != NULL) {
+				type = "PINNING";
+				page_offs = (u_addr -
+					     (u64)m->u_vaddr)/PAGE_SIZE;
+			} else {
+				type = "MAPPING";
+				m = &req->dma_mappings[i];
+
+				genwqe_mapping_init(m,
+						    GENWQE_MAPPING_SGL_TEMP);
+				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
+						      u_size, req);
+				if (rc != 0)
+					goto err_out;
+
+				__genwqe_add_mapping(cfile, m);
+				page_offs = 0;
+			}
+
+			/* create genwqe style scatter gather list */
+			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
+						   (void __user *)u_addr,
+						   u_size);
+			if (rc != 0)
+				goto err_out;
+
+			genwqe_setup_sgl(cd, &req->sgls[i],
+					 &m->dma_list[page_offs]);
+
+			*((__be64 *)&cmd->asiv[asiv_offs]) =
+				cpu_to_be64(req->sgls[i].sgl_dma_addr);
+
+			break;
+		}
+		default:
+			rc = -EINVAL;
+			goto err_out;
+		}
+	}
+	return 0;
+
+ err_out:
+	ddcb_cmd_cleanup(cfile, req);
+	return rc;
+}
+
+/**
+ * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
+ *
+ * The code will build up the translation tables or look up the
+ * contiguous memory allocation table to find the right translations
+ * and DMA addresses. 
+ */
+static int genwqe_execute_ddcb(struct genwqe_file *cfile,
+			       struct genwqe_ddcb_cmd *cmd)
+{
+	int rc;
+	struct genwqe_dev *cd = cfile->cd;
+	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
+
+	rc = ddcb_cmd_fixups(cfile, req);
+	if (rc != 0)
+		return rc;
+
+	rc = __genwqe_execute_raw_ddcb(cd, cmd);
+	ddcb_cmd_cleanup(cfile, req);
+	return rc;
+}
+
+static int do_execute_ddcb(struct genwqe_file *cfile,
+			   unsigned long arg, int raw)
+{
+	int rc;
+	struct genwqe_ddcb_cmd *cmd;
+	struct ddcb_requ *req;
+	struct genwqe_dev *cd = cfile->cd;
+
+	cmd = ddcb_requ_alloc();
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	req = container_of(cmd, struct ddcb_requ, cmd);
+
+	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
+		ddcb_requ_free(cmd);
+		return -EFAULT;
+	}
+
+	if (!raw)
+		rc = genwqe_execute_ddcb(cfile, cmd);
+	else
+		rc = __genwqe_execute_raw_ddcb(cd, cmd);
+
+	/* Copy back only the modified fields. Do not copy ASIV
+	   back since the copy got modified by the driver. 
*/ +	if (copy_to_user((void __user *)arg, cmd, +			 sizeof(*cmd) - DDCB_ASIV_LENGTH)) { +		ddcb_requ_free(cmd); +		return -EFAULT; +	} + +	ddcb_requ_free(cmd); +	return rc; +} + +/** + * genwqe_ioctl() - IO control + * @filp:       file handle + * @cmd:        command identifier (passed from user) + * @arg:        argument (passed from user) + * + * Return: 0 success + */ +static long genwqe_ioctl(struct file *filp, unsigned int cmd, +			 unsigned long arg) +{ +	int rc = 0; +	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; +	struct genwqe_dev *cd = cfile->cd; +	struct genwqe_reg_io __user *io; +	u64 val; +	u32 reg_offs; + +	if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE) +		return -EINVAL; + +	switch (cmd) { + +	case GENWQE_GET_CARD_STATE: +		put_user(cd->card_state, (enum genwqe_card_state __user *)arg); +		return 0; + +		/* Register access */ +	case GENWQE_READ_REG64: { +		io = (struct genwqe_reg_io __user *)arg; + +		if (get_user(reg_offs, &io->num)) +			return -EFAULT; + +		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) +			return -EINVAL; + +		val = __genwqe_readq(cd, reg_offs); +		put_user(val, &io->val64); +		return 0; +	} + +	case GENWQE_WRITE_REG64: { +		io = (struct genwqe_reg_io __user *)arg; + +		if (!capable(CAP_SYS_ADMIN)) +			return -EPERM; + +		if ((filp->f_flags & O_ACCMODE) == O_RDONLY) +			return -EPERM; + +		if (get_user(reg_offs, &io->num)) +			return -EFAULT; + +		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) +			return -EINVAL; + +		if (get_user(val, &io->val64)) +			return -EFAULT; + +		__genwqe_writeq(cd, reg_offs, val); +		return 0; +	} + +	case GENWQE_READ_REG32: { +		io = (struct genwqe_reg_io __user *)arg; + +		if (get_user(reg_offs, &io->num)) +			return -EFAULT; + +		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) +			return -EINVAL; + +		val = __genwqe_readl(cd, reg_offs); +		put_user(val, &io->val64); +		return 0; +	} + +	case GENWQE_WRITE_REG32: { +		io = (struct genwqe_reg_io __user *)arg; + +		if 
(!capable(CAP_SYS_ADMIN)) +			return -EPERM; + +		if ((filp->f_flags & O_ACCMODE) == O_RDONLY) +			return -EPERM; + +		if (get_user(reg_offs, &io->num)) +			return -EFAULT; + +		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) +			return -EINVAL; + +		if (get_user(val, &io->val64)) +			return -EFAULT; + +		__genwqe_writel(cd, reg_offs, val); +		return 0; +	} + +		/* Flash update/reading */ +	case GENWQE_SLU_UPDATE: { +		struct genwqe_bitstream load; + +		if (!genwqe_is_privileged(cd)) +			return -EPERM; + +		if ((filp->f_flags & O_ACCMODE) == O_RDONLY) +			return -EPERM; + +		if (copy_from_user(&load, (void __user *)arg, +				   sizeof(load))) +			return -EFAULT; + +		rc = do_flash_update(cfile, &load); + +		if (copy_to_user((void __user *)arg, &load, sizeof(load))) +			return -EFAULT; + +		return rc; +	} + +	case GENWQE_SLU_READ: { +		struct genwqe_bitstream load; + +		if (!genwqe_is_privileged(cd)) +			return -EPERM; + +		if (genwqe_flash_readback_fails(cd)) +			return -ENOSPC;	 /* known to fail for old versions */ + +		if (copy_from_user(&load, (void __user *)arg, sizeof(load))) +			return -EFAULT; + +		rc = do_flash_read(cfile, &load); + +		if (copy_to_user((void __user *)arg, &load, sizeof(load))) +			return -EFAULT; + +		return rc; +	} + +		/* memory pinning and unpinning */ +	case GENWQE_PIN_MEM: { +		struct genwqe_mem m; + +		if (copy_from_user(&m, (void __user *)arg, sizeof(m))) +			return -EFAULT; + +		return genwqe_pin_mem(cfile, &m); +	} + +	case GENWQE_UNPIN_MEM: { +		struct genwqe_mem m; + +		if (copy_from_user(&m, (void __user *)arg, sizeof(m))) +			return -EFAULT; + +		return genwqe_unpin_mem(cfile, &m); +	} + +		/* launch an DDCB and wait for completion */ +	case GENWQE_EXECUTE_DDCB: +		return do_execute_ddcb(cfile, arg, 0); + +	case GENWQE_EXECUTE_RAW_DDCB: { + +		if (!capable(CAP_SYS_ADMIN)) +			return -EPERM; + +		return do_execute_ddcb(cfile, arg, 1); +	} + +	default: +		return -EINVAL; +	} + +	return rc; +} + +#if defined(CONFIG_COMPAT) 
+/** + * genwqe_compat_ioctl() - Compatibility ioctl + * + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/genwqe<n>_card. + * + * @filp:        file pointer. + * @cmd:         command. + * @arg:         user argument. + * Return:       zero on success or negative number on failure. + */ +static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd, +				unsigned long arg) +{ +	return genwqe_ioctl(filp, cmd, arg); +} +#endif /* defined(CONFIG_COMPAT) */ + +static const struct file_operations genwqe_fops = { +	.owner		= THIS_MODULE, +	.open		= genwqe_open, +	.fasync		= genwqe_fasync, +	.mmap		= genwqe_mmap, +	.unlocked_ioctl	= genwqe_ioctl, +#if defined(CONFIG_COMPAT) +	.compat_ioctl   = genwqe_compat_ioctl, +#endif +	.release	= genwqe_release, +}; + +static int genwqe_device_initialized(struct genwqe_dev *cd) +{ +	return cd->dev != NULL; +} + +/** + * genwqe_device_create() - Create and configure genwqe char device + * @cd:      genwqe device descriptor + * + * This function must be called before we create any more genwqe + * character devices, because it is allocating the major and minor + * number which are supposed to be used by the client drivers. + */ +int genwqe_device_create(struct genwqe_dev *cd) +{ +	int rc; +	struct pci_dev *pci_dev = cd->pci_dev; + +	/* +	 * Here starts the individual setup per client. It must +	 * initialize its own cdev data structure with its own fops. +	 * The appropriate devnum needs to be created. The ranges must +	 * not overlap. 
+	 */ +	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0, +				 GENWQE_MAX_MINOR, GENWQE_DEVNAME); +	if (rc < 0) { +		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n"); +		goto err_dev; +	} + +	cdev_init(&cd->cdev_genwqe, &genwqe_fops); +	cd->cdev_genwqe.owner = THIS_MODULE; + +	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1); +	if (rc < 0) { +		dev_err(&pci_dev->dev, "err: cdev_add failed\n"); +		goto err_add; +	} + +	/* +	 * Finally the device in /dev/... must be created. The rule is +	 * to use card%d_clientname for each created device. +	 */ +	cd->dev = device_create_with_groups(cd->class_genwqe, +					    &cd->pci_dev->dev, +					    cd->devnum_genwqe, cd, +					    genwqe_attribute_groups, +					    GENWQE_DEVNAME "%u_card", +					    cd->card_idx); +	if (IS_ERR(cd->dev)) { +		rc = PTR_ERR(cd->dev); +		goto err_cdev; +	} + +	rc = genwqe_init_debugfs(cd); +	if (rc != 0) +		goto err_debugfs; + +	return 0; + + err_debugfs: +	device_destroy(cd->class_genwqe, cd->devnum_genwqe); + err_cdev: +	cdev_del(&cd->cdev_genwqe); + err_add: +	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); + err_dev: +	cd->dev = NULL; +	return rc; +} + +static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd) +{ +	int rc; +	unsigned int i; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (!genwqe_open_files(cd)) +		return 0; + +	dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__); + +	rc = genwqe_kill_fasync(cd, SIGIO); +	if (rc > 0) { +		/* give kill_timeout seconds to close file descriptors ... */ +		for (i = 0; (i < genwqe_kill_timeout) && +			     genwqe_open_files(cd); i++) { +			dev_info(&pci_dev->dev, "  %d sec ...", i); + +			cond_resched(); +			msleep(1000); +		} + +		/* if no open files we can safely continue, else ... 
*/ +		if (!genwqe_open_files(cd)) +			return 0; + +		dev_warn(&pci_dev->dev, +			 "[%s] send SIGKILL and wait ...\n", __func__); + +		rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */ +		if (rc) { +			/* Give kill_timout more seconds to end processes */ +			for (i = 0; (i < genwqe_kill_timeout) && +				     genwqe_open_files(cd); i++) { +				dev_warn(&pci_dev->dev, "  %d sec ...", i); + +				cond_resched(); +				msleep(1000); +			} +		} +	} +	return 0; +} + +/** + * genwqe_device_remove() - Remove genwqe's char device + * + * This function must be called after the client devices are removed + * because it will free the major/minor number range for the genwqe + * drivers. + * + * This function must be robust enough to be called twice. + */ +int genwqe_device_remove(struct genwqe_dev *cd) +{ +	int rc; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (!genwqe_device_initialized(cd)) +		return 1; + +	genwqe_inform_and_stop_processes(cd); + +	/* +	 * We currently do wait until all filedescriptors are +	 * closed. This leads to a problem when we abort the +	 * application which will decrease this reference from +	 * 1/unused to 0/illegal and not from 2/used 1/empty. +	 */ +	rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount); +	if (rc != 1) { +		dev_err(&pci_dev->dev, +			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc); +		panic("Fatal err: cannot free resources with pending references!"); +	} + +	genqwe_exit_debugfs(cd); +	device_destroy(cd->class_genwqe, cd->devnum_genwqe); +	cdev_del(&cd->cdev_genwqe); +	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); +	cd->dev = NULL; + +	return 0; +} diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c new file mode 100644 index 00000000000..a72a99266c3 --- /dev/null +++ b/drivers/misc/genwqe/card_sysfs.c @@ -0,0 +1,288 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 
2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Sysfs interfaces for the GenWQE card. There are attributes to query + * the version of the bitstream as well as some for the driver. For + * debugging, please also see the debugfs interfaces of this driver. + */ + +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/sysfs.h> +#include <linux/ctype.h> +#include <linux/device.h> + +#include "card_base.h" +#include "card_ddcb.h" + +static const char * const genwqe_types[] = { +	[GENWQE_TYPE_ALTERA_230] = "GenWQE4-230", +	[GENWQE_TYPE_ALTERA_530] = "GenWQE4-530", +	[GENWQE_TYPE_ALTERA_A4]  = "GenWQE5-A4", +	[GENWQE_TYPE_ALTERA_A7]  = "GenWQE5-A7", +}; + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, +			   char *buf) +{ +	struct genwqe_dev *cd = dev_get_drvdata(dev); +	const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" }; + +	return sprintf(buf, "%s\n", cs[cd->card_state]); +} +static DEVICE_ATTR_RO(status); + +static ssize_t appid_show(struct device *dev, struct device_attribute *attr, +			  char *buf) +{ +	char app_name[5]; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	genwqe_read_app_id(cd, app_name, sizeof(app_name)); +	return sprintf(buf, "%s\n", 
app_name); +} +static DEVICE_ATTR_RO(appid); + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, +			    char *buf) +{ +	u64 slu_id, app_id; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); +	app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + +	return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id); +} +static DEVICE_ATTR_RO(version); + +static ssize_t type_show(struct device *dev, struct device_attribute *attr, +			 char *buf) +{ +	u8 card_type; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	card_type = genwqe_card_type(cd); +	return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ? +		       "invalid" : genwqe_types[card_type]); +} +static DEVICE_ATTR_RO(type); + +static ssize_t driver_show(struct device *dev, struct device_attribute *attr, +			   char *buf) +{ +	return sprintf(buf, "%s\n", DRV_VERS_STRING); +} +static DEVICE_ATTR_RO(driver); + +static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr, +			     char *buf) +{ +	u64 tempsens; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR); +	return sprintf(buf, "%016llx\n", tempsens); +} +static DEVICE_ATTR_RO(tempsens); + +static ssize_t freerunning_timer_show(struct device *dev, +				      struct device_attribute *attr, +				      char *buf) +{ +	u64 t; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER); +	return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(freerunning_timer); + +static ssize_t queue_working_time_show(struct device *dev, +				       struct device_attribute *attr, +				       char *buf) +{ +	u64 t; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME); +	return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(queue_working_time); + +static ssize_t base_clock_show(struct device *dev, +			       struct 
device_attribute *attr,
+			       char *buf)
+{
+	u64 base_clock;
+	struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+	base_clock = genwqe_base_clock_frequency(cd);
+	return sprintf(buf, "%lld\n", base_clock);
+}
+static DEVICE_ATTR_RO(base_clock);
+
+/**
+ * curr_bitstream_show() - Show the current bitstream id
+ *
+ * There is a bug in some old versions of the CPLD which selects the
+ * bitstream, which causes the IO_SLU_BITSTREAM register to report
+ * unreliable data in very rare cases. This makes this sysfs
+ * unreliable up to the point where a new CPLD version is being used.
+ *
+ * Unfortunately there is no automatic way yet to query the CPLD
+ * version, such that you need to manually ensure via programming
+ * tools that you have a recent version of the CPLD software.
+ *
+ * The proposed circumvention is to use a special recovery bitstream
+ * on the backup partition (0) to identify problems while loading the
+ * image.
+ */
+static ssize_t curr_bitstream_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	int curr_bitstream;
+	struct genwqe_dev *cd = dev_get_drvdata(dev);
+
+	curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
+	return sprintf(buf, "%d\n", curr_bitstream);
+}
+static DEVICE_ATTR_RO(curr_bitstream);
+
+/**
+ * next_bitstream_show() - Show the next activated bitstream
+ *
+ * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF. 
+ */ +static ssize_t next_bitstream_show(struct device *dev, +				   struct device_attribute *attr, char *buf) +{ +	int next_bitstream; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	switch ((cd->softreset & 0xc) >> 2) { +	case 0x2: +		next_bitstream =  0; +		break; +	case 0x3: +		next_bitstream =  1; +		break; +	default: +		next_bitstream = -1; +		break;		/* error */ +	} +	return sprintf(buf, "%d\n", next_bitstream); +} + +static ssize_t next_bitstream_store(struct device *dev, +				    struct device_attribute *attr, +				    const char *buf, size_t count) +{ +	int partition; +	struct genwqe_dev *cd = dev_get_drvdata(dev); + +	if (kstrtoint(buf, 0, &partition) < 0) +		return -EINVAL; + +	switch (partition) { +	case 0x0: +		cd->softreset = 0x78; +		break; +	case 0x1: +		cd->softreset = 0x7c; +		break; +	default: +		return -EINVAL; +	} + +	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset); +	return count; +} +static DEVICE_ATTR_RW(next_bitstream); + +/* + * Create device_attribute structures / params: name, mode, show, store + * additional flag if valid in VF + */ +static struct attribute *genwqe_attributes[] = { +	&dev_attr_tempsens.attr, +	&dev_attr_next_bitstream.attr, +	&dev_attr_curr_bitstream.attr, +	&dev_attr_base_clock.attr, +	&dev_attr_driver.attr, +	&dev_attr_type.attr, +	&dev_attr_version.attr, +	&dev_attr_appid.attr, +	&dev_attr_status.attr, +	&dev_attr_freerunning_timer.attr, +	&dev_attr_queue_working_time.attr, +	NULL, +}; + +static struct attribute *genwqe_normal_attributes[] = { +	&dev_attr_driver.attr, +	&dev_attr_type.attr, +	&dev_attr_version.attr, +	&dev_attr_appid.attr, +	&dev_attr_status.attr, +	&dev_attr_freerunning_timer.attr, +	&dev_attr_queue_working_time.attr, +	NULL, +}; + +/** + * genwqe_is_visible() - Determine if sysfs attribute should be visible or not + * + * VFs have restricted mmio capabilities, so not all sysfs entries + * are allowed in VFs. 
+ */ +static umode_t genwqe_is_visible(struct kobject *kobj, +				 struct attribute *attr, int n) +{ +	unsigned int j; +	struct device *dev = container_of(kobj, struct device, kobj); +	struct genwqe_dev *cd = dev_get_drvdata(dev); +	umode_t mode = attr->mode; + +	if (genwqe_is_privileged(cd)) +		return mode; + +	for (j = 0; genwqe_normal_attributes[j] != NULL;  j++) +		if (genwqe_normal_attributes[j] == attr) +			return mode; + +	return 0; +} + +static struct attribute_group genwqe_attribute_group = { +	.is_visible = genwqe_is_visible, +	.attrs      = genwqe_attributes, +}; + +const struct attribute_group *genwqe_attribute_groups[] = { +	&genwqe_attribute_group, +	NULL, +}; diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c new file mode 100644 index 00000000000..62cc6bb3f62 --- /dev/null +++ b/drivers/misc/genwqe/card_utils.c @@ -0,0 +1,1034 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Miscelanous functionality used in the other GenWQE driver parts. 
+ */ + +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/sched.h> +#include <linux/vmalloc.h> +#include <linux/page-flags.h> +#include <linux/scatterlist.h> +#include <linux/hugetlb.h> +#include <linux/iommu.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/ctype.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <asm/pgtable.h> + +#include "genwqe_driver.h" +#include "card_base.h" +#include "card_ddcb.h" + +/** + * __genwqe_writeq() - Write 64-bit register + * @cd:	        genwqe device descriptor + * @byte_offs:  byte offset within BAR + * @val:        64-bit value + * + * Return: 0 if success; < 0 if error + */ +int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val) +{ +	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) +		return -EIO; + +	if (cd->mmio == NULL) +		return -EIO; + +	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs); +	return 0; +} + +/** + * __genwqe_readq() - Read 64-bit register + * @cd:         genwqe device descriptor + * @byte_offs:  offset within BAR + * + * Return: value from register + */ +u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs) +{ +	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) +		return 0xffffffffffffffffull; + +	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) && +	    (byte_offs == IO_SLC_CFGREG_GFIR)) +		return 0x000000000000ffffull; + +	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) && +	    (byte_offs == IO_SLC_CFGREG_GFIR)) +		return 0x00000000ffff0000ull; + +	if (cd->mmio == NULL) +		return 0xffffffffffffffffull; + +	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs)); +} + +/** + * __genwqe_writel() - Write 32-bit register + * @cd:	        genwqe device descriptor + * @byte_offs:  byte offset within BAR + * @val:        32-bit value + * + * Return: 0 if success; < 0 if error + */ +int __genwqe_writel(struct genwqe_dev *cd, u64 
byte_offs, u32 val) +{ +	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) +		return -EIO; + +	if (cd->mmio == NULL) +		return -EIO; + +	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs); +	return 0; +} + +/** + * __genwqe_readl() - Read 32-bit register + * @cd:         genwqe device descriptor + * @byte_offs:  offset within BAR + * + * Return: Value from register + */ +u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs) +{ +	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) +		return 0xffffffff; + +	if (cd->mmio == NULL) +		return 0xffffffff; + +	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs)); +} + +/** + * genwqe_read_app_id() - Extract app_id + * + * app_unitcfg need to be filled with valid data first + */ +int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len) +{ +	int i, j; +	u32 app_id = (u32)cd->app_unitcfg; + +	memset(app_name, 0, len); +	for (i = 0, j = 0; j < min(len, 4); j++) { +		char ch = (char)((app_id >> (24 - j*8)) & 0xff); +		if (ch == ' ') +			continue; +		app_name[i++] = isprint(ch) ? ch : 'X'; +	} +	return i; +} + +/** + * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations + * + * Existing kernel functions seem to use a different polynom, + * therefore we could not use them here. 
+ * + * Genwqe's Polynomial = 0x20044009 + */ +#define CRC32_POLYNOMIAL	0x20044009 +static u32 crc32_tab[256];	/* crc32 lookup table */ + +void genwqe_init_crc32(void) +{ +	int i, j; +	u32 crc; + +	for (i = 0;  i < 256;  i++) { +		crc = i << 24; +		for (j = 0;  j < 8;  j++) { +			if (crc & 0x80000000) +				crc = (crc << 1) ^ CRC32_POLYNOMIAL; +			else +				crc = (crc << 1); +		} +		crc32_tab[i] = crc; +	} +} + +/** + * genwqe_crc32() - Generate 32-bit crc as required for DDCBs + * @buff:       pointer to data buffer + * @len:        length of data for calculation + * @init:       initial crc (0xffffffff at start) + * + * polynomial = x^32 * + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009) + + * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should + * result in a crc32 of 0xf33cb7d3. + * + * The existing kernel crc functions did not cover this polynom yet. + * + * Return: crc32 checksum. + */ +u32 genwqe_crc32(u8 *buff, size_t len, u32 init) +{ +	int i; +	u32 crc; + +	crc = init; +	while (len--) { +		i = ((crc >> 24) ^ *buff++) & 0xFF; +		crc = (crc << 8) ^ crc32_tab[i]; +	} +	return crc; +} + +void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, +			       dma_addr_t *dma_handle) +{ +	if (get_order(size) > MAX_ORDER) +		return NULL; + +	return pci_alloc_consistent(cd->pci_dev, size, dma_handle); +} + +void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, +			     void *vaddr, dma_addr_t dma_handle) +{ +	if (vaddr == NULL) +		return; + +	pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle); +} + +static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list, +			      int num_pages) +{ +	int i; +	struct pci_dev *pci_dev = cd->pci_dev; + +	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) { +		pci_unmap_page(pci_dev, dma_list[i], +			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); +		dma_list[i] = 0x0; +	} +} + +static int genwqe_map_pages(struct genwqe_dev *cd, +			   struct page **page_list, int num_pages, +			  
 dma_addr_t *dma_list) +{ +	int i; +	struct pci_dev *pci_dev = cd->pci_dev; + +	/* establish DMA mapping for requested pages */ +	for (i = 0; i < num_pages; i++) { +		dma_addr_t daddr; + +		dma_list[i] = 0x0; +		daddr = pci_map_page(pci_dev, page_list[i], +				     0,	 /* map_offs */ +				     PAGE_SIZE, +				     PCI_DMA_BIDIRECTIONAL);  /* FIXME rd/rw */ + +		if (pci_dma_mapping_error(pci_dev, daddr)) { +			dev_err(&pci_dev->dev, +				"[%s] err: no dma addr daddr=%016llx!\n", +				__func__, (long long)daddr); +			goto err; +		} + +		dma_list[i] = daddr; +	} +	return 0; + + err: +	genwqe_unmap_pages(cd, dma_list, num_pages); +	return -EIO; +} + +static int genwqe_sgl_size(int num_pages) +{ +	int len, num_tlb = num_pages / 7; + +	len = sizeof(struct sg_entry) * (num_pages+num_tlb + 1); +	return roundup(len, PAGE_SIZE); +} + +/** + * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages + * + * Allocates memory for sgl and overlapping pages. Pages which might + * overlap other user-space memory blocks are being cached for DMAs, + * such that we do not run into syncronization issues. Data is copied + * from user-space into the cached pages. 
+ */ +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, +			  void __user *user_addr, size_t user_size) +{ +	int rc; +	struct pci_dev *pci_dev = cd->pci_dev; + +	sgl->fpage_offs = offset_in_page((unsigned long)user_addr); +	sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size); +	sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); +	sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE; + +	dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld " +		"fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n", +		__func__, user_addr, user_size, sgl->nr_pages, +		sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size); + +	sgl->user_addr = user_addr; +	sgl->user_size = user_size; +	sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); + +	if (get_order(sgl->sgl_size) > MAX_ORDER) { +		dev_err(&pci_dev->dev, +			"[%s] err: too much memory requested!\n", __func__); +		return -ENOMEM; +	} + +	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size, +					     &sgl->sgl_dma_addr); +	if (sgl->sgl == NULL) { +		dev_err(&pci_dev->dev, +			"[%s] err: no memory available!\n", __func__); +		return -ENOMEM; +	} + +	/* Only use buffering on incomplete pages */ +	if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) { +		sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, +						       &sgl->fpage_dma_addr); +		if (sgl->fpage == NULL) +			goto err_out; + +		/* Sync with user memory */ +		if (copy_from_user(sgl->fpage + sgl->fpage_offs, +				   user_addr, sgl->fpage_size)) { +			rc = -EFAULT; +			goto err_out; +		} +	} +	if (sgl->lpage_size != 0) { +		sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, +						       &sgl->lpage_dma_addr); +		if (sgl->lpage == NULL) +			goto err_out1; + +		/* Sync with user memory */ +		if (copy_from_user(sgl->lpage, user_addr + user_size - +				   sgl->lpage_size, sgl->lpage_size)) { +			rc = -EFAULT; +			goto err_out1; +		} +	} +	return 0; + + err_out1: +	__genwqe_free_consistent(cd, 
PAGE_SIZE, sgl->fpage, +				 sgl->fpage_dma_addr); + err_out: +	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, +				 sgl->sgl_dma_addr); +	return -ENOMEM; +} + +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, +		     dma_addr_t *dma_list) +{ +	int i = 0, j = 0, p; +	unsigned long dma_offs, map_offs; +	dma_addr_t prev_daddr = 0; +	struct sg_entry *s, *last_s = NULL; +	size_t size = sgl->user_size; + +	dma_offs = 128;		/* next block if needed/dma_offset */ +	map_offs = sgl->fpage_offs; /* offset in first page */ + +	s = &sgl->sgl[0];	/* first set of 8 entries */ +	p = 0;			/* page */ +	while (p < sgl->nr_pages) { +		dma_addr_t daddr; +		unsigned int size_to_map; + +		/* always write the chaining entry, cleanup is done later */ +		j = 0; +		s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs); +		s[j].len	 = cpu_to_be32(128); +		s[j].flags	 = cpu_to_be32(SG_CHAINED); +		j++; + +		while (j < 8) { +			/* DMA mapping for requested page, offs, size */ +			size_to_map = min(size, PAGE_SIZE - map_offs); + +			if ((p == 0) && (sgl->fpage != NULL)) { +				daddr = sgl->fpage_dma_addr + map_offs; + +			} else if ((p == sgl->nr_pages - 1) && +				   (sgl->lpage != NULL)) { +				daddr = sgl->lpage_dma_addr; +			} else { +				daddr = dma_list[p] + map_offs; +			} + +			size -= size_to_map; +			map_offs = 0; + +			if (prev_daddr == daddr) { +				u32 prev_len = be32_to_cpu(last_s->len); + +				/* pr_info("daddr combining: " +					"%016llx/%08x -> %016llx\n", +					prev_daddr, prev_len, daddr); */ + +				last_s->len = cpu_to_be32(prev_len + +							  size_to_map); + +				p++; /* process next page */ +				if (p == sgl->nr_pages) +					goto fixup;  /* nothing to do */ + +				prev_daddr = daddr + size_to_map; +				continue; +			} + +			/* start new entry */ +			s[j].target_addr = cpu_to_be64(daddr); +			s[j].len	 = cpu_to_be32(size_to_map); +			s[j].flags	 = cpu_to_be32(SG_DATA); +			prev_daddr = daddr + size_to_map; +			last_s = &s[j]; +			j++; + +			
p++;	/* process next page */ +			if (p == sgl->nr_pages) +				goto fixup;  /* nothing to do */ +		} +		dma_offs += 128; +		s += 8;		/* continue 8 elements further */ +	} + fixup: +	if (j == 1) {		/* combining happend on last entry! */ +		s -= 8;		/* full shift needed on previous sgl block */ +		j =  7;		/* shift all elements */ +	} + +	for (i = 0; i < j; i++)	/* move elements 1 up */ +		s[i] = s[i + 1]; + +	s[i].target_addr = cpu_to_be64(0); +	s[i].len	 = cpu_to_be32(0); +	s[i].flags	 = cpu_to_be32(SG_END_LIST); +	return 0; +} + +/** + * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages + * + * After the DMA transfer has been completed we free the memory for + * the sgl and the cached pages. Data is being transfered from cached + * pages into user-space buffers. + */ +int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl) +{ +	int rc = 0; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (sgl->fpage) { +		if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs, +				 sgl->fpage_size)) { +			dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n", +				__func__); +			rc = -EFAULT; +		} +		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, +					 sgl->fpage_dma_addr); +		sgl->fpage = NULL; +		sgl->fpage_dma_addr = 0; +	} +	if (sgl->lpage) { +		if (copy_to_user(sgl->user_addr + sgl->user_size - +				 sgl->lpage_size, sgl->lpage, +				 sgl->lpage_size)) { +			dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n", +				__func__); +			rc = -EFAULT; +		} +		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage, +					 sgl->lpage_dma_addr); +		sgl->lpage = NULL; +		sgl->lpage_dma_addr = 0; +	} +	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, +				 sgl->sgl_dma_addr); + +	sgl->sgl = NULL; +	sgl->sgl_dma_addr = 0x0; +	sgl->sgl_size = 0; +	return rc; +} + +/** + * free_user_pages() - Give pinned pages back + * + * Documentation of get_user_pages is in mm/memory.c: + * + * If the page is written to, set_page_dirty (or set_page_dirty_lock, + 
* as appropriate) must be called after the page is finished with, and + * before put_page is called. + * + * FIXME Could be of use to others and might belong in the generic + * code, if others agree. E.g. + *    ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c + *    ceph_put_page_vector in net/ceph/pagevec.c + *    maybe more? + */ +static int free_user_pages(struct page **page_list, unsigned int nr_pages, +			   int dirty) +{ +	unsigned int i; + +	for (i = 0; i < nr_pages; i++) { +		if (page_list[i] != NULL) { +			if (dirty) +				set_page_dirty_lock(page_list[i]); +			put_page(page_list[i]); +		} +	} +	return 0; +} + +/** + * genwqe_user_vmap() - Map user-space memory to virtual kernel memory + * @cd:         pointer to genwqe device + * @m:          mapping params + * @uaddr:      user virtual address + * @size:       size of memory to be mapped + * + * We need to think about how we could speed this up. Of course it is + * not a good idea to do this over and over again, like we are + * currently doing it. Nevertheless, I am curious where on the path + * the performance is spend. Most probably within the memory + * allocation functions, but maybe also in the DMA mapping code. + * + * Restrictions: The maximum size of the possible mapping currently depends + *               on the amount of memory we can get using kzalloc() for the + *               page_list and pci_alloc_consistent for the sg_list. + *               The sg_list is currently itself not scattered, which could + *               be fixed with some effort. The page_list must be split into + *               PAGE_SIZE chunks too. All that will make the complicated + *               code more complicated. 
+ * + * Return: 0 if success + */ +int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, +		     unsigned long size, struct ddcb_requ *req) +{ +	int rc = -EINVAL; +	unsigned long data, offs; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if ((uaddr == NULL) || (size == 0)) { +		m->size = 0;	/* mark unused and not added */ +		return -EINVAL; +	} +	m->u_vaddr = uaddr; +	m->size    = size; + +	/* determine space needed for page_list. */ +	data = (unsigned long)uaddr; +	offs = offset_in_page(data); +	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); + +	m->page_list = kcalloc(m->nr_pages, +			       sizeof(struct page *) + sizeof(dma_addr_t), +			       GFP_KERNEL); +	if (!m->page_list) { +		dev_err(&pci_dev->dev, "err: alloc page_list failed\n"); +		m->nr_pages = 0; +		m->u_vaddr = NULL; +		m->size = 0;	/* mark unused and not added */ +		return -ENOMEM; +	} +	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); + +	/* pin user pages in memory */ +	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */ +				 m->nr_pages, +				 1,		/* write by caller */ +				 m->page_list);	/* ptrs to pages */ + +	/* assumption: get_user_pages can be killed by signals. 
*/ +	if (rc < m->nr_pages) { +		free_user_pages(m->page_list, rc, 0); +		rc = -EFAULT; +		goto fail_get_user_pages; +	} + +	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list); +	if (rc != 0) +		goto fail_free_user_pages; + +	return 0; + + fail_free_user_pages: +	free_user_pages(m->page_list, m->nr_pages, 0); + + fail_get_user_pages: +	kfree(m->page_list); +	m->page_list = NULL; +	m->dma_list = NULL; +	m->nr_pages = 0; +	m->u_vaddr = NULL; +	m->size = 0;		/* mark unused and not added */ +	return rc; +} + +/** + * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel + *                        memory + * @cd:         pointer to genwqe device + * @m:          mapping params + */ +int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, +		       struct ddcb_requ *req) +{ +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (!dma_mapping_used(m)) { +		dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n", +			__func__, m); +		return -EINVAL; +	} + +	if (m->dma_list) +		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages); + +	if (m->page_list) { +		free_user_pages(m->page_list, m->nr_pages, 1); + +		kfree(m->page_list); +		m->page_list = NULL; +		m->dma_list = NULL; +		m->nr_pages = 0; +	} + +	m->u_vaddr = NULL; +	m->size = 0;		/* mark as unused and not added */ +	return 0; +} + +/** + * genwqe_card_type() - Get chip type SLU Configuration Register + * @cd:         pointer to the genwqe device descriptor + * Return: 0: Altera Stratix-IV 230 + *         1: Altera Stratix-IV 530 + *         2: Altera Stratix-V A4 + *         3: Altera Stratix-V A7 + */ +u8 genwqe_card_type(struct genwqe_dev *cd) +{ +	u64 card_type = cd->slu_unitcfg; +	return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20); +} + +/** + * genwqe_card_reset() - Reset the card + * @cd:         pointer to the genwqe device descriptor + */ +int genwqe_card_reset(struct genwqe_dev *cd) +{ +	u64 softrst; +	struct pci_dev *pci_dev = cd->pci_dev; + +	if 
(!genwqe_is_privileged(cd)) +		return -ENODEV; + +	/* new SL */ +	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull); +	msleep(1000); +	__genwqe_readq(cd, IO_HSU_FIR_CLR); +	__genwqe_readq(cd, IO_APP_FIR_CLR); +	__genwqe_readq(cd, IO_SLU_FIR_CLR); + +	/* +	 * Read-modify-write to preserve the stealth bits +	 * +	 * For SL >= 039, Stealth WE bit allows removing +	 * the read-modify-wrote. +	 * r-m-w may require a mask 0x3C to avoid hitting hard +	 * reset again for error reset (should be 0, chicken). +	 */ +	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull; +	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull); + +	/* give ERRORRESET some time to finish */ +	msleep(50); + +	if (genwqe_need_err_masking(cd)) { +		dev_info(&pci_dev->dev, +			 "[%s] masking errors for old bitstreams\n", __func__); +		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull); +	} +	return 0; +} + +int genwqe_read_softreset(struct genwqe_dev *cd) +{ +	u64 bitstream; + +	if (!genwqe_is_privileged(cd)) +		return -ENODEV; + +	bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; +	cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull; +	return 0; +} + +/** + * genwqe_set_interrupt_capability() - Configure MSI capability structure + * @cd:         pointer to the device + * Return: 0 if no error + */ +int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) +{ +	int rc; +	struct pci_dev *pci_dev = cd->pci_dev; + +	rc = pci_enable_msi_exact(pci_dev, count); +	if (rc == 0) +		cd->flags |= GENWQE_FLAG_MSI_ENABLED; +	return rc; +} + +/** + * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability() + * @cd:         pointer to the device + */ +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd) +{ +	struct pci_dev *pci_dev = cd->pci_dev; + +	if (cd->flags & GENWQE_FLAG_MSI_ENABLED) { +		pci_disable_msi(pci_dev); +		cd->flags &= ~GENWQE_FLAG_MSI_ENABLED; +	} +} + +/** + * set_reg_idx() - Fill array with data. Ignore illegal offsets. 
+ * @cd:         card device + * @r:          debug register array + * @i:          index to desired entry + * @m:          maximum possible entries + * @addr:       addr which is read + * @index:      index in debug array + * @val:        read value + */ +static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r, +		       unsigned int *i, unsigned int m, u32 addr, u32 idx, +		       u64 val) +{ +	if (WARN_ON_ONCE(*i >= m)) +		return -EFAULT; + +	r[*i].addr = addr; +	r[*i].idx = idx; +	r[*i].val = val; +	++*i; +	return 0; +} + +static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r, +		   unsigned int *i, unsigned int m, u32 addr, u64 val) +{ +	return set_reg_idx(cd, r, i, m, addr, 0, val); +} + +int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, +			 unsigned int max_regs, int all) +{ +	unsigned int i, j, idx = 0; +	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr; +	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec; + +	/* Global FIR */ +	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); +	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir); + +	/* UnitCfg for SLU */ +	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */ +	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid); + +	/* UnitCfg for APP */ +	appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */ +	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid); + +	/* Check all chip Units */ +	for (i = 0; i < GENWQE_MAX_UNITS; i++) { + +		/* Unit FIR */ +		ufir_addr = (i << 24) | 0x008; +		ufir = __genwqe_readq(cd, ufir_addr); +		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir); + +		/* Unit FEC */ +		ufec_addr = (i << 24) | 0x018; +		ufec = __genwqe_readq(cd, ufec_addr); +		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec); + +		for (j = 0; j < 64; j++) { +			/* wherever there is a primary 1, read the 2ndary */ +			if (!all && (!(ufir & (1ull << j)))) +				continue; + +			sfir_addr = (i << 24) | (0x100 + 8 * j); +			sfir = __genwqe_readq(cd, 
sfir_addr); +			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir); + +			sfec_addr = (i << 24) | (0x300 + 8 * j); +			sfec = __genwqe_readq(cd, sfec_addr); +			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec); +		} +	} + +	/* fill with invalid data until end */ +	for (i = idx; i < max_regs; i++) { +		regs[i].addr = 0xffffffff; +		regs[i].val = 0xffffffffffffffffull; +	} +	return idx; +} + +/** + * genwqe_ffdc_buff_size() - Calculates the number of dump registers + */ +int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid) +{ +	int entries = 0, ring, traps, traces, trace_entries; +	u32 eevptr_addr, l_addr, d_len, d_type; +	u64 eevptr, val, addr; + +	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER; +	eevptr = __genwqe_readq(cd, eevptr_addr); + +	if ((eevptr != 0x0) && (eevptr != -1ull)) { +		l_addr = GENWQE_UID_OFFS(uid) | eevptr; + +		while (1) { +			val = __genwqe_readq(cd, l_addr); + +			if ((val == 0x0) || (val == -1ull)) +				break; + +			/* 38:24 */ +			d_len  = (val & 0x0000007fff000000ull) >> 24; + +			/* 39 */ +			d_type = (val & 0x0000008000000000ull) >> 36; + +			if (d_type) {	/* repeat */ +				entries += d_len; +			} else {	/* size in bytes! 
*/ +				entries += d_len >> 3; +			} + +			l_addr += 8; +		} +	} + +	for (ring = 0; ring < 8; ring++) { +		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring); +		val = __genwqe_readq(cd, addr); + +		if ((val == 0x0ull) || (val == -1ull)) +			continue; + +		traps = (val >> 24) & 0xff; +		traces = (val >> 16) & 0xff; +		trace_entries = val & 0xffff; + +		entries += traps + (traces * trace_entries); +	} +	return entries; +} + +/** + * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure + */ +int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid, +			  struct genwqe_reg *regs, unsigned int max_regs) +{ +	int i, traps, traces, trace, trace_entries, trace_entry, ring; +	unsigned int idx = 0; +	u32 eevptr_addr, l_addr, d_addr, d_len, d_type; +	u64 eevptr, e, val, addr; + +	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER; +	eevptr = __genwqe_readq(cd, eevptr_addr); + +	if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) { +		l_addr = GENWQE_UID_OFFS(uid) | eevptr; +		while (1) { +			e = __genwqe_readq(cd, l_addr); +			if ((e == 0x0) || (e == 0xffffffffffffffffull)) +				break; + +			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */ +			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */ +			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */ +			d_addr |= GENWQE_UID_OFFS(uid); + +			if (d_type) { +				for (i = 0; i < (int)d_len; i++) { +					val = __genwqe_readq(cd, d_addr); +					set_reg_idx(cd, regs, &idx, max_regs, +						    d_addr, i, val); +				} +			} else { +				d_len >>= 3; /* Size in bytes! */ +				for (i = 0; i < (int)d_len; i++, d_addr += 8) { +					val = __genwqe_readq(cd, d_addr); +					set_reg_idx(cd, regs, &idx, max_regs, +						    d_addr, 0, val); +				} +			} +			l_addr += 8; +		} +	} + +	/* +	 * To save time, there are only 6 traces poplulated on Uid=2, +	 * Ring=1. each with iters=512. 
+	 */ +	for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds, +					      2...7 are ASI rings */ +		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring); +		val = __genwqe_readq(cd, addr); + +		if ((val == 0x0ull) || (val == -1ull)) +			continue; + +		traps = (val >> 24) & 0xff;	/* Number of Traps	*/ +		traces = (val >> 16) & 0xff;	/* Number of Traces	*/ +		trace_entries = val & 0xffff;	/* Entries per trace	*/ + +		/* Note: This is a combined loop that dumps both the traps */ +		/* (for the trace == 0 case) as well as the traces 1 to    */ +		/* 'traces'.						   */ +		for (trace = 0; trace <= traces; trace++) { +			u32 diag_sel = +				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace); + +			addr = (GENWQE_UID_OFFS(uid) | +				IO_EXTENDED_DIAG_SELECTOR); +			__genwqe_writeq(cd, addr, diag_sel); + +			for (trace_entry = 0; +			     trace_entry < (trace ? trace_entries : traps); +			     trace_entry++) { +				addr = (GENWQE_UID_OFFS(uid) | +					IO_EXTENDED_DIAG_READ_MBX); +				val = __genwqe_readq(cd, addr); +				set_reg_idx(cd, regs, &idx, max_regs, addr, +					    (diag_sel<<16) | trace_entry, val); +			} +		} +	} +	return 0; +} + +/** + * genwqe_write_vreg() - Write register in virtual window + * + * Note, these registers are only accessible to the PF through the + * VF-window. It is not intended for the VF to access. + */ +int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func) +{ +	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf); +	__genwqe_writeq(cd, reg, val); +	return 0; +} + +/** + * genwqe_read_vreg() - Read register in virtual window + * + * Note, these registers are only accessible to the PF through the + * VF-window. It is not intended for the VF to access. 
+ */ +u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func) +{ +	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf); +	return __genwqe_readq(cd, reg); +} + +/** + * genwqe_base_clock_frequency() - Deteremine base clock frequency of the card + * + * Note: From a design perspective it turned out to be a bad idea to + * use codes here to specifiy the frequency/speed values. An old + * driver cannot understand new codes and is therefore always a + * problem. Better is to measure out the value or put the + * speed/frequency directly into a register which is always a valid + * value for old as well as for new software. + * + * Return: Card clock in MHz + */ +int genwqe_base_clock_frequency(struct genwqe_dev *cd) +{ +	u16 speed;		/*         MHz  MHz  MHz  MHz */ +	static const int speed_grade[] = { 250, 200, 166, 175 }; + +	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full); +	if (speed >= ARRAY_SIZE(speed_grade)) +		return 0;	/* illegal value */ + +	return speed_grade[speed]; +} + +/** + * genwqe_stop_traps() - Stop traps + * + * Before reading out the analysis data, we need to stop the traps. + */ +void genwqe_stop_traps(struct genwqe_dev *cd) +{ +	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull); +} + +/** + * genwqe_start_traps() - Start traps + * + * After having read the data, we can/must enable the traps again. + */ +void genwqe_start_traps(struct genwqe_dev *cd) +{ +	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull); + +	if (genwqe_need_err_masking(cd)) +		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull); +} diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h new file mode 100644 index 00000000000..cd5263163a6 --- /dev/null +++ b/drivers/misc/genwqe/genwqe_driver.h @@ -0,0 +1,77 @@ +#ifndef __GENWQE_DRIVER_H__ +#define __GENWQE_DRIVER_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 
2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/cdev.h> +#include <linux/list.h> +#include <linux/kthread.h> +#include <linux/scatterlist.h> +#include <linux/iommu.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/printk.h> + +#include <asm/byteorder.h> +#include <linux/genwqe/genwqe_card.h> + +#define DRV_VERS_STRING		"2.0.15" + +/* + * Static minor number assignement, until we decide/implement + * something dynamic. + */ +#define GENWQE_MAX_MINOR	128 /* up to 128 possible genwqe devices */ + +/** + * genwqe_requ_alloc() - Allocate a new DDCB execution request + * + * This data structure contains the user visiable fields of the DDCB + * to be executed. + * + * Return: ptr to genwqe_ddcb_cmd data structure + */ +struct genwqe_ddcb_cmd *ddcb_requ_alloc(void); + +/** + * ddcb_requ_free() - Free DDCB execution request. + * @req:       ptr to genwqe_ddcb_cmd data structure. 
+ */ +void ddcb_requ_free(struct genwqe_ddcb_cmd *req); + +u32  genwqe_crc32(u8 *buff, size_t len, u32 init); + +static inline void genwqe_hexdump(struct pci_dev *pci_dev, +				  const void *buff, unsigned int size) +{ +	char prefix[32]; + +	scnprintf(prefix, sizeof(prefix), "%s %s: ", +		  GENWQE_DEVNAME, pci_name(pci_dev)); + +	print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff, +			     size, true); +} + +#endif	/* __GENWQE_DRIVER_H__ */ diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c index 170bd3daf33..90520d76633 100644 --- a/drivers/misc/hmc6352.c +++ b/drivers/misc/hmc6352.c @@ -22,7 +22,6 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/i2c.h>  #include <linux/err.h> diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c index 0346d87c5fe..6b3bf9ab051 100644 --- a/drivers/misc/ibmasm/module.c +++ b/drivers/misc/ibmasm/module.c @@ -153,7 +153,6 @@ error_ioremap:  error_heartbeat:  	ibmasm_event_buffer_exit(sp);  error_eventbuffer: -	pci_set_drvdata(pdev, NULL);  	kfree(sp);  error_kmalloc:          pci_release_regions(pdev); @@ -165,7 +164,7 @@ error_resources:  static void ibmasm_remove_one(struct pci_dev *pdev)  { -	struct service_processor *sp = (struct service_processor *)pci_get_drvdata(pdev); +	struct service_processor *sp = pci_get_drvdata(pdev);  	dbg("Unregistering UART\n");  	ibmasm_unregister_uart(sp); @@ -182,7 +181,6 @@ static void ibmasm_remove_one(struct pci_dev *pdev)  	ibmasm_free_remote_input_dev(sp);  	iounmap(sp->base_address);  	ibmasm_event_buffer_exit(sp); -	pci_set_drvdata(pdev, NULL);  	kfree(sp);  	pci_release_regions(pdev);  	pci_disable_device(pdev); diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index e3183f26216..12c30b486b2 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -26,7 +26,6 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/i2c.h>  
#include <linux/mutex.h> diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index b7f84dacf82..4a9c50a43af 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c @@ -23,7 +23,6 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/i2c.h>  #include <linux/err.h> diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 61fbe6acabe..0a1565e63c7 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -12,7 +12,6 @@  #include <linux/module.h>  #include <linux/errno.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/spi/spi.h>  #include <linux/platform_device.h>  #include <linux/delay.h> diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index 036effe9a79..3ef4627f9cb 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -23,7 +23,6 @@  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/dmi.h>  #include <linux/module.h>  #include <linux/types.h> diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index 7c97550240f..d324f8a97b8 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -26,7 +26,6 @@  #include <linux/module.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/err.h>  #include <linux/i2c.h>  #include <linux/pm_runtime.h> diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c index 9aa2bd2a71a..bd06d0cfac4 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c @@ -10,7 +10,6 @@  #include <linux/module.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/err.h>  #include <linux/input.h>  #include <linux/interrupt.h> diff --git a/drivers/misc/lkdtm.c 
b/drivers/misc/lkdtm.c index 2fc0586ce3b..d66a2f24f6b 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c @@ -30,6 +30,7 @@   *   * See Documentation/fault-injection/provoke-crashes.txt for instructions   */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt  #include <linux/kernel.h>  #include <linux/fs.h> @@ -44,13 +45,26 @@  #include <scsi/scsi_cmnd.h>  #include <linux/debugfs.h>  #include <linux/vmalloc.h> +#include <linux/mman.h> +#include <asm/cacheflush.h>  #ifdef CONFIG_IDE  #include <linux/ide.h>  #endif +/* + * Make sure our attempts to over run the kernel stack doesn't trigger + * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we + * recurse past the end of THREAD_SIZE by default. + */ +#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) +#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) +#else +#define REC_STACK_SIZE (THREAD_SIZE / 8) +#endif +#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2) +  #define DEFAULT_COUNT 10 -#define REC_NUM_DEFAULT 10  #define EXEC_SIZE 64  enum cname { @@ -86,6 +100,10 @@ enum ctype {  	CT_EXEC_STACK,  	CT_EXEC_KMALLOC,  	CT_EXEC_VMALLOC, +	CT_EXEC_USERSPACE, +	CT_ACCESS_USERSPACE, +	CT_WRITE_RO, +	CT_WRITE_KERN,  };  static char* cp_name[] = { @@ -119,6 +137,10 @@ static char* cp_type[] = {  	"EXEC_STACK",  	"EXEC_KMALLOC",  	"EXEC_VMALLOC", +	"EXEC_USERSPACE", +	"ACCESS_USERSPACE", +	"WRITE_RO", +	"WRITE_KERN",  };  static struct jprobe lkdtm; @@ -139,9 +161,10 @@ static DEFINE_SPINLOCK(lock_me_up);  static u8 data_area[EXEC_SIZE]; +static const unsigned long rodata = 0xAA55AA55; +  module_param(recur_count, int, 0644); -MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ -				 "default is 10"); +MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");  module_param(cpoint_name, charp, 0444);  MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");  module_param(cpoint_type, charp, 0444); @@ -205,7 +228,7 @@ static 
int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)  }  #ifdef CONFIG_IDE -int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, +static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,  			struct block_device *bdev, unsigned int cmd,  			unsigned long arg)  { @@ -280,16 +303,16 @@ static int lkdtm_parse_commandline(void)  	return -EINVAL;  } -static int recursive_loop(int a) +static int recursive_loop(int remaining)  { -	char buf[1024]; +	char buf[REC_STACK_SIZE]; -	memset(buf,0xFF,1024); -	recur_count--; -	if (!recur_count) +	/* Make sure compiler does not optimize this away. */ +	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE); +	if (!remaining)  		return 0;  	else -        	return recursive_loop(a); +		return recursive_loop(remaining - 1);  }  static void do_nothing(void) @@ -297,11 +320,46 @@ static void do_nothing(void)  	return;  } +/* Must immediately follow do_nothing for size calculuations to work out. */ +static void do_overwritten(void) +{ +	pr_info("do_overwritten wasn't overwritten!\n"); +	return; +} + +static noinline void corrupt_stack(void) +{ +	/* Use default char array length that triggers stack protection. */ +	char data[8]; + +	memset((void *)data, 0, 64); +} +  static void execute_location(void *dst)  {  	void (*func)(void) = dst; +	pr_info("attempting ok execution at %p\n", do_nothing); +	do_nothing(); +  	memcpy(dst, do_nothing, EXEC_SIZE); +	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); +	pr_info("attempting bad execution at %p\n", func); +	func(); +} + +static void execute_user_location(void *dst) +{ +	/* Intentionally crossing kernel/user memory boundary. 
*/ +	void (*func)(void) = dst; + +	pr_info("attempting ok execution at %p\n", do_nothing); +	do_nothing(); + +	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) +		return; +	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); +	pr_info("attempting bad execution at %p\n", func);  	func();  } @@ -325,15 +383,11 @@ static void lkdtm_do_action(enum ctype which)  			;  		break;  	case CT_OVERFLOW: -		(void) recursive_loop(0); +		(void) recursive_loop(recur_count);  		break; -	case CT_CORRUPT_STACK: { -		/* Make sure the compiler creates and uses an 8 char array. */ -		volatile char data[8]; - -		memset((void *)data, 0, 64); +	case CT_CORRUPT_STACK: +		corrupt_stack();  		break; -	}  	case CT_UNALIGNED_LOAD_STORE_WRITE: {  		static u8 data[5] __attribute__((aligned(4))) = {1, 2,  				3, 4, 5}; @@ -376,6 +430,8 @@ static void lkdtm_do_action(enum ctype which)  	case CT_SPINLOCKUP:  		/* Must be called twice to trigger. */  		spin_lock(&lock_me_up); +		/* Let sparse know we intended to exit holding the lock. 
*/ +		__release(&lock_me_up);  		break;  	case CT_HUNG_TASK:  		set_current_state(TASK_UNINTERRUPTIBLE); @@ -401,6 +457,71 @@ static void lkdtm_do_action(enum ctype which)  		vfree(vmalloc_area);  		break;  	} +	case CT_EXEC_USERSPACE: { +		unsigned long user_addr; + +		user_addr = vm_mmap(NULL, 0, PAGE_SIZE, +				    PROT_READ | PROT_WRITE | PROT_EXEC, +				    MAP_ANONYMOUS | MAP_PRIVATE, 0); +		if (user_addr >= TASK_SIZE) { +			pr_warn("Failed to allocate user memory\n"); +			return; +		} +		execute_user_location((void *)user_addr); +		vm_munmap(user_addr, PAGE_SIZE); +		break; +	} +	case CT_ACCESS_USERSPACE: { +		unsigned long user_addr, tmp; +		unsigned long *ptr; + +		user_addr = vm_mmap(NULL, 0, PAGE_SIZE, +				    PROT_READ | PROT_WRITE | PROT_EXEC, +				    MAP_ANONYMOUS | MAP_PRIVATE, 0); +		if (user_addr >= TASK_SIZE) { +			pr_warn("Failed to allocate user memory\n"); +			return; +		} + +		ptr = (unsigned long *)user_addr; + +		pr_info("attempting bad read at %p\n", ptr); +		tmp = *ptr; +		tmp += 0xc0dec0de; + +		pr_info("attempting bad write at %p\n", ptr); +		*ptr = tmp; + +		vm_munmap(user_addr, PAGE_SIZE); + +		break; +	} +	case CT_WRITE_RO: { +		unsigned long *ptr; + +		ptr = (unsigned long *)&rodata; + +		pr_info("attempting bad write at %p\n", ptr); +		*ptr ^= 0xabcd1234; + +		break; +	} +	case CT_WRITE_KERN: { +		size_t size; +		unsigned char *ptr; + +		size = (unsigned long)do_overwritten - +		       (unsigned long)do_nothing; +		ptr = (unsigned char *)do_overwritten; + +		pr_info("attempting bad %zu byte write at %p\n", size, ptr); +		memcpy(ptr, (unsigned char *)do_nothing, size); +		flush_icache_range((unsigned long)ptr, +				   (unsigned long)(ptr + size)); + +		do_overwritten(); +		break; +	}  	case CT_NONE:  	default:  		break; @@ -415,8 +536,8 @@ static void lkdtm_handler(void)  	spin_lock_irqsave(&count_lock, flags);  	count--; -	printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", -			cp_name_to_str(cpoint), 
cp_type_to_str(cptype), count); +	pr_info("Crash point %s of type %s hit, trigger in %d rounds\n", +		cp_name_to_str(cpoint), cp_type_to_str(cptype), count);  	if (count == 0) {  		do_it = true; @@ -473,18 +594,18 @@ static int lkdtm_register_cpoint(enum cname which)  		lkdtm.kp.symbol_name = "generic_ide_ioctl";  		lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;  #else -		printk(KERN_INFO "lkdtm: Crash point not available\n"); +		pr_info("Crash point not available\n");  		return -EINVAL;  #endif  		break;  	default: -		printk(KERN_INFO "lkdtm: Invalid Crash Point\n"); +		pr_info("Invalid Crash Point\n");  		return -EINVAL;  	}  	cpoint = which;  	if ((ret = register_jprobe(&lkdtm)) < 0) { -		printk(KERN_INFO "lkdtm: Couldn't register jprobe\n"); +		pr_info("Couldn't register jprobe\n");  		cpoint = CN_INVALID;  	} @@ -631,8 +752,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,  	if (type == CT_NONE)  		return -EINVAL; -	printk(KERN_INFO "lkdtm: Performing direct entry %s\n", -			cp_type_to_str(type)); +	pr_info("Performing direct entry %s\n", cp_type_to_str(type));  	lkdtm_do_action(type);  	*off += count; @@ -694,7 +814,7 @@ static int __init lkdtm_module_init(void)  	/* Register debugfs interface */  	lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);  	if (!lkdtm_debugfs_root) { -		printk(KERN_ERR "lkdtm: creating root dir failed\n"); +		pr_err("creating root dir failed\n");  		return -ENODEV;  	} @@ -709,28 +829,26 @@ static int __init lkdtm_module_init(void)  		de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,  				NULL, &cur->fops);  		if (de == NULL) { -			printk(KERN_ERR "lkdtm: could not create %s\n", -					cur->name); +			pr_err("could not create %s\n", cur->name);  			goto out_err;  		}  	}  	if (lkdtm_parse_commandline() == -EINVAL) { -		printk(KERN_INFO "lkdtm: Invalid command\n"); +		pr_info("Invalid command\n");  		goto out_err;  	}  	if (cpoint != CN_INVALID && cptype != CT_NONE) {  		ret 
= lkdtm_register_cpoint(cpoint);  		if (ret < 0) { -			printk(KERN_INFO "lkdtm: Invalid crash point %d\n", -					cpoint); +			pr_info("Invalid crash point %d\n", cpoint);  			goto out_err;  		} -		printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n", -				cpoint_name, cpoint_type); +		pr_info("Crash point %s of type %s registered\n", +			cpoint_name, cpoint_type);  	} else { -		printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n"); +		pr_info("No crash points registered, enable through debugfs\n");  	}  	return 0; @@ -745,7 +863,7 @@ static void __exit lkdtm_module_exit(void)  	debugfs_remove_recursive(lkdtm_debugfs_root);  	unregister_jprobe(&lkdtm); -	printk(KERN_INFO "lkdtm: Crash point unregistered\n"); +	pr_info("Crash point unregistered\n");  }  module_init(lkdtm_module_init); diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index c76fa31e9bf..d23384dde73 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -34,3 +34,12 @@ config INTEL_MEI_ME  	  82Q33 Express  	  82X38/X48 Express +config INTEL_MEI_TXE +	tristate "Intel Trusted Execution Environment with ME Interface" +	select INTEL_MEI +	depends on X86 && PCI && WATCHDOG_CORE +	help +	  MEI Support for Trusted Execution Environment device on Intel SoCs + +	  Supported SoCs: +	  Intel Bay Trail diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 08698a46626..8ebc6cda137 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -1,6 +1,6 @@  #  # Makefile - Intel Management Engine Interface (Intel MEI) Linux driver -# Copyright (c) 2010-2011, Intel Corporation. +# Copyright (c) 2010-2014, Intel Corporation.  
#  obj-$(CONFIG_INTEL_MEI) += mei.o  mei-objs := init.o @@ -17,3 +17,7 @@ mei-$(CONFIG_DEBUG_FS) += debugfs.o  obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o  mei-me-objs := pci-me.o  mei-me-objs += hw-me.o + +obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o +mei-txe-objs := pci-txe.o +mei-txe-objs += hw-txe.o diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index d0fdc134068..0d6234db00f 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c @@ -21,7 +21,6 @@  #include <linux/fcntl.h>  #include <linux/aio.h>  #include <linux/pci.h> -#include <linux/init.h>  #include <linux/ioctl.h>  #include <linux/cdev.h>  #include <linux/list.h> @@ -35,7 +34,6 @@  #include "mei_dev.h"  #include "hbm.h" -#include "hw-me.h"  #include "client.h"  const uuid_le mei_amthif_guid  = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, @@ -57,6 +55,8 @@ void mei_amthif_reset_params(struct mei_device *dev)  	dev->iamthif_ioctl = false;  	dev->iamthif_state = MEI_IAMTHIF_IDLE;  	dev->iamthif_timer = 0; +	dev->iamthif_stall_timer = 0; +	dev->iamthif_open_count = 0;  }  /** @@ -77,8 +77,9 @@ int mei_amthif_host_init(struct mei_device *dev)  	i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);  	if (i < 0) { -		dev_info(&dev->pdev->dev, "amthif: failed to find the client\n"); -		return -ENOENT; +		dev_info(&dev->pdev->dev, +			"amthif: failed to find the client %d\n", i); +		return -ENOTTY;  	}  	cl->me_client_id = dev->me_clients[i].client_id; @@ -105,20 +106,16 @@ int mei_amthif_host_init(struct mei_device *dev)  	ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);  	if (ret < 0) { -		dev_err(&dev->pdev->dev, "amthif: failed link client\n"); -		return -ENOENT; +		dev_err(&dev->pdev->dev, +			"amthif: failed link client %d\n", ret); +		return ret;  	} -	cl->state = MEI_FILE_CONNECTING; +	ret = mei_cl_connect(cl, NULL); -	if (mei_hbm_cl_connect_req(dev, cl)) { -		dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n"); -		cl->state = MEI_FILE_DISCONNECTED; -		cl->host_client_id = 0; -	
} else { -		cl->timer_count = MEI_CONNECT_TIMEOUT; -	} -	return 0; +	dev->iamthif_state = MEI_IAMTHIF_IDLE; + +	return ret;  }  /** @@ -132,14 +129,12 @@ int mei_amthif_host_init(struct mei_device *dev)  struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,  						struct file *file)  { -	struct mei_cl_cb *pos = NULL; -	struct mei_cl_cb *next = NULL; +	struct mei_cl_cb *cb; -	list_for_each_entry_safe(pos, next, -				&dev->amthif_rd_complete_list.list, list) { -		if (pos->cl && pos->cl == &dev->iamthif_cl && -			pos->file_object == file) -			return pos; +	list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) { +		if (cb->cl && cb->cl == &dev->iamthif_cl && +			cb->file_object == file) +			return cb;  	}  	return NULL;  } @@ -172,17 +167,16 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,  	unsigned long timeout;  	int i; -	/* Only Posible if we are in timeout */ +	/* Only possible if we are in timeout */  	if (!cl || cl != &dev->iamthif_cl) {  		dev_dbg(&dev->pdev->dev, "bad file ext.\n"); -		return -ETIMEDOUT; +		return -ETIME;  	}  	i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); -  	if (i < 0) {  		dev_dbg(&dev->pdev->dev, "amthif client not found.\n"); -		return -ENODEV; +		return -ENOTTY;  	}  	dev_dbg(&dev->pdev->dev, "checking amthif data\n");  	cb = mei_amthif_find_read_list_entry(dev, file); @@ -223,7 +217,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,  			dev_dbg(&dev->pdev->dev, "amthif Time out\n");  			/* 15 sec for the message has expired */  			list_del(&cb->list); -			rets = -ETIMEDOUT; +			rets = -ETIME;  			goto free;  		}  	} @@ -244,13 +238,14 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,  	    cb->response_buffer.size);  	dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); -	/* length is being turncated to PAGE_SIZE, however, +	/* length is being truncated to PAGE_SIZE, however,  	 * the buf_idx may point beyond */  	length = 
min_t(size_t, length, (cb->buf_idx - *offset)); -	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) +	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { +		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");  		rets = -EFAULT; -	else { +	} else {  		rets = length;  		if ((*offset + length) < cb->buf_idx) {  			*offset += length; @@ -297,9 +292,8 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)  	if (ret < 0)  		return ret; -	if (ret && dev->hbuf_is_ready) { +	if (ret && mei_hbuf_acquire(dev)) {  		ret = 0; -		dev->hbuf_is_ready = false;  		if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {  			mei_hdr.length = mei_hbuf_max_len(dev);  			mei_hdr.msg_complete = 0; @@ -311,14 +305,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)  		mei_hdr.host_addr = dev->iamthif_cl.host_client_id;  		mei_hdr.me_addr = dev->iamthif_cl.me_client_id;  		mei_hdr.reserved = 0; +		mei_hdr.internal = 0;  		dev->iamthif_msg_buf_index += mei_hdr.length; -		if (mei_write_message(dev, &mei_hdr, -					(unsigned char *)dev->iamthif_msg_buf)) -			return -ENODEV; +		ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); +		if (ret) +			return ret;  		if (mei_hdr.msg_complete) {  			if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl)) -				return -ENODEV; +				return -EIO;  			dev->iamthif_flow_control_pending = true;  			dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;  			dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n"); @@ -330,10 +325,6 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)  			list_add_tail(&cb->list, &dev->write_list.list);  		}  	} else { -		if (!dev->hbuf_is_ready) -			dev_dbg(&dev->pdev->dev, "host buffer is not empty"); - -		dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");  		list_add_tail(&cb->list, &dev->write_list.list);  	}  	return 0; @@ -359,7 +350,7 @@ int 
mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)  	if (ret)  		return ret; -	cb->fop_type = MEI_FOP_IOCTL; +	cb->fop_type = MEI_FOP_WRITE;  	if (!list_empty(&dev->amthif_cmd_list.list) ||  	    dev->iamthif_state != MEI_IAMTHIF_IDLE) { @@ -441,35 +432,48 @@ unsigned int mei_amthif_poll(struct mei_device *dev,  /** - * mei_amthif_irq_write_completed - processes completed iamthif operation. + * mei_amthif_irq_write - write iamthif command in irq thread context.   *   * @dev: the device structure. - * @slots: free slots.   * @cb_pos: callback block.   * @cl: private data of the file object.   * @cmpl_list: complete list.   *   * returns 0, OK; otherwise, error.   */ -int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, -				  s32 *slots, struct mei_cl_cb *cmpl_list) +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, +			 struct mei_cl_cb *cmpl_list)  {  	struct mei_device *dev = cl->dev;  	struct mei_msg_hdr mei_hdr;  	size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;  	u32 msg_slots = mei_data2slots(len); +	int slots; +	int rets; + +	rets = mei_cl_flow_ctrl_creds(cl); +	if (rets < 0) +		return rets; + +	if (rets == 0) { +		cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); +		return 0; +	}  	mei_hdr.host_addr = cl->host_client_id;  	mei_hdr.me_addr = cl->me_client_id;  	mei_hdr.reserved = 0; +	mei_hdr.internal = 0; -	if (*slots >= msg_slots) { +	slots = mei_hbuf_empty_slots(dev); + +	if (slots >= msg_slots) {  		mei_hdr.length = len;  		mei_hdr.msg_complete = 1;  	/* Split the message only if we can write the whole host buffer */ -	} else if (*slots == dev->hbuf_depth) { -		msg_slots = *slots; -		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); +	} else if (slots == dev->hbuf_depth) { +		msg_slots = slots; +		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);  		mei_hdr.length = len;  		mei_hdr.msg_complete = 0;  	} else { @@ -479,17 +483,17 @@ int 
mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,  	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT,  MEI_HDR_PRM(&mei_hdr)); -	*slots -=  msg_slots; -	if (mei_write_message(dev, &mei_hdr, -		dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) { -			dev->iamthif_state = MEI_IAMTHIF_IDLE; -			cl->status = -ENODEV; -			list_del(&cb->list); -			return -ENODEV; +	rets = mei_write_message(dev, &mei_hdr, +			dev->iamthif_msg_buf + dev->iamthif_msg_buf_index); +	if (rets) { +		dev->iamthif_state = MEI_IAMTHIF_IDLE; +		cl->status = rets; +		list_del(&cb->list); +		return rets;  	}  	if (mei_cl_flow_ctrl_reduce(cl)) -		return -ENODEV; +		return -EIO;  	dev->iamthif_msg_buf_index += mei_hdr.length;  	cl->status = 0; @@ -719,8 +723,8 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file)  */  int mei_amthif_release(struct mei_device *dev, struct file *file)  { -	if (dev->open_handle_count > 0) -		dev->open_handle_count--; +	if (dev->iamthif_open_count > 0) +		dev->iamthif_open_count--;  	if (dev->iamthif_file_object == file &&  	    dev->iamthif_state != MEI_IAMTHIF_IDLE) { diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 6d0282c08a0..0e993ef28b9 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -26,7 +26,6 @@  #include <linux/mei_cl_bus.h>  #include "mei_dev.h" -#include "hw-me.h"  #include "client.h"  #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) @@ -145,9 +144,9 @@ static struct device_type mei_cl_device_type = {  static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev,  						uuid_le uuid)  { -	struct mei_cl *cl, *next; +	struct mei_cl *cl; -	list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { +	list_for_each_entry(cl, &dev->device_list, device_link) {  		if (!uuid_le_cmp(uuid, cl->device_uuid))  			return cl;  	} @@ -245,10 +244,10 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,  	/* Check if we have an ME client device */ 
 	id = mei_me_cl_by_id(dev, cl->me_client_id);  	if (id < 0) -		return -ENODEV; +		return id;  	if (length > dev->me_clients[id].props.max_msg_length) -		return -EINVAL; +		return -EFBIG;  	cb = mei_io_cb_init(cl, NULL);  	if (!cb) @@ -297,10 +296,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)  	if (cl->reading_state != MEI_READ_COMPLETE &&  	    !waitqueue_active(&cl->rx_wait)) { +  		mutex_unlock(&dev->device_lock);  		if (wait_event_interruptible(cl->rx_wait, -				(MEI_READ_COMPLETE == cl->reading_state))) { +				cl->reading_state == MEI_READ_COMPLETE  || +				mei_cl_is_transitioning(cl))) { +  			if (signal_pending(current))  				return -EINTR;  			return -ERESTARTSYS; @@ -425,8 +427,6 @@ int mei_cl_enable_device(struct mei_cl_device *device)  	mutex_lock(&dev->device_lock); -	cl->state = MEI_FILE_CONNECTING; -  	err = mei_cl_connect(cl, NULL);  	if (err < 0) {  		mutex_unlock(&dev->device_lock); @@ -521,6 +521,22 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)  	schedule_work(&device->event_work);  } +void mei_cl_bus_remove_devices(struct mei_device *dev) +{ +	struct mei_cl *cl, *next; + +	mutex_lock(&dev->device_lock); +	list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { +		if (cl->device) +			mei_cl_remove_device(cl->device); + +		list_del(&cl->device_link); +		mei_cl_unlink(cl); +		kfree(cl); +	} +	mutex_unlock(&dev->device_lock); +} +  int __init mei_cl_bus_init(void)  {  	return bus_register(&mei_cl_bus_type); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index e0684b4d9a0..59d20c599b1 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -18,6 +18,7 @@  #include <linux/sched.h>  #include <linux/wait.h>  #include <linux/delay.h> +#include <linux/pm_runtime.h>  #include <linux/mei.h> @@ -29,20 +30,21 @@   * mei_me_cl_by_uuid - locate index of me client   *   * @dev: mei device + * + * Locking: called under "dev->device_lock" lock + *   * returns me client index or -ENOENT if not 
found   */  int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)  { -	int i, res = -ENOENT; +	int i;  	for (i = 0; i < dev->me_clients_num; ++i)  		if (uuid_le_cmp(*uuid, -				dev->me_clients[i].props.protocol_name) == 0) { -			res = i; -			break; -		} +				dev->me_clients[i].props.protocol_name) == 0) +			return i; -	return res; +	return -ENOENT;  } @@ -60,37 +62,79 @@ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)  int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)  {  	int i; +  	for (i = 0; i < dev->me_clients_num; i++)  		if (dev->me_clients[i].client_id == client_id) -			break; -	if (WARN_ON(dev->me_clients[i].client_id != client_id)) -		return -ENOENT; - -	if (i == dev->me_clients_num) -		return -ENOENT; +			return i; -	return i; +	return -ENOENT;  }  /** - * mei_io_list_flush - removes list entry belonging to cl. + * mei_cl_cmp_id - tells if the clients are the same   * - * @list:  An instance of our list structure - * @cl: host client + * @cl1: host client 1 + * @cl2: host client 2 + * + * returns true  - if the clients has same host and me ids + *         false - otherwise   */ -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, +				const struct mei_cl *cl2) +{ +	return cl1 && cl2 && +		(cl1->host_client_id == cl2->host_client_id) && +		(cl1->me_client_id == cl2->me_client_id); +} + +/** + * mei_io_list_flush - removes cbs belonging to cl. 
+ * + * @list:  an instance of our list structure + * @cl:    host client, can be NULL for flushing the whole list + * @free:  whether to free the cbs + */ +static void __mei_io_list_flush(struct mei_cl_cb *list, +				struct mei_cl *cl, bool free)  {  	struct mei_cl_cb *cb;  	struct mei_cl_cb *next; +	/* enable removing everything if no cl is specified */  	list_for_each_entry_safe(cb, next, &list->list, list) { -		if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) +		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {  			list_del(&cb->list); +			if (free) +				mei_io_cb_free(cb); +		}  	}  }  /** + * mei_io_list_flush - removes list entry belonging to cl. + * + * @list:  An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +{ +	__mei_io_list_flush(list, cl, false); +} + + +/** + * mei_io_list_free - removes cb belonging to cl and free them + * + * @list:  An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) +{ +	__mei_io_list_flush(list, cl, true); +} + +/**   * mei_io_cb_free - free mei_cb_private related memory   *   * @cb: mei callback struct @@ -154,7 +198,7 @@ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)  	return 0;  }  /** - * mei_io_cb_alloc_resp_buf - allocate respose buffer + * mei_io_cb_alloc_resp_buf - allocate response buffer   *   * @cb: io callback structure   * @length: size of the buffer @@ -187,13 +231,17 @@ int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)   */  int mei_cl_flush_queues(struct mei_cl *cl)  { +	struct mei_device *dev; +  	if (WARN_ON(!cl || !cl->dev))  		return -EINVAL; -	dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n"); +	dev = cl->dev; + +	cl_dbg(dev, cl, "remove list entry belonging to cl\n");  	mei_io_list_flush(&cl->dev->read_list, cl); -	mei_io_list_flush(&cl->dev->write_list, cl); -	
mei_io_list_flush(&cl->dev->write_waiting_list, cl); +	mei_io_list_free(&cl->dev->write_list, cl); +	mei_io_list_free(&cl->dev->write_waiting_list, cl);  	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);  	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);  	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); @@ -203,7 +251,7 @@ int mei_cl_flush_queues(struct mei_cl *cl)  /** - * mei_cl_init - initializes intialize cl. + * mei_cl_init - initializes cl.   *   * @cl: host client to be initialized   * @dev: mei device @@ -250,19 +298,18 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)  struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)  {  	struct mei_device *dev = cl->dev; -	struct mei_cl_cb *cb = NULL; -	struct mei_cl_cb *next = NULL; +	struct mei_cl_cb *cb; -	list_for_each_entry_safe(cb, next, &dev->read_list.list, list) +	list_for_each_entry(cb, &dev->read_list.list, list)  		if (mei_cl_cmp_id(cl, cb->cl))  			return cb;  	return NULL;  } -/** mei_cl_link: allocte host id in the host map +/** mei_cl_link: allocate host id in the host map   *   * @cl - host client - * @id - fixed host id or -1 for genereting one + * @id - fixed host id or -1 for generic one   *   * returns 0 on success   *	-EINVAL on incorrect values @@ -271,20 +318,28 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)  int mei_cl_link(struct mei_cl *cl, int id)  {  	struct mei_device *dev; +	long open_handle_count;  	if (WARN_ON(!cl || !cl->dev))  		return -EINVAL;  	dev = cl->dev; -	/* If Id is not asigned get one*/ +	/* If Id is not assigned get one*/  	if (id == MEI_HOST_CLIENT_ID_ANY)  		id = find_first_zero_bit(dev->host_clients_map,  					MEI_CLIENTS_MAX);  	if (id >= MEI_CLIENTS_MAX) { -		dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; -		return -ENOENT; +		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX); +		return -EMFILE; +	} + +	open_handle_count = dev->open_handle_count + dev->iamthif_open_count; +	if (open_handle_count >= 
MEI_MAX_OPEN_HANDLE_COUNT) { +		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d", +			MEI_MAX_OPEN_HANDLE_COUNT); +		return -EMFILE;  	}  	dev->open_handle_count++; @@ -296,7 +351,7 @@ int mei_cl_link(struct mei_cl *cl, int id)  	cl->state = MEI_FILE_INITIALIZING; -	dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id); +	cl_dbg(dev, cl, "link cl\n");  	return 0;  } @@ -308,7 +363,6 @@ int mei_cl_link(struct mei_cl *cl, int id)  int mei_cl_unlink(struct mei_cl *cl)  {  	struct mei_device *dev; -	struct mei_cl *pos, *next;  	/* don't shout on error exit path */  	if (!cl) @@ -320,14 +374,19 @@ int mei_cl_unlink(struct mei_cl *cl)  	dev = cl->dev; -	list_for_each_entry_safe(pos, next, &dev->file_list, link) { -		if (cl->host_client_id == pos->host_client_id) { -			dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n", -				pos->host_client_id, pos->me_client_id); -			list_del_init(&pos->link); -			break; -		} -	} +	cl_dbg(dev, cl, "unlink client"); + +	if (dev->open_handle_count > 0) +		dev->open_handle_count--; + +	/* never clear the 0 bit */ +	if (cl->host_client_id) +		clear_bit(cl->host_client_id, dev->host_clients_map); + +	list_del_init(&cl->link); + +	cl->state = MEI_FILE_INITIALIZING; +  	return 0;  } @@ -341,17 +400,6 @@ void mei_host_client_init(struct work_struct *work)  	mutex_lock(&dev->device_lock); -	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); -	dev->open_handle_count = 0; - -	/* -	 * Reserving the first three client IDs -	 * 0: Reserved for MEI Bus Message communications -	 * 1: Reserved for Watchdog -	 * 2: Reserved for AMTHI -	 */ -	bitmap_set(dev->host_clients_map, 0, 3); -  	for (i = 0; i < dev->me_clients_num; i++) {  		client_props = &dev->me_clients[i].props; @@ -365,13 +413,41 @@ void mei_host_client_init(struct work_struct *work)  	}  	dev->dev_state = MEI_DEV_ENABLED; +	dev->reset_count = 0;  	mutex_unlock(&dev->device_lock); + +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	
dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n"); +	pm_runtime_autosuspend(&dev->pdev->dev);  } +/** + * mei_hbuf_acquire: try to acquire host buffer + * + * @dev: the device structure + * returns true if host buffer was acquired + */ +bool mei_hbuf_acquire(struct mei_device *dev) +{ +	if (mei_pg_state(dev) == MEI_PG_ON || +	    dev->pg_event == MEI_PG_EVENT_WAIT) { +		dev_dbg(&dev->pdev->dev, "device is in pg\n"); +		return false; +	} + +	if (!dev->hbuf_is_ready) { +		dev_dbg(&dev->pdev->dev, "hbuf is not ready\n"); +		return false; +	} + +	dev->hbuf_is_ready = false; + +	return true; +}  /** - * mei_cl_disconnect - disconnect host clinet form the me one + * mei_cl_disconnect - disconnect host client from the me one   *   * @cl: host client   * @@ -390,25 +466,35 @@ int mei_cl_disconnect(struct mei_cl *cl)  	dev = cl->dev; +	cl_dbg(dev, cl, "disconnecting"); +  	if (cl->state != MEI_FILE_DISCONNECTING)  		return 0; +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets; +	} +  	cb = mei_io_cb_init(cl, NULL); -	if (!cb) -		return -ENOMEM; +	if (!cb) { +		rets = -ENOMEM; +		goto free; +	}  	cb->fop_type = MEI_FOP_CLOSE; -	if (dev->hbuf_is_ready) { -		dev->hbuf_is_ready = false; +	if (mei_hbuf_acquire(dev)) {  		if (mei_hbm_cl_disconnect_req(dev, cl)) {  			rets = -ENODEV; -			dev_err(&dev->pdev->dev, "failed to disconnect.\n"); +			cl_err(dev, cl, "failed to disconnect.\n");  			goto free;  		}  		mdelay(10); /* Wait for hardware disconnection ready */  		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);  	} else { -		dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n"); +		cl_dbg(dev, cl, "add disconnect cb to control write list\n");  		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);  	} @@ -421,23 +507,25 @@ int mei_cl_disconnect(struct mei_cl *cl)  	mutex_lock(&dev->device_lock);  	if (MEI_FILE_DISCONNECTED == 
cl->state) {  		rets = 0; -		dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n"); +		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");  	} else {  		rets = -ENODEV;  		if (MEI_FILE_DISCONNECTED != cl->state) -			dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n"); +			cl_err(dev, cl, "wrong status client disconnect.\n");  		if (err) -			dev_dbg(&dev->pdev->dev, -					"wait failed disconnect err=%08x\n", -					err); +			cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err); -		dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n"); +		cl_err(dev, cl, "failed to disconnect from FW client.\n");  	}  	mei_io_list_flush(&dev->ctrl_rd_list, cl);  	mei_io_list_flush(&dev->ctrl_wr_list, cl);  free: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); +  	mei_io_cb_free(cb);  	return rets;  } @@ -449,22 +537,22 @@ free:   *   * @cl: private data of the file object   * - * returns ture if other client is connected, 0 - otherwise. + * returns true if other client is connected, false - otherwise.   
*/  bool mei_cl_is_other_connecting(struct mei_cl *cl)  {  	struct mei_device *dev; -	struct mei_cl *pos; -	struct mei_cl *next; +	struct mei_cl *ocl; /* the other client */  	if (WARN_ON(!cl || !cl->dev))  		return false;  	dev = cl->dev; -	list_for_each_entry_safe(pos, next, &dev->file_list, link) { -		if ((pos->state == MEI_FILE_CONNECTING) && -		    (pos != cl) && cl->me_client_id == pos->me_client_id) +	list_for_each_entry(ocl, &dev->file_list, link) { +		if (ocl->state == MEI_FILE_CONNECTING && +		    ocl != cl && +		    cl->me_client_id == ocl->me_client_id)  			return true;  	} @@ -473,7 +561,7 @@ bool mei_cl_is_other_connecting(struct mei_cl *cl)  }  /** - * mei_cl_connect - connect host clinet to the me one + * mei_cl_connect - connect host client to the me one   *   * @cl: host client   * @@ -492,17 +580,24 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)  	dev = cl->dev; +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets; +	} +  	cb = mei_io_cb_init(cl, file);  	if (!cb) {  		rets = -ENOMEM;  		goto out;  	} -	cb->fop_type = MEI_FOP_IOCTL; - -	if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) { -		dev->hbuf_is_ready = false; +	cb->fop_type = MEI_FOP_CONNECT; +	/* run hbuf acquire last so we don't have to undo */ +	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { +		cl->state = MEI_FILE_CONNECTING;  		if (mei_hbm_cl_connect_req(dev, cl)) {  			rets = -ENODEV;  			goto out; @@ -514,23 +609,28 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)  	}  	mutex_unlock(&dev->device_lock); -	rets = wait_event_timeout(dev->wait_recvd_msg, -				 (cl->state == MEI_FILE_CONNECTED || -				  cl->state == MEI_FILE_DISCONNECTED), -				 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); +	wait_event_timeout(dev->wait_recvd_msg, +			(cl->state == MEI_FILE_CONNECTED || +			 cl->state == 
MEI_FILE_DISCONNECTED), +			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));  	mutex_lock(&dev->device_lock);  	if (cl->state != MEI_FILE_CONNECTED) { -		rets = -EFAULT; +		/* something went really wrong */ +		if (!cl->status) +			cl->status = -EFAULT;  		mei_io_list_flush(&dev->ctrl_rd_list, cl);  		mei_io_list_flush(&dev->ctrl_wr_list, cl); -		goto out;  	}  	rets = cl->status;  out: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); +  	mei_io_cb_free(cb);  	return rets;  } @@ -547,7 +647,8 @@ out:  int mei_cl_flow_ctrl_creds(struct mei_cl *cl)  {  	struct mei_device *dev; -	int i; +	struct mei_me_client *me_cl; +	int id;  	if (WARN_ON(!cl || !cl->dev))  		return -EINVAL; @@ -560,19 +661,19 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)  	if (cl->mei_flow_ctrl_creds > 0)  		return 1; -	for (i = 0; i < dev->me_clients_num; i++) { -		struct mei_me_client  *me_cl = &dev->me_clients[i]; -		if (me_cl->client_id == cl->me_client_id) { -			if (me_cl->mei_flow_ctrl_creds) { -				if (WARN_ON(me_cl->props.single_recv_buf == 0)) -					return -EINVAL; -				return 1; -			} else { -				return 0; -			} -		} +	id = mei_me_cl_by_id(dev, cl->me_client_id); +	if (id < 0) { +		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); +		return id;  	} -	return -ENOENT; + +	me_cl = &dev->me_clients[id]; +	if (me_cl->mei_flow_ctrl_creds) { +		if (WARN_ON(me_cl->props.single_recv_buf == 0)) +			return -EINVAL; +		return 1; +	} +	return 0;  }  /** @@ -588,32 +689,31 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)  int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)  {  	struct mei_device *dev; -	int i; +	struct mei_me_client *me_cl; +	int id;  	if (WARN_ON(!cl || !cl->dev))  		return -EINVAL;  	dev = cl->dev; -	if (!dev->me_clients_num) -		return -ENOENT; +	id = mei_me_cl_by_id(dev, cl->me_client_id); +	if (id < 0) { +		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); +		return id; +	} -	for (i 
= 0; i < dev->me_clients_num; i++) { -		struct mei_me_client  *me_cl = &dev->me_clients[i]; -		if (me_cl->client_id == cl->me_client_id) { -			if (me_cl->props.single_recv_buf != 0) { -				if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) -					return -EINVAL; -				dev->me_clients[i].mei_flow_ctrl_creds--; -			} else { -				if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) -					return -EINVAL; -				cl->mei_flow_ctrl_creds--; -			} -			return 0; -		} +	me_cl = &dev->me_clients[id]; +	if (me_cl->props.single_recv_buf != 0) { +		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) +			return -EINVAL; +		me_cl->mei_flow_ctrl_creds--; +	} else { +		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) +			return -EINVAL; +		cl->mei_flow_ctrl_creds--;  	} -	return -ENOENT; +	return 0;  }  /** @@ -639,74 +739,111 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)  		return -ENODEV;  	if (cl->read_cb) { -		dev_dbg(&dev->pdev->dev, "read is pending.\n"); +		cl_dbg(dev, cl, "read is pending.\n");  		return -EBUSY;  	}  	i = mei_me_cl_by_id(dev, cl->me_client_id);  	if (i < 0) { -		dev_err(&dev->pdev->dev, "no such me client %d\n", -			cl->me_client_id); -		return  -ENODEV; +		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); +		return  -ENOTTY; +	} + +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets;  	}  	cb = mei_io_cb_init(cl, NULL); -	if (!cb) -		return -ENOMEM; +	if (!cb) { +		rets = -ENOMEM; +		goto out; +	}  	/* always allocate at least client max message */  	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);  	rets = mei_io_cb_alloc_resp_buf(cb, length);  	if (rets) -		goto err; +		goto out;  	cb->fop_type = MEI_FOP_READ; -	cl->read_cb = cb; -	if (dev->hbuf_is_ready) { -		dev->hbuf_is_ready = false; -		if (mei_hbm_cl_flow_control_req(dev, cl)) { -			rets = -ENODEV; -			goto err; -		} +	if (mei_hbuf_acquire(dev)) { 
+		rets = mei_hbm_cl_flow_control_req(dev, cl); +		if (rets < 0) +			goto out; +  		list_add_tail(&cb->list, &dev->read_list.list);  	} else {  		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);  	} -	return rets; -err: -	mei_io_cb_free(cb); + +	cl->read_cb = cb; + +out: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); + +	if (rets) +		mei_io_cb_free(cb); +  	return rets;  }  /** - * mei_cl_irq_write_complete - write a message to device + * mei_cl_irq_write - write a message to device   *	from the interrupt thread context   *   * @cl: client   * @cb: callback block. - * @slots: free slots.   * @cmpl_list: complete list.   *   * returns 0, OK; otherwise error.   */ -int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, -				     s32 *slots, struct mei_cl_cb *cmpl_list) +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, +		     struct mei_cl_cb *cmpl_list)  { -	struct mei_device *dev = cl->dev; +	struct mei_device *dev; +	struct mei_msg_data *buf;  	struct mei_msg_hdr mei_hdr; -	size_t len = cb->request_buffer.size - cb->buf_idx; -	u32 msg_slots = mei_data2slots(len); +	size_t len; +	u32 msg_slots; +	int slots; +	int rets; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	buf = &cb->request_buffer; + +	rets = mei_cl_flow_ctrl_creds(cl); +	if (rets < 0) +		return rets; + +	if (rets == 0) { +		cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); +		return 0; +	} + +	slots = mei_hbuf_empty_slots(dev); +	len = buf->size - cb->buf_idx; +	msg_slots = mei_data2slots(len);  	mei_hdr.host_addr = cl->host_client_id;  	mei_hdr.me_addr = cl->me_client_id;  	mei_hdr.reserved = 0; +	mei_hdr.internal = cb->internal; -	if (*slots >= msg_slots) { +	if (slots >= msg_slots) {  		mei_hdr.length = len;  		mei_hdr.msg_complete = 1;  	/* Split the message only if we can write the whole host buffer */ -	} else if (*slots == dev->hbuf_depth) 
{ -		msg_slots = *slots; -		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); +	} else if (slots == dev->hbuf_depth) { +		msg_slots = slots; +		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);  		mei_hdr.length = len;  		mei_hdr.msg_complete = 0;  	} else { @@ -714,16 +851,14 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,  		return 0;  	} -	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n", +	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",  			cb->request_buffer.size, cb->buf_idx); -	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); -	*slots -=  msg_slots; -	if (mei_write_message(dev, &mei_hdr, -			cb->request_buffer.data + cb->buf_idx)) { -		cl->status = -ENODEV; +	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); +	if (rets) { +		cl->status = rets;  		list_move_tail(&cb->list, &cmpl_list->list); -		return -ENODEV; +		return rets;  	}  	cl->status = 0; @@ -732,7 +867,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,  	if (mei_hdr.msg_complete) {  		if (mei_cl_flow_ctrl_reduce(cl)) -			return -ENODEV; +			return -EIO;  		list_move_tail(&cb->list, &dev->write_waiting_list.list);  	} @@ -746,7 +881,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,   * @cl: host client   * @cl: write callback with filled data   * - * returns numbe of bytes sent on success, <0 on failure. + * returns number of bytes sent on success, <0 on failure.   
*/  int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)  { @@ -767,25 +902,39 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)  	buf = &cb->request_buffer; -	dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size); +	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size); +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets; +	}  	cb->fop_type = MEI_FOP_WRITE; +	cb->buf_idx = 0; +	cl->writing_state = MEI_IDLE; + +	mei_hdr.host_addr = cl->host_client_id; +	mei_hdr.me_addr = cl->me_client_id; +	mei_hdr.reserved = 0; +	mei_hdr.msg_complete = 0; +	mei_hdr.internal = cb->internal;  	rets = mei_cl_flow_ctrl_creds(cl);  	if (rets < 0)  		goto err; -	/* Host buffer is not ready, we queue the request */ -	if (rets == 0 || !dev->hbuf_is_ready) { -		cb->buf_idx = 0; -		/* unseting complete will enqueue the cb for write */ -		mei_hdr.msg_complete = 0; +	if (rets == 0) { +		cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); +		rets = buf->size; +		goto out; +	} +	if (!mei_hbuf_acquire(dev)) { +		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");  		rets = buf->size;  		goto out;  	} - -	dev->hbuf_is_ready = false;  	/* Check for a maximum length */  	if (buf->size > mei_hbuf_max_len(dev)) { @@ -796,29 +945,19 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)  		mei_hdr.msg_complete = 1;  	} -	mei_hdr.host_addr = cl->host_client_id; -	mei_hdr.me_addr = cl->me_client_id; -	mei_hdr.reserved = 0; - -	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n", -		MEI_HDR_PRM(&mei_hdr)); - - -	if (mei_write_message(dev, &mei_hdr, buf->data)) { -		rets = -EIO; +	rets = mei_write_message(dev, &mei_hdr, buf->data); +	if (rets)  		goto err; -	}  	cl->writing_state = MEI_WRITING;  	cb->buf_idx = mei_hdr.length; -	rets = buf->size;  out:  	if (mei_hdr.msg_complete) 
{ -		if (mei_cl_flow_ctrl_reduce(cl)) { -			rets = -ENODEV; +		rets = mei_cl_flow_ctrl_reduce(cl); +		if (rets < 0)  			goto err; -		} +  		list_add_tail(&cb->list, &dev->write_waiting_list.list);  	} else {  		list_add_tail(&cb->list, &dev->write_list.list); @@ -828,16 +967,23 @@ out:  	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {  		mutex_unlock(&dev->device_lock); -		if (wait_event_interruptible(cl->tx_wait, -			cl->writing_state == MEI_WRITE_COMPLETE)) { -				if (signal_pending(current)) -					rets = -EINTR; -				else -					rets = -ERESTARTSYS; -		} +		rets = wait_event_interruptible(cl->tx_wait, +				cl->writing_state == MEI_WRITE_COMPLETE);  		mutex_lock(&dev->device_lock); +		/* wait_event_interruptible returns -ERESTARTSYS */ +		if (rets) { +			if (signal_pending(current)) +				rets = -EINTR; +			goto err; +		}  	} + +	rets = buf->size;  err: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); +  	return rets;  } @@ -877,12 +1023,11 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)  void mei_cl_all_disconnect(struct mei_device *dev)  { -	struct mei_cl *cl, *next; +	struct mei_cl *cl; -	list_for_each_entry_safe(cl, next, &dev->file_list, link) { +	list_for_each_entry(cl, &dev->file_list, link) {  		cl->state = MEI_FILE_DISCONNECTED;  		cl->mei_flow_ctrl_creds = 0; -		cl->read_cb = NULL;  		cl->timer_count = 0;  	}  } @@ -895,14 +1040,14 @@ void mei_cl_all_disconnect(struct mei_device *dev)   */  void mei_cl_all_wakeup(struct mei_device *dev)  { -	struct mei_cl *cl, *next; -	list_for_each_entry_safe(cl, next, &dev->file_list, link) { +	struct mei_cl *cl; +	list_for_each_entry(cl, &dev->file_list, link) {  		if (waitqueue_active(&cl->rx_wait)) { -			dev_dbg(&dev->pdev->dev, "Waking up reading client!\n"); +			cl_dbg(dev, cl, "Waking up reading client!\n");  			wake_up_interruptible(&cl->rx_wait);  		}  		if (waitqueue_active(&cl->tx_wait)) { -			
dev_dbg(&dev->pdev->dev, "Waking up writing client!\n"); +			cl_dbg(dev, cl, "Waking up writing client!\n");  			wake_up_interruptible(&cl->tx_wait);  		}  	} @@ -915,12 +1060,8 @@ void mei_cl_all_wakeup(struct mei_device *dev)   */  void mei_cl_all_write_clear(struct mei_device *dev)  { -	struct mei_cl_cb *cb, *next; - -	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { -		list_del(&cb->list); -		mei_io_cb_free(cb); -	} +	mei_io_list_free(&dev->write_list, NULL); +	mei_io_list_free(&dev->write_waiting_list, NULL);  } diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 9eb031e9207..96d5de0389f 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -45,8 +45,6 @@ static inline void mei_io_list_init(struct mei_cl_cb *list)  {  	INIT_LIST_HEAD(&list->list);  } -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl); -  /*   * MEI Host Client Functions   */ @@ -61,22 +59,6 @@ int mei_cl_unlink(struct mei_cl *cl);  int mei_cl_flush_queues(struct mei_cl *cl);  struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl); -/** - * mei_cl_cmp_id - tells if file private data have same id - * - * @fe1: private data of 1. file object - * @fe2: private data of 2. 
file object - * - * returns true  - if ids are the same and not NULL - */ -static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, -				const struct mei_cl *cl2) -{ -	return cl1 && cl2 && -		(cl1->host_client_id == cl2->host_client_id) && -		(cl1->me_client_id == cl2->me_client_id); -} -  int mei_cl_flow_ctrl_creds(struct mei_cl *cl); @@ -86,9 +68,15 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);   */  static inline bool mei_cl_is_connected(struct mei_cl *cl)  { -	return (cl->dev && +	return  cl->dev &&  		cl->dev->dev_state == MEI_DEV_ENABLED && -		cl->state == MEI_FILE_CONNECTED); +		cl->state == MEI_FILE_CONNECTED; +} +static inline bool mei_cl_is_transitioning(struct mei_cl *cl) +{ +	return  MEI_FILE_INITIALIZING == cl->state || +		MEI_FILE_DISCONNECTED == cl->state || +		MEI_FILE_DISCONNECTING == cl->state;  }  bool mei_cl_is_other_connecting(struct mei_cl *cl); @@ -96,8 +84,8 @@ int mei_cl_disconnect(struct mei_cl *cl);  int mei_cl_connect(struct mei_cl *cl, struct file *file);  int mei_cl_read_start(struct mei_cl *cl, size_t length);  int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); -int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, -				s32 *slots, struct mei_cl_cb *cmpl_list); +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, +		     struct mei_cl_cb *cmpl_list);  void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); @@ -109,4 +97,13 @@ void mei_cl_all_disconnect(struct mei_device *dev);  void mei_cl_all_wakeup(struct mei_device *dev);  void mei_cl_all_write_clear(struct mei_device *dev); +#define MEI_CL_FMT "cl:host=%02d me=%02d " +#define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id + +#define cl_dbg(dev, cl, format, arg...) \ +	dev_dbg(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#define cl_err(dev, cl, format, arg...) 
\ +	dev_err(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) +  #endif /* _MEI_CLIENT_H_ */ diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index e3870f22d23..ced5b777c70 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -43,7 +43,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,  	mutex_lock(&dev->device_lock); -	/*  if the driver is not enabled the list won't b consitent */ +	/*  if the driver is not enabled the list won't be consistent */  	if (dev->dev_state != MEI_DEV_ENABLED)  		goto out; @@ -75,6 +75,54 @@ static const struct file_operations mei_dbgfs_fops_meclients = {  	.llseek = generic_file_llseek,  }; +static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, +					size_t cnt, loff_t *ppos) +{ +	struct mei_device *dev = fp->private_data; +	struct mei_cl *cl; +	const size_t bufsz = 1024; +	char *buf; +	int i = 0; +	int pos = 0; +	int ret; + +	if (!dev) +		return -ENODEV; + +	buf = kzalloc(bufsz, GFP_KERNEL); +	if  (!buf) +		return -ENOMEM; + +	pos += scnprintf(buf + pos, bufsz - pos, +			"  |me|host|state|rd|wr|\n"); + +	mutex_lock(&dev->device_lock); + +	/*  if the driver is not enabled the list won't b consitent */ +	if (dev->dev_state != MEI_DEV_ENABLED) +		goto out; + +	list_for_each_entry(cl, &dev->file_list, link) { + +		pos += scnprintf(buf + pos, bufsz - pos, +			"%2d|%2d|%4d|%5d|%2d|%2d|\n", +			i, cl->me_client_id, cl->host_client_id, cl->state, +			cl->reading_state, cl->writing_state); +		i++; +	} +out: +	mutex_unlock(&dev->device_lock); +	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); +	kfree(buf); +	return ret; +} + +static const struct file_operations mei_dbgfs_fops_active = { +	.open = simple_open, +	.read = mei_dbgfs_read_active, +	.llseek = generic_file_llseek, +}; +  static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,  					size_t cnt, loff_t *ppos)  { @@ -101,7 +149,7 @@ static const struct 
file_operations mei_dbgfs_fops_devstate = {  /**   * mei_dbgfs_deregister - Remove the debugfs files and directories - * @mei - pointer to mei device private dat + * @mei - pointer to mei device private data   */  void mei_dbgfs_deregister(struct mei_device *dev)  { @@ -128,6 +176,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)  		dev_err(&dev->pdev->dev, "meclients: registration failed\n");  		goto err;  	} +	f = debugfs_create_file("active", S_IRUSR, dir, +				dev, &mei_dbgfs_fops_active); +	if (!f) { +		dev_err(&dev->pdev->dev, "meclients: registration failed\n"); +		goto err; +	}  	f = debugfs_create_file("devstate", S_IRUSR, dir,  				dev, &mei_dbgfs_fops_devstate);  	if (!f) { diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 6127ab64bb3..804106209d7 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -14,50 +14,111 @@   *   */ +#include <linux/export.h>  #include <linux/pci.h>  #include <linux/sched.h>  #include <linux/wait.h>  #include <linux/mei.h> +#include <linux/pm_runtime.h>  #include "mei_dev.h"  #include "hbm.h" -#include "hw-me.h" +#include "client.h" + +static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) +{ +#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status +	switch (status) { +	MEI_CL_CS(SUCCESS); +	MEI_CL_CS(NOT_FOUND); +	MEI_CL_CS(ALREADY_STARTED); +	MEI_CL_CS(OUT_OF_RESOURCES); +	MEI_CL_CS(MESSAGE_SMALL); +	default: return "unknown"; +	} +#undef MEI_CL_CCS +} + +/** + * mei_cl_conn_status_to_errno - convert client connect response + * status to error code + * + * @status: client connect response status + * + * returns corresponding error code + */ +static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) +{ +	switch (status) { +	case MEI_CL_CONN_SUCCESS:          return 0; +	case MEI_CL_CONN_NOT_FOUND:        return -ENOTTY; +	case MEI_CL_CONN_ALREADY_STARTED:  return -EBUSY; +	case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY; +	
case MEI_CL_CONN_MESSAGE_SMALL:    return -EINVAL; +	default:                           return -EINVAL; +	} +} + +/** + * mei_hbm_idle - set hbm to idle state + * + * @dev: the device structure + */ +void mei_hbm_idle(struct mei_device *dev) +{ +	dev->init_clients_timer = 0; +	dev->hbm_state = MEI_HBM_IDLE; +} + +/** + * mei_hbm_reset - reset hbm counters and book keeping data structurs + * + * @dev: the device structure + */ +void mei_hbm_reset(struct mei_device *dev) +{ +	dev->me_clients_num = 0; +	dev->me_client_presentation_num = 0; +	dev->me_client_index = 0; + +	kfree(dev->me_clients); +	dev->me_clients = NULL; + +	mei_hbm_idle(dev); +}  /**   * mei_hbm_me_cl_allocate - allocates storage for me clients   *   * @dev: the device structure   * - * returns none. + * returns 0 on success -ENOMEM on allocation failure   */ -static void mei_hbm_me_cl_allocate(struct mei_device *dev) +static int mei_hbm_me_cl_allocate(struct mei_device *dev)  {  	struct mei_me_client *clients;  	int b; +	mei_hbm_reset(dev); +  	/* count how many ME clients we have */  	for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)  		dev->me_clients_num++; -	if (dev->me_clients_num <= 0) -		return; - -	kfree(dev->me_clients); -	dev->me_clients = NULL; +	if (dev->me_clients_num == 0) +		return 0; -	dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n", +	dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n",  		dev->me_clients_num * sizeof(struct mei_me_client));  	/* allocate storage for ME clients representation */  	clients = kcalloc(dev->me_clients_num,  			sizeof(struct mei_me_client), GFP_KERNEL);  	if (!clients) {  		dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); -		dev->dev_state = MEI_DEV_RESETTING; -		mei_reset(dev, 1); -		return; +		return -ENOMEM;  	}  	dev->me_clients = clients; -	return; +	return 0;  }  /** @@ -81,12 +142,12 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)  }  /** - * 
same_disconn_addr - tells if they have the same address + * mei_hbm_cl_addr_equal - tells if they have the same address   * - * @file: private data of the file object. - * @disconn: disconnection request. + * @cl: - client + * @buf: buffer with cl header   * - * returns true if addres are same + * returns true if addresses are the same   */  static inline  bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) @@ -97,33 +158,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)  } -/** - * is_treat_specially_client - checks if the message belongs - * to the file private data. - * - * @cl: private data of the file object - * @rs: connect response bus message - * - */ -static bool is_treat_specially_client(struct mei_cl *cl, -		struct hbm_client_connect_response *rs) -{ -	if (mei_hbm_cl_addr_equal(cl, rs)) { -		if (!rs->status) { -			cl->state = MEI_FILE_CONNECTED; -			cl->status = 0; - -		} else { -			cl->state = MEI_FILE_DISCONNECTED; -			cl->status = -ENODEV; -		} -		cl->timer_count = 0; - -		return true; -	} -	return false; -} -  int mei_hbm_start_wait(struct mei_device *dev)  {  	int ret; @@ -133,14 +167,14 @@ int mei_hbm_start_wait(struct mei_device *dev)  	mutex_unlock(&dev->device_lock);  	ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,  			dev->hbm_state == MEI_HBM_IDLE || -			dev->hbm_state > MEI_HBM_START, -			mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); +			dev->hbm_state >= MEI_HBM_STARTED, +			mei_secs_to_jiffies(MEI_HBM_TIMEOUT));  	mutex_lock(&dev->device_lock);  	if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) {  		dev->hbm_state = MEI_HBM_IDLE;  		dev_err(&dev->pdev->dev, "waiting for mei start failed\n"); -		return -ETIMEDOUT; +		return -ETIME;  	}  	return 0;  } @@ -149,12 +183,15 @@ int mei_hbm_start_wait(struct mei_device *dev)   * mei_hbm_start_req - sends start request message.   
*   * @dev: the device structure + * + * returns 0 on success and < 0 on failure   */  int mei_hbm_start_req(struct mei_device *dev)  {  	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;  	struct hbm_host_version_request *start_req;  	const size_t len = sizeof(struct hbm_host_version_request); +	int ret;  	mei_hbm_hdr(mei_hdr, len); @@ -166,12 +203,13 @@ int mei_hbm_start_req(struct mei_device *dev)  	start_req->host_version.minor_version = HBM_MINOR_VERSION;  	dev->hbm_state = MEI_HBM_IDLE; -	if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { -		dev_err(&dev->pdev->dev, "version message write failed\n"); -		dev->dev_state = MEI_DEV_RESETTING; -		mei_reset(dev, 1); -		return -ENODEV; +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) { +		dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n", +			ret); +		return ret;  	} +  	dev->hbm_state = MEI_HBM_START;  	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;  	return 0; @@ -182,13 +220,15 @@ int mei_hbm_start_req(struct mei_device *dev)   *   * @dev: the device structure   * - * returns none. 
+ * returns 0 on success and < 0 on failure   */ -static void mei_hbm_enum_clients_req(struct mei_device *dev) +static int mei_hbm_enum_clients_req(struct mei_device *dev)  {  	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;  	struct hbm_host_enum_request *enum_req;  	const size_t len = sizeof(struct hbm_host_enum_request); +	int ret; +  	/* enumerate clients */  	mei_hbm_hdr(mei_hdr, len); @@ -196,14 +236,15 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)  	memset(enum_req, 0, len);  	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; -	if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { -		dev->dev_state = MEI_DEV_RESETTING; -		dev_err(&dev->pdev->dev, "enumeration request write failed.\n"); -		mei_reset(dev, 1); +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) { +		dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n", +			ret); +		return ret;  	}  	dev->hbm_state = MEI_HBM_ENUM_CLIENTS;  	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; -	return; +	return 0;  }  /** @@ -211,7 +252,7 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)   *   * @dev: the device structure   * - * returns none. 
+ * returns 0 on success and < 0 on failure   */  static int mei_hbm_prop_req(struct mei_device *dev) @@ -221,8 +262,8 @@ static int mei_hbm_prop_req(struct mei_device *dev)  	struct hbm_props_request *prop_req;  	const size_t len = sizeof(struct hbm_props_request);  	unsigned long next_client_index; -	u8 client_num; - +	unsigned long client_num; +	int ret;  	client_num = dev->me_client_presentation_num; @@ -249,12 +290,11 @@ static int mei_hbm_prop_req(struct mei_device *dev)  	prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;  	prop_req->address = next_client_index; -	if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { -		dev->dev_state = MEI_DEV_RESETTING; -		dev_err(&dev->pdev->dev, "properties request write failed\n"); -		mei_reset(dev, 1); - -		return -EIO; +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) { +		dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n", +			ret); +		return ret;  	}  	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; @@ -263,18 +303,47 @@ static int mei_hbm_prop_req(struct mei_device *dev)  	return 0;  } +/* + * mei_hbm_pg - sends pg command + * + * @dev: the device structure + * @pg_cmd: the pg command code + * + * This function returns -EIO on write failure + */ +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	struct hbm_power_gate *req; +	const size_t len = sizeof(struct hbm_power_gate); +	int ret; + +	mei_hbm_hdr(mei_hdr, len); + +	req = (struct hbm_power_gate *)dev->wr_msg.data; +	memset(req, 0, len); +	req->hbm_cmd = pg_cmd; + +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) +		dev_err(&dev->pdev->dev, "power gate command write failed.\n"); +	return ret; +} +EXPORT_SYMBOL_GPL(mei_hbm_pg); +  /** - * mei_hbm_stop_req_prepare - perpare stop request message + * mei_hbm_stop_req - send stop request message   *   * @dev - mei device - * @mei_hdr - mei message header - * @data - hbm message body buffer + * 
@cl: client info + * + * This function returns -EIO on write failure   */ -static void mei_hbm_stop_req_prepare(struct mei_device *dev, -		struct mei_msg_hdr *mei_hdr, unsigned char *data) +static int mei_hbm_stop_req(struct mei_device *dev)  { +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;  	struct hbm_host_stop_request *req = -			(struct hbm_host_stop_request *)data; +			(struct hbm_host_stop_request *)dev->wr_msg.data;  	const size_t len = sizeof(struct hbm_host_stop_request);  	mei_hbm_hdr(mei_hdr, len); @@ -282,10 +351,12 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev,  	memset(req, 0, len);  	req->hbm_cmd = HOST_STOP_REQ_CMD;  	req->reason = DRIVER_STOP_REQUEST; + +	return mei_write_message(dev, mei_hdr, dev->wr_msg.data);  }  /** - * mei_hbm_cl_flow_control_req - sends flow control requst. + * mei_hbm_cl_flow_control_req - sends flow control request.   *   * @dev: the device structure   * @cl: client info @@ -300,8 +371,7 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)  	mei_hbm_hdr(mei_hdr, len);  	mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len); -	dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n", -		cl->host_client_id, cl->me_client_id); +	cl_dbg(dev, cl, "sending flow control\n");  	return mei_write_message(dev, mei_hdr, dev->wr_msg.data);  } @@ -311,27 +381,34 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)   *   * @dev: the device structure   * @flow: flow control. 
+ * + * return 0 on success, < 0 otherwise   */ -static void mei_hbm_add_single_flow_creds(struct mei_device *dev, +static int mei_hbm_add_single_flow_creds(struct mei_device *dev,  				  struct hbm_flow_control *flow)  { -	struct mei_me_client *client; -	int i; - -	for (i = 0; i < dev->me_clients_num; i++) { -		client = &dev->me_clients[i]; -		if (client && flow->me_addr == client->client_id) { -			if (client->props.single_recv_buf) { -				client->mei_flow_ctrl_creds++; -				dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", -				    flow->me_addr); -				dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", -				    client->mei_flow_ctrl_creds); -			} else { -				BUG();	/* error in flow control */ -			} -		} +	struct mei_me_client *me_cl; +	int id; + +	id = mei_me_cl_by_id(dev, flow->me_addr); +	if (id < 0) { +		dev_err(&dev->pdev->dev, "no such me client %d\n", +			flow->me_addr); +		return id; +	} + +	me_cl = &dev->me_clients[id]; +	if (me_cl->props.single_recv_buf) { +		me_cl->mei_flow_ctrl_creds++; +		dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", +		    flow->me_addr); +		dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", +		    me_cl->mei_flow_ctrl_creds); +	} else { +		BUG();	/* error in flow control */  	} + +	return 0;  }  /** @@ -343,8 +420,7 @@ static void mei_hbm_add_single_flow_creds(struct mei_device *dev,  static void mei_hbm_cl_flow_control_res(struct mei_device *dev,  		struct hbm_flow_control *flow_control)  { -	struct mei_cl *cl = NULL; -	struct mei_cl *next = NULL; +	struct mei_cl *cl;  	if (!flow_control->host_addr) {  		/* single receive buffer */ @@ -353,7 +429,7 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,  	}  	/* normal connection */ -	list_for_each_entry_safe(cl, next, &dev->file_list, link) { +	list_for_each_entry(cl, &dev->file_list, link) {  		if (mei_hbm_cl_addr_equal(cl, flow_control)) {  			cl->mei_flow_ctrl_creds++;  			dev_dbg(&dev->pdev->dev, "flow ctrl msg for 
host %d ME %d.\n", @@ -386,6 +462,25 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)  }  /** + * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW + * + * @dev: the device structure + * @cl: a client to disconnect from + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	const size_t len = sizeof(struct hbm_client_connect_response); + +	mei_hbm_hdr(mei_hdr, len); +	mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len); + +	return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/**   * mei_hbm_cl_disconnect_res - disconnect response from ME   *   * @dev: the device structure @@ -395,29 +490,23 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev,  		struct hbm_client_connect_response *rs)  {  	struct mei_cl *cl; -	struct mei_cl_cb *pos = NULL, *next = NULL; - -	dev_dbg(&dev->pdev->dev, -			"disconnect_response:\n" -			"ME Client = %d\n" -			"Host Client = %d\n" -			"Status = %d\n", -			rs->me_addr, -			rs->host_addr, -			rs->status); - -	list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { -		cl = pos->cl; - -		if (!cl) { -			list_del(&pos->list); +	struct mei_cl_cb *cb, *next; + +	dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n", +			rs->me_addr, rs->host_addr, rs->status); + +	list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { +		cl = cb->cl; + +		/* this should not happen */ +		if (WARN_ON(!cl)) { +			list_del(&cb->list);  			return;  		} -		dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");  		if (mei_hbm_cl_addr_equal(cl, rs)) { -			list_del(&pos->list); -			if (!rs->status) +			list_del(&cb->list); +			if (rs->status == MEI_CL_DISCONN_SUCCESS)  				cl->state = MEI_FILE_DISCONNECTED;  			cl->status = 0; @@ -447,7 +536,7 @@ int mei_hbm_cl_connect_req(struct mei_device 
*dev, struct mei_cl *cl)  }  /** - * mei_hbm_cl_connect_res - connect resposne from the ME + * mei_hbm_cl_connect_res - connect response from the ME   *   * @dev: the device structure   * @rs: connect response bus message @@ -457,81 +546,78 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,  {  	struct mei_cl *cl; -	struct mei_cl_cb *pos = NULL, *next = NULL; +	struct mei_cl_cb *cb, *next; -	dev_dbg(&dev->pdev->dev, -			"connect_response:\n" -			"ME Client = %d\n" -			"Host Client = %d\n" -			"Status = %d\n", -			rs->me_addr, -			rs->host_addr, -			rs->status); +	dev_dbg(&dev->pdev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n", +			rs->me_addr, rs->host_addr, +			mei_cl_conn_status_str(rs->status)); -	/* if WD or iamthif client treat specially */ +	cl = NULL; -	if (is_treat_specially_client(&dev->wd_cl, rs)) { -		dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n"); -		mei_watchdog_register(dev); +	list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { -		return; -	} +		cl = cb->cl; +		/* this should not happen */ +		if (WARN_ON(!cl)) { +			list_del_init(&cb->list); +			continue; +		} -	if (is_treat_specially_client(&dev->iamthif_cl, rs)) { -		dev->iamthif_state = MEI_IAMTHIF_IDLE; -		return; -	} -	list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { +		if (cb->fop_type !=  MEI_FOP_CONNECT) +			continue; -		cl = pos->cl; -		if (!cl) { -			list_del(&pos->list); -			return; -		} -		if (pos->fop_type == MEI_FOP_IOCTL) { -			if (is_treat_specially_client(cl, rs)) { -				list_del(&pos->list); -				cl->status = 0; -				cl->timer_count = 0; -				break; -			} +		if (mei_hbm_cl_addr_equal(cl, rs)) { +			list_del(&cb->list); +			break;  		}  	} + +	if (!cl) +		return; + +	cl->timer_count = 0; +	if (rs->status == MEI_CL_CONN_SUCCESS) +		cl->state = MEI_FILE_CONNECTED; +	else +		cl->state = MEI_FILE_DISCONNECTED; +	cl->status = mei_cl_conn_status_to_errno(rs->status);  }  /** - * mei_hbm_fw_disconnect_req - 
disconnect request initiated by me - *  host sends disoconnect response + * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware + *  host sends disconnect response   *   * @dev: the device structure.   * @disconnect_req: disconnect request bus message from the me + * + * returns -ENOMEM on allocation failure   */ -static void mei_hbm_fw_disconnect_req(struct mei_device *dev, +static int mei_hbm_fw_disconnect_req(struct mei_device *dev,  		struct hbm_client_connect_request *disconnect_req)  { -	struct mei_cl *cl, *next; -	const size_t len = sizeof(struct hbm_client_connect_response); +	struct mei_cl *cl; +	struct mei_cl_cb *cb; -	list_for_each_entry_safe(cl, next, &dev->file_list, link) { +	list_for_each_entry(cl, &dev->file_list, link) {  		if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {  			dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",  					disconnect_req->host_addr,  					disconnect_req->me_addr);  			cl->state = MEI_FILE_DISCONNECTED;  			cl->timer_count = 0; -			if (cl == &dev->wd_cl) -				dev->wd_pending = false; -			else if (cl == &dev->iamthif_cl) -				dev->iamthif_timer = 0; - -			/* prepare disconnect response */ -			mei_hbm_hdr(&dev->wr_ext_msg.hdr, len); -			mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, -					 dev->wr_ext_msg.data, len); + +			cb = mei_io_cb_init(cl, NULL); +			if (!cb) +				return -ENOMEM; +			cb->fop_type = MEI_FOP_DISCONNECT_RSP; +			cl_dbg(dev, cl, "add disconnect response as first\n"); +			list_add(&cb->list, &dev->ctrl_wr_list.list); +  			break;  		}  	} +	return 0;  } @@ -555,8 +641,10 @@ bool mei_hbm_version_is_supported(struct mei_device *dev)   *   * @dev: the device structure   * @mei_hdr: header of bus message + * + * returns 0 on success and < 0 on failure   */ -void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)  {  	struct mei_bus_message *mei_msg;  	struct mei_me_client 
*me_client; @@ -573,8 +661,20 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)  	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);  	mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; +	/* ignore spurious message and prevent reset nesting +	 * hbm is put to idle during system reset +	 */ +	if (dev->hbm_state == MEI_HBM_IDLE) { +		dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n"); +		return 0; +	} +  	switch (mei_msg->hbm_cmd) {  	case HOST_START_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n"); + +		dev->init_clients_timer = 0; +  		version_res = (struct hbm_host_version_response *)mei_msg;  		dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", @@ -593,73 +693,107 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)  		}  		if (!mei_hbm_version_is_supported(dev)) { -			dev_warn(&dev->pdev->dev, "hbm version mismatch: stopping the driver.\n"); +			dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); -			dev->hbm_state = MEI_HBM_STOP; -			mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, -						dev->wr_msg.data); -			mei_write_message(dev, &dev->wr_msg.hdr, -					dev->wr_msg.data); +			dev->hbm_state = MEI_HBM_STOPPED; +			if (mei_hbm_stop_req(dev)) { +				dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); +				return -EIO; +			} +			break; +		} -			return; +		if (dev->dev_state != MEI_DEV_INIT_CLIENTS || +		    dev->hbm_state != MEI_HBM_START) { +			dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			return -EPROTO;  		} -		if (dev->dev_state == MEI_DEV_INIT_CLIENTS && -		    dev->hbm_state == MEI_HBM_START) { -			dev->init_clients_timer = 0; -			mei_hbm_enum_clients_req(dev); -		} else { -			dev_err(&dev->pdev->dev, "reset: wrong host start response\n"); -			mei_reset(dev, 1); -			return; +		dev->hbm_state = MEI_HBM_STARTED; + +		if 
(mei_hbm_enum_clients_req(dev)) { +			dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n"); +			return -EIO;  		}  		wake_up_interruptible(&dev->wait_recvd_msg); -		dev_dbg(&dev->pdev->dev, "host start response message received.\n");  		break;  	case CLIENT_CONNECT_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n"); +  		connect_res = (struct hbm_client_connect_response *) mei_msg;  		mei_hbm_cl_connect_res(dev, connect_res); -		dev_dbg(&dev->pdev->dev, "client connect response message received.\n");  		wake_up(&dev->wait_recvd_msg);  		break;  	case CLIENT_DISCONNECT_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n"); +  		disconnect_res = (struct hbm_client_connect_response *) mei_msg;  		mei_hbm_cl_disconnect_res(dev, disconnect_res); -		dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");  		wake_up(&dev->wait_recvd_msg);  		break;  	case MEI_FLOW_CONTROL_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n"); +  		flow_control = (struct hbm_flow_control *) mei_msg;  		mei_hbm_cl_flow_control_res(dev, flow_control); -		dev_dbg(&dev->pdev->dev, "client flow control response message received.\n"); +		break; + +	case MEI_PG_ISOLATION_ENTRY_RES_CMD: +		dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n"); +		dev->pg_event = MEI_PG_EVENT_RECEIVED; +		if (waitqueue_active(&dev->wait_pg)) +			wake_up(&dev->wait_pg); +		break; + +	case MEI_PG_ISOLATION_EXIT_REQ_CMD: +		dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n"); +		dev->pg_event = MEI_PG_EVENT_RECEIVED; +		if (waitqueue_active(&dev->wait_pg)) +			wake_up(&dev->wait_pg); +		else +			/* +			* If the driver is not waiting on this then +			* this is HW initiated exit from PG. +			* Start runtime pm resume sequence to exit from PG. 
+			*/ +			pm_request_resume(&dev->pdev->dev);  		break;  	case HOST_CLIENT_PROPERTIES_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); + +		dev->init_clients_timer = 0; + +		if (dev->me_clients == NULL) { +			dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n"); +			return -EPROTO; +		} +  		props_res = (struct hbm_props_response *)mei_msg;  		me_client = &dev->me_clients[dev->me_client_presentation_num]; -		if (props_res->status || !dev->me_clients) { -			dev_err(&dev->pdev->dev, "reset: properties response hbm wrong status.\n"); -			mei_reset(dev, 1); -			return; +		if (props_res->status) { +			dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n", +				props_res->status); +			return -EPROTO;  		}  		if (me_client->client_id != props_res->address) { -			dev_err(&dev->pdev->dev, "reset: host properties response address mismatch\n"); -			mei_reset(dev, 1); -			return; +			dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n", +				me_client->client_id, props_res->address); +			return -EPROTO;  		}  		if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||  		    dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { -			dev_err(&dev->pdev->dev, "reset: unexpected properties response\n"); -			mei_reset(dev, 1); - -			return; +			dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			return -EPROTO;  		}  		me_client->props = props_res->client_properties; @@ -667,55 +801,79 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)  		dev->me_client_presentation_num++;  		/* request property for the next client */ -		mei_hbm_prop_req(dev); +		if (mei_hbm_prop_req(dev)) +			return -EIO;  		break;  	case HOST_ENUM_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n"); + +		dev->init_clients_timer = 0; +  		enum_res = (struct hbm_host_enum_response *) mei_msg; -		
memcpy(dev->me_clients_map, enum_res->valid_addresses, 32); -		if (dev->dev_state == MEI_DEV_INIT_CLIENTS && -		    dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { -				dev->init_clients_timer = 0; -				dev->me_client_presentation_num = 0; -				dev->me_client_index = 0; -				mei_hbm_me_cl_allocate(dev); -				dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; - -				/* first property reqeust */ -				mei_hbm_prop_req(dev); -		} else { -			dev_err(&dev->pdev->dev, "reset: unexpected enumeration response hbm.\n"); -			mei_reset(dev, 1); -			return; +		BUILD_BUG_ON(sizeof(dev->me_clients_map) +				< sizeof(enum_res->valid_addresses)); +		memcpy(dev->me_clients_map, enum_res->valid_addresses, +			sizeof(enum_res->valid_addresses)); + +		if (dev->dev_state != MEI_DEV_INIT_CLIENTS || +		    dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { +			dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			return -EPROTO; +		} + +		if (mei_hbm_me_cl_allocate(dev)) { +			dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n"); +			return -ENOMEM;  		} + +		dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; + +		/* first property request */ +		if (mei_hbm_prop_req(dev)) +			return -EIO; +  		break;  	case HOST_STOP_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n"); + +		dev->init_clients_timer = 0; -		if (dev->hbm_state != MEI_HBM_STOP) -			dev_err(&dev->pdev->dev, "unexpected stop response hbm.\n"); -		dev->dev_state = MEI_DEV_DISABLED; -		dev_info(&dev->pdev->dev, "reset: FW stop response.\n"); -		mei_reset(dev, 1); +		if (dev->hbm_state != MEI_HBM_STOPPED) { +			dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			return -EPROTO; +		} + +		dev->dev_state = MEI_DEV_POWER_DOWN; +		dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n"); +		/* force the reset */ +		return -EPROTO;  		break;  	case 
CLIENT_DISCONNECT_REQ_CMD: -		/* search for client */ +		dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n"); +  		disconnect_req = (struct hbm_client_connect_request *)mei_msg;  		mei_hbm_fw_disconnect_req(dev, disconnect_req);  		break;  	case ME_STOP_REQ_CMD: - -		dev->hbm_state = MEI_HBM_STOP; -		mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, -					dev->wr_ext_msg.data); +		dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); +		dev->hbm_state = MEI_HBM_STOPPED; +		if (mei_hbm_stop_req(dev)) { +			dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); +			return -EIO; +		}  		break;  	default:  		BUG();  		break;  	} +	return 0;  } diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 4ae2e56e404..683eb2835ce 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -32,13 +32,13 @@ struct mei_cl;  enum mei_hbm_state {  	MEI_HBM_IDLE = 0,  	MEI_HBM_START, +	MEI_HBM_STARTED,  	MEI_HBM_ENUM_CLIENTS,  	MEI_HBM_CLIENT_PROPERTIES, -	MEI_HBM_STARTED, -	MEI_HBM_STOP, +	MEI_HBM_STOPPED,  }; -void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);  static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)  { @@ -49,12 +49,16 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)  	hdr->reserved = 0;  } +void mei_hbm_idle(struct mei_device *dev); +void mei_hbm_reset(struct mei_device *dev);  int mei_hbm_start_req(struct mei_device *dev);  int mei_hbm_start_wait(struct mei_device *dev);  int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);  int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);  int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);  bool mei_hbm_version_is_supported(struct mei_device *dev); +int mei_hbm_pg(struct mei_device *dev, 
u8 pg_cmd);  #endif /* _MEI_HBM_H_ */ diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 6a203b6e834..a7856c0ac57 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -109,8 +109,17 @@  #define MEI_DEV_ID_PPT_2      0x1CBA  /* Panther Point */  #define MEI_DEV_ID_PPT_3      0x1DBA  /* Panther Point */ -#define MEI_DEV_ID_LPT        0x8C3A  /* Lynx Point */ +#define MEI_DEV_ID_LPT_H      0x8C3A  /* Lynx Point H */ +#define MEI_DEV_ID_LPT_W      0x8D3A  /* Lynx Point - Wellsburg */  #define MEI_DEV_ID_LPT_LP     0x9C3A  /* Lynx Point LP */ +#define MEI_DEV_ID_LPT_HR     0x8CBA  /* Lynx Point H Refresh */ + +#define MEI_DEV_ID_WPT_LP     0x9CBA  /* Wildcat Point LP */ + +/* Host Firmware Status Registers in PCI Config Space */ +#define PCI_CFG_HFS_1         0x40 +#define PCI_CFG_HFS_2         0x48 +  /*   * MEI HW Section   */ @@ -124,6 +133,8 @@  #define ME_CB_RW   8  /* ME_CSR_HA - ME Control Status Host Access register (read only) */  #define ME_CSR_HA  0xC +/* H_HGC_CSR - PGI register */ +#define H_HPG_CSR  0x10  /* register bits of H_CSR (Host Control Status register) */ @@ -153,6 +164,8 @@ access to ME_CBD */  #define ME_CBWP_HRA       0x00FF0000  /* ME CB Read Pointer HRA - host read only access to ME_CBRP */  #define ME_CBRP_HRA       0x0000FF00 +/* ME Power Gate Isolation Capability HRA  - host ready only access */ +#define ME_PGIC_HRA       0x00000040  /* ME Reset HRA - host read only access to ME_RST */  #define ME_RST_HRA        0x00000010  /* ME Ready HRA - host read only access to ME_RDY */ @@ -164,4 +177,9 @@ access to ME_CBD */  /* ME Interrupt Enable HRA - host read only access to ME_IE */  #define ME_IE_HRA         0x00000001 + +/* register bits - H_HPG_CSR */ +#define H_HPG_CSR_PGIHEXR       0x00000001 +#define H_HPG_CSR_PGI           0x00000002 +  #endif /* _MEI_HW_MEI_REGS_H_ */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 3412adcdaeb..6a2d272cea4 100644 --- 
a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -20,10 +20,10 @@  #include <linux/interrupt.h>  #include "mei_dev.h" -#include "hw-me.h" -  #include "hbm.h" +#include "hw-me.h" +#include "hw-me-regs.h"  /**   * mei_me_reg_read - Reads 32bit data from the mei device @@ -109,10 +109,27 @@ static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)   */  static void mei_me_hw_config(struct mei_device *dev)  { +	struct mei_me_hw *hw = to_me_hw(dev);  	u32 hcsr = mei_hcsr_read(to_me_hw(dev));  	/* Doesn't change in runtime */  	dev->hbuf_depth = (hcsr & H_CBD) >> 24; + +	hw->pg_state = MEI_PG_OFF;  } + +/** + * mei_me_pg_state  - translate internal pg state + *   to the mei power gating state + * + * @hw -  me hardware + * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	return hw->pg_state; +} +  /**   * mei_clear_interrupts - clear and stop interrupts   * @@ -164,6 +181,9 @@ static void mei_me_hw_reset_release(struct mei_device *dev)  	hcsr |= H_IG;  	hcsr &= ~H_RST;  	mei_hcsr_set(hw, hcsr); + +	/* complete this write before we set host ready on another CPU */ +	mmiowb();  }  /**   * mei_me_hw_reset - resets fw via mei csr register. @@ -183,9 +203,22 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)  	else  		hcsr &= ~H_IE; +	dev->recvd_hw_ready = false;  	mei_me_reg_write(hw, H_CSR, hcsr); -	if (dev->dev_state == MEI_DEV_POWER_DOWN) +	/* +	 * Host reads the H_CSR once to ensure that the +	 * posted write to H_CSR completes. 
+	 */ +	hcsr = mei_hcsr_read(hw); + +	if ((hcsr & H_RST) == 0) +		dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr); + +	if ((hcsr & H_RDY) == H_RDY) +		dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr); + +	if (intr_enable == false)  		mei_me_hw_reset_release(dev);  	return 0; @@ -201,6 +234,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)  static void mei_me_host_set_ready(struct mei_device *dev)  {  	struct mei_me_hw *hw = to_me_hw(dev); +	hw->host_hw_state = mei_hcsr_read(hw);  	hw->host_hw_state |= H_IE | H_IG | H_RDY;  	mei_hcsr_set(hw, hw->host_hw_state);  } @@ -233,18 +267,15 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)  static int mei_me_hw_ready_wait(struct mei_device *dev)  {  	int err; -	if (mei_me_hw_is_ready(dev)) -		return 0; -	dev->recvd_hw_ready = false;  	mutex_unlock(&dev->device_lock);  	err = wait_event_interruptible_timeout(dev->wait_hw_ready,  			dev->recvd_hw_ready, -			mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); +			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));  	mutex_lock(&dev->device_lock);  	if (!err && !dev->recvd_hw_ready) {  		if (!err) -			err = -ETIMEDOUT; +			err = -ETIME;  		dev_err(&dev->pdev->dev,  			"wait hw ready failed. status = %d\n", err);  		return err; @@ -303,7 +334,7 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev)   *   * @dev: the device structure   * - * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count + * returns -EOVERFLOW if overflow, otherwise empty slots count   */  static int mei_me_hbuf_empty_slots(struct mei_device *dev)  { @@ -326,7 +357,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)  /** - * mei_write_message - writes a message to mei device. + * mei_me_write_message - writes a message to mei device.   
*   * @dev: the device structure   * @header: mei HECI header of message @@ -354,7 +385,7 @@ static int mei_me_write_message(struct mei_device *dev,  	dw_cnt = mei_data2slots(length);  	if (empty_slots < 0 || dw_cnt > empty_slots) -		return -EIO; +		return -EMSGSIZE;  	mei_me_reg_write(hw, H_CB_WW, *((u32 *) header)); @@ -381,7 +412,7 @@ static int mei_me_write_message(struct mei_device *dev,   *   * @dev: the device structure   * - * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count + * returns -EOVERFLOW if overflow, otherwise filled slots count   */  static int mei_me_count_full_read_slots(struct mei_device *dev)  { @@ -431,6 +462,144 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,  }  /** + * mei_me_pg_enter - write pg enter register to mei device. + * + * @dev: the device structure + */ +static void mei_me_pg_enter(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 reg = mei_me_reg_read(hw, H_HPG_CSR); +	reg |= H_HPG_CSR_PGI; +	mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_exit - write pg exit register to mei device. 
+ * + * @dev: the device structure + */ +static void mei_me_pg_exit(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 reg = mei_me_reg_read(hw, H_HPG_CSR); + +	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); + +	reg |= H_HPG_CSR_PGIHEXR; +	mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_set_sync - perform pg entry procedure + * + * @dev: the device structure + * + * returns 0 on success an error code otherwise + */ +int mei_me_pg_set_sync(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); +	int ret; + +	dev->pg_event = MEI_PG_EVENT_WAIT; + +	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); +	if (ret) +		return ret; + +	mutex_unlock(&dev->device_lock); +	wait_event_timeout(dev->wait_pg, +		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); +	mutex_lock(&dev->device_lock); + +	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { +		mei_me_pg_enter(dev); +		ret = 0; +	} else { +		ret = -ETIME; +	} + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	hw->pg_state = MEI_PG_ON; + +	return ret; +} + +/** + * mei_me_pg_unset_sync - perform pg exit procedure + * + * @dev: the device structure + * + * returns 0 on success an error code otherwise + */ +int mei_me_pg_unset_sync(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); +	int ret; + +	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) +		goto reply; + +	dev->pg_event = MEI_PG_EVENT_WAIT; + +	mei_me_pg_exit(dev); + +	mutex_unlock(&dev->device_lock); +	wait_event_timeout(dev->wait_pg, +		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); +	mutex_lock(&dev->device_lock); + +reply: +	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) +		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); +	else +		ret = -ETIME; + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	hw->pg_state = MEI_PG_OFF; + +	return ret; +} + +/** + * mei_me_pg_is_enabled - detect if PG is 
supported by HW + * + * @dev: the device structure + * + * returns: true if pg supported, false otherwise + */ +static bool mei_me_pg_is_enabled(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 reg = mei_me_reg_read(hw, ME_CSR_HA); + +	if ((reg & ME_PGIC_HRA) == 0) +		goto notsupported; + +	if (dev->version.major_version < HBM_MAJOR_VERSION_PGI) +		goto notsupported; + +	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && +	    dev->version.minor_version < HBM_MINOR_VERSION_PGI) +		goto notsupported; + +	return true; + +notsupported: +	dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", +		!!(reg & ME_PGIC_HRA), +		dev->version.major_version, +		dev->version.minor_version, +		HBM_MAJOR_VERSION_PGI, +		HBM_MINOR_VERSION_PGI); + +	return false; +} + +/**   * mei_me_irq_quick_handler - The ISR of the MEI device   *   * @irq: The irq number @@ -469,7 +638,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)  	struct mei_device *dev = (struct mei_device *) dev_id;  	struct mei_cl_cb complete_list;  	s32 slots; -	int rets; +	int rets = 0;  	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");  	/* initialize our complete list */ @@ -482,59 +651,123 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)  		mei_clear_interrupts(dev);  	/* check if ME wants a reset */ -	if (!mei_hw_is_ready(dev) && -	    dev->dev_state != MEI_DEV_RESETTING && -	    dev->dev_state != MEI_DEV_INITIALIZING && -	    dev->dev_state != MEI_DEV_POWER_DOWN && -	    dev->dev_state != MEI_DEV_POWER_UP) { -		dev_dbg(&dev->pdev->dev, "FW not ready.\n"); -		mei_reset(dev, 1); -		mutex_unlock(&dev->device_lock); -		return IRQ_HANDLED; +	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { +		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); +		schedule_work(&dev->reset_work); +		goto end;  	}  	/*  check if we need to start the dev */  	if 
(!mei_host_is_ready(dev)) {  		if (mei_hw_is_ready(dev)) { +			mei_me_hw_reset_release(dev);  			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");  			dev->recvd_hw_ready = true;  			wake_up_interruptible(&dev->wait_hw_ready); - -			mutex_unlock(&dev->device_lock); -			return IRQ_HANDLED;  		} else { -			dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); -			mei_me_hw_reset_release(dev); -			mutex_unlock(&dev->device_lock); -			return IRQ_HANDLED; +			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");  		} +		goto end;  	}  	/* check slots available for reading */  	slots = mei_count_full_read_slots(dev);  	while (slots > 0) { -		/* we have urgent data to send so break the read */ -		if (dev->wr_ext_msg.hdr.length) -			break; -		dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots); -		dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n"); +		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);  		rets = mei_irq_read_handler(dev, &complete_list, &slots); -		if (rets) +		/* There is a race between ME write and interrupt delivery: +		 * Not all data is always available immediately after the +		 * interrupt, so try to read again on the next interrupt. 
+		 */ +		if (rets == -ENODATA) +			break; + +		if (rets && dev->dev_state != MEI_DEV_RESETTING) { +			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n", +						rets); +			schedule_work(&dev->reset_work);  			goto end; +		}  	} -	rets = mei_irq_write_handler(dev, &complete_list); -end: -	dev_dbg(&dev->pdev->dev, "end of bottom half function.\n"); +  	dev->hbuf_is_ready = mei_hbuf_is_ready(dev); -	mutex_unlock(&dev->device_lock); +	/* +	 * During PG handshake only allowed write is the replay to the +	 * PG exit message, so block calling write function +	 * if the pg state is not idle +	 */ +	if (dev->pg_event == MEI_PG_EVENT_IDLE) { +		rets = mei_irq_write_handler(dev, &complete_list); +		dev->hbuf_is_ready = mei_hbuf_is_ready(dev); +	}  	mei_irq_compl_handler(dev, &complete_list); +end: +	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); +	mutex_unlock(&dev->device_lock);  	return IRQ_HANDLED;  } + +/** + * mei_me_fw_status - retrieve fw status from the pci config space + * + * @dev: the device structure + * @fw_status: fw status registers storage + * + * returns 0 on success an error code otherwise + */ +static int mei_me_fw_status(struct mei_device *dev, +			    struct mei_fw_status *fw_status) +{ +	const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2}; +	int i; + +	if (!fw_status) +		return -EINVAL; + +	switch (dev->pdev->device) { +	case MEI_DEV_ID_IBXPK_1: +	case MEI_DEV_ID_IBXPK_2: +	case MEI_DEV_ID_CPT_1: +	case MEI_DEV_ID_PBG_1: +	case MEI_DEV_ID_PPT_1: +	case MEI_DEV_ID_PPT_2: +	case MEI_DEV_ID_PPT_3: +	case MEI_DEV_ID_LPT_H: +	case MEI_DEV_ID_LPT_W: +	case MEI_DEV_ID_LPT_LP: +	case MEI_DEV_ID_LPT_HR: +	case MEI_DEV_ID_WPT_LP: +		fw_status->count = 2; +		break; +	case MEI_DEV_ID_ICH10_1: +	case MEI_DEV_ID_ICH10_2: +	case MEI_DEV_ID_ICH10_3: +	case MEI_DEV_ID_ICH10_4: +		fw_status->count = 1; +		break; +	default: +		fw_status->count = 0; +		break; +	} + +	for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { +		int 
ret; +		ret = pci_read_config_dword(dev->pdev, +				pci_cfg_reg[i], &fw_status->status[i]); +		if (ret) +			return ret; +	} +	return 0; +} +  static const struct mei_hw_ops mei_me_hw_ops = { +	.pg_state  = mei_me_pg_state, + +	.fw_status = mei_me_fw_status,  .host_is_ready = mei_me_host_is_ready,  .hw_is_ready = mei_me_hw_is_ready, @@ -542,6 +775,8 @@ static const struct mei_hw_ops mei_me_hw_ops = {  	.hw_config = mei_me_hw_config,  	.hw_start = mei_me_hw_start, +	.pg_is_enabled = mei_me_pg_is_enabled, +  	.intr_clear = mei_me_intr_clear,  	.intr_enable = mei_me_intr_enable,  	.intr_disable = mei_me_intr_disable, @@ -557,14 +792,81 @@ static const struct mei_hw_ops mei_me_hw_ops = {  	.read = mei_me_read_slots  }; +static bool mei_me_fw_type_nm(struct pci_dev *pdev) +{ +	u32 reg; +	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg); +	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ +	return (reg & 0x600) == 0x200; +} + +#define MEI_CFG_FW_NM                           \ +	.quirk_probe = mei_me_fw_type_nm + +static bool mei_me_fw_type_sps(struct pci_dev *pdev) +{ +	u32 reg; +	/* Read ME FW Status check for SPS Firmware */ +	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg); +	/* if bits [19:16] = 15, running SPS Firmware */ +	return (reg & 0xf0000) == 0xf0000; +} + +#define MEI_CFG_FW_SPS                           \ +	.quirk_probe = mei_me_fw_type_sps + + +#define MEI_CFG_LEGACY_HFS                      \ +	.fw_status.count = 0 + +#define MEI_CFG_ICH_HFS                        \ +	.fw_status.count = 1,                   \ +	.fw_status.status[0] = PCI_CFG_HFS_1 + +#define MEI_CFG_PCH_HFS                         \ +	.fw_status.count = 2,                   \ +	.fw_status.status[0] = PCI_CFG_HFS_1,   \ +	.fw_status.status[1] = PCI_CFG_HFS_2 + + +/* ICH Legacy devices */ +const struct mei_cfg mei_me_legacy_cfg = { +	MEI_CFG_LEGACY_HFS, +}; + +/* ICH devices */ +const struct mei_cfg mei_me_ich_cfg = { +	MEI_CFG_ICH_HFS, +}; + +/* PCH devices */ +const struct mei_cfg 
mei_me_pch_cfg = { +	MEI_CFG_PCH_HFS, +}; + + +/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ +const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { +	MEI_CFG_PCH_HFS, +	MEI_CFG_FW_NM, +}; + +/* PCH Lynx Point with quirk for SPS Firmware exclusion */ +const struct mei_cfg mei_me_lpt_cfg = { +	MEI_CFG_PCH_HFS, +	MEI_CFG_FW_SPS, +}; +  /**   * mei_me_dev_init - allocates and initializes the mei device structure   *   * @pdev: The pci device structure + * @cfg: per device generation config   *   * returns The mei_device_device pointer on success, NULL on failure.   */ -struct mei_device *mei_me_dev_init(struct pci_dev *pdev) +struct mei_device *mei_me_dev_init(struct pci_dev *pdev, +				   const struct mei_cfg *cfg)  {  	struct mei_device *dev; @@ -573,7 +875,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev)  	if (!dev)  		return NULL; -	mei_device_init(dev); +	mei_device_init(dev, cfg);  	dev->ops = &mei_me_hw_ops; diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 80bd829fbd9..12b0f4bbe1f 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -20,9 +20,12 @@  #define _MEI_INTERFACE_H_  #include <linux/mei.h> +#include <linux/irqreturn.h>  #include "mei_dev.h"  #include "client.h" +#define MEI_ME_RPM_TIMEOUT    500 /* ms */ +  struct mei_me_hw {  	void __iomem *mem_addr;  	/* @@ -30,11 +33,22 @@ struct mei_me_hw {  	 */  	u32 host_hw_state;  	u32 me_hw_state; +	enum mei_pg_state pg_state;  };  #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) -struct mei_device *mei_me_dev_init(struct pci_dev *pdev); +extern const struct mei_cfg mei_me_legacy_cfg; +extern const struct mei_cfg mei_me_ich_cfg; +extern const struct mei_cfg mei_me_pch_cfg; +extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg; +extern const struct mei_cfg mei_me_lpt_cfg; + +struct mei_device *mei_me_dev_init(struct pci_dev *pdev, +				   const struct mei_cfg *cfg); + +int mei_me_pg_set_sync(struct mei_device *dev); +int 
mei_me_pg_unset_sync(struct mei_device *dev);  irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);  irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h new file mode 100644 index 00000000000..f19229c4e65 --- /dev/null +++ b/drivers/misc/mei/hw-txe-regs.h @@ -0,0 +1,294 @@ +/****************************************************************************** + * Intel Management Engine Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING + * + * Contact Information: + *	Intel Corporation. + *	linux-mei@linux.intel.com + *	http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + *  * Redistributions of source code must retain the above copyright + *    notice, this list of conditions and the following disclaimer. 
+ *  * Redistributions in binary form must reproduce the above copyright + *    notice, this list of conditions and the following disclaimer in + *    the documentation and/or other materials provided with the + *    distribution. + *  * Neither the name Intel Corporation nor the names of its + *    contributors may be used to endorse or promote products derived + *    from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _MEI_HW_TXE_REGS_H_ +#define _MEI_HW_TXE_REGS_H_ + +#include "hw.h" + +#define SEC_ALIVENESS_TIMER_TIMEOUT        (5 * MSEC_PER_SEC) +#define SEC_ALIVENESS_WAIT_TIMEOUT         (1 * MSEC_PER_SEC) +#define SEC_RESET_WAIT_TIMEOUT             (1 * MSEC_PER_SEC) +#define SEC_READY_WAIT_TIMEOUT             (5 * MSEC_PER_SEC) +#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define RESET_CANCEL_WAIT_TIMEOUT          (1 * MSEC_PER_SEC) + +enum { +	SEC_BAR, +	BRIDGE_BAR, + +	NUM_OF_MEM_BARS +}; + +/* SeC FW Status Register + * + * FW uses this register in order to report its status to host. + * This register resides in PCI-E config space. 
+ */ +#define PCI_CFG_TXE_FW_STS0   0x40 +#  define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK    0x0000000F +#  define PCI_CFG_TXE_FW_STS0_OP_ST_MSK     0x000001C0 +#  define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200 +#  define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK  0x0000F000 +#  define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK   0x000F0000 +#  define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK   0x00F00000 +#define PCI_CFG_TXE_FW_STS1   0x48 + +#define IPC_BASE_ADDR	0x80400 /* SeC IPC Base Address */ + +/* IPC Input Doorbell Register */ +#define SEC_IPC_INPUT_DOORBELL_REG       (0x0000 + IPC_BASE_ADDR) + +/* IPC Input Status Register + * This register indicates whether or not processing of + * the most recent command has been completed by the SEC + * New commands and payloads should not be written by the Host + * until this indicates that the previous command has been processed. + */ +#define SEC_IPC_INPUT_STATUS_REG         (0x0008 + IPC_BASE_ADDR) +#  define SEC_IPC_INPUT_STATUS_RDY    BIT(0) + +/* IPC Host Interrupt Status Register */ +#define SEC_IPC_HOST_INT_STATUS_REG      (0x0010 + IPC_BASE_ADDR) +#define   SEC_IPC_HOST_INT_STATUS_OUT_DB             BIT(0) +#define   SEC_IPC_HOST_INT_STATUS_IN_RDY             BIT(1) +#define   SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD       BIT(5) +#define   SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS     BIT(17) +#define   SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR       BIT(18) +#define   SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR       BIT(19) +#define   SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW    BIT(21) + +/* Convenient mask for pending interrupts */ +#define   SEC_IPC_HOST_INT_STATUS_PENDING \ +		(SEC_IPC_HOST_INT_STATUS_OUT_DB| \ +		SEC_IPC_HOST_INT_STATUS_IN_RDY) + +/* IPC Host Interrupt Mask Register */ +#define SEC_IPC_HOST_INT_MASK_REG        (0x0014 + IPC_BASE_ADDR) + +#  define SEC_IPC_HOST_INT_MASK_OUT_DB	BIT(0) /* Output Doorbell Int Mask */ +#  define SEC_IPC_HOST_INT_MASK_IN_RDY	BIT(1) /* Input Ready Int Mask */ + +/* IPC Input Payload RAM */ +#define 
SEC_IPC_INPUT_PAYLOAD_REG        (0x0100 + IPC_BASE_ADDR) +/* IPC Shared Payload RAM */ +#define IPC_SHARED_PAYLOAD_REG           (0x0200 + IPC_BASE_ADDR) + +/* SeC Address Translation Table Entry 2 - Ctrl + * + * This register resides also in SeC's PCI-E Memory space. + */ +#define SATT2_CTRL_REG                   0x1040 +#  define SATT2_CTRL_VALID_MSK            BIT(0) +#  define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8 +#  define SATT2_CTRL_BRIDGE_HOST_EN_MSK   BIT(12) + +/* SATT Table Entry 2 SAP Base Address Register */ +#define SATT2_SAP_BA_REG                 0x1044 +/* SATT Table Entry 2 SAP Size Register. */ +#define SATT2_SAP_SIZE_REG               0x1048 + /* SATT Table Entry 2 SAP Bridge Address - LSB Register */ +#define SATT2_BRG_BA_LSB_REG             0x104C + +/* Host High-level Interrupt Status Register */ +#define HHISR_REG                        0x2020 +/* Host High-level Interrupt Enable Register + * + * Resides in PCI memory space. This is the top hierarchy for + * interrupts from SeC to host, aggregating both interrupts that + * arrive through HICR registers as well as interrupts + * that arrive via IPC. + */ +#define HHIER_REG                        0x2024 +#define   IPC_HHIER_SEC	BIT(0) +#define   IPC_HHIER_BRIDGE	BIT(1) +#define   IPC_HHIER_MSK	(IPC_HHIER_SEC | IPC_HHIER_BRIDGE) + +/* Host High-level Interrupt Mask Register. + * + * Resides in PCI memory space. + * This is the top hierarchy for masking interrupts from SeC to host. + */ +#define HHIMR_REG                        0x2028 +#define   IPC_HHIMR_SEC       BIT(0) +#define   IPC_HHIMR_BRIDGE    BIT(1) + +/* Host High-level IRQ Status Register */ +#define HHIRQSR_REG                      0x202C + +/* Host Interrupt Cause Register 0 - SeC IPC Readiness + * + * This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * This register is used by SeC's IPC driver in order + * to synchronize with host about IPC interface state. 
+ */ +#define HICR_SEC_IPC_READINESS_REG       0x2040 +#define   HICR_SEC_IPC_READINESS_HOST_RDY  BIT(0) +#define   HICR_SEC_IPC_READINESS_SEC_RDY   BIT(1) +#define   HICR_SEC_IPC_READINESS_SYS_RDY     \ +	  (HICR_SEC_IPC_READINESS_HOST_RDY | \ +	   HICR_SEC_IPC_READINESS_SEC_RDY) +#define   HICR_SEC_IPC_READINESS_RDY_CLR   BIT(2) + +/* Host Interrupt Cause Register 1 - Aliveness Response */ +/* This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * The register may be used by SeC to ACK a host request for aliveness. + */ +#define HICR_HOST_ALIVENESS_RESP_REG     0x2044 +#define   HICR_HOST_ALIVENESS_RESP_ACK    BIT(0) + +/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */ +#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048 + +/* Host Interrupt Status Register. + * + * Resides in PCI memory space. + * This is the main register involved in generating interrupts + * from SeC to host via HICRs. + * The interrupt generation rules are as follows: + * An interrupt will be generated whenever for any i, + * there is a transition from a state where at least one of + * the following conditions did not hold, to a state where + * ALL the following conditions hold: + * A) HISR.INT[i]_STS == 1. + * B) HIER.INT[i]_EN == 1. + */ +#define HISR_REG                         0x2060 +#define   HISR_INT_0_STS      BIT(0) +#define   HISR_INT_1_STS      BIT(1) +#define   HISR_INT_2_STS      BIT(2) +#define   HISR_INT_3_STS      BIT(3) +#define   HISR_INT_4_STS      BIT(4) +#define   HISR_INT_5_STS      BIT(5) +#define   HISR_INT_6_STS      BIT(6) +#define   HISR_INT_7_STS      BIT(7) +#define   HISR_INT_STS_MSK \ +	(HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS) + +/* Host Interrupt Enable Register. Resides in PCI memory space. 
*/ +#define HIER_REG                         0x2064 +#define   HIER_INT_0_EN      BIT(0) +#define   HIER_INT_1_EN      BIT(1) +#define   HIER_INT_2_EN      BIT(2) +#define   HIER_INT_3_EN      BIT(3) +#define   HIER_INT_4_EN      BIT(4) +#define   HIER_INT_5_EN      BIT(5) +#define   HIER_INT_6_EN      BIT(6) +#define   HIER_INT_7_EN      BIT(7) + +#define   HIER_INT_EN_MSK \ +	 (HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN) + + +/* SEC Memory Space IPC output payload. + * + * This register is part of the output payload which SEC provides to host. + */ +#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG    0x20C0 + +/* SeC Interrupt Cause Register - Host Aliveness Request + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * The register is used by host to request SeC aliveness. + */ +#define SICR_HOST_ALIVENESS_REQ_REG      0x214C +#define   SICR_HOST_ALIVENESS_REQ_REQUESTED    BIT(0) + + +/* SeC Interrupt Cause Register - Host IPC Readiness + * + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * This register is used by the host's SeC driver uses in order + * to synchronize with SeC about IPC interface state. + */ +#define SICR_HOST_IPC_READINESS_REQ_REG  0x2150 + + +#define SICR_HOST_IPC_READINESS_HOST_RDY  BIT(0) +#define SICR_HOST_IPC_READINESS_SEC_RDY   BIT(1) +#define SICR_HOST_IPC_READINESS_SYS_RDY     \ +	(SICR_HOST_IPC_READINESS_HOST_RDY | \ +	 SICR_HOST_IPC_READINESS_SEC_RDY) +#define SICR_HOST_IPC_READINESS_RDY_CLR   BIT(2) + +/* SeC Interrupt Cause Register - SeC IPC Output Status + * + * This register indicates whether or not processing of the most recent + * command has been completed by the Host. + * New commands and payloads should not be written by SeC until this + * register indicates that the previous command has been processed. 
+ */ +#define SICR_SEC_IPC_OUTPUT_STATUS_REG   0x2154 +#  define SEC_IPC_OUTPUT_STATUS_RDY BIT(0) + + + +/*  MEI IPC Message payload size 64 bytes */ +#define PAYLOAD_SIZE        64 + +/* MAX size for SATT range 32MB */ +#define SATT_RANGE_MAX     (32 << 20) + + +#endif /* _MEI_HW_TXE_REGS_H_ */ + diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c new file mode 100644 index 00000000000..93273783dec --- /dev/null +++ b/drivers/misc/mei/hw-txe.c @@ -0,0 +1,1190 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/pci.h> +#include <linux/jiffies.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/irqreturn.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hw-txe.h" +#include "client.h" +#include "hbm.h" + +/** + * mei_txe_reg_read - Reads 32bit data from the device + * + * @base_addr: registers base address + * @offset: register offset + * + */ +static inline u32 mei_txe_reg_read(void __iomem *base_addr, +					unsigned long offset) +{ +	return ioread32(base_addr + offset); +} + +/** + * mei_txe_reg_write - Writes 32bit data to the device + * + * @base_addr: registers base address + * @offset: register offset + * @value: the value to write + */ +static inline void mei_txe_reg_write(void __iomem *base_addr, +				unsigned long offset, u32 value) +{ +	iowrite32(value, base_addr + offset); +} + +/** + * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR + * + * @dev: the device structure + * @offset: register offset + * + * Doesn't check for aliveness while Reads 32bit data from the SeC BAR + */ +static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw, +				unsigned long offset) +{ +	return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset); +} + +/** + * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR + * + * @dev: the device structure + * @offset: register offset + * + * Reads 32bit data from the SeC BAR and shout loud if aliveness is not set + */ +static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw, +				unsigned long offset) +{ +	WARN(!hw->aliveness, "sec read: aliveness not asserted\n"); +	return mei_txe_sec_reg_read_silent(hw, offset); +} +/** + * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR + *   doesn't check for aliveness + * + * @dev: the device structure + * @offset: register offset + * @value: value to write + * + * Doesn't check for aliveness while writes 32bit data from to the SeC BAR + */ +static inline void mei_txe_sec_reg_write_silent(struct 
mei_txe_hw *hw, +				unsigned long offset, u32 value) +{ +	mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value); +} + +/** + * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR + * + * @dev: the device structure + * @offset: register offset + * @value: value to write + * + * Writes 32bit data from the SeC BAR and shout loud if aliveness is not set + */ +static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw, +				unsigned long offset, u32 value) +{ +	WARN(!hw->aliveness, "sec write: aliveness not asserted\n"); +	mei_txe_sec_reg_write_silent(hw, offset, value); +} +/** + * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR + * + * @hw: the device structure + * @offset: offset from which to read the data + * + */ +static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw, +				unsigned long offset) +{ +	return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset); +} + +/** + * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR + * + * @hw: the device structure + * @offset: offset from which to write the data + * @value: the byte to write + */ +static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw, +				unsigned long offset, u32 value) +{ +	mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value); +} + +/** + * mei_txe_aliveness_set - request for aliveness change + * + * @dev: the device structure + * @req: requested aliveness value + * + * Request for aliveness change and returns true if the change is + *   really needed and false if aliveness is already + *   in the requested state + * Requires device lock to be held + */ +static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) +{ + +	struct mei_txe_hw *hw = to_txe_hw(dev); +	bool do_req = hw->aliveness != req; + +	dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n", +				hw->aliveness, req); +	if (do_req) { +		dev->pg_event = MEI_PG_EVENT_WAIT; +		mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req); +	} +	return do_req; +} + + +/** + * 
mei_txe_aliveness_req_get - get aliveness requested register value + * + * @dev: the device structure + * + * Extract HICR_HOST_ALIVENESS_RESP_ACK bit from + * from HICR_HOST_ALIVENESS_REQ register value + */ +static u32 mei_txe_aliveness_req_get(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 reg; +	reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG); +	return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED; +} + +/** + * mei_txe_aliveness_get - get aliveness response register value + * @dev: the device structure + * + * Extract HICR_HOST_ALIVENESS_RESP_ACK bit + * from HICR_HOST_ALIVENESS_RESP register value + */ +static u32 mei_txe_aliveness_get(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 reg; +	reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG); +	return reg & HICR_HOST_ALIVENESS_RESP_ACK; +} + +/** + * mei_txe_aliveness_poll - waits for aliveness to settle + * + * @dev: the device structure + * @expected: expected aliveness value + * + * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set + * returns > 0 if the expected value was received, -ETIME otherwise + */ +static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	int t = 0; + +	do { +		hw->aliveness = mei_txe_aliveness_get(dev); +		if (hw->aliveness == expected) { +			dev->pg_event = MEI_PG_EVENT_IDLE; +			dev_dbg(&dev->pdev->dev, +				"aliveness settled after %d msecs\n", t); +			return t; +		} +		mutex_unlock(&dev->device_lock); +		msleep(MSEC_PER_SEC / 5); +		mutex_lock(&dev->device_lock); +		t += MSEC_PER_SEC / 5; +	} while (t < SEC_ALIVENESS_WAIT_TIMEOUT); + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	dev_err(&dev->pdev->dev, "aliveness timed out\n"); +	return -ETIME; +} + +/** + * mei_txe_aliveness_wait - waits for aliveness to settle + * + * @dev: the device structure + * @expected: expected aliveness value + * + * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be 
set + * returns 0 on success and < 0 otherwise + */ +static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	const unsigned long timeout = +			msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT); +	long err; +	int ret; + +	hw->aliveness = mei_txe_aliveness_get(dev); +	if (hw->aliveness == expected) +		return 0; + +	mutex_unlock(&dev->device_lock); +	err = wait_event_timeout(hw->wait_aliveness_resp, +			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); +	mutex_lock(&dev->device_lock); + +	hw->aliveness = mei_txe_aliveness_get(dev); +	ret = hw->aliveness == expected ? 0 : -ETIME; + +	if (ret) +		dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n", +			err, hw->aliveness, dev->pg_event); +	else +		dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n", +			jiffies_to_msecs(timeout - err), +			hw->aliveness, dev->pg_event); + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	return ret; +} + +/** + * mei_txe_aliveness_set_sync - sets and waits for aliveness to complete + * + * @dev: the device structure + * + * returns 0 on success and < 0 otherwise + */ +int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req) +{ +	if (mei_txe_aliveness_set(dev, req)) +		return mei_txe_aliveness_wait(dev, req); +	return 0; +} + +/** + * mei_txe_pg_is_enabled - detect if PG is supported by HW + * + * @dev: the device structure + * + * returns: true if pg supported, false otherwise + */ +static bool mei_txe_pg_is_enabled(struct mei_device *dev) +{ +	return true; +} + +/** + * mei_txe_pg_state  - translate aliveness register value + *   to the mei power gating state + * + * @dev: the device structure + * + * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	return hw->aliveness ? 
MEI_PG_OFF : MEI_PG_ON; +} + +/** + * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt + * + * @dev: the device structure + */ +static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 hintmsk; +	/* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */ +	hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG); +	hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY; +	mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk); +} + +/** + * mei_txe_input_doorbell_set + *   - Sets bit 0 in SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL. + * @dev: the device structure + */ +static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw) +{ +	/* Clear the interrupt cause */ +	clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause); +	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1); +} + +/** + * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1 + * + * @dev: the device structure + */ +static void mei_txe_output_ready_set(struct mei_txe_hw *hw) +{ +	mei_txe_br_reg_write(hw, +			SICR_SEC_IPC_OUTPUT_STATUS_REG, +			SEC_IPC_OUTPUT_STATUS_RDY); +} + +/** + * mei_txe_is_input_ready - check if TXE is ready for receiving data + * + * @dev: the device structure + */ +static bool mei_txe_is_input_ready(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 status; +	status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG); +	return !!(SEC_IPC_INPUT_STATUS_RDY & status); +} + +/** + * mei_txe_intr_clear - clear all interrupts + * + * @dev: the device structure + */ +static inline void mei_txe_intr_clear(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG, +		SEC_IPC_HOST_INT_STATUS_PENDING); +	mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK); +	mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK); +} + +/** + * mei_txe_intr_disable - disable all interrupts + * + * @dev: the 
device structure + */ +static void mei_txe_intr_disable(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	mei_txe_br_reg_write(hw, HHIER_REG, 0); +	mei_txe_br_reg_write(hw, HIER_REG, 0); +} +/** + * mei_txe_intr_enable - enable all interrupts + * + * @dev: the device structure + */ +static void mei_txe_intr_enable(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK); +	mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK); +} + +/** + * mei_txe_pending_interrupts - check if there are pending interrupts + *	only Aliveness, Input ready, and output doorbell are of relevance + * + * @dev: the device structure + * + * Checks if there are pending interrupts + * only Aliveness, Readiness, Input ready, and Output doorbell are relevant + */ +static bool mei_txe_pending_interrupts(struct mei_device *dev) +{ + +	struct mei_txe_hw *hw = to_txe_hw(dev); +	bool ret = (hw->intr_cause & (TXE_INTR_READINESS | +				      TXE_INTR_ALIVENESS | +				      TXE_INTR_IN_READY  | +				      TXE_INTR_OUT_DB)); + +	if (ret) { +		dev_dbg(&dev->pdev->dev, +			"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n", +			!!(hw->intr_cause & TXE_INTR_IN_READY), +			!!(hw->intr_cause & TXE_INTR_READINESS), +			!!(hw->intr_cause & TXE_INTR_ALIVENESS), +			!!(hw->intr_cause & TXE_INTR_OUT_DB)); +	} +	return ret; +} + +/** + * mei_txe_input_payload_write - write a dword to the host buffer + *	at offset idx + * + * @dev: the device structure + * @idx: index in the host buffer + * @value: value + */ +static void mei_txe_input_payload_write(struct mei_device *dev, +			unsigned long idx, u32 value) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG + +			(idx * sizeof(u32)), value); +} + +/** + * mei_txe_out_data_read - read dword from the device buffer + *	at offset idx + * + * @dev: the device structure + * @idx: index in the device buffer + * 
+ * returns register value at index + */ +static u32 mei_txe_out_data_read(const struct mei_device *dev, +					unsigned long idx) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	return mei_txe_br_reg_read(hw, +		BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32))); +} + +/* Readiness */ + +/** + * mei_txe_readiness_set_host_rdy + * + * @dev: the device structure + */ +static void mei_txe_readiness_set_host_rdy(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	mei_txe_br_reg_write(hw, +		SICR_HOST_IPC_READINESS_REQ_REG, +		SICR_HOST_IPC_READINESS_HOST_RDY); +} + +/** + * mei_txe_readiness_clear + * + * @dev: the device structure + */ +static void mei_txe_readiness_clear(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG, +				SICR_HOST_IPC_READINESS_RDY_CLR); +} +/** + * mei_txe_readiness_get - Reads and returns + *	the HICR_SEC_IPC_READINESS register value + * + * @dev: the device structure + */ +static u32 mei_txe_readiness_get(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); +} + + +/** + * mei_txe_readiness_is_sec_rdy - check readiness + *  for HICR_SEC_IPC_READINESS_SEC_RDY + * + * @readiness - cached readiness state + */ +static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness) +{ +	return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY); +} + +/** + * mei_txe_hw_is_ready - check if the hw is ready + * + * @dev: the device structure + */ +static bool mei_txe_hw_is_ready(struct mei_device *dev) +{ +	u32 readiness =  mei_txe_readiness_get(dev); +	return mei_txe_readiness_is_sec_rdy(readiness); +} + +/** + * mei_txe_host_is_ready - check if the host is ready + * + * @dev: the device structure + */ +static inline bool mei_txe_host_is_ready(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); +	return 
!!(reg & HICR_SEC_IPC_READINESS_HOST_RDY); +} + +/** + * mei_txe_readiness_wait - wait till readiness settles + * + * @dev: the device structure + * + * returns 0 on success and -ETIME on timeout + */ +static int mei_txe_readiness_wait(struct mei_device *dev) +{ +	if (mei_txe_hw_is_ready(dev)) +		return 0; + +	mutex_unlock(&dev->device_lock); +	wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, +			msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT)); +	mutex_lock(&dev->device_lock); +	if (!dev->recvd_hw_ready) { +		dev_err(&dev->pdev->dev, "wait for readiness failed\n"); +		return -ETIME; +	} + +	dev->recvd_hw_ready = false; +	return 0; +} + +/** + *  mei_txe_hw_config - configure hardware at the start of the devices + * + * @dev: the device structure + * + * Configure hardware at the start of the device should be done only + *   once at the device probe time + */ +static void mei_txe_hw_config(struct mei_device *dev) +{ + +	struct mei_txe_hw *hw = to_txe_hw(dev); +	/* Doesn't change in runtime */ +	dev->hbuf_depth = PAYLOAD_SIZE / 4; + +	hw->aliveness = mei_txe_aliveness_get(dev); +	hw->readiness = mei_txe_readiness_get(dev); + +	dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n", +		hw->aliveness, hw->readiness); +} + + +/** + * mei_txe_write - writes a message to device. + * + * @dev: the device structure + * @header: header of message + * @buf: message buffer will be written + * returns 1 if success, 0 - otherwise. 
+ */ + +static int mei_txe_write(struct mei_device *dev, +		struct mei_msg_hdr *header, unsigned char *buf) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	unsigned long rem; +	unsigned long length; +	int slots = dev->hbuf_depth; +	u32 *reg_buf = (u32 *)buf; +	u32 dw_cnt; +	int i; + +	if (WARN_ON(!header || !buf)) +		return -EINVAL; + +	length = header->length; + +	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + +	dw_cnt = mei_data2slots(length); +	if (dw_cnt > slots) +		return -EMSGSIZE; + +	if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n")) +		return -EAGAIN; + +	/* Enable Input Ready Interrupt. */ +	mei_txe_input_ready_interrupt_enable(dev); + +	if (!mei_txe_is_input_ready(dev)) { +		struct mei_fw_status fw_status; +		mei_fw_status(dev, &fw_status); +		dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n", +			FW_STS_PRM(fw_status)); +		return -EAGAIN; +	} + +	mei_txe_input_payload_write(dev, 0, *((u32 *)header)); + +	for (i = 0; i < length / 4; i++) +		mei_txe_input_payload_write(dev, i + 1, reg_buf[i]); + +	rem = length & 0x3; +	if (rem > 0) { +		u32 reg = 0; +		memcpy(&reg, &buf[length - rem], rem); +		mei_txe_input_payload_write(dev, i + 1, reg); +	} + +	/* after each write the whole buffer is consumed */ +	hw->slots = 0; + +	/* Set Input-Doorbell */ +	mei_txe_input_doorbell_set(hw); + +	return 0; +} + +/** + * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * returns the PAYLOAD_SIZE - 4 + */ +static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) +{ +	return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr); +} + +/** + * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * returns always hbuf_depth + */ +static int mei_txe_hbuf_empty_slots(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	return hw->slots; +} + +/** + * mei_txe_count_full_read_slots - mimics the me device circular buffer + * + * 
@dev: the device structure + * + * returns always buffer size in dwords count + */ +static int mei_txe_count_full_read_slots(struct mei_device *dev) +{ +	/* read buffers has static size */ +	return  PAYLOAD_SIZE / 4; +} + +/** + * mei_txe_read_hdr - read message header which is always in 4 first bytes + * + * @dev: the device structure + * + * returns mei message header + */ + +static u32 mei_txe_read_hdr(const struct mei_device *dev) +{ +	return mei_txe_out_data_read(dev, 0); +} +/** + * mei_txe_read - reads a message from the txe device. + * + * @dev: the device structure + * @buf: message buffer will be written + * @len: message size will be read + * + * returns -EINVAL on error wrong argument and 0 on success + */ +static int mei_txe_read(struct mei_device *dev, +		unsigned char *buf, unsigned long len) +{ + +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 i; +	u32 *reg_buf = (u32 *)buf; +	u32 rem = len & 0x3; + +	if (WARN_ON(!buf || !len)) +		return -EINVAL; + +	dev_dbg(&dev->pdev->dev, +		"buffer-length = %lu buf[0]0x%08X\n", +		len, mei_txe_out_data_read(dev, 0)); + +	for (i = 0; i < len / 4; i++) { +		/* skip header: index starts from 1 */ +		u32 reg = mei_txe_out_data_read(dev, i + 1); +		dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg); +		*reg_buf++ = reg; +	} + +	if (rem) { +		u32 reg = mei_txe_out_data_read(dev, i + 1); +		memcpy(reg_buf, &reg, rem); +	} + +	mei_txe_output_ready_set(hw); +	return 0; +} + +/** + * mei_txe_hw_reset - resets host and fw. + * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. 
+ * + * returns 0 on success and < 0 in case of error + */ +static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); + +	u32 aliveness_req; +	/* +	 * read input doorbell to ensure consistency between  Bridge and SeC +	 * return value might be garbage return +	 */ +	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG); + +	aliveness_req = mei_txe_aliveness_req_get(dev); +	hw->aliveness = mei_txe_aliveness_get(dev); + +	/* Disable interrupts in this stage we will poll */ +	mei_txe_intr_disable(dev); + +	/* +	 * If Aliveness Request and Aliveness Response are not equal then +	 * wait for them to be equal +	 * Since we might have interrupts disabled - poll for it +	 */ +	if (aliveness_req != hw->aliveness) +		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) { +			dev_err(&dev->pdev->dev, +				"wait for aliveness settle failed ... bailing out\n"); +			return -EIO; +		} + +	/* +	 * If Aliveness Request and Aliveness Response are set then clear them +	 */ +	if (aliveness_req) { +		mei_txe_aliveness_set(dev, 0); +		if (mei_txe_aliveness_poll(dev, 0) < 0) { +			dev_err(&dev->pdev->dev, +				"wait for aliveness failed ... bailing out\n"); +			return -EIO; +		} +	} + +	/* +	 * Set readiness RDY_CLR bit +	 */ +	mei_txe_readiness_clear(dev); + +	return 0; +} + +/** + * mei_txe_hw_start - start the hardware after reset + * + * @dev: the device structure + * + * returns 0 on success and < 0 in case of error + */ +static int mei_txe_hw_start(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	int ret; + +	u32 hisr; + +	/* bring back interrupts */ +	mei_txe_intr_enable(dev); + +	ret = mei_txe_readiness_wait(dev); +	if (ret < 0) { +		dev_err(&dev->pdev->dev, "waiting for readiness failed\n"); +		return ret; +	} + +	/* +	 * If HISR.INT2_STS interrupt status bit is set then clear it. 
+	 */ +	hisr = mei_txe_br_reg_read(hw, HISR_REG); +	if (hisr & HISR_INT_2_STS) +		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS); + +	/* Clear the interrupt cause of OutputDoorbell */ +	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause); + +	ret = mei_txe_aliveness_set_sync(dev, 1); +	if (ret < 0) { +		dev_err(&dev->pdev->dev, "wait for aliveness failed ... bailing out\n"); +		return ret; +	} + +	/* enable input ready interrupts: +	 * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK +	 */ +	mei_txe_input_ready_interrupt_enable(dev); + + +	/*  Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */ +	mei_txe_output_ready_set(hw); + +	/* Set bit SICR_HOST_IPC_READINESS.HOST_RDY +	 */ +	mei_txe_readiness_set_host_rdy(dev); + +	return 0; +} + +/** + * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into + *  single bit mask and acknowledge the interrupts + * + * @dev: the device structure + * @do_ack: acknowledge interrupts + */ +static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 hisr; +	u32 hhisr; +	u32 ipc_isr; +	u32 aliveness; +	bool generated; + +	/* read interrupt registers */ +	hhisr = mei_txe_br_reg_read(hw, HHISR_REG); +	generated = (hhisr & IPC_HHIER_MSK); +	if (!generated) +		goto out; + +	hisr = mei_txe_br_reg_read(hw, HISR_REG); + +	aliveness = mei_txe_aliveness_get(dev); +	if (hhisr & IPC_HHIER_SEC && aliveness) +		ipc_isr = mei_txe_sec_reg_read_silent(hw, +				SEC_IPC_HOST_INT_STATUS_REG); +	else +		ipc_isr = 0; + +	generated = generated || +		(hisr & HISR_INT_STS_MSK) || +		(ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING); + +	if (generated && do_ack) { +		/* Save the interrupt causes */ +		hw->intr_cause |= hisr & HISR_INT_STS_MSK; +		if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY) +			hw->intr_cause |= TXE_INTR_IN_READY; + + +		mei_txe_intr_disable(dev); +		/* Clear the interrupts in hierarchy: +		 * IPC and Bridge, than the High Level */ +		
mei_txe_sec_reg_write_silent(hw, +			SEC_IPC_HOST_INT_STATUS_REG, ipc_isr); +		mei_txe_br_reg_write(hw, HISR_REG, hisr); +		mei_txe_br_reg_write(hw, HHISR_REG, hhisr); +	} + +out: +	return generated; +} + +/** + * mei_txe_irq_quick_handler - The ISR of the MEI device + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + */ +irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id) +{ +	struct mei_device *dev = dev_id; + +	if (mei_txe_check_and_ack_intrs(dev, true)) +		return IRQ_WAKE_THREAD; +	return IRQ_NONE; +} + + +/** + * mei_txe_irq_thread_handler - txe interrupt thread + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + * + */ +irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) +{ +	struct mei_device *dev = (struct mei_device *) dev_id; +	struct mei_txe_hw *hw = to_txe_hw(dev); +	struct mei_cl_cb complete_list; +	s32 slots; +	int rets = 0; + +	dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n", +		mei_txe_br_reg_read(hw, HHISR_REG), +		mei_txe_br_reg_read(hw, HISR_REG), +		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG)); + + +	/* initialize our complete list */ +	mutex_lock(&dev->device_lock); +	mei_io_list_init(&complete_list); + +	if (pci_dev_msi_enabled(dev->pdev)) +		mei_txe_check_and_ack_intrs(dev, true); + +	/* show irq events */ +	mei_txe_pending_interrupts(dev); + +	hw->aliveness = mei_txe_aliveness_get(dev); +	hw->readiness = mei_txe_readiness_get(dev); + +	/* Readiness: +	 * Detection of TXE driver going through reset +	 * or TXE driver resetting the HECI interface. 
+	 */ +	if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) { +		dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n"); + +		/* Check if SeC is going through reset */ +		if (mei_txe_readiness_is_sec_rdy(hw->readiness)) { +			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); +			dev->recvd_hw_ready = true; +		} else { +			dev->recvd_hw_ready = false; +			if (dev->dev_state != MEI_DEV_RESETTING) { + +				dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); +				schedule_work(&dev->reset_work); +				goto end; + +			} +		} +		wake_up(&dev->wait_hw_ready); +	} + +	/************************************************************/ +	/* Check interrupt cause: +	 * Aliveness: Detection of SeC acknowledge of host request that +	 * it remain alive or host cancellation of that request. +	 */ + +	if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) { +		/* Clear the interrupt cause */ +		dev_dbg(&dev->pdev->dev, +			"Aliveness Interrupt: Status: %d\n", hw->aliveness); +		dev->pg_event = MEI_PG_EVENT_RECEIVED; +		if (waitqueue_active(&hw->wait_aliveness_resp)) +			wake_up(&hw->wait_aliveness_resp); +	} + + +	/* Output Doorbell: +	 * Detection of SeC having sent output to host +	 */ +	slots = mei_count_full_read_slots(dev); +	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) { +		/* Read from TXE */ +		rets = mei_irq_read_handler(dev, &complete_list, &slots); +		if (rets && dev->dev_state != MEI_DEV_RESETTING) { +			dev_err(&dev->pdev->dev, +				"mei_irq_read_handler ret = %d.\n", rets); + +			schedule_work(&dev->reset_work); +			goto end; +		} +	} +	/* Input Ready: Detection if host can write to SeC */ +	if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) { +		dev->hbuf_is_ready = true; +		hw->slots = dev->hbuf_depth; +	} + +	if (hw->aliveness && dev->hbuf_is_ready) { +		/* get the real register value */ +		dev->hbuf_is_ready = mei_hbuf_is_ready(dev); +		rets = mei_irq_write_handler(dev, &complete_list); +		if 
(rets && rets != -EMSGSIZE) +			dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n", +				rets); +		dev->hbuf_is_ready = mei_hbuf_is_ready(dev); +	} + +	mei_irq_compl_handler(dev, &complete_list); + +end: +	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); + +	mutex_unlock(&dev->device_lock); + +	mei_enable_interrupts(dev); +	return IRQ_HANDLED; +} + + +/** + * mei_txe_fw_status - retrieve fw status from the pci config space + * + * @dev: the device structure + * @fw_status: fw status registers storage + * + * returns: 0 on success an error code otherwise + */ +static int mei_txe_fw_status(struct mei_device *dev, +			     struct mei_fw_status *fw_status) +{ +	const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1}; +	int i; + +	if (!fw_status) +		return -EINVAL; + +	fw_status->count = 2; + +	for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { +		int ret; +		ret = pci_read_config_dword(dev->pdev, +				pci_cfg_reg[i], &fw_status->status[i]); +		if (ret) +			return ret; +	} + +	return 0; +} + +static const struct mei_hw_ops mei_txe_hw_ops = { + +	.fw_status = mei_txe_fw_status, +	.host_is_ready = mei_txe_host_is_ready, + +	.pg_state = mei_txe_pg_state, + +	.hw_is_ready = mei_txe_hw_is_ready, +	.hw_reset = mei_txe_hw_reset, +	.hw_config = mei_txe_hw_config, +	.hw_start = mei_txe_hw_start, + +	.pg_is_enabled = mei_txe_pg_is_enabled, + +	.intr_clear = mei_txe_intr_clear, +	.intr_enable = mei_txe_intr_enable, +	.intr_disable = mei_txe_intr_disable, + +	.hbuf_free_slots = mei_txe_hbuf_empty_slots, +	.hbuf_is_ready = mei_txe_is_input_ready, +	.hbuf_max_len = mei_txe_hbuf_max_len, + +	.write = mei_txe_write, + +	.rdbuf_full_slots = mei_txe_count_full_read_slots, +	.read_hdr = mei_txe_read_hdr, + +	.read = mei_txe_read, + +}; + +#define MEI_CFG_TXE_FW_STS                            \ +	.fw_status.count = 2,                         \ +	.fw_status.status[0] = PCI_CFG_TXE_FW_STS0,   \ +	.fw_status.status[1] = 
PCI_CFG_TXE_FW_STS1 + +const struct mei_cfg mei_txe_cfg = { +	MEI_CFG_TXE_FW_STS, +}; + + +/** + * mei_txe_dev_init - allocates and initializes txe hardware specific structure + * + * @pdev - pci device + * @cfg - per device generation config + * + * returns struct mei_device * on success or NULL; + * + */ +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, +				    const struct mei_cfg *cfg) +{ +	struct mei_device *dev; +	struct mei_txe_hw *hw; + +	dev = kzalloc(sizeof(struct mei_device) + +			 sizeof(struct mei_txe_hw), GFP_KERNEL); +	if (!dev) +		return NULL; + +	mei_device_init(dev, cfg); + +	hw = to_txe_hw(dev); + +	init_waitqueue_head(&hw->wait_aliveness_resp); + +	dev->ops = &mei_txe_hw_ops; + +	dev->pdev = pdev; +	return dev; +} + +/** + * mei_txe_setup_satt2 - SATT2 configuration for DMA support. + * + * @dev:   the device structure + * @addr:  physical address start of the range + * @range: physical range size + */ +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); + +	u32 lo32 = lower_32_bits(addr); +	u32 hi32 = upper_32_bits(addr); +	u32 ctrl; + +	/* SATT is limited to 36 Bits */ +	if (hi32 & ~0xF) +		return -EINVAL; + +	/* SATT has to be 16Byte aligned */ +	if (lo32 & 0xF) +		return -EINVAL; + +	/* SATT range has to be 4Bytes aligned */ +	if (range & 0x4) +		return -EINVAL; + +	/* SATT is limited to 32 MB range*/ +	if (range > SATT_RANGE_MAX) +		return -EINVAL; + +	ctrl = SATT2_CTRL_VALID_MSK; +	ctrl |= hi32  << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT; + +	mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range); +	mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32); +	mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl); +	dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n", +		range, lo32, ctrl); + +	return 0; +} diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h new file mode 100644 index 00000000000..e244af79167 --- 
/dev/null +++ b/drivers/misc/mei/hw-txe.h @@ -0,0 +1,77 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_HW_TXE_H_ +#define _MEI_HW_TXE_H_ + +#include <linux/irqreturn.h> + +#include "hw.h" +#include "hw-txe-regs.h" + +#define MEI_TXI_RPM_TIMEOUT    500 /* ms */ + +/* Flatten Hierarchy interrupt cause */ +#define TXE_INTR_READINESS_BIT  0 /* HISR_INT_0_STS */ +#define TXE_INTR_READINESS      HISR_INT_0_STS +#define TXE_INTR_ALIVENESS_BIT  1 /* HISR_INT_1_STS */ +#define TXE_INTR_ALIVENESS      HISR_INT_1_STS +#define TXE_INTR_OUT_DB_BIT     2 /* HISR_INT_2_STS */ +#define TXE_INTR_OUT_DB         HISR_INT_2_STS +#define TXE_INTR_IN_READY_BIT   8 /* beyond HISR */ +#define TXE_INTR_IN_READY       BIT(8) + +/** + * struct mei_txe_hw - txe hardware specifics + * + * @mem_addr:            SeC and BRIDGE bars + * @aliveness:           aliveness (power gating) state of the hardware + * @readiness:           readiness state of the hardware + * @wait_aliveness_resp: aliveness wait queue + * @intr_cause:          translated interrupt cause + */ +struct mei_txe_hw { +	void __iomem *mem_addr[NUM_OF_MEM_BARS]; +	u32 aliveness; +	u32 readiness; +	u32 slots; + +	wait_queue_head_t wait_aliveness_resp; + +	unsigned long intr_cause; +}; + +#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw) + +static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw) +{ +	return container_of((void *)hw, struct mei_device, hw); +} + 
+extern const struct mei_cfg mei_txe_cfg; + +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, +	const struct mei_cfg *cfg); + +irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); + +int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req); + +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range); + + +#endif /* _MEI_HW_TXE_H_ */ diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index cb2f556b425..dd448e58cc8 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -22,7 +22,7 @@  /*   * Timeouts in Seconds   */ -#define MEI_INTEROP_TIMEOUT         7  /* Timeout on ready message */ +#define MEI_HW_READY_TIMEOUT        2  /* Timeout on ready message */  #define MEI_CONNECT_TIMEOUT         3  /* HPS: at least 2 seconds */  #define MEI_CL_CONNECT_TIMEOUT     15  /* HPS: Client Connect Timeout */ @@ -31,13 +31,20 @@  #define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */  #define MEI_IAMTHIF_READ_TIMER     10  /* HPS */ +#define MEI_PGI_TIMEOUT            1  /* PG Isolation time response 1 sec */ +#define MEI_HBM_TIMEOUT            1   /* 1 second */  /*   * MEI Version   */ -#define HBM_MINOR_VERSION                   0 +#define HBM_MINOR_VERSION                   1  #define HBM_MAJOR_VERSION                   1 -#define HBM_TIMEOUT                         1	/* 1 second */ + +/* + * MEI version with PGI support + */ +#define HBM_MINOR_VERSION_PGI               1 +#define HBM_MAJOR_VERSION_PGI               1  /* Host bus message command opcode */  #define MEI_HBM_CMD_OP_MSK                  0x7f @@ -69,6 +76,11 @@  #define MEI_FLOW_CONTROL_CMD                0x08 +#define MEI_PG_ISOLATION_ENTRY_REQ_CMD      0x0a +#define MEI_PG_ISOLATION_ENTRY_RES_CMD      0x8a +#define MEI_PG_ISOLATION_EXIT_REQ_CMD       0x0b +#define MEI_PG_ISOLATION_EXIT_RES_CMD       0x8b +  /*   * MEI Stop Reason   * used by hbm_host_stop_request.reason @@ -89,19 +101,19 @@ 
enum mei_stop_reason_types {   * Client Connect Status   * used by hbm_client_connect_response.status   */ -enum client_connect_status_types { -	CCS_SUCCESS = 0x00, -	CCS_NOT_FOUND = 0x01, -	CCS_ALREADY_STARTED = 0x02, -	CCS_OUT_OF_RESOURCES = 0x03, -	CCS_MESSAGE_SMALL = 0x04 +enum mei_cl_connect_status { +	MEI_CL_CONN_SUCCESS          = 0x00, +	MEI_CL_CONN_NOT_FOUND        = 0x01, +	MEI_CL_CONN_ALREADY_STARTED  = 0x02, +	MEI_CL_CONN_OUT_OF_RESOURCES = 0x03, +	MEI_CL_CONN_MESSAGE_SMALL    = 0x04  };  /*   * Client Disconnect Status   */ -enum client_disconnect_status_types { -	CDS_SUCCESS = 0x00 +enum  mei_cl_disconnect_status { +	MEI_CL_DISCONN_SUCCESS = 0x00  };  /* @@ -111,7 +123,8 @@ struct mei_msg_hdr {  	u32 me_addr:8;  	u32 host_addr:8;  	u32 length:9; -	u32 reserved:6; +	u32 reserved:5; +	u32 internal:1;  	u32 msg_complete:1;  } __packed; @@ -207,6 +220,17 @@ struct hbm_props_response {  } __packed;  /** + * struct hbm_power_gate - power gate request/response + * + * @hbm_cmd - bus message command header + * @reserved[3] + */ +struct hbm_power_gate { +	u8 hbm_cmd; +	u8 reserved[3]; +} __packed; + +/**   * struct hbm_client_connect_request - connect/disconnect request   *   * @hbm_cmd - bus message command header diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 92c73118b13..00692922248 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -43,33 +43,120 @@ const char *mei_dev_state_str(int state)  #undef MEI_DEV_STATE  } -void mei_device_init(struct mei_device *dev) + +/** + * mei_cancel_work. 
Cancel mei background jobs + * + * @dev: the device structure + * + * returns 0 on success or < 0 if the reset hasn't succeeded + */ +void mei_cancel_work(struct mei_device *dev)  { -	/* setup our list array */ -	INIT_LIST_HEAD(&dev->file_list); -	INIT_LIST_HEAD(&dev->device_list); -	mutex_init(&dev->device_lock); -	init_waitqueue_head(&dev->wait_hw_ready); -	init_waitqueue_head(&dev->wait_recvd_msg); -	init_waitqueue_head(&dev->wait_stop_wd); -	dev->dev_state = MEI_DEV_INITIALIZING; +	cancel_work_sync(&dev->init_work); +	cancel_work_sync(&dev->reset_work); -	mei_io_list_init(&dev->read_list); -	mei_io_list_init(&dev->write_list); -	mei_io_list_init(&dev->write_waiting_list); -	mei_io_list_init(&dev->ctrl_wr_list); -	mei_io_list_init(&dev->ctrl_rd_list); +	cancel_delayed_work(&dev->timer_work); +} +EXPORT_SYMBOL_GPL(mei_cancel_work); -	INIT_DELAYED_WORK(&dev->timer_work, mei_timer); -	INIT_WORK(&dev->init_work, mei_host_client_init); +/** + * mei_reset - resets host and fw. + * + * @dev: the device structure + */ +int mei_reset(struct mei_device *dev) +{ +	enum mei_dev_state state = dev->dev_state; +	bool interrupts_enabled; +	int ret; -	INIT_LIST_HEAD(&dev->wd_cl.link); -	INIT_LIST_HEAD(&dev->iamthif_cl.link); -	mei_io_list_init(&dev->amthif_cmd_list); -	mei_io_list_init(&dev->amthif_rd_complete_list); +	if (state != MEI_DEV_INITIALIZING && +	    state != MEI_DEV_DISABLED && +	    state != MEI_DEV_POWER_DOWN && +	    state != MEI_DEV_POWER_UP) { +		struct mei_fw_status fw_status; +		mei_fw_status(dev, &fw_status); +		dev_warn(&dev->pdev->dev, +			"unexpected reset: dev_state = %s " FW_STS_FMT "\n", +			mei_dev_state_str(state), FW_STS_PRM(fw_status)); +	} + +	/* we're already in reset, cancel the init timer +	 * if the reset was called due the hbm protocol error +	 * we need to call it before hw start +	 * so the hbm watchdog won't kick in +	 */ +	mei_hbm_idle(dev); +	/* enter reset flow */ +	interrupts_enabled = state != MEI_DEV_POWER_DOWN; +	dev->dev_state = 
MEI_DEV_RESETTING; + +	dev->reset_count++; +	if (dev->reset_count > MEI_MAX_CONSEC_RESET) { +		dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); +		dev->dev_state = MEI_DEV_DISABLED; +		return -ENODEV; +	} + +	ret = mei_hw_reset(dev, interrupts_enabled); +	/* fall through and remove the sw state even if hw reset has failed */ + +	/* no need to clean up software state in case of power up */ +	if (state != MEI_DEV_INITIALIZING && +	    state != MEI_DEV_POWER_UP) { + +		/* remove all waiting requests */ +		mei_cl_all_write_clear(dev); + +		mei_cl_all_disconnect(dev); + +		/* wake up all readers and writers so they can be interrupted */ +		mei_cl_all_wakeup(dev); + +		/* remove entry if already in list */ +		dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); +		mei_cl_unlink(&dev->wd_cl); +		mei_cl_unlink(&dev->iamthif_cl); +		mei_amthif_reset_params(dev); +	} + +	mei_hbm_reset(dev); + +	dev->rd_msg_hdr = 0; +	dev->wd_pending = false; + +	if (ret) { +		dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); +		return ret; +	} + +	if (state == MEI_DEV_POWER_DOWN) { +		dev_dbg(&dev->pdev->dev, "powering down: end of reset\n"); +		dev->dev_state = MEI_DEV_DISABLED; +		return 0; +	} + +	ret = mei_hw_start(dev); +	if (ret) { +		dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); +		return ret; +	} + +	dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); + +	dev->dev_state = MEI_DEV_INIT_CLIENTS; +	ret = mei_hbm_start_req(dev); +	if (ret) { +		dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); +		dev->dev_state = MEI_DEV_RESETTING; +		return ret; +	} + +	return 0;  } -EXPORT_SYMBOL_GPL(mei_device_init); +EXPORT_SYMBOL_GPL(mei_reset);  /**   * mei_start - initializes host and fw to start work. 
@@ -80,16 +167,32 @@ EXPORT_SYMBOL_GPL(mei_device_init);   */  int mei_start(struct mei_device *dev)  { +	int ret;  	mutex_lock(&dev->device_lock); -	/* acknowledge interrupt and stop interupts */ +	/* acknowledge interrupt and stop interrupts */  	mei_clear_interrupts(dev);  	mei_hw_config(dev);  	dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); -	mei_reset(dev, 1); +	dev->reset_count = 0; +	do { +		dev->dev_state = MEI_DEV_INITIALIZING; +		ret = mei_reset(dev); + +		if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { +			dev_err(&dev->pdev->dev, "reset failed ret = %d", ret); +			goto err; +		} +	} while (ret); + +	/* we cannot start the device w/o hbm start message completed */ +	if (dev->dev_state == MEI_DEV_DISABLED) { +		dev_err(&dev->pdev->dev, "reset failed"); +		goto err; +	}  	if (mei_hbm_start_wait(dev)) {  		dev_err(&dev->pdev->dev, "HBM haven't started"); @@ -124,103 +227,79 @@ err:  EXPORT_SYMBOL_GPL(mei_start);  /** - * mei_reset - resets host and fw. + * mei_restart - restart device after suspend   *   * @dev: the device structure - * @interrupts_enabled: if interrupt should be enabled after reset. 
+ * + * returns 0 on success or -ENODEV if the restart hasn't succeeded   */ -void mei_reset(struct mei_device *dev, int interrupts_enabled) +int mei_restart(struct mei_device *dev)  { -	bool unexpected; -	int ret; +	int err; -	unexpected = (dev->dev_state != MEI_DEV_INITIALIZING && -			dev->dev_state != MEI_DEV_DISABLED && -			dev->dev_state != MEI_DEV_POWER_DOWN && -			dev->dev_state != MEI_DEV_POWER_UP); +	mutex_lock(&dev->device_lock); -	ret = mei_hw_reset(dev, interrupts_enabled); -	if (ret) { -		dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n"); -		interrupts_enabled = false; -		dev->dev_state = MEI_DEV_DISABLED; -	} +	mei_clear_interrupts(dev); -	dev->hbm_state = MEI_HBM_IDLE; +	dev->dev_state = MEI_DEV_POWER_UP; +	dev->reset_count = 0; -	if (dev->dev_state != MEI_DEV_INITIALIZING && -	    dev->dev_state != MEI_DEV_POWER_UP) { -		if (dev->dev_state != MEI_DEV_DISABLED && -		    dev->dev_state != MEI_DEV_POWER_DOWN) -			dev->dev_state = MEI_DEV_RESETTING; +	err = mei_reset(dev); -		/* remove all waiting requests */ -		mei_cl_all_write_clear(dev); +	mutex_unlock(&dev->device_lock); -		mei_cl_all_disconnect(dev); +	if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { +		dev_err(&dev->pdev->dev, "device disabled = %d\n", err); +		return -ENODEV; +	} -		/* wake up all readings so they can be interrupted */ -		mei_cl_all_wakeup(dev); +	/* try to start again */ +	if (err) +		schedule_work(&dev->reset_work); -		/* remove entry if already in list */ -		dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); -		mei_cl_unlink(&dev->wd_cl); -		if (dev->open_handle_count > 0) -			dev->open_handle_count--; -		mei_cl_unlink(&dev->iamthif_cl); -		if (dev->open_handle_count > 0) -			dev->open_handle_count--; -		mei_amthif_reset_params(dev); -		memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); -	} +	return 0; +} +EXPORT_SYMBOL_GPL(mei_restart); -	dev->me_clients_num = 0; -	dev->rd_msg_hdr = 0; -	dev->wd_pending = false; +static 
void mei_reset_work(struct work_struct *work) +{ +	struct mei_device *dev = +		container_of(work, struct mei_device,  reset_work); +	int ret; -	if (unexpected) -		dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", -			 mei_dev_state_str(dev->dev_state)); +	mutex_lock(&dev->device_lock); -	if (!interrupts_enabled) { -		dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n"); -		return; -	} +	ret = mei_reset(dev); -	ret = mei_hw_start(dev); -	if (ret) { -		dev_err(&dev->pdev->dev, "hw_start failed disabling the device\n"); -		dev->dev_state = MEI_DEV_DISABLED; +	mutex_unlock(&dev->device_lock); + +	if (dev->dev_state == MEI_DEV_DISABLED) { +		dev_err(&dev->pdev->dev, "device disabled = %d\n", ret);  		return;  	} -	dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); -	/* link is established * start sending messages.  */ - -	dev->dev_state = MEI_DEV_INIT_CLIENTS; - -	mei_hbm_start_req(dev); - +	/* retry reset in case of failure */ +	if (ret) +		schedule_work(&dev->reset_work);  } -EXPORT_SYMBOL_GPL(mei_reset);  void mei_stop(struct mei_device *dev)  {  	dev_dbg(&dev->pdev->dev, "stopping the device.\n"); -	flush_scheduled_work(); +	mei_cancel_work(dev); -	mutex_lock(&dev->device_lock); +	mei_nfc_host_exit(dev); -	cancel_delayed_work(&dev->timer_work); +	mei_cl_bus_remove_devices(dev); -	mei_wd_stop(dev); +	mutex_lock(&dev->device_lock); -	mei_nfc_host_exit(); +	mei_wd_stop(dev);  	dev->dev_state = MEI_DEV_POWER_DOWN; -	mei_reset(dev, 0); +	mei_reset(dev);  	mutex_unlock(&dev->device_lock); @@ -228,5 +307,89 @@ void mei_stop(struct mei_device *dev)  }  EXPORT_SYMBOL_GPL(mei_stop); +/** + * mei_write_is_idle - check if the write queues are idle + * + * @dev: the device structure + * + * returns true of there is no pending write + */ +bool mei_write_is_idle(struct mei_device *dev) +{ +	bool idle = (dev->dev_state == MEI_DEV_ENABLED && +		list_empty(&dev->ctrl_wr_list.list) && +		list_empty(&dev->write_list.list)); + +	
dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", +		idle, +		mei_dev_state_str(dev->dev_state), +		list_empty(&dev->ctrl_wr_list.list), +		list_empty(&dev->write_list.list)); + +	return idle; +} +EXPORT_SYMBOL_GPL(mei_write_is_idle); + +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) +{ +	int i; +	const struct mei_fw_status *fw_src = &dev->cfg->fw_status; + +	if (!fw_status) +		return -EINVAL; + +	fw_status->count = fw_src->count; +	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { +		int ret; +		ret = pci_read_config_dword(dev->pdev, +			fw_src->status[i], &fw_status->status[i]); +		if (ret) +			return ret; +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(mei_fw_status); +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg) +{ +	/* setup our list array */ +	INIT_LIST_HEAD(&dev->file_list); +	INIT_LIST_HEAD(&dev->device_list); +	mutex_init(&dev->device_lock); +	init_waitqueue_head(&dev->wait_hw_ready); +	init_waitqueue_head(&dev->wait_pg); +	init_waitqueue_head(&dev->wait_recvd_msg); +	init_waitqueue_head(&dev->wait_stop_wd); +	dev->dev_state = MEI_DEV_INITIALIZING; +	dev->reset_count = 0; + +	mei_io_list_init(&dev->read_list); +	mei_io_list_init(&dev->write_list); +	mei_io_list_init(&dev->write_waiting_list); +	mei_io_list_init(&dev->ctrl_wr_list); +	mei_io_list_init(&dev->ctrl_rd_list); + +	INIT_DELAYED_WORK(&dev->timer_work, mei_timer); +	INIT_WORK(&dev->init_work, mei_host_client_init); +	INIT_WORK(&dev->reset_work, mei_reset_work); + +	INIT_LIST_HEAD(&dev->wd_cl.link); +	INIT_LIST_HEAD(&dev->iamthif_cl.link); +	mei_io_list_init(&dev->amthif_cmd_list); +	mei_io_list_init(&dev->amthif_rd_complete_list); + +	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); +	dev->open_handle_count = 0; + +	/* +	 * Reserving the first client ID +	 * 0: Reserved for MEI Bus Message communications +	 */ +	bitmap_set(dev->host_clients_map, 0, 1); + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	dev->cfg      = 
cfg; +} +EXPORT_SYMBOL_GPL(mei_device_init); diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 4b59cb742de..4e3cba6da3f 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -26,12 +26,11 @@  #include "mei_dev.h"  #include "hbm.h" -#include "hw-me.h"  #include "client.h"  /** - * mei_irq_compl_handler - dispatch complete handelers + * mei_irq_compl_handler - dispatch complete handlers   *	for the completed callbacks   *   * @dev - mei device @@ -113,13 +112,13 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,  		if (cb->response_buffer.size == 0 ||  		    cb->response_buffer.data == NULL) { -			dev_err(&dev->pdev->dev, "response buffer is not allocated.\n"); +			cl_err(dev, cl, "response buffer is not allocated.\n");  			list_del(&cb->list);  			return -ENOMEM;  		}  		if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) { -			dev_dbg(&dev->pdev->dev, "message overflow. size %d len %d idx %ld\n", +			cl_dbg(dev, cl, "message overflow. 
size %d len %d idx %ld\n",  				cb->response_buffer.size,  				mei_hdr->length, cb->buf_idx);  			buffer = krealloc(cb->response_buffer.data, @@ -127,7 +126,7 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,  					  GFP_KERNEL);  			if (!buffer) { -				dev_err(&dev->pdev->dev, "allocation failed.\n"); +				cl_err(dev, cl, "allocation failed.\n");  				list_del(&cb->list);  				return -ENOMEM;  			} @@ -143,9 +142,7 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,  		if (mei_hdr->msg_complete) {  			cl->status = 0;  			list_del(&cb->list); -			dev_dbg(&dev->pdev->dev, "completed read H cl = %d, ME cl = %d, length = %lu\n", -				cl->host_client_id, -				cl->me_client_id, +			cl_dbg(dev, cl, "completed read length = %lu\n",  				cb->buf_idx);  			list_add_tail(&cb->list, &complete_list->list);  		} @@ -163,29 +160,63 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,  }  /** + * mei_cl_irq_disconnect_rsp - send disconnection response message + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, +				     struct mei_cl_cb *cmpl_list) +{ +	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; +	int ret; + +	slots = mei_hbuf_empty_slots(dev); +	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response)); + +	if (slots < msg_slots) +		return -EMSGSIZE; + +	ret = mei_hbm_cl_disconnect_rsp(dev, cl); + +	cl->state = MEI_FILE_DISCONNECTED; +	cl->status = 0; +	list_del(&cb->list); +	mei_io_cb_free(cb); + +	return ret; +} + + + +/**   * mei_cl_irq_close - processes close related operation from   *	interrupt thread context - send disconnect request   *   * @cl: client   * @cb: callback block. - * @slots: free slots.   * @cmpl_list: complete list.   *   * returns 0, OK; otherwise, error.   
*/  static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, -			s32 *slots, struct mei_cl_cb *cmpl_list) +			    struct mei_cl_cb *cmpl_list)  {  	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; -	u32 msg_slots = -		mei_data2slots(sizeof(struct hbm_client_connect_request)); +	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); +	slots = mei_hbuf_empty_slots(dev); -	if (*slots < msg_slots) +	if (slots < msg_slots)  		return -EMSGSIZE; -	*slots -= msg_slots; -  	if (mei_hbm_cl_disconnect_req(dev, cl)) {  		cl->status = 0;  		cb->buf_idx = 0; @@ -209,32 +240,32 @@ static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,   *   * @cl: client   * @cb: callback block. - * @slots: free slots.   * @cmpl_list: complete list.   *   * returns 0, OK; otherwise, error.   */  static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, -			   s32 *slots, struct mei_cl_cb *cmpl_list) +			   struct mei_cl_cb *cmpl_list)  {  	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; +	int ret; -	u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); +	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); +	slots = mei_hbuf_empty_slots(dev); -	if (*slots < msg_slots) { -		/* return the cancel routine */ -		list_del(&cb->list); +	if (slots < msg_slots)  		return -EMSGSIZE; -	} - -	*slots -= msg_slots; -	if (mei_hbm_cl_flow_control_req(dev, cl)) { -		cl->status = -ENODEV; +	ret = mei_hbm_cl_flow_control_req(dev, cl); +	if (ret) { +		cl->status = ret;  		cb->buf_idx = 0;  		list_move_tail(&cb->list, &cmpl_list->list); -		return -ENODEV; +		return ret;  	} +  	list_move_tail(&cb->list, &dev->read_list.list);  	return 0; @@ -242,39 +273,39 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,  /** - * mei_cl_irq_ioctl - processes client ioctl related operation from the - *	interrupt thread context -   send connection request + * mei_cl_irq_connect - send connect request in irq_thread 
context   *   * @cl: client   * @cb: callback block. - * @slots: free slots.   * @cmpl_list: complete list.   *   * returns 0, OK; otherwise, error.   */ -static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb, -			   s32 *slots, struct mei_cl_cb *cmpl_list) +static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, +			      struct mei_cl_cb *cmpl_list)  {  	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; +	int ret; -	u32 msg_slots = -		mei_data2slots(sizeof(struct hbm_client_connect_request)); +	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); +	slots = mei_hbuf_empty_slots(dev); -	if (*slots < msg_slots) { -		/* return the cancel routine */ -		list_del(&cb->list); -		return -EMSGSIZE; -	} +	if (mei_cl_is_other_connecting(cl)) +		return 0; -	*slots -=  msg_slots; +	if (slots < msg_slots) +		return -EMSGSIZE;  	cl->state = MEI_FILE_CONNECTING; -	if (mei_hbm_cl_connect_req(dev, cl)) { -		cl->status = -ENODEV; +	ret = mei_hbm_cl_connect_req(dev, cl); +	if (ret) { +		cl->status = ret;  		cb->buf_idx = 0;  		list_del(&cb->list); -		return -ENODEV; +		return ret;  	}  	list_move_tail(&cb->list, &dev->ctrl_rd_list.list); @@ -297,13 +328,11 @@ int mei_irq_read_handler(struct mei_device *dev,  		struct mei_cl_cb *cmpl_list, s32 *slots)  {  	struct mei_msg_hdr *mei_hdr; -	struct mei_cl *cl_pos = NULL; -	struct mei_cl *cl_next = NULL; -	int ret = 0; +	struct mei_cl *cl; +	int ret;  	if (!dev->rd_msg_hdr) {  		dev->rd_msg_hdr = mei_read_hdr(dev); -		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);  		(*slots)--;  		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);  	} @@ -311,61 +340,67 @@ int mei_irq_read_handler(struct mei_device *dev,  	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));  	if (mei_hdr->reserved || !dev->rd_msg_hdr) { -		dev_dbg(&dev->pdev->dev, "corrupted message header.\n"); +		dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n", +				dev->rd_msg_hdr);  		ret = 
-EBADMSG;  		goto end;  	} -	if (mei_hdr->host_addr || mei_hdr->me_addr) { -		list_for_each_entry_safe(cl_pos, cl_next, -					&dev->file_list, link) { -			dev_dbg(&dev->pdev->dev, -					"list_for_each_entry_safe read host" -					" client = %d, ME client = %d\n", -					cl_pos->host_client_id, -					cl_pos->me_client_id); -			if (mei_cl_hbm_equal(cl_pos, mei_hdr)) -				break; -		} +	if (mei_slots2data(*slots) < mei_hdr->length) { +		dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", +				*slots); +		/* we can't read the message */ +		ret = -ENODATA; +		goto end; +	} -		if (&cl_pos->link == &dev->file_list) { -			dev_dbg(&dev->pdev->dev, "corrupted message header\n"); -			ret = -EBADMSG; +	/*  HBM message */ +	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { +		ret = mei_hbm_dispatch(dev, mei_hdr); +		if (ret) { +			dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n", +					ret);  			goto end;  		} +		goto reset_slots;  	} -	if (((*slots) * sizeof(u32)) < mei_hdr->length) { -		dev_err(&dev->pdev->dev, -				"we can't read the message slots =%08x.\n", -				*slots); -		/* we can't read the message */ -		ret = -ERANGE; -		goto end; + +	/* find recipient cl */ +	list_for_each_entry(cl, &dev->file_list, link) { +		if (mei_cl_hbm_equal(cl, mei_hdr)) { +			cl_dbg(dev, cl, "got a message\n"); +			break; +		}  	} -	/* decide where to read the message too */ -	if (!mei_hdr->host_addr) { -		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n"); -		mei_hbm_dispatch(dev, mei_hdr); -		dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n"); -	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && -		   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) && -		   (dev->iamthif_state == MEI_IAMTHIF_READING)) { +	/* if no recipient cl was found we assume corrupted header */ +	if (&cl->link == &dev->file_list) { +		dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n", +				dev->rd_msg_hdr); +		ret = -EBADMSG; +		
goto end; +	} -		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n"); -		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); +	if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && +	    MEI_FILE_CONNECTED == dev->iamthif_cl.state && +	    dev->iamthif_state == MEI_IAMTHIF_READING) {  		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); -		if (ret) +		if (ret) { +			dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n", +					ret);  			goto end; +		}  	} else { -		dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n"); -		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));  		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); -		if (ret) +		if (ret) { +			dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n", +					ret);  			goto end; +		}  	} +reset_slots:  	/* reset the number of slots and header */  	*slots = mei_count_full_read_slots(dev);  	dev->rd_msg_hdr = 0; @@ -401,10 +436,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)  	s32 slots;  	int ret; -	if (!mei_hbuf_is_ready(dev)) { -		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n"); + +	if (!mei_hbuf_acquire(dev))  		return 0; -	} +  	slots = mei_hbuf_empty_slots(dev);  	if (slots <= 0)  		return -EMSGSIZE; @@ -420,15 +455,14 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)  		cl->status = 0;  		list_del(&cb->list); -		if (MEI_WRITING == cl->writing_state && -		    cb->fop_type == MEI_FOP_WRITE && +		if (cb->fop_type == MEI_FOP_WRITE &&  		    cl != &dev->iamthif_cl) { -			dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n"); +			cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");  			cl->writing_state = MEI_WRITE_COMPLETE;  			list_add_tail(&cb->list, &cmpl_list->list);  		}  		if (cl == &dev->iamthif_cl) { -			dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n"); +			cl_dbg(dev, cl, "check iamthif flow control.\n");  			if (dev->iamthif_flow_control_pending) {  				ret = 
mei_amthif_irq_read(dev, &slots);  				if (ret) @@ -439,29 +473,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)  	if (dev->wd_state == MEI_WD_STOPPING) {  		dev->wd_state = MEI_WD_IDLE; -		wake_up_interruptible(&dev->wait_stop_wd); +		wake_up(&dev->wait_stop_wd);  	} -	if (dev->wr_ext_msg.hdr.length) { -		mei_write_message(dev, &dev->wr_ext_msg.hdr, -				dev->wr_ext_msg.data); -		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length); -		dev->wr_ext_msg.hdr.length = 0; -	} -	if (dev->dev_state == MEI_DEV_ENABLED) { +	if (mei_cl_is_connected(&dev->wd_cl)) {  		if (dev->wd_pending &&  		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { -			if (mei_wd_send(dev)) -				dev_dbg(&dev->pdev->dev, "wd send failed.\n"); -			else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) -				return -ENODEV; - +			ret = mei_wd_send(dev); +			if (ret) +				return ret;  			dev->wd_pending = false; - -			if (dev->wd_state == MEI_WD_RUNNING) -				slots -= mei_data2slots(MEI_WD_START_MSG_SIZE); -			else -				slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);  		}  	} @@ -476,28 +497,31 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)  		switch (cb->fop_type) {  		case MEI_FOP_CLOSE:  			/* send disconnect message */ -			ret = mei_cl_irq_close(cl, cb, &slots, cmpl_list); +			ret = mei_cl_irq_close(cl, cb, cmpl_list);  			if (ret)  				return ret;  			break;  		case MEI_FOP_READ:  			/* send flow control message */ -			ret = mei_cl_irq_read(cl, cb, &slots, cmpl_list); +			ret = mei_cl_irq_read(cl, cb, cmpl_list);  			if (ret)  				return ret;  			break; -		case MEI_FOP_IOCTL: +		case MEI_FOP_CONNECT:  			/* connect message */ -			if (mei_cl_is_other_connecting(cl)) -				continue; -			ret = mei_cl_irq_ioctl(cl, cb, &slots, cmpl_list); +			ret = mei_cl_irq_connect(cl, cb, cmpl_list);  			if (ret)  				return ret;  			break; - +		case MEI_FOP_DISCONNECT_RSP: +			/* send disconnect resp */ +			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); 
+			if (ret) +				return ret; +			break;  		default:  			BUG();  		} @@ -509,19 +533,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)  		cl = cb->cl;  		if (cl == NULL)  			continue; -		if (mei_cl_flow_ctrl_creds(cl) <= 0) { -			dev_dbg(&dev->pdev->dev, -				"No flow control credentials for client %d, not sending.\n", -				cl->host_client_id); -			continue; -		} -  		if (cl == &dev->iamthif_cl) -			ret = mei_amthif_irq_write_complete(cl, cb, -						&slots, cmpl_list); +			ret = mei_amthif_irq_write(cl, cb, cmpl_list);  		else -			ret = mei_cl_irq_write_complete(cl, cb, -						&slots, cmpl_list); +			ret = mei_cl_irq_write(cl, cb, cmpl_list);  		if (ret)  			return ret;  	} @@ -536,13 +551,11 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);   *   * @work: pointer to the work_struct structure   * - * NOTE: This function is called by timer interrupt work   */  void mei_timer(struct work_struct *work)  {  	unsigned long timeout; -	struct mei_cl *cl_pos = NULL; -	struct mei_cl *cl_next = NULL; +	struct mei_cl *cl;  	struct mei_cl_cb  *cb_pos = NULL;  	struct mei_cl_cb  *cb_next = NULL; @@ -551,33 +564,42 @@ void mei_timer(struct work_struct *work)  	mutex_lock(&dev->device_lock); -	if (dev->dev_state != MEI_DEV_ENABLED) { -		if (dev->dev_state == MEI_DEV_INIT_CLIENTS) { -			if (dev->init_clients_timer) { -				if (--dev->init_clients_timer == 0) { -					dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n", -						dev->hbm_state); -					mei_reset(dev, 1); -				} + +	/* Catch interrupt stalls during HBM init handshake */ +	if (dev->dev_state == MEI_DEV_INIT_CLIENTS && +	    dev->hbm_state != MEI_HBM_IDLE) { + +		if (dev->init_clients_timer) { +			if (--dev->init_clients_timer == 0) { +				dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n", +					dev->hbm_state); +				mei_reset(dev); +				goto out;  			}  		} -		goto out;  	} + +	if (dev->dev_state != MEI_DEV_ENABLED) +		goto out; +  	/*** 
connect/disconnect timeouts ***/ -	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { -		if (cl_pos->timer_count) { -			if (--cl_pos->timer_count == 0) { -				dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n"); -				mei_reset(dev, 1); +	list_for_each_entry(cl, &dev->file_list, link) { +		if (cl->timer_count) { +			if (--cl->timer_count == 0) { +				dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); +				mei_reset(dev);  				goto out;  			}  		}  	} +	if (!mei_cl_is_connected(&dev->iamthif_cl)) +		goto out; +  	if (dev->iamthif_stall_timer) {  		if (--dev->iamthif_stall_timer == 0) { -			dev_err(&dev->pdev->dev, "reset: amthif  hanged.\n"); -			mei_reset(dev, 1); +			dev_err(&dev->pdev->dev, "timer: amthif  hanged.\n"); +			mei_reset(dev);  			dev->iamthif_msg_buf_size = 0;  			dev->iamthif_msg_buf_index = 0;  			dev->iamthif_canceled = false; @@ -613,10 +635,10 @@ void mei_timer(struct work_struct *work)  			list_for_each_entry_safe(cb_pos, cb_next,  				&dev->amthif_rd_complete_list.list, list) { -				cl_pos = cb_pos->file_object->private_data; +				cl = cb_pos->file_object->private_data;  				/* Finding the AMTHI entry. */ -				if (cl_pos == &dev->iamthif_cl) +				if (cl == &dev->iamthif_cl)  					list_del(&cb_pos->list);  			}  			mei_io_cb_free(dev->iamthif_current_cb); @@ -630,7 +652,8 @@ void mei_timer(struct work_struct *work)  		}  	}  out: -	schedule_delayed_work(&dev->timer_work, 2 * HZ); +	if (dev->dev_state != MEI_DEV_DISABLED) +		schedule_delayed_work(&dev->timer_work, 2 * HZ);  	mutex_unlock(&dev->device_lock);  } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 173ff095be0..66f0a1a0645 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -13,9 +13,6 @@   * more details.   
*   */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -  #include <linux/module.h>  #include <linux/moduleparam.h>  #include <linux/kernel.h> @@ -40,7 +37,6 @@  #include <linux/mei.h>  #include "mei_dev.h" -#include "hw-me.h"  #include "client.h"  /** @@ -48,7 +44,7 @@   *   * @inode: pointer to inode structure   * @file: pointer to file structure - e + *   * returns 0 on success, <0 on error   */  static int mei_open(struct inode *inode, struct file *file) @@ -60,48 +56,45 @@ static int mei_open(struct inode *inode, struct file *file)  	int err; -	err = -ENODEV;  	if (!misc->parent) -		goto out; +		return -ENODEV;  	pdev = container_of(misc->parent, struct pci_dev, dev);  	dev = pci_get_drvdata(pdev);  	if (!dev) -		goto out; +		return -ENODEV;  	mutex_lock(&dev->device_lock); -	err = -ENOMEM; -	cl = mei_cl_allocate(dev); -	if (!cl) -		goto out_unlock; + +	cl = NULL;  	err = -ENODEV;  	if (dev->dev_state != MEI_DEV_ENABLED) {  		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED  dev_state = %s\n",  		    mei_dev_state_str(dev->dev_state)); -		goto out_unlock; -	} -	err = -EMFILE; -	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { -		dev_err(&dev->pdev->dev, "open_handle_count exceded %d", -			MEI_MAX_OPEN_HANDLE_COUNT); -		goto out_unlock; +		goto err_unlock;  	} +	err = -ENOMEM; +	cl = mei_cl_allocate(dev); +	if (!cl) +		goto err_unlock; + +	/* open_handle_count check is handled in the mei_cl_link */  	err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);  	if (err) -		goto out_unlock; +		goto err_unlock;  	file->private_data = cl; +  	mutex_unlock(&dev->device_lock);  	return nonseekable_open(inode, file); -out_unlock: +err_unlock:  	mutex_unlock(&dev->device_lock);  	kfree(cl); -out:  	return err;  } @@ -132,22 +125,12 @@ static int mei_release(struct inode *inode, struct file *file)  	}  	if (cl->state == MEI_FILE_CONNECTED) {  		cl->state = MEI_FILE_DISCONNECTING; -		dev_dbg(&dev->pdev->dev, -			"disconnecting client host client = %d, " -		    "ME client 
= %d\n", -		    cl->host_client_id, -		    cl->me_client_id); +		cl_dbg(dev, cl, "disconnecting\n");  		rets = mei_cl_disconnect(cl);  	}  	mei_cl_flush_queues(cl); -	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n", -	    cl->host_client_id, -	    cl->me_client_id); +	cl_dbg(dev, cl, "removing\n"); -	if (dev->open_handle_count > 0) { -		clear_bit(cl->host_client_id, dev->host_clients_map); -		dev->open_handle_count--; -	}  	mei_cl_unlink(cl); @@ -165,10 +148,7 @@ static int mei_release(struct inode *inode, struct file *file)  	file->private_data = NULL; -	if (cb) { -		mei_io_cb_free(cb); -		cb = NULL; -	} +	mei_io_cb_free(cb);  	kfree(cl);  out: @@ -203,12 +183,18 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,  	dev = cl->dev; +  	mutex_lock(&dev->device_lock);  	if (dev->dev_state != MEI_DEV_ENABLED) {  		rets = -ENODEV;  		goto out;  	} +	if (length == 0) { +		rets = 0; +		goto out; +	} +  	if (cl == &dev->iamthif_cl) {  		rets = mei_amthif_read(dev, file, ubuf, length, offset);  		goto out; @@ -249,19 +235,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,  		mutex_unlock(&dev->device_lock);  		if (wait_event_interruptible(cl->rx_wait, -			(MEI_READ_COMPLETE == cl->reading_state || -			 MEI_FILE_INITIALIZING == cl->state || -			 MEI_FILE_DISCONNECTED == cl->state || -			 MEI_FILE_DISCONNECTING == cl->state))) { +				MEI_READ_COMPLETE == cl->reading_state || +				mei_cl_is_transitioning(cl))) { +  			if (signal_pending(current))  				return -EINTR;  			return -ERESTARTSYS;  		}  		mutex_lock(&dev->device_lock); -		if (MEI_FILE_INITIALIZING == cl->state || -		    MEI_FILE_DISCONNECTED == cl->state || -		    MEI_FILE_DISCONNECTING == cl->state) { +		if (mei_cl_is_transitioning(cl)) {  			rets = -EBUSY;  			goto out;  		} @@ -291,6 +274,7 @@ copy_buffer:  	length = min_t(size_t, length, cb->buf_idx - *offset);  	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { +		
dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");  		rets = -EFAULT;  		goto free;  	} @@ -347,11 +331,17 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,  	id = mei_me_cl_by_id(dev, cl->me_client_id);  	if (id < 0) { -		rets = -ENODEV; +		rets = -ENOTTY;  		goto out;  	} -	if (length > dev->me_clients[id].props.max_msg_length || length <= 0) { -		rets = -EMSGSIZE; + +	if (length == 0) { +		rets = 0; +		goto out; +	} + +	if (length > dev->me_clients[id].props.max_msg_length) { +		rets = -EFBIG;  		goto out;  	} @@ -404,8 +394,11 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,  		goto out;  	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); -	if (rets) +	if (rets) { +		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); +		rets = -EFAULT;  		goto out; +	}  	if (cl == &dev->iamthif_cl) {  		rets = mei_amthif_write(dev, write_cb); @@ -469,12 +462,11 @@ static int mei_ioctl_connect_client(struct file *file,  	if (i < 0 || dev->me_clients[i].props.fixed_address) {  		dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",  				&data->in_client_uuid); -		rets = -ENODEV; +		rets = -ENOTTY;  		goto end;  	}  	cl->me_client_id = dev->me_clients[i].client_id; -	cl->state = MEI_FILE_CONNECTING;  	dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",  			cl->me_client_id); @@ -492,11 +484,11 @@ static int mei_ioctl_connect_client(struct file *file,  			rets = -ENODEV;  			goto end;  		} -		clear_bit(cl->host_client_id, dev->host_clients_map);  		mei_cl_unlink(cl);  		kfree(cl);  		cl = NULL; +		dev->iamthif_open_count++;  		file->private_data = &dev->iamthif_cl;  		client = &data->out_client_properties; @@ -651,8 +643,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)  		goto out;  	} -	if (MEI_WRITE_COMPLETE == cl->writing_state) -		mask |= (POLLIN | POLLRDNORM); +	mask |= (POLLIN | POLLRDNORM);  out:  	mutex_unlock(&dev->device_lock); diff --git 
a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 7b918b2fb89..5c7e990e2f2 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -24,7 +24,6 @@  #include <linux/mei_cl_bus.h>  #include "hw.h" -#include "hw-me-regs.h"  #include "hbm.h"  /* @@ -61,11 +60,16 @@ extern const uuid_le mei_wd_guid;  #define MEI_CLIENTS_MAX 256  /* + * maximum number of consecutive resets + */ +#define MEI_MAX_CONSEC_RESET  3 + +/*   * Number of File descriptors/handles   * that can be opened to the driver.   *   * Limit to 255: 256 Total Clients - * minus internal client for MEI Bus Messags + * minus internal client for MEI Bus Messages   */  #define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) @@ -125,16 +129,18 @@ enum mei_wd_states {  /**   * enum mei_cb_file_ops  - file operation associated with the callback - * @MEI_FOP_READ   - read - * @MEI_FOP_WRITE  - write - * @MEI_FOP_IOCTL  - ioctl - * @MEI_FOP_OPEN   - open - * @MEI_FOP_CLOSE  - close + * @MEI_FOP_READ      - read + * @MEI_FOP_WRITE     - write + * @MEI_FOP_CONNECT   - connect + * @MEI_FOP_DISCONNECT_RSP - disconnect response + * @MEI_FOP_OPEN      - open + * @MEI_FOP_CLOSE     - close   */  enum mei_cb_file_ops {  	MEI_FOP_READ = 0,  	MEI_FOP_WRITE, -	MEI_FOP_IOCTL, +	MEI_FOP_CONNECT, +	MEI_FOP_DISCONNECT_RSP,  	MEI_FOP_OPEN,  	MEI_FOP_CLOSE  }; @@ -147,6 +153,20 @@ struct mei_msg_data {  	unsigned char *data;  }; +/* Maximum number of processed FW status registers */ +#define MEI_FW_STATUS_MAX 2 + +/* + * struct mei_fw_status - storage of FW status data + * + * @count - number of actually available elements in array + * @status - FW status registers + */ +struct mei_fw_status { +	int count; +	u32 status[MEI_FW_STATUS_MAX]; +}; +  /**   * struct mei_me_client - representation of me (fw) client   * @@ -178,9 +198,10 @@ struct mei_cl_cb {  	unsigned long buf_idx;  	unsigned long read_time;  	struct file *file_object; +	u32 internal:1;  }; -/* MEI client instance carried as 
file->pirvate_data*/ +/* MEI client instance carried as file->private_data*/  struct mei_cl {  	struct list_head link;  	struct mei_device *dev; @@ -206,6 +227,7 @@ struct mei_cl {  /** struct mei_hw_ops   * + * @fw_status        - read FW status from PCI config space   * @host_is_ready    - query for host readiness   * @hw_is_ready      - query if hw is ready @@ -213,6 +235,9 @@ struct mei_cl {   * @hw_start         - start hw after reset   * @hw_config        - configure hw + * @pg_state         - power gating state of the device + * @pg_is_enabled    - is power gating enabled +   * @intr_clear       - clear pending interrupts   * @intr_enable      - enable interrupts   * @intr_disable     - disable interrupts @@ -230,20 +255,25 @@ struct mei_cl {   */  struct mei_hw_ops { -	bool (*host_is_ready) (struct mei_device *dev); +	int (*fw_status)(struct mei_device *dev, +		struct mei_fw_status *fw_status); +	bool (*host_is_ready)(struct mei_device *dev); -	bool (*hw_is_ready) (struct mei_device *dev); -	int (*hw_reset) (struct mei_device *dev, bool enable); -	int  (*hw_start) (struct mei_device *dev); -	void (*hw_config) (struct mei_device *dev); +	bool (*hw_is_ready)(struct mei_device *dev); +	int (*hw_reset)(struct mei_device *dev, bool enable); +	int (*hw_start)(struct mei_device *dev); +	void (*hw_config)(struct mei_device *dev); -	void (*intr_clear) (struct mei_device *dev); -	void (*intr_enable) (struct mei_device *dev); -	void (*intr_disable) (struct mei_device *dev); +	enum mei_pg_state (*pg_state)(struct mei_device *dev); +	bool (*pg_is_enabled)(struct mei_device *dev); -	int (*hbuf_free_slots) (struct mei_device *dev); -	bool (*hbuf_is_ready) (struct mei_device *dev); -	size_t (*hbuf_max_len) (const struct mei_device *dev); +	void (*intr_clear)(struct mei_device *dev); +	void (*intr_enable)(struct mei_device *dev); +	void (*intr_disable)(struct mei_device *dev); + +	int (*hbuf_free_slots)(struct mei_device *dev); +	bool (*hbuf_is_ready)(struct mei_device 
*dev); +	size_t (*hbuf_max_len)(const struct mei_device *dev);  	int (*write)(struct mei_device *dev,  		     struct mei_msg_hdr *hdr, @@ -252,7 +282,7 @@ struct mei_hw_ops {  	int (*rdbuf_full_slots)(struct mei_device *dev);  	u32 (*read_hdr)(const struct mei_device *dev); -	int (*read) (struct mei_device *dev, +	int (*read)(struct mei_device *dev,  		     unsigned char *buf, unsigned long len);  }; @@ -288,6 +318,7 @@ int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length);  int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length);  int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);  void mei_cl_bus_rx_event(struct mei_cl *cl); +void mei_cl_bus_remove_devices(struct mei_device *dev);  int mei_cl_bus_init(void);  void mei_cl_bus_exit(void); @@ -323,16 +354,61 @@ struct mei_cl_device {  	void *priv_data;  }; + + /** + * enum mei_pg_event - power gating transition events + * + * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition + * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete + * @MEI_PG_EVENT_RECEIVED: the driver received pg event + */ +enum mei_pg_event { +	MEI_PG_EVENT_IDLE, +	MEI_PG_EVENT_WAIT, +	MEI_PG_EVENT_RECEIVED, +}; + +/** + * enum mei_pg_state - device internal power gating state + * + * @MEI_PG_OFF: device is not power gated - it is active + * @MEI_PG_ON:  device is power gated - it is in lower power state + */ +enum mei_pg_state { +	MEI_PG_OFF = 0, +	MEI_PG_ON =  1, +}; + +/* + * mei_cfg + * + * @fw_status - FW status + * @quirk_probe - device exclusion quirk + */ +struct mei_cfg { +	const struct mei_fw_status fw_status; +	bool (*quirk_probe)(struct pci_dev *pdev); +}; + + +#define MEI_PCI_DEVICE(dev, cfg) \ +	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ +	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ +	.driver_data = (kernel_ulong_t)&(cfg) + +  /**   * struct mei_device -  MEI private device struct + * @reset_count - limits the number of consecutive resets   * @hbm_state - 
state of host bus message protocol + * @pg_event - power gating event   * @mem_addr - mem mapped base register address   * @hbuf_depth - depth of hardware host/write buffer is slots   * @hbuf_is_ready - query if the host host/write buffer is ready   * @wr_msg - the buffer for hbm control messages - * @wr_ext_msg - the buffer for hbm control responses (set in read cycle) + * @cfg - per device generation config and ops   */  struct mei_device {  	struct pci_dev *pdev;	/* pointer to pci device struct */ @@ -363,16 +439,26 @@ struct mei_device {  	 * waiting queue for receive message from FW  	 */  	wait_queue_head_t wait_hw_ready; +	wait_queue_head_t wait_pg;  	wait_queue_head_t wait_recvd_msg;  	wait_queue_head_t wait_stop_wd;  	/*  	 * mei device  states  	 */ +	unsigned long reset_count;  	enum mei_dev_state dev_state;  	enum mei_hbm_state hbm_state;  	u16 init_clients_timer; +	/* +	 * Power Gating support +	 */ +	enum mei_pg_event pg_event; +#ifdef CONFIG_PM_RUNTIME +	struct dev_pm_domain pg_domain; +#endif /* CONFIG_PM_RUNTIME */ +  	unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];	/* control messages */  	u32 rd_msg_hdr; @@ -386,19 +472,14 @@ struct mei_device {  		unsigned char data[128];  	} wr_msg; -	struct { -		struct mei_msg_hdr hdr; -		unsigned char data[4];	/* All HBM messages are 4 bytes */ -	} wr_ext_msg;		/* for control responses */ -  	struct hbm_version version;  	struct mei_me_client *me_clients; /* Note: memory has to be allocated */  	DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);  	DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); -	u8 me_clients_num; -	u8 me_client_presentation_num; -	u8 me_client_index; +	unsigned long me_clients_num; +	unsigned long me_client_presentation_num; +	unsigned long me_client_index;  	struct mei_cl wd_cl;  	enum mei_wd_states wd_state; @@ -414,6 +495,7 @@ struct mei_device {  	struct file *iamthif_file_object;  	struct mei_cl iamthif_cl;  	struct mei_cl_cb *iamthif_current_cb; +	long iamthif_open_count;  	int 
iamthif_mtu;  	unsigned long iamthif_timer;  	u32 iamthif_stall_timer; @@ -426,6 +508,7 @@ struct mei_device {  	bool iamthif_canceled;  	struct work_struct init_work; +	struct work_struct reset_work;  	/* List of bus devices */  	struct list_head device_list; @@ -436,6 +519,7 @@ struct mei_device {  	const struct mei_hw_ops *ops; +	const struct mei_cfg *cfg;  	char hw[0] __aligned(sizeof(void *));  }; @@ -455,13 +539,25 @@ static inline u32 mei_data2slots(size_t length)  	return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);  } +/** + * mei_slots2data- get data in slots - bytes from slots + * @slots -  number of available slots + * returns  - number of bytes in slots + */ +static inline u32 mei_slots2data(int slots) +{ +	return slots * 4; +} +  /*   * mei init function prototypes   */ -void mei_device_init(struct mei_device *dev); -void mei_reset(struct mei_device *dev, int interrupts); +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg); +int mei_reset(struct mei_device *dev);  int mei_start(struct mei_device *dev); +int mei_restart(struct mei_device *dev);  void mei_stop(struct mei_device *dev); +void mei_cancel_work(struct mei_device *dev);  /*   *  MEI interrupt functions prototype @@ -496,8 +592,8 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,  void mei_amthif_run_next_cmd(struct mei_device *dev); -int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, -				  s32 *slots, struct mei_cl_cb *cmpl_list); +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, +			struct mei_cl_cb *cmpl_list);  void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);  int mei_amthif_irq_read_msg(struct mei_device *dev, @@ -509,7 +605,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);   * NFC functions   */  int mei_nfc_host_init(struct mei_device *dev); -void mei_nfc_host_exit(void); +void mei_nfc_host_exit(struct mei_device *dev);  /*   * NFC Client UUID @@ 
-524,7 +620,7 @@ int mei_wd_host_init(struct mei_device *dev);   *   once we got connection to the WD Client   * @dev - mei device   */ -void mei_watchdog_register(struct mei_device *dev); +int mei_watchdog_register(struct mei_device *dev);  /*   * mei_watchdog_unregister  - Unregistering watchdog interface   * @dev - mei device @@ -535,10 +631,22 @@ void mei_watchdog_unregister(struct mei_device *dev);   * Register Access Function   */ +  static inline void mei_hw_config(struct mei_device *dev)  {  	dev->ops->hw_config(dev);  } + +static inline enum mei_pg_state mei_pg_state(struct mei_device *dev) +{ +	return dev->ops->pg_state(dev); +} + +static inline bool mei_pg_is_enabled(struct mei_device *dev) +{ +	return dev->ops->pg_is_enabled(dev); +} +  static inline int mei_hw_reset(struct mei_device *dev, bool enable)  {  	return dev->ops->hw_reset(dev, enable); @@ -611,6 +719,17 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)  	return dev->ops->rdbuf_full_slots(dev);  } +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status); + +#define FW_STS_FMT "%08X %08X" +#define FW_STS_PRM(fw_status) \ +	(fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \ +	(fw_status).count > 1 ? 
(fw_status).status[1] : 0xDEADBEEF + +bool mei_hbuf_acquire(struct mei_device *dev); + +bool mei_write_is_idle(struct mei_device *dev); +  #if IS_ENABLED(CONFIG_DEBUG_FS)  int mei_dbgfs_register(struct mei_device *dev, const char *name);  void mei_dbgfs_deregister(struct mei_device *dev); @@ -625,9 +744,9 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}  int mei_register(struct mei_device *dev);  void mei_deregister(struct mei_device *dev); -#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d" +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"  #define MEI_HDR_PRM(hdr)                  \  	(hdr)->host_addr, (hdr)->me_addr, \ -	(hdr)->length, (hdr)->msg_complete +	(hdr)->length, (hdr)->internal, (hdr)->msg_complete  #endif diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c index d0c6907dfd9..3095fc514a6 100644 --- a/drivers/misc/mei/nfc.c +++ b/drivers/misc/mei/nfc.c @@ -92,7 +92,7 @@ struct mei_nfc_hci_hdr {   * @cl: NFC host client   * @cl_info: NFC info host client   * @init_work: perform connection to the info client - * @fw_ivn: NFC Intervace Version Number + * @fw_ivn: NFC Interface Version Number   * @vendor_id: NFC manufacturer ID   * @radio_type: NFC radio type   */ @@ -163,7 +163,7 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)  			return 0;  		default: -			dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", +			dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",  				ndev->radio_type);  			return -EINVAL; @@ -175,14 +175,14 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)  			ndev->bus_name = "pn544";  			return 0;  		default: -			dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", +			dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",  				ndev->radio_type);  			return -EINVAL;  		}  	default: -		dev_err(&dev->pdev->dev, "Unknow vendor ID 0x%x\n", +		dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n",  			ndev->vendor_id);  		return -EINVAL; @@ -364,7 
+364,7 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)  	if (!wait_event_interruptible_timeout(ndev->send_wq,  				ndev->recv_req_id == ndev->req_id, HZ)) {  		dev_err(&dev->pdev->dev, "NFC MEI command timeout\n"); -		err = -ETIMEDOUT; +		err = -ETIME;  	} else {  		ndev->req_id++;  	} @@ -428,7 +428,7 @@ static void mei_nfc_init(struct work_struct *work)  	mutex_unlock(&dev->device_lock);  	if (mei_nfc_if_version(ndev) < 0) { -		dev_err(&dev->pdev->dev, "Could not get the NFC interfave version"); +		dev_err(&dev->pdev->dev, "Could not get the NFC interface version");  		goto err;  	} @@ -469,7 +469,9 @@ static void mei_nfc_init(struct work_struct *work)  	return;  err: +	mutex_lock(&dev->device_lock);  	mei_nfc_free(ndev); +	mutex_unlock(&dev->device_lock);  	return;  } @@ -481,12 +483,15 @@ int mei_nfc_host_init(struct mei_device *dev)  	struct mei_cl *cl_info, *cl = NULL;  	int i, ret; -	/* already initialzed */ +	/* already initialized */  	if (ndev->cl_info)  		return 0; -	cl_info = mei_cl_allocate(dev); -	cl = mei_cl_allocate(dev); +	ndev->cl_info = mei_cl_allocate(dev); +	ndev->cl = mei_cl_allocate(dev); + +	cl = ndev->cl; +	cl_info = ndev->cl_info;  	if (!cl || !cl_info) {  		ret = -ENOMEM; @@ -497,7 +502,7 @@ int mei_nfc_host_init(struct mei_device *dev)  	i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);  	if (i < 0) {  		dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); -		ret = -ENOENT; +		ret = -ENOTTY;  		goto err;  	} @@ -515,7 +520,7 @@ int mei_nfc_host_init(struct mei_device *dev)  	i = mei_me_cl_by_uuid(dev, &mei_nfc_guid);  	if (i < 0) {  		dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); -		ret = -ENOENT; +		ret = -ENOTTY;  		goto err;  	} @@ -527,10 +532,9 @@ int mei_nfc_host_init(struct mei_device *dev)  	cl->device_uuid = mei_nfc_guid; +  	list_add_tail(&cl->device_link, &dev->device_list); -	ndev->cl_info = cl_info; -	ndev->cl = cl;  	ndev->req_id = 1;  	INIT_WORK(&ndev->init_work, 
mei_nfc_init); @@ -545,12 +549,10 @@ err:  	return ret;  } -void mei_nfc_host_exit(void) +void mei_nfc_host_exit(struct mei_device *dev)  {  	struct mei_nfc_dev *ndev = &nfc_dev; +	cancel_work_sync(&ndev->init_work); +} -	if (ndev->cl && ndev->cl->device) -		mei_cl_remove_device(ndev->cl->device); -	mei_nfc_free(ndev); -} diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 1b3844e8237..1b46c64a649 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -13,9 +13,6 @@   * more details.   *   */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -  #include <linux/module.h>  #include <linux/moduleparam.h>  #include <linux/kernel.h> @@ -27,7 +24,6 @@  #include <linux/aio.h>  #include <linux/pci.h>  #include <linux/poll.h> -#include <linux/init.h>  #include <linux/ioctl.h>  #include <linux/cdev.h>  #include <linux/sched.h> @@ -37,47 +33,55 @@  #include <linux/interrupt.h>  #include <linux/miscdevice.h> +#include <linux/pm_runtime.h> +  #include <linux/mei.h>  #include "mei_dev.h" -#include "hw-me.h"  #include "client.h" +#include "hw-me-regs.h" +#include "hw-me.h"  /* mei_pci_tbl - PCI Device ID Table */ -static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
MEI_DEV_ID_ICH9_8)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, -	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, +static const struct pci_device_id mei_me_pci_tbl[] = { +	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)}, + +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)}, +	
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)}, + +	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},  	/* required last entry */  	{0, } @@ -85,28 +89,33 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = {  MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl); +#ifdef CONFIG_PM_RUNTIME +static inline void mei_me_set_pm_domain(struct mei_device *dev); +static inline void mei_me_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_me_set_pm_domain(struct mei_device *dev) {} +static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM_RUNTIME */ +  /**   * mei_quirk_probe - probe for devices that doesn't valid ME interface   *   * @pdev: PCI device structure - * @ent: entry into pci_device_table + * @cfg: per generation config   *   * returns true if ME Interface is valid, false otherwise   */  static 
bool mei_me_quirk_probe(struct pci_dev *pdev, -				const struct pci_device_id *ent) +				const struct mei_cfg *cfg)  { -	u32 reg; -	if (ent->device == MEI_DEV_ID_PBG_1) { -		pci_read_config_dword(pdev, 0x48, ®); -		/* make sure that bit 9 is up and bit 10 is down */ -		if ((reg & 0x600) == 0x200) { -			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); -			return false; -		} +	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) { +		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); +		return false;  	} +  	return true;  } +  /**   * mei_probe - Device Initialization Routine   * @@ -117,15 +126,14 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,   */  static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  { +	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);  	struct mei_device *dev;  	struct mei_me_hw *hw;  	int err; -	if (!mei_me_quirk_probe(pdev, ent)) { -		err = -ENODEV; -		goto end; -	} +	if (!mei_me_quirk_probe(pdev, cfg)) +		return -ENODEV;  	/* enable pci dev */  	err = pci_enable_device(pdev); @@ -141,8 +149,23 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		dev_err(&pdev->dev, "failed to get pci regions.\n");  		goto disable_device;  	} + +	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || +	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + +		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); +		if (err) +			err = dma_set_coherent_mask(&pdev->dev, +						    DMA_BIT_MASK(32)); +	} +	if (err) { +		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); +		goto release_regions; +	} + +  	/* allocates and initializes the mei dev structure */ -	dev = mei_me_dev_init(pdev); +	dev = mei_me_dev_init(pdev, cfg);  	if (!dev) {  		err = -ENOMEM;  		goto release_regions; @@ -181,6 +204,9 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		goto release_irq;  	} +	pm_runtime_set_autosuspend_delay(&pdev->dev, 
MEI_ME_RPM_TIMEOUT); +	pm_runtime_use_autosuspend(&pdev->dev); +  	err = mei_register(dev);  	if (err)  		goto release_irq; @@ -189,13 +215,24 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	schedule_delayed_work(&dev->timer_work, HZ); -	pr_debug("initialization successful.\n"); +	/* +	* For not wake-able HW runtime pm framework +	* can't be used on pci device level. +	* Use domain runtime pm callbacks instead. +	*/ +	if (!pci_dev_run_wake(pdev)) +		mei_me_set_pm_domain(dev); + +	if (mei_pg_is_enabled(dev)) +		pm_runtime_put_noidle(&pdev->dev); + +	dev_dbg(&pdev->dev, "initialization successful.\n");  	return 0;  release_irq: +	mei_cancel_work(dev);  	mei_disable_interrupts(dev); -	flush_scheduled_work();  	free_irq(pdev->irq, dev);  disable_msi:  	pci_disable_msi(pdev); @@ -228,18 +265,23 @@ static void mei_me_remove(struct pci_dev *pdev)  	if (!dev)  		return; +	if (mei_pg_is_enabled(dev)) +		pm_runtime_get_noresume(&pdev->dev); +  	hw = to_me_hw(dev); -	dev_err(&pdev->dev, "stop\n"); +	dev_dbg(&pdev->dev, "stop\n");  	mei_stop(dev); +	if (!pci_dev_run_wake(pdev)) +		mei_me_unset_pm_domain(dev); +  	/* disable interrupts */  	mei_disable_interrupts(dev);  	free_irq(pdev->irq, dev);  	pci_disable_msi(pdev); -	pci_set_drvdata(pdev, NULL);  	if (hw->mem_addr)  		pci_iounmap(pdev, hw->mem_addr); @@ -253,7 +295,7 @@ static void mei_me_remove(struct pci_dev *pdev)  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int mei_me_pci_suspend(struct device *device)  {  	struct pci_dev *pdev = to_pci_dev(device); @@ -262,7 +304,7 @@ static int mei_me_pci_suspend(struct device *device)  	if (!dev)  		return -ENODEV; -	dev_err(&pdev->dev, "suspend\n"); +	dev_dbg(&pdev->dev, "suspend\n");  	mei_stop(dev); @@ -304,18 +346,125 @@ static int mei_me_pci_resume(struct device *device)  		return err;  	} -	mutex_lock(&dev->device_lock); -	dev->dev_state = MEI_DEV_POWER_UP; -	mei_clear_interrupts(dev); -	mei_reset(dev, 1); -	
mutex_unlock(&dev->device_lock); +	err = mei_restart(dev); +	if (err) +		return err;  	/* Start timer if stopped in suspend */  	schedule_delayed_work(&dev->timer_work, HZ); -	return err; +	return 0;  } -static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_me_pm_runtime_idle(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; + +	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; +	if (mei_write_is_idle(dev)) +		pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2); + +	return -EBUSY; +} + +static int mei_me_pm_runtime_suspend(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int ret; + +	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	if (mei_write_is_idle(dev)) +		ret = mei_me_pg_set_sync(dev); +	else +		ret = -EAGAIN; + +	mutex_unlock(&dev->device_lock); + +	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret); + +	return ret; +} + +static int mei_me_pm_runtime_resume(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int ret; + +	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	ret = mei_me_pg_unset_sync(dev); + +	mutex_unlock(&dev->device_lock); + +	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret); + +	return ret; +} + +/** + * mei_me_set_pm_domain - fill and set pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_me_set_pm_domain(struct mei_device *dev) +{ +	struct pci_dev *pdev  = dev->pdev; + +	if (pdev->dev.bus && pdev->dev.bus->pm) { +		dev->pg_domain.ops = *pdev->dev.bus->pm; + +		
dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend; +		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume; +		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle; + +		pdev->dev.pm_domain = &dev->pg_domain; +	} +} + +/** + * mei_me_unset_pm_domain - clean pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_me_unset_pm_domain(struct mei_device *dev) +{ +	/* stop using pm callbacks if any */ +	dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops mei_me_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, +				mei_me_pci_resume) +	SET_RUNTIME_PM_OPS( +		mei_me_pm_runtime_suspend, +		mei_me_pm_runtime_resume, +		mei_me_pm_runtime_idle) +}; +  #define MEI_ME_PM_OPS	(&mei_me_pm_ops)  #else  #define MEI_ME_PM_OPS	NULL diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c new file mode 100644 index 00000000000..2343c6236df --- /dev/null +++ b/drivers/misc/mei/pci-txe.c @@ -0,0 +1,436 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + + +#include "mei_dev.h" +#include "hw-txe.h" + +static const struct pci_device_id mei_txe_pci_tbl[] = { +	{MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */ +	{0, } +}; +MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); + +#ifdef CONFIG_PM_RUNTIME +static inline void mei_txe_set_pm_domain(struct mei_device *dev); +static inline void mei_txe_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM_RUNTIME */ + +static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) +{ +	int i; +	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { +		if (hw->mem_addr[i]) { +			pci_iounmap(pdev, hw->mem_addr[i]); +			hw->mem_addr[i] = NULL; +		} +	} +} +/** + * mei_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_txe_pci_tbl + * + * returns 0 on success, <0 on failure. 
+ */ +static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ +	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); +	struct mei_device *dev; +	struct mei_txe_hw *hw; +	int err; +	int i; + +	/* enable pci dev */ +	err = pci_enable_device(pdev); +	if (err) { +		dev_err(&pdev->dev, "failed to enable pci device.\n"); +		goto end; +	} +	/* set PCI host mastering  */ +	pci_set_master(pdev); +	/* pci request regions for mei driver */ +	err = pci_request_regions(pdev, KBUILD_MODNAME); +	if (err) { +		dev_err(&pdev->dev, "failed to get pci regions.\n"); +		goto disable_device; +	} + +	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); +	if (err) { +		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); +		if (err) { +			dev_err(&pdev->dev, "No suitable DMA available.\n"); +			goto release_regions; +		} +	} + +	/* allocates and initializes the mei dev structure */ +	dev = mei_txe_dev_init(pdev, cfg); +	if (!dev) { +		err = -ENOMEM; +		goto release_regions; +	} +	hw = to_txe_hw(dev); + +	/* mapping  IO device memory */ +	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { +		hw->mem_addr[i] = pci_iomap(pdev, i, 0); +		if (!hw->mem_addr[i]) { +			dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); +			err = -ENOMEM; +			goto free_device; +		} +	} + + +	pci_enable_msi(pdev); + +	/* clear spurious interrupts */ +	mei_clear_interrupts(dev); + +	/* request and enable interrupt  */ +	if (pci_dev_msi_enabled(pdev)) +		err = request_threaded_irq(pdev->irq, +			NULL, +			mei_txe_irq_thread_handler, +			IRQF_ONESHOT, KBUILD_MODNAME, dev); +	else +		err = request_threaded_irq(pdev->irq, +			mei_txe_irq_quick_handler, +			mei_txe_irq_thread_handler, +			IRQF_SHARED, KBUILD_MODNAME, dev); +	if (err) { +		dev_err(&pdev->dev, "mei: request_threaded_irq failure. 
irq = %d\n", +			pdev->irq); +		goto free_device; +	} + +	if (mei_start(dev)) { +		dev_err(&pdev->dev, "init hw failure.\n"); +		err = -ENODEV; +		goto release_irq; +	} + +	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); +	pm_runtime_use_autosuspend(&pdev->dev); + +	err = mei_register(dev); +	if (err) +		goto release_irq; + +	pci_set_drvdata(pdev, dev); + +	/* +	* For not wake-able HW runtime pm framework +	* can't be used on pci device level. +	* Use domain runtime pm callbacks instead. +	*/ +	if (!pci_dev_run_wake(pdev)) +		mei_txe_set_pm_domain(dev); + +	pm_runtime_put_noidle(&pdev->dev); + +	return 0; + +release_irq: + +	mei_cancel_work(dev); + +	/* disable interrupts */ +	mei_disable_interrupts(dev); + +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +free_device: +	mei_txe_pci_iounmap(pdev, hw); + +	kfree(dev); +release_regions: +	pci_release_regions(pdev); +disable_device: +	pci_disable_device(pdev); +end: +	dev_err(&pdev->dev, "initialization failed.\n"); +	return err; +} + +/** + * mei_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
+ */ +static void mei_txe_remove(struct pci_dev *pdev) +{ +	struct mei_device *dev; +	struct mei_txe_hw *hw; + +	dev = pci_get_drvdata(pdev); +	if (!dev) { +		dev_err(&pdev->dev, "mei: dev =NULL\n"); +		return; +	} + +	pm_runtime_get_noresume(&pdev->dev); + +	hw = to_txe_hw(dev); + +	mei_stop(dev); + +	if (!pci_dev_run_wake(pdev)) +		mei_txe_unset_pm_domain(dev); + +	/* disable interrupts */ +	mei_disable_interrupts(dev); +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +	pci_set_drvdata(pdev, NULL); + +	mei_txe_pci_iounmap(pdev, hw); + +	mei_deregister(dev); + +	kfree(dev); + +	pci_release_regions(pdev); +	pci_disable_device(pdev); +} + + +#ifdef CONFIG_PM_SLEEP +static int mei_txe_pci_suspend(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev = pci_get_drvdata(pdev); + +	if (!dev) +		return -ENODEV; + +	dev_dbg(&pdev->dev, "suspend\n"); + +	mei_stop(dev); + +	mei_disable_interrupts(dev); + +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +	return 0; +} + +static int mei_txe_pci_resume(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int err; + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	pci_enable_msi(pdev); + +	mei_clear_interrupts(dev); + +	/* request and enable interrupt */ +	if (pci_dev_msi_enabled(pdev)) +		err = request_threaded_irq(pdev->irq, +			NULL, +			mei_txe_irq_thread_handler, +			IRQF_ONESHOT, KBUILD_MODNAME, dev); +	else +		err = request_threaded_irq(pdev->irq, +			mei_txe_irq_quick_handler, +			mei_txe_irq_thread_handler, +			IRQF_SHARED, KBUILD_MODNAME, dev); +	if (err) { +		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", +				pdev->irq); +		return err; +	} + +	err = mei_restart(dev); + +	return err; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_txe_pm_runtime_idle(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; + +	
dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; +	if (mei_write_is_idle(dev)) +		pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); + +	return -EBUSY; +} +static int mei_txe_pm_runtime_suspend(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int ret; + +	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	if (mei_write_is_idle(dev)) +		ret = mei_txe_aliveness_set_sync(dev, 0); +	else +		ret = -EAGAIN; + +	/* +	 * If everything is okay we're about to enter PCI low +	 * power state (D3) therefor we need to disable the +	 * interrupts towards host. +	 * However if device is not wakeable we do not enter +	 * D-low state and we need to keep the interrupt kicking +	 */ +	 if (!ret && pci_dev_run_wake(pdev)) +		mei_disable_interrupts(dev); + +	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); + +	mutex_unlock(&dev->device_lock); +	return ret; +} + +static int mei_txe_pm_runtime_resume(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int ret; + +	dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	mei_enable_interrupts(dev); + +	ret = mei_txe_aliveness_set_sync(dev, 1); + +	mutex_unlock(&dev->device_lock); + +	dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); + +	return ret; +} + +/** + * mei_txe_set_pm_domain - fill and set pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_set_pm_domain(struct mei_device *dev) +{ +	struct pci_dev *pdev  = dev->pdev; + +	if (pdev->dev.bus && pdev->dev.bus->pm) { +		dev->pg_domain.ops = *pdev->dev.bus->pm; + +		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend; +		
dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume; +		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle; + +		pdev->dev.pm_domain = &dev->pg_domain; +	} +} + +/** + * mei_txe_unset_pm_domain - clean pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) +{ +	/* stop using pm callbacks if any */ +	dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops mei_txe_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, +				mei_txe_pci_resume) +	SET_RUNTIME_PM_OPS( +		mei_txe_pm_runtime_suspend, +		mei_txe_pm_runtime_resume, +		mei_txe_pm_runtime_idle) +}; + +#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops) +#else +#define MEI_TXE_PM_OPS	NULL +#endif /* CONFIG_PM */ + +/* + *  PCI driver structure + */ +static struct pci_driver mei_txe_driver = { +	.name = KBUILD_MODNAME, +	.id_table = mei_txe_pci_tbl, +	.probe = mei_txe_probe, +	.remove = mei_txe_remove, +	.shutdown = mei_txe_remove, +	.driver.pm = MEI_TXE_PM_OPS, +}; + +module_pci_driver(mei_txe_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index b8921432e89..a84a664dfcc 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c @@ -25,7 +25,6 @@  #include "mei_dev.h"  #include "hbm.h" -#include "hw-me.h"  #include "client.h"  static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; @@ -53,14 +52,14 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)   *   * @dev: the device structure   * - * returns -ENENT if wd client cannot be found + * returns -ENOTTY if wd client cannot be found   *         -EIO if write has failed   *         0 on success   */  int mei_wd_host_init(struct mei_device *dev)  {  	struct mei_cl *cl = &dev->wd_cl; -	int i; +	int id;  	int ret;  	
mei_cl_init(cl, dev); @@ -70,32 +69,35 @@ int mei_wd_host_init(struct mei_device *dev)  	/* check for valid client id */ -	i = mei_me_cl_by_uuid(dev, &mei_wd_guid); -	if (i < 0) { +	id = mei_me_cl_by_uuid(dev, &mei_wd_guid); +	if (id < 0) {  		dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); -		return -ENOENT; +		return -ENOTTY;  	} -	cl->me_client_id = dev->me_clients[i].client_id; +	cl->me_client_id = dev->me_clients[id].client_id;  	ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);  	if (ret < 0) {  		dev_info(&dev->pdev->dev, "wd: failed link client\n"); -		return -ENOENT; +		return ret;  	} -	cl->state = MEI_FILE_CONNECTING; +	ret = mei_cl_connect(cl, NULL); -	if (mei_hbm_cl_connect_req(dev, cl)) { -		dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n"); -		cl->state = MEI_FILE_DISCONNECTED; -		cl->host_client_id = 0; -		return -EIO; +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret); +		mei_cl_unlink(cl); +		return ret;  	} -	cl->timer_count = MEI_CONNECT_TIMEOUT; -	return 0; +	ret = mei_watchdog_register(dev); +	if (ret) { +		mei_cl_disconnect(cl); +		mei_cl_unlink(cl); +	} +	return ret;  }  /** @@ -106,24 +108,42 @@ int mei_wd_host_init(struct mei_device *dev)   * returns 0 if success,   *	-EIO when message send fails   *	-EINVAL when invalid message is to be sent + *	-ENODEV on flow control failure   */  int mei_wd_send(struct mei_device *dev)  { +	struct mei_cl *cl = &dev->wd_cl;  	struct mei_msg_hdr hdr; +	int ret; -	hdr.host_addr = dev->wd_cl.host_client_id; -	hdr.me_addr = dev->wd_cl.me_client_id; +	hdr.host_addr = cl->host_client_id; +	hdr.me_addr = cl->me_client_id;  	hdr.msg_complete = 1;  	hdr.reserved = 0; +	hdr.internal = 0;  	if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))  		hdr.length = MEI_WD_START_MSG_SIZE;  	else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))  		hdr.length = MEI_WD_STOP_MSG_SIZE; -	else +	else { +		dev_err(&dev->pdev->dev, "wd: invalid 
message is to be sent, aborting\n");  		return -EINVAL; +	} + +	ret = mei_write_message(dev, &hdr, dev->wd_data); +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: write message failed\n"); +		return ret; +	} -	return mei_write_message(dev, &hdr, dev->wd_data); +	ret = mei_cl_flow_ctrl_reduce(cl); +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n"); +		return ret; +	} + +	return 0;  }  /** @@ -132,9 +152,11 @@ int mei_wd_send(struct mei_device *dev)   * @dev: the device structure   * @preserve: indicate if to keep the timeout value   * - * returns 0 if success, - *	-EIO when message send fails + * returns 0 if success + * on error: + *	-EIO    when message send fails   *	-EINVAL when invalid message is to be sent + *	-ETIME  on message timeout   */  int mei_wd_stop(struct mei_device *dev)  { @@ -150,20 +172,12 @@ int mei_wd_stop(struct mei_device *dev)  	ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);  	if (ret < 0) -		goto out; - -	if (ret && dev->hbuf_is_ready) { -		ret = 0; -		dev->hbuf_is_ready = false; - -		if (!mei_wd_send(dev)) { -			ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl); -			if (ret) -				goto out; -		} else { -			dev_err(&dev->pdev->dev, "wd: send stop failed\n"); -		} +		goto err; +	if (ret && mei_hbuf_acquire(dev)) { +		ret = mei_wd_send(dev); +		if (ret) +			goto err;  		dev->wd_pending = false;  	} else {  		dev->wd_pending = true; @@ -171,21 +185,21 @@ int mei_wd_stop(struct mei_device *dev)  	mutex_unlock(&dev->device_lock); -	ret = wait_event_interruptible_timeout(dev->wait_stop_wd, -					dev->wd_state == MEI_WD_IDLE, -					msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); +	ret = wait_event_timeout(dev->wait_stop_wd, +				dev->wd_state == MEI_WD_IDLE, +				msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));  	mutex_lock(&dev->device_lock); -	if (dev->wd_state == MEI_WD_IDLE) { -		dev_dbg(&dev->pdev->dev, "wd: stop completed ret=%d.\n", ret); -		ret = 0; -	} else { -		if (!ret) -			ret = -ETIMEDOUT; +	if (dev->wd_state != MEI_WD_IDLE) { +		/* timeout 
*/ +		ret = -ETIME;  		dev_warn(&dev->pdev->dev,  			"wd: stop failed to complete ret=%d.\n", ret); +		goto err;  	} - -out: +	dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n", +			MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); +	return 0; +err:  	return ret;  } @@ -259,8 +273,8 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)   */  static int mei_wd_ops_ping(struct watchdog_device *wd_dev)  { -	int ret = 0;  	struct mei_device *dev; +	int ret;  	dev = watchdog_get_drvdata(wd_dev);  	if (!dev) @@ -276,25 +290,18 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)  	dev->wd_state = MEI_WD_RUNNING; +	ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); +	if (ret < 0) +		goto end;  	/* Check if we can send the ping to HW*/ -	if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { +	if (ret && mei_hbuf_acquire(dev)) { -		dev->hbuf_is_ready = false;  		dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); -		if (mei_wd_send(dev)) { -			dev_err(&dev->pdev->dev, "wd: send failed.\n"); -			ret = -EIO; -			goto end; -		} - -		if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) { -			dev_err(&dev->pdev->dev, -				"wd: mei_cl_flow_ctrl_reduce() failed.\n"); -			ret = -EIO; +		ret = mei_wd_send(dev); +		if (ret)  			goto end; -		} - +		dev->wd_pending = false;  	} else {  		dev->wd_pending = true;  	} @@ -362,17 +369,25 @@ static struct watchdog_device amt_wd_dev = {  }; -void mei_watchdog_register(struct mei_device *dev) +int mei_watchdog_register(struct mei_device *dev)  { -	if (watchdog_register_device(&amt_wd_dev)) { -		dev_err(&dev->pdev->dev, -			"wd: unable to register watchdog device.\n"); -		return; + +	int ret; + +	/* unlock to perserve correct locking order */ +	mutex_unlock(&dev->device_lock); +	ret = watchdog_register_device(&amt_wd_dev); +	mutex_lock(&dev->device_lock); +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n", +			ret); +		return ret;  	}  	dev_dbg(&dev->pdev->dev,  		"wd: successfully 
register watchdog interface.\n");  	watchdog_set_drvdata(&amt_wd_dev, dev); +	return 0;  }  void mei_watchdog_unregister(struct mei_device *dev) diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig new file mode 100644 index 00000000000..462a5b1d865 --- /dev/null +++ b/drivers/misc/mic/Kconfig @@ -0,0 +1,37 @@ +comment "Intel MIC Host Driver" + +config INTEL_MIC_HOST +	tristate "Intel MIC Host Driver" +	depends on 64BIT && PCI && X86 +	select VHOST_RING +	help +	  This enables Host Driver support for the Intel Many Integrated +	  Core (MIC) family of PCIe form factor coprocessor devices that +	  run a 64 bit Linux OS. The driver manages card OS state and +	  enables communication between host and card. Intel MIC X100 +	  devices are currently supported. + +	  If you are building a host kernel with an Intel MIC device then +	  say M (recommended) or Y, else say N. If unsure say N. + +	  More information about the Intel MIC family as well as the Linux +	  OS and tools for MIC to use with this driver are available from +	  <http://software.intel.com/en-us/mic-developer>. + +comment "Intel MIC Card Driver" + +config INTEL_MIC_CARD +	tristate "Intel MIC Card Driver" +	depends on 64BIT && X86 +	select VIRTIO +	help +	  This enables card driver support for the Intel Many Integrated +	  Core (MIC) device family. The card driver communicates shutdown/ +	  crash events to the host and allows registration/configuration of +	  virtio devices. Intel MIC X100 devices are currently supported. + +	  If you are building a card kernel for an Intel MIC device then +	  say M (recommended) or Y, else say N. If unsure say N. + +	  For more information see +	  <http://software.intel.com/en-us/mic-developer>. diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile new file mode 100644 index 00000000000..05b34d683a5 --- /dev/null +++ b/drivers/misc/mic/Makefile @@ -0,0 +1,6 @@ +# +# Makefile - Intel MIC Linux driver. +# Copyright(c) 2013, Intel Corporation. 
+# +obj-$(CONFIG_INTEL_MIC_HOST) += host/ +obj-$(CONFIG_INTEL_MIC_CARD) += card/ diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile new file mode 100644 index 00000000000..69d58bef92c --- /dev/null +++ b/drivers/misc/mic/card/Makefile @@ -0,0 +1,11 @@ +# +# Makefile - Intel MIC Linux driver. +# Copyright(c) 2013, Intel Corporation. +# +ccflags-y += -DINTEL_MIC_CARD + +obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o +mic_card-y += mic_x100.o +mic_card-y += mic_device.o +mic_card-y += mic_debugfs.o +mic_card-y += mic_virtio.o diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c new file mode 100644 index 00000000000..421b3d7911d --- /dev/null +++ b/drivers/misc/mic/card/mic_debugfs.c @@ -0,0 +1,130 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. 
+ * + */ +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/seq_file.h> +#include <linux/interrupt.h> +#include <linux/device.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" + +/* Debugfs parent dir */ +static struct dentry *mic_dbg; + +/** + * mic_intr_test - Send interrupts to host. + */ +static int mic_intr_test(struct seq_file *s, void *unused) +{ +	struct mic_driver *mdrv = s->private; +	struct mic_device *mdev = &mdrv->mdev; + +	mic_send_intr(mdev, 0); +	msleep(1000); +	mic_send_intr(mdev, 1); +	msleep(1000); +	mic_send_intr(mdev, 2); +	msleep(1000); +	mic_send_intr(mdev, 3); +	msleep(1000); + +	return 0; +} + +static int mic_intr_test_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_intr_test, inode->i_private); +} + +static int mic_intr_test_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static const struct file_operations intr_test_ops = { +	.owner   = THIS_MODULE, +	.open    = mic_intr_test_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_intr_test_release +}; + +/** + * mic_create_card_debug_dir - Initialize MIC debugfs entries. + */ +void __init mic_create_card_debug_dir(struct mic_driver *mdrv) +{ +	struct dentry *d; + +	if (!mic_dbg) +		return; + +	mdrv->dbg_dir = debugfs_create_dir(mdrv->name, mic_dbg); +	if (!mdrv->dbg_dir) { +		dev_err(mdrv->dev, "Cant create dbg_dir %s\n", mdrv->name); +		return; +	} + +	d = debugfs_create_file("intr_test", 0444, mdrv->dbg_dir, +		mdrv, &intr_test_ops); + +	if (!d) { +		dev_err(mdrv->dev, +			"Cant create dbg intr_test %s\n", mdrv->name); +		return; +	} +} + +/** + * mic_delete_card_debug_dir - Uninitialize MIC debugfs entries. + */ +void mic_delete_card_debug_dir(struct mic_driver *mdrv) +{ +	if (!mdrv->dbg_dir) +		return; + +	debugfs_remove_recursive(mdrv->dbg_dir); +} + +/** + * mic_init_card_debugfs - Initialize global debugfs entry. 
+ */ +void __init mic_init_card_debugfs(void) +{ +	mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); +	if (!mic_dbg) +		pr_err("can't create debugfs dir\n"); +} + +/** + * mic_exit_card_debugfs - Uninitialize global debugfs entry + */ +void mic_exit_card_debugfs(void) +{ +	debugfs_remove(mic_dbg); +} diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c new file mode 100644 index 00000000000..d0980ff9683 --- /dev/null +++ b/drivers/misc/mic/card/mic_device.c @@ -0,0 +1,305 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. 
+ * + */ +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/reboot.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_virtio.h" + +static struct mic_driver *g_drv; +static struct mic_irq *shutdown_cookie; + +static void mic_notify_host(u8 state) +{ +	struct mic_driver *mdrv = g_drv; +	struct mic_bootparam __iomem *bootparam = mdrv->dp; + +	iowrite8(state, &bootparam->shutdown_status); +	dev_dbg(mdrv->dev, "%s %d system_state %d\n", +		__func__, __LINE__, state); +	mic_send_intr(&mdrv->mdev, ioread8(&bootparam->c2h_shutdown_db)); +} + +static int mic_panic_event(struct notifier_block *this, unsigned long event, +		void *ptr) +{ +	struct mic_driver *mdrv = g_drv; +	struct mic_bootparam __iomem *bootparam = mdrv->dp; + +	iowrite8(-1, &bootparam->h2c_config_db); +	iowrite8(-1, &bootparam->h2c_shutdown_db); +	mic_notify_host(MIC_CRASHED); +	return NOTIFY_DONE; +} + +static struct notifier_block mic_panic = { +	.notifier_call  = mic_panic_event, +}; + +static irqreturn_t mic_shutdown_isr(int irq, void *data) +{ +	struct mic_driver *mdrv = g_drv; +	struct mic_bootparam __iomem *bootparam = mdrv->dp; + +	mic_ack_interrupt(&g_drv->mdev); +	if (ioread8(&bootparam->shutdown_card)) +		orderly_poweroff(true); +	return IRQ_HANDLED; +} + +static int mic_shutdown_init(void) +{ +	int rc = 0; +	struct mic_driver *mdrv = g_drv; +	struct mic_bootparam __iomem *bootparam = mdrv->dp; +	int shutdown_db; + +	shutdown_db = mic_next_card_db(); +	shutdown_cookie = mic_request_card_irq(mic_shutdown_isr, +			"Shutdown", mdrv, shutdown_db); +	if (IS_ERR(shutdown_cookie)) +		rc = PTR_ERR(shutdown_cookie); +	else +		iowrite8(shutdown_db, &bootparam->h2c_shutdown_db); +	return rc; +} + +static void mic_shutdown_uninit(void) +{ +	struct mic_driver *mdrv = g_drv; +	struct mic_bootparam __iomem *bootparam = mdrv->dp; + +	iowrite8(-1, &bootparam->h2c_shutdown_db); +	
mic_free_card_irq(shutdown_cookie, mdrv); +} + +static int __init mic_dp_init(void) +{ +	struct mic_driver *mdrv = g_drv; +	struct mic_device *mdev = &mdrv->mdev; +	struct mic_bootparam __iomem *bootparam; +	u64 lo, hi, dp_dma_addr; +	u32 magic; + +	lo = mic_read_spad(&mdrv->mdev, MIC_DPLO_SPAD); +	hi = mic_read_spad(&mdrv->mdev, MIC_DPHI_SPAD); + +	dp_dma_addr = lo | (hi << 32); +	mdrv->dp = mic_card_map(mdev, dp_dma_addr, MIC_DP_SIZE); +	if (!mdrv->dp) { +		dev_err(mdrv->dev, "Cannot remap Aperture BAR\n"); +		return -ENOMEM; +	} +	bootparam = mdrv->dp; +	magic = ioread32(&bootparam->magic); +	if (MIC_MAGIC != magic) { +		dev_err(mdrv->dev, "bootparam magic mismatch 0x%x\n", magic); +		return -EIO; +	} +	return 0; +} + +/* Uninitialize the device page */ +static void mic_dp_uninit(void) +{ +	mic_card_unmap(&g_drv->mdev, g_drv->dp); +} + +/** + * mic_request_card_irq - request an irq. + * + * @func: The callback function that handles the interrupt. + * @name: The ASCII name of the callee requesting the irq. + * @data: private data that is returned back when calling the + * function handler. + * @index: The doorbell index of the requester. + * + * returns: The cookie that is transparent to the caller. Passed + * back when calling mic_free_irq. An appropriate error code + * is returned on failure. Caller needs to use IS_ERR(return_val) + * to check for failure and PTR_ERR(return_val) to obtained the + * error code. + * + */ +struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data), +	const char *name, void *data, int index) +{ +	int rc = 0; +	unsigned long cookie; +	struct mic_driver *mdrv = g_drv; + +	rc  = request_irq(mic_db_to_irq(mdrv, index), func, +		0, name, data); +	if (rc) { +		dev_err(mdrv->dev, "request_irq failed rc = %d\n", rc); +		goto err; +	} +	mdrv->irq_info.irq_usage_count[index]++; +	cookie = index; +	return (struct mic_irq *)cookie; +err: +	return ERR_PTR(rc); +} + +/** + * mic_free_card_irq - free irq. 
+ * + * @cookie: cookie obtained during a successful call to mic_request_irq + * @data: private data specified by the calling function during the + * mic_request_irq + * + * returns: none. + */ +void mic_free_card_irq(struct mic_irq *cookie, void *data) +{ +	int index; +	struct mic_driver *mdrv = g_drv; + +	index = (unsigned long)cookie & 0xFFFFU; +	free_irq(mic_db_to_irq(mdrv, index), data); +	mdrv->irq_info.irq_usage_count[index]--; +} + +/** + * mic_next_card_db - Get the doorbell with minimum usage count. + * + * Returns the irq index. + */ +int mic_next_card_db(void) +{ +	int i; +	int index = 0; +	struct mic_driver *mdrv = g_drv; + +	for (i = 0; i < mdrv->intr_info.num_intr; i++) { +		if (mdrv->irq_info.irq_usage_count[i] < +			mdrv->irq_info.irq_usage_count[index]) +			index = i; +	} + +	return index; +} + +/** + * mic_init_irq - Initialize irq information. + * + * Returns 0 in success. Appropriate error code on failure. + */ +static int mic_init_irq(void) +{ +	struct mic_driver *mdrv = g_drv; + +	mdrv->irq_info.irq_usage_count = kzalloc((sizeof(u32) * +			mdrv->intr_info.num_intr), +			GFP_KERNEL); +	if (!mdrv->irq_info.irq_usage_count) +		return -ENOMEM; +	return 0; +} + +/** + * mic_uninit_irq - Uninitialize irq information. + * + * None. + */ +static void mic_uninit_irq(void) +{ +	struct mic_driver *mdrv = g_drv; + +	kfree(mdrv->irq_info.irq_usage_count); +} + +/* + * mic_driver_init - MIC driver initialization tasks. + * + * Returns 0 in success. Appropriate error code on failure. + */ +int __init mic_driver_init(struct mic_driver *mdrv) +{ +	int rc; + +	g_drv = mdrv; +	/* +	 * Unloading the card module is not supported. The MIC card module +	 * handles fundamental operations like host/card initiated shutdowns +	 * and informing the host about card crashes and cannot be unloaded. 
+	 */ +	if (!try_module_get(mdrv->dev->driver->owner)) { +		rc = -ENODEV; +		goto done; +	} +	rc = mic_dp_init(); +	if (rc) +		goto put; +	rc = mic_init_irq(); +	if (rc) +		goto dp_uninit; +	rc = mic_shutdown_init(); +	if (rc) +		goto irq_uninit; +	rc = mic_devices_init(mdrv); +	if (rc) +		goto shutdown_uninit; +	mic_create_card_debug_dir(mdrv); +	atomic_notifier_chain_register(&panic_notifier_list, &mic_panic); +done: +	return rc; +shutdown_uninit: +	mic_shutdown_uninit(); +irq_uninit: +	mic_uninit_irq(); +dp_uninit: +	mic_dp_uninit(); +put: +	module_put(mdrv->dev->driver->owner); +	return rc; +} + +/* + * mic_driver_uninit - MIC driver uninitialization tasks. + * + * Returns None + */ +void mic_driver_uninit(struct mic_driver *mdrv) +{ +	mic_delete_card_debug_dir(mdrv); +	mic_devices_uninit(mdrv); +	/* +	 * Inform the host about the shutdown status i.e. poweroff/restart etc. +	 * The module cannot be unloaded so the only code path to call +	 * mic_devices_uninit(..) is the shutdown callback. +	 */ +	mic_notify_host(system_state); +	mic_shutdown_uninit(); +	mic_uninit_irq(); +	mic_dp_uninit(); +	module_put(mdrv->dev->driver->owner); +} diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h new file mode 100644 index 00000000000..306f502be95 --- /dev/null +++ b/drivers/misc/mic/card/mic_device.h @@ -0,0 +1,134 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. + * + */ +#ifndef _MIC_CARD_DEVICE_H_ +#define _MIC_CARD_DEVICE_H_ + +#include <linux/workqueue.h> +#include <linux/io.h> +#include <linux/irqreturn.h> + +/** + * struct mic_intr_info - Contains h/w specific interrupt sources info + * + * @num_intr: The number of irqs available + */ +struct mic_intr_info { +	u32 num_intr; +}; + +/** + * struct mic_irq_info - OS specific irq information + * + * @irq_usage_count: usage count array tracking the number of sources + * assigned for each irq. + */ +struct mic_irq_info { +	int *irq_usage_count; +}; + +/** + * struct mic_device -  MIC device information. + * + * @mmio: MMIO bar information. + */ +struct mic_device { +	struct mic_mw mmio; +}; + +/** + * struct mic_driver - MIC card driver information. + * + * @name: Name for MIC driver. + * @dbg_dir: debugfs directory of this MIC device. + * @dev: The device backing this MIC. + * @dp: The pointer to the virtio device page. + * @mdev: MIC device information for the host. + * @hotplug_work: Hot plug work for adding/removing virtio devices. + * @irq_info: The OS specific irq information + * @intr_info: H/W specific interrupt information. 
+ */ +struct mic_driver { +	char name[20]; +	struct dentry *dbg_dir; +	struct device *dev; +	void __iomem *dp; +	struct mic_device mdev; +	struct work_struct hotplug_work; +	struct mic_irq_info irq_info; +	struct mic_intr_info intr_info; +}; + +/** + * struct mic_irq - opaque pointer used as cookie + */ +struct mic_irq; + +/** + * mic_mmio_read - read from an MMIO register. + * @mw: MMIO register base virtual address. + * @offset: register offset. + * + * RETURNS: register value. + */ +static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset) +{ +	return ioread32(mw->va + offset); +} + +/** + * mic_mmio_write - write to an MMIO register. + * @mw: MMIO register base virtual address. + * @val: the data value to put into the register + * @offset: register offset. + * + * RETURNS: none. + */ +static inline void +mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset) +{ +	iowrite32(val, mw->va + offset); +} + +int mic_driver_init(struct mic_driver *mdrv); +void mic_driver_uninit(struct mic_driver *mdrv); +int mic_next_card_db(void); +struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data), +	const char *name, void *data, int intr_src); +void mic_free_card_irq(struct mic_irq *cookie, void *data); +u32 mic_read_spad(struct mic_device *mdev, unsigned int idx); +void mic_send_intr(struct mic_device *mdev, int doorbell); +int mic_db_to_irq(struct mic_driver *mdrv, int db); +u32 mic_ack_interrupt(struct mic_device *mdev); +void mic_hw_intr_init(struct mic_driver *mdrv); +void __iomem * +mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size); +void mic_card_unmap(struct mic_device *mdev, void __iomem *addr); +void __init mic_create_card_debug_dir(struct mic_driver *mdrv); +void mic_delete_card_debug_dir(struct mic_driver *mdrv); +void __init mic_init_card_debugfs(void); +void mic_exit_card_debugfs(void); +#endif diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c new file mode 100644 index 
00000000000..653799b96bf --- /dev/null +++ b/drivers/misc/mic/card/mic_virtio.c @@ -0,0 +1,633 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Adapted from: + * + * virtio for kvm on s390 + * + * Copyright IBM Corp. 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com> + * + * Intel MIC Card driver. 
+ * + */ +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/virtio_config.h> + +#include "../common/mic_dev.h" +#include "mic_virtio.h" + +#define VIRTIO_SUBCODE_64 0x0D00 + +#define MIC_MAX_VRINGS                4 +struct mic_vdev { +	struct virtio_device vdev; +	struct mic_device_desc __iomem *desc; +	struct mic_device_ctrl __iomem *dc; +	struct mic_device *mdev; +	void __iomem *vr[MIC_MAX_VRINGS]; +	int used_size[MIC_MAX_VRINGS]; +	struct completion reset_done; +	struct mic_irq *virtio_cookie; +	int c2h_vdev_db; +}; + +static struct mic_irq *virtio_config_cookie; +#define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev) + +/* Helper API to obtain the parent of the virtio device */ +static inline struct device *mic_dev(struct mic_vdev *mvdev) +{ +	return mvdev->vdev.dev.parent; +} + +/* This gets the device's feature bits. */ +static u32 mic_get_features(struct virtio_device *vdev) +{ +	unsigned int i, bits; +	u32 features = 0; +	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; +	u8 __iomem *in_features = mic_vq_features(desc); +	int feature_len = ioread8(&desc->feature_len); + +	bits = min_t(unsigned, feature_len, +		sizeof(vdev->features)) * 8; +	for (i = 0; i < bits; i++) +		if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) +			features |= BIT(i); + +	return features; +} + +static void mic_finalize_features(struct virtio_device *vdev) +{ +	unsigned int i, bits; +	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; +	u8 feature_len = ioread8(&desc->feature_len); +	/* Second half of bitmap is features we accept. */ +	u8 __iomem *out_features = +		mic_vq_features(desc) + feature_len; + +	/* Give virtio_ring a chance to accept features. 
*/ +	vring_transport_features(vdev); + +	memset_io(out_features, 0, feature_len); +	bits = min_t(unsigned, feature_len, +		sizeof(vdev->features)) * 8; +	for (i = 0; i < bits; i++) { +		if (test_bit(i, vdev->features)) +			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)), +				 &out_features[i / 8]); +	} +} + +/* + * Reading and writing elements in config space + */ +static void mic_get(struct virtio_device *vdev, unsigned int offset, +		   void *buf, unsigned len) +{ +	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; + +	if (offset + len > ioread8(&desc->config_len)) +		return; +	memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len); +} + +static void mic_set(struct virtio_device *vdev, unsigned int offset, +		   const void *buf, unsigned len) +{ +	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; + +	if (offset + len > ioread8(&desc->config_len)) +		return; +	memcpy_toio(mic_vq_configspace(desc) + offset, buf, len); +} + +/* + * The operations to get and set the status word just access the status + * field of the device descriptor. set_status also interrupts the host + * to tell about status changes. 
+ */ +static u8 mic_get_status(struct virtio_device *vdev) +{ +	return ioread8(&to_micvdev(vdev)->desc->status); +} + +static void mic_set_status(struct virtio_device *vdev, u8 status) +{ +	struct mic_vdev *mvdev = to_micvdev(vdev); +	if (!status) +		return; +	iowrite8(status, &mvdev->desc->status); +	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); +} + +/* Inform host on a virtio device reset and wait for ack from host */ +static void mic_reset_inform_host(struct virtio_device *vdev) +{ +	struct mic_vdev *mvdev = to_micvdev(vdev); +	struct mic_device_ctrl __iomem *dc = mvdev->dc; +	int retry; + +	iowrite8(0, &dc->host_ack); +	iowrite8(1, &dc->vdev_reset); +	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); + +	/* Wait till host completes all card accesses and acks the reset */ +	for (retry = 100; retry--;) { +		if (ioread8(&dc->host_ack)) +			break; +		msleep(100); +	}; + +	dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry); + +	/* Reset status to 0 in case we timed out */ +	iowrite8(0, &mvdev->desc->status); +} + +static void mic_reset(struct virtio_device *vdev) +{ +	struct mic_vdev *mvdev = to_micvdev(vdev); + +	dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n", +		__func__, vdev->id.device); + +	mic_reset_inform_host(vdev); +	complete_all(&mvdev->reset_done); +} + +/* + * The virtio_ring code calls this API when it wants to notify the Host. 
+ */ +static bool mic_notify(struct virtqueue *vq) +{ +	struct mic_vdev *mvdev = vq->priv; + +	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); +	return true; +} + +static void mic_del_vq(struct virtqueue *vq, int n) +{ +	struct mic_vdev *mvdev = to_micvdev(vq->vdev); +	struct vring *vr = (struct vring *)(vq + 1); + +	free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n])); +	vring_del_virtqueue(vq); +	mic_card_unmap(mvdev->mdev, mvdev->vr[n]); +	mvdev->vr[n] = NULL; +} + +static void mic_del_vqs(struct virtio_device *vdev) +{ +	struct mic_vdev *mvdev = to_micvdev(vdev); +	struct virtqueue *vq, *n; +	int idx = 0; + +	dev_dbg(mic_dev(mvdev), "%s\n", __func__); + +	list_for_each_entry_safe(vq, n, &vdev->vqs, list) +		mic_del_vq(vq, idx++); +} + +/* + * This routine will assign vring's allocated in host/io memory. Code in + * virtio_ring.c however continues to access this io memory as if it were local + * memory without io accessors. + */ +static struct virtqueue *mic_find_vq(struct virtio_device *vdev, +				     unsigned index, +				     void (*callback)(struct virtqueue *vq), +				     const char *name) +{ +	struct mic_vdev *mvdev = to_micvdev(vdev); +	struct mic_vqconfig __iomem *vqconfig; +	struct mic_vqconfig config; +	struct virtqueue *vq; +	void __iomem *va; +	struct _mic_vring_info __iomem *info; +	void *used; +	int vr_size, _vr_size, err, magic; +	struct vring *vr; +	u8 type = ioread8(&mvdev->desc->type); + +	if (index >= ioread8(&mvdev->desc->num_vq)) +		return ERR_PTR(-ENOENT); + +	if (!name) +		return ERR_PTR(-ENOENT); + +	/* First assign the vring's allocated in host memory */ +	vqconfig = mic_vq_config(mvdev->desc) + index; +	memcpy_fromio(&config, vqconfig, sizeof(config)); +	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); +	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); +	va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size); +	if (!va) +		return ERR_PTR(-ENOMEM); +	mvdev->vr[index] = 
va; +	memset_io(va, 0x0, _vr_size); +	vq = vring_new_virtqueue(index, le16_to_cpu(config.num), +				 MIC_VIRTIO_RING_ALIGN, vdev, false, +				 (void __force *)va, mic_notify, callback, +				 name); +	if (!vq) { +		err = -ENOMEM; +		goto unmap; +	} +	info = va + _vr_size; +	magic = ioread32(&info->magic); + +	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) { +		err = -EIO; +		goto unmap; +	} + +	/* Allocate and reassign used ring now */ +	mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + +					     sizeof(struct vring_used_elem) * +					     le16_to_cpu(config.num)); +	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, +					get_order(mvdev->used_size[index])); +	if (!used) { +		err = -ENOMEM; +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, err); +		goto del_vq; +	} +	iowrite64(virt_to_phys(used), &vqconfig->used_address); + +	/* +	 * To reassign the used ring here we are directly accessing +	 * struct vring_virtqueue which is a private data structure +	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in +	 * vring_new_virtqueue() would ensure that +	 *  (&vq->vring == (struct vring *) (&vq->vq + 1)); +	 */ +	vr = (struct vring *)(vq + 1); +	vr->used = used; + +	vq->priv = mvdev; +	return vq; +del_vq: +	vring_del_virtqueue(vq); +unmap: +	mic_card_unmap(mvdev->mdev, mvdev->vr[index]); +	return ERR_PTR(err); +} + +static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, +			struct virtqueue *vqs[], +			vq_callback_t *callbacks[], +			const char *names[]) +{ +	struct mic_vdev *mvdev = to_micvdev(vdev); +	struct mic_device_ctrl __iomem *dc = mvdev->dc; +	int i, err, retry; + +	/* We must have this many virtqueues. 
*/ +	if (nvqs > ioread8(&mvdev->desc->num_vq)) +		return -ENOENT; + +	for (i = 0; i < nvqs; ++i) { +		dev_dbg(mic_dev(mvdev), "%s: %d: %s\n", +			__func__, i, names[i]); +		vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]); +		if (IS_ERR(vqs[i])) { +			err = PTR_ERR(vqs[i]); +			goto error; +		} +	} + +	iowrite8(1, &dc->used_address_updated); +	/* +	 * Send an interrupt to the host to inform it that used +	 * rings have been re-assigned. +	 */ +	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); +	for (retry = 100; retry--;) { +		if (!ioread8(&dc->used_address_updated)) +			break; +		msleep(100); +	}; + +	dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry); +	if (!retry) { +		err = -ENODEV; +		goto error; +	} + +	return 0; +error: +	mic_del_vqs(vdev); +	return err; +} + +/* + * The config ops structure as defined by virtio config + */ +static struct virtio_config_ops mic_vq_config_ops = { +	.get_features = mic_get_features, +	.finalize_features = mic_finalize_features, +	.get = mic_get, +	.set = mic_set, +	.get_status = mic_get_status, +	.set_status = mic_set_status, +	.reset = mic_reset, +	.find_vqs = mic_find_vqs, +	.del_vqs = mic_del_vqs, +}; + +static irqreturn_t +mic_virtio_intr_handler(int irq, void *data) +{ +	struct mic_vdev *mvdev = data; +	struct virtqueue *vq; + +	mic_ack_interrupt(mvdev->mdev); +	list_for_each_entry(vq, &mvdev->vdev.vqs, list) +		vring_interrupt(0, vq); + +	return IRQ_HANDLED; +} + +static void mic_virtio_release_dev(struct device *_d) +{ +	/* +	 * No need for a release method similar to virtio PCI. +	 * Provide an empty one to avoid getting a warning from core. 
+	 */ +} + +/* + * adds a new device and register it with virtio + * appropriate drivers are loaded by the device model + */ +static int mic_add_device(struct mic_device_desc __iomem *d, +	unsigned int offset, struct mic_driver *mdrv) +{ +	struct mic_vdev *mvdev; +	int ret; +	int virtio_db; +	u8 type = ioread8(&d->type); + +	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL); +	if (!mvdev) { +		dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n", +			offset, type); +		return -ENOMEM; +	} + +	mvdev->mdev = &mdrv->mdev; +	mvdev->vdev.dev.parent = mdrv->dev; +	mvdev->vdev.dev.release = mic_virtio_release_dev; +	mvdev->vdev.id.device = type; +	mvdev->vdev.config = &mic_vq_config_ops; +	mvdev->desc = d; +	mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d); +	init_completion(&mvdev->reset_done); + +	virtio_db = mic_next_card_db(); +	mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler, +			"virtio intr", mvdev, virtio_db); +	if (IS_ERR(mvdev->virtio_cookie)) { +		ret = PTR_ERR(mvdev->virtio_cookie); +		goto kfree; +	} +	iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db); +	mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db); + +	ret = register_virtio_device(&mvdev->vdev); +	if (ret) { +		dev_err(mic_dev(mvdev), +			"Failed to register mic device %u type %u\n", +			offset, type); +		goto free_irq; +	} +	iowrite64((u64)mvdev, &mvdev->dc->vdev); +	dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n", +		__func__, offset, type, mvdev); + +	return 0; + +free_irq: +	mic_free_card_irq(mvdev->virtio_cookie, mvdev); +kfree: +	kfree(mvdev); +	return ret; +} + +/* + * match for a mic device with a specific desc pointer + */ +static int mic_match_desc(struct device *dev, void *data) +{ +	struct virtio_device *vdev = dev_to_virtio(dev); +	struct mic_vdev *mvdev = to_micvdev(vdev); + +	return mvdev->desc == (void __iomem *)data; +} + +static void mic_handle_config_change(struct mic_device_desc __iomem *d, +	unsigned int offset, struct 
mic_driver *mdrv) +{ +	struct mic_device_ctrl __iomem *dc +		= (void __iomem *)d + mic_aligned_desc_size(d); +	struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev); +	struct virtio_driver *drv; + +	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED) +		return; + +	dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__); +	drv = container_of(mvdev->vdev.dev.driver, +				struct virtio_driver, driver); +	if (drv->config_changed) +		drv->config_changed(&mvdev->vdev); +	iowrite8(1, &dc->guest_ack); +} + +/* + * removes a virtio device if a hot remove event has been + * requested by the host. + */ +static int mic_remove_device(struct mic_device_desc __iomem *d, +	unsigned int offset, struct mic_driver *mdrv) +{ +	struct mic_device_ctrl __iomem *dc +		= (void __iomem *)d + mic_aligned_desc_size(d); +	struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev); +	u8 status; +	int ret = -1; + +	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { +		dev_dbg(mdrv->dev, +			"%s %d config_change %d type %d mvdev %p\n", +			__func__, __LINE__, +			ioread8(&dc->config_change), ioread8(&d->type), mvdev); + +		status = ioread8(&d->status); +		reinit_completion(&mvdev->reset_done); +		unregister_virtio_device(&mvdev->vdev); +		mic_free_card_irq(mvdev->virtio_cookie, mvdev); +		if (status & VIRTIO_CONFIG_S_DRIVER_OK) +			wait_for_completion(&mvdev->reset_done); +		kfree(mvdev); +		iowrite8(1, &dc->guest_ack); +		dev_dbg(mdrv->dev, "%s %d guest_ack %d\n", +			__func__, __LINE__, ioread8(&dc->guest_ack)); +		ret = 0; +	} + +	return ret; +} + +#define REMOVE_DEVICES true + +static void mic_scan_devices(struct mic_driver *mdrv, bool remove) +{ +	s8 type; +	unsigned int i; +	struct mic_device_desc __iomem *d; +	struct mic_device_ctrl __iomem *dc; +	struct device *dev; +	int ret; + +	for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE; +		i += mic_total_desc_size(d)) { +		d = mdrv->dp + i; +		dc = (void __iomem *)d + mic_aligned_desc_size(d); +		/* 
+		 * This read barrier is paired with the corresponding write +		 * barrier on the host which is inserted before adding or +		 * removing a virtio device descriptor, by updating the type. +		 */ +		rmb(); +		type = ioread8(&d->type); + +		/* end of list */ +		if (type == 0) +			break; + +		if (type == -1) +			continue; + +		/* device already exists */ +		dev = device_find_child(mdrv->dev, (void __force *)d, +					mic_match_desc); +		if (dev) { +			if (remove) +				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, +					 &dc->config_change); +			put_device(dev); +			mic_handle_config_change(d, i, mdrv); +			ret = mic_remove_device(d, i, mdrv); +			if (!ret && !remove) +				iowrite8(-1, &d->type); +			if (remove) { +				iowrite8(0, &dc->config_change); +				iowrite8(0, &dc->guest_ack); +			} +			continue; +		} + +		/* new device */ +		dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n", +			__func__, __LINE__, d); +		if (!remove) +			mic_add_device(d, i, mdrv); +	} +} + +/* + * mic_hotplug_device tries to find changes in the device page. + */ +static void mic_hotplug_devices(struct work_struct *work) +{ +	struct mic_driver *mdrv = container_of(work, +		struct mic_driver, hotplug_work); + +	mic_scan_devices(mdrv, !REMOVE_DEVICES); +} + +/* + * Interrupt handler for hot plug/config changes etc. 
+ */ +static irqreturn_t +mic_extint_handler(int irq, void *data) +{ +	struct mic_driver *mdrv = (struct mic_driver *)data; + +	dev_dbg(mdrv->dev, "%s %d hotplug work\n", +		__func__, __LINE__); +	mic_ack_interrupt(&mdrv->mdev); +	schedule_work(&mdrv->hotplug_work); +	return IRQ_HANDLED; +} + +/* + * Init function for virtio + */ +int mic_devices_init(struct mic_driver *mdrv) +{ +	int rc; +	struct mic_bootparam __iomem *bootparam; +	int config_db; + +	INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices); +	mic_scan_devices(mdrv, !REMOVE_DEVICES); + +	config_db = mic_next_card_db(); +	virtio_config_cookie = mic_request_card_irq(mic_extint_handler, +			"virtio_config_intr", mdrv, config_db); +	if (IS_ERR(virtio_config_cookie)) { +		rc = PTR_ERR(virtio_config_cookie); +		goto exit; +	} + +	bootparam = mdrv->dp; +	iowrite8(config_db, &bootparam->h2c_config_db); +	return 0; +exit: +	return rc; +} + +/* + * Uninit function for virtio + */ +void mic_devices_uninit(struct mic_driver *mdrv) +{ +	struct mic_bootparam __iomem *bootparam = mdrv->dp; +	iowrite8(-1, &bootparam->h2c_config_db); +	mic_free_card_irq(virtio_config_cookie, mdrv); +	flush_work(&mdrv->hotplug_work); +	mic_scan_devices(mdrv, REMOVE_DEVICES); +} diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h new file mode 100644 index 00000000000..d0407ba53bb --- /dev/null +++ b/drivers/misc/mic/card/mic_virtio.h @@ -0,0 +1,76 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. + * + */ +#ifndef __MIC_CARD_VIRTIO_H +#define __MIC_CARD_VIRTIO_H + +#include <linux/mic_common.h> +#include "mic_device.h" + +/* + * 64 bit I/O access + */ +#ifndef ioread64 +#define ioread64 readq +#endif +#ifndef iowrite64 +#define iowrite64 writeq +#endif + +static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) +{ +	return sizeof(*desc) +		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) +		+ ioread8(&desc->feature_len) * 2 +		+ ioread8(&desc->config_len); +} + +static inline struct mic_vqconfig __iomem * +mic_vq_config(struct mic_device_desc __iomem *desc) +{ +	return (struct mic_vqconfig __iomem *)(desc + 1); +} + +static inline __u8 __iomem * +mic_vq_features(struct mic_device_desc __iomem *desc) +{ +	return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq)); +} + +static inline __u8 __iomem * +mic_vq_configspace(struct mic_device_desc __iomem *desc) +{ +	return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2; +} +static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) +{ +	return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); +} + +int mic_devices_init(struct mic_driver *mdrv); +void mic_devices_uninit(struct mic_driver *mdrv); + +#endif diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c new file mode 100644 index 00000000000..2868945c9a4 --- /dev/null +++ b/drivers/misc/mic/card/mic_x100.c @@ -0,0 +1,256 @@ +/* + * Intel MIC Platform Software Stack (MPSS) 
+ * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. + * + */ +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_x100.h" + +static const char mic_driver_name[] = "mic"; + +static struct mic_driver g_drv; + +/** + * mic_read_spad - read from the scratchpad register + * @mdev: pointer to mic_device instance + * @idx: index to scratchpad register, 0 based + * + * This function allows reading of the 32bit scratchpad register. + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +u32 mic_read_spad(struct mic_device *mdev, unsigned int idx) +{ +	return mic_mmio_read(&mdev->mmio, +		MIC_X100_SBOX_BASE_ADDRESS + +		MIC_X100_SBOX_SPAD0 + idx * 4); +} + +/** + * __mic_send_intr - Send interrupt to Host. + * @mdev: pointer to mic_device instance + * @doorbell: Doorbell number. 
+ */ +void mic_send_intr(struct mic_device *mdev, int doorbell) +{ +	struct mic_mw *mw = &mdev->mmio; + +	if (doorbell > MIC_X100_MAX_DOORBELL_IDX) +		return; +	/* Ensure that the interrupt is ordered w.r.t previous stores. */ +	wmb(); +	mic_mmio_write(mw, MIC_X100_SBOX_SDBIC0_DBREQ_BIT, +		       MIC_X100_SBOX_BASE_ADDRESS + +		       (MIC_X100_SBOX_SDBIC0 + (4 * doorbell))); +} + +/** + * mic_ack_interrupt - Device specific interrupt handling. + * @mdev: pointer to mic_device instance + * + * Returns: bitmask of doorbell events triggered. + */ +u32 mic_ack_interrupt(struct mic_device *mdev) +{ +	return 0; +} + +static inline int mic_get_sbox_irq(int db) +{ +	return MIC_X100_IRQ_BASE + db; +} + +static inline int mic_get_rdmasr_irq(int index) +{ +	return  MIC_X100_RDMASR_IRQ_BASE + index; +} + +/** + * mic_hw_intr_init - Initialize h/w specific interrupt + * information. + * @mdrv: pointer to mic_driver + */ +void mic_hw_intr_init(struct mic_driver *mdrv) +{ +	mdrv->intr_info.num_intr = MIC_X100_NUM_SBOX_IRQ + +				MIC_X100_NUM_RDMASR_IRQ; +} + +/** + * mic_db_to_irq - Retrieve irq number corresponding to a doorbell. + * @mdrv: pointer to mic_driver + * @db: The doorbell obtained for which the irq is needed. Doorbell + * may correspond to an sbox doorbell or an rdmasr index. + * + * Returns the irq corresponding to the doorbell. + */ +int mic_db_to_irq(struct mic_driver *mdrv, int db) +{ +	int rdmasr_index; +	if (db < MIC_X100_NUM_SBOX_IRQ) { +		return mic_get_sbox_irq(db); +	} else { +		rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ + +			MIC_X100_RDMASR_IRQ_BASE; +		return mic_get_rdmasr_irq(rdmasr_index); +	} +} + +/* + * mic_card_map - Allocate virtual address for a remote memory region. + * @mdev: pointer to mic_device instance. + * @addr: Remote DMA address. + * @size: Size of the region. + * + * Returns: Virtual address backing the remote memory region. 
+ */ +void __iomem * +mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size) +{ +	return ioremap(addr, size); +} + +/* + * mic_card_unmap - Unmap the virtual address for a remote memory region. + * @mdev: pointer to mic_device instance. + * @addr: Virtual address for remote memory region. + * + * Returns: None. + */ +void mic_card_unmap(struct mic_device *mdev, void __iomem *addr) +{ +	iounmap(addr); +} + +static int __init mic_probe(struct platform_device *pdev) +{ +	struct mic_driver *mdrv = &g_drv; +	struct mic_device *mdev = &mdrv->mdev; +	int rc = 0; + +	mdrv->dev = &pdev->dev; +	snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name); + +	mdev->mmio.pa = MIC_X100_MMIO_BASE; +	mdev->mmio.len = MIC_X100_MMIO_LEN; +	mdev->mmio.va = ioremap(MIC_X100_MMIO_BASE, MIC_X100_MMIO_LEN); +	if (!mdev->mmio.va) { +		dev_err(&pdev->dev, "Cannot remap MMIO BAR\n"); +		rc = -EIO; +		goto done; +	} +	mic_hw_intr_init(mdrv); +	rc = mic_driver_init(mdrv); +	if (rc) { +		dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc); +		goto iounmap; +	} +done: +	return rc; +iounmap: +	iounmap(mdev->mmio.va); +	return rc; +} + +static int mic_remove(struct platform_device *pdev) +{ +	struct mic_driver *mdrv = &g_drv; +	struct mic_device *mdev = &mdrv->mdev; + +	mic_driver_uninit(mdrv); +	iounmap(mdev->mmio.va); +	return 0; +} + +static void mic_platform_shutdown(struct platform_device *pdev) +{ +	mic_remove(pdev); +} + +static struct platform_device mic_platform_dev = { +	.name = mic_driver_name, +	.id   = 0, +	.num_resources = 0, +}; + +static struct platform_driver __refdata mic_platform_driver = { +	.probe = mic_probe, +	.remove = mic_remove, +	.shutdown = mic_platform_shutdown, +	.driver         = { +		.name   = mic_driver_name, +		.owner	= THIS_MODULE, +	}, +}; + +static int __init mic_init(void) +{ +	int ret; +	struct cpuinfo_x86 *c = &cpu_data(0); + +	if (!(c->x86 == 11 && c->x86_model == 1)) { +		ret = -ENODEV; +		pr_err("%s not running on X100 ret %d\n", 
__func__, ret); +		goto done; +	} + +	mic_init_card_debugfs(); +	ret = platform_device_register(&mic_platform_dev); +	if (ret) { +		pr_err("platform_device_register ret %d\n", ret); +		goto cleanup_debugfs; +	} +	ret = platform_driver_register(&mic_platform_driver); +	if (ret) { +		pr_err("platform_driver_register ret %d\n", ret); +		goto device_unregister; +	} +	return ret; + +device_unregister: +	platform_device_unregister(&mic_platform_dev); +cleanup_debugfs: +	mic_exit_card_debugfs(); +done: +	return ret; +} + +static void __exit mic_exit(void) +{ +	platform_driver_unregister(&mic_platform_driver); +	platform_device_unregister(&mic_platform_dev); +	mic_exit_card_debugfs(); +} + +module_init(mic_init); +module_exit(mic_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) MIC X100 Card driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h new file mode 100644 index 00000000000..d66ea55639c --- /dev/null +++ b/drivers/misc/mic/card/mic_x100.h @@ -0,0 +1,48 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. 
Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. + * + */ +#ifndef _MIC_X100_CARD_H_ +#define _MIC_X100_CARD_H_ + +#define MIC_X100_MMIO_BASE 0x08007C0000ULL +#define MIC_X100_MMIO_LEN 0x00020000ULL +#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000ULL + +#define MIC_X100_SBOX_SPAD0 0x0000AB20 +#define MIC_X100_SBOX_SDBIC0 0x0000CC90 +#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000 +#define MIC_X100_SBOX_RDMASR0	0x0000B180 + +#define MIC_X100_MAX_DOORBELL_IDX 8 + +#define MIC_X100_NUM_SBOX_IRQ 8 +#define MIC_X100_NUM_RDMASR_IRQ 8 +#define MIC_X100_SBOX_IRQ_BASE 0 +#define MIC_X100_RDMASR_IRQ_BASE 17 + +#define MIC_X100_IRQ_BASE 26 + +#endif diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h new file mode 100644 index 00000000000..92999c2bbf8 --- /dev/null +++ b/drivers/misc/mic/common/mic_dev.h @@ -0,0 +1,51 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC driver. + * + */ +#ifndef __MIC_DEV_H__ +#define __MIC_DEV_H__ + +/** + * struct mic_mw - MIC memory window + * + * @pa: Base physical address. + * @va: Base ioremap'd virtual address. + * @len: Size of the memory window. 
+ */ +struct mic_mw { +	phys_addr_t pa; +	void __iomem *va; +	resource_size_t len; +}; + +/* + * Scratch pad register offsets used by the host to communicate + * device page DMA address to the card. + */ +#define MIC_DPLO_SPAD 14 +#define MIC_DPHI_SPAD 15 + +/* + * These values are supposed to be in the config_change field of the + * device page when the host sends a config change interrupt to the card. + */ +#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1 +#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2 + +#endif diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile new file mode 100644 index 00000000000..c2197f99939 --- /dev/null +++ b/drivers/misc/mic/host/Makefile @@ -0,0 +1,14 @@ +# +# Makefile - Intel MIC Linux driver. +# Copyright(c) 2013, Intel Corporation. +# +obj-$(CONFIG_INTEL_MIC_HOST) += mic_host.o +mic_host-objs := mic_main.o +mic_host-objs += mic_x100.o +mic_host-objs += mic_sysfs.o +mic_host-objs += mic_smpt.o +mic_host-objs += mic_intr.o +mic_host-objs += mic_boot.o +mic_host-objs += mic_debugfs.o +mic_host-objs += mic_fops.o +mic_host-objs += mic_virtio.o diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c new file mode 100644 index 00000000000..b75c6b5cc20 --- /dev/null +++ b/drivers/misc/mic/host/mic_boot.c @@ -0,0 +1,300 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ * + */ +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/pci.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_smpt.h" +#include "mic_virtio.h" + +/** + * mic_reset - Reset the MIC device. + * @mdev: pointer to mic_device instance + */ +static void mic_reset(struct mic_device *mdev) +{ +	int i; + +#define MIC_RESET_TO (45) + +	reinit_completion(&mdev->reset_wait); +	mdev->ops->reset_fw_ready(mdev); +	mdev->ops->reset(mdev); + +	for (i = 0; i < MIC_RESET_TO; i++) { +		if (mdev->ops->is_fw_ready(mdev)) +			goto done; +		/* +		 * Resets typically take 10s of seconds to complete. +		 * Since an MMIO read is required to check if the +		 * firmware is ready or not, a 1 second delay works nicely. +		 */ +		msleep(1000); +	} +	mic_set_state(mdev, MIC_RESET_FAILED); +done: +	complete_all(&mdev->reset_wait); +} + +/* Initialize the MIC bootparams */ +void mic_bootparam_init(struct mic_device *mdev) +{ +	struct mic_bootparam *bootparam = mdev->dp; + +	bootparam->magic = cpu_to_le32(MIC_MAGIC); +	bootparam->c2h_shutdown_db = mdev->shutdown_db; +	bootparam->h2c_shutdown_db = -1; +	bootparam->h2c_config_db = -1; +	bootparam->shutdown_status = 0; +	bootparam->shutdown_card = 0; +} + +/** + * mic_start - Start the MIC. + * @mdev: pointer to mic_device instance + * @buf: buffer containing boot string including firmware/ramdisk path. + * + * This function prepares an MIC for boot and initiates boot. + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +int mic_start(struct mic_device *mdev, const char *buf) +{ +	int rc; +	mutex_lock(&mdev->mic_mutex); +retry: +	if (MIC_OFFLINE != mdev->state) { +		rc = -EINVAL; +		goto unlock_ret; +	} +	if (!mdev->ops->is_fw_ready(mdev)) { +		mic_reset(mdev); +		/* +		 * The state will either be MIC_OFFLINE if the reset succeeded +		 * or MIC_RESET_FAILED if the firmware reset failed. 
+		 */ +		goto retry; +	} +	rc = mdev->ops->load_mic_fw(mdev, buf); +	if (rc) +		goto unlock_ret; +	mic_smpt_restore(mdev); +	mic_intr_restore(mdev); +	mdev->intr_ops->enable_interrupts(mdev); +	mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr); +	mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); +	mdev->ops->send_firmware_intr(mdev); +	mic_set_state(mdev, MIC_ONLINE); +unlock_ret: +	mutex_unlock(&mdev->mic_mutex); +	return rc; +} + +/** + * mic_stop - Prepare the MIC for reset and trigger reset. + * @mdev: pointer to mic_device instance + * @force: force a MIC to reset even if it is already offline. + * + * RETURNS: None. + */ +void mic_stop(struct mic_device *mdev, bool force) +{ +	mutex_lock(&mdev->mic_mutex); +	if (MIC_OFFLINE != mdev->state || force) { +		mic_virtio_reset_devices(mdev); +		mic_bootparam_init(mdev); +		mic_reset(mdev); +		if (MIC_RESET_FAILED == mdev->state) +			goto unlock; +		mic_set_shutdown_status(mdev, MIC_NOP); +		if (MIC_SUSPENDED != mdev->state) +			mic_set_state(mdev, MIC_OFFLINE); +	} +unlock: +	mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_shutdown - Initiate MIC shutdown. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. + */ +void mic_shutdown(struct mic_device *mdev) +{ +	struct mic_bootparam *bootparam = mdev->dp; +	s8 db = bootparam->h2c_shutdown_db; + +	mutex_lock(&mdev->mic_mutex); +	if (MIC_ONLINE == mdev->state && db != -1) { +		bootparam->shutdown_card = 1; +		mdev->ops->send_intr(mdev, db); +		mic_set_state(mdev, MIC_SHUTTING_DOWN); +	} +	mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_shutdown_work - Handle shutdown interrupt from MIC. + * @work: The work structure. + * + * This work is scheduled whenever the host has received a shutdown + * interrupt from the MIC. 
+ */ +void mic_shutdown_work(struct work_struct *work) +{ +	struct mic_device *mdev = container_of(work, struct mic_device, +			shutdown_work); +	struct mic_bootparam *bootparam = mdev->dp; + +	mutex_lock(&mdev->mic_mutex); +	mic_set_shutdown_status(mdev, bootparam->shutdown_status); +	bootparam->shutdown_status = 0; + +	/* +	 * if state is MIC_SUSPENDED, OSPM suspend is in progress. We do not +	 * change the state here so as to prevent users from booting the card +	 * during and after the suspend operation. +	 */ +	if (MIC_SHUTTING_DOWN != mdev->state && +	    MIC_SUSPENDED != mdev->state) +		mic_set_state(mdev, MIC_SHUTTING_DOWN); +	mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_reset_trigger_work - Trigger MIC reset. + * @work: The work structure. + * + * This work is scheduled whenever the host wants to reset the MIC. + */ +void mic_reset_trigger_work(struct work_struct *work) +{ +	struct mic_device *mdev = container_of(work, struct mic_device, +			reset_trigger_work); + +	mic_stop(mdev, false); +} + +/** + * mic_complete_resume - Complete MIC Resume after an OSPM suspend/hibernate + * event. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. + */ +void mic_complete_resume(struct mic_device *mdev) +{ +	if (mdev->state != MIC_SUSPENDED) { +		dev_warn(mdev->sdev->parent, "state %d should be %d\n", +			 mdev->state, MIC_SUSPENDED); +		return; +	} + +	/* Make sure firmware is ready */ +	if (!mdev->ops->is_fw_ready(mdev)) +		mic_stop(mdev, true); + +	mutex_lock(&mdev->mic_mutex); +	mic_set_state(mdev, MIC_OFFLINE); +	mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_prepare_suspend - Handle suspend notification for the MIC device. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. + */ +void mic_prepare_suspend(struct mic_device *mdev) +{ +	int rc; + +#define MIC_SUSPEND_TIMEOUT (60 * HZ) + +	mutex_lock(&mdev->mic_mutex); +	switch (mdev->state) { +	case MIC_OFFLINE: +		/* +		 * Card is already offline. 
Set state to MIC_SUSPENDED +		 * to prevent users from booting the card. +		 */ +		mic_set_state(mdev, MIC_SUSPENDED); +		mutex_unlock(&mdev->mic_mutex); +		break; +	case MIC_ONLINE: +		/* +		 * Card is online. Set state to MIC_SUSPENDING and notify +		 * MIC user space daemon which will issue card +		 * shutdown and reset. +		 */ +		mic_set_state(mdev, MIC_SUSPENDING); +		mutex_unlock(&mdev->mic_mutex); +		rc = wait_for_completion_timeout(&mdev->reset_wait, +						MIC_SUSPEND_TIMEOUT); +		/* Force reset the card if the shutdown completion timed out */ +		if (!rc) { +			mutex_lock(&mdev->mic_mutex); +			mic_set_state(mdev, MIC_SUSPENDED); +			mutex_unlock(&mdev->mic_mutex); +			mic_stop(mdev, true); +		} +		break; +	case MIC_SHUTTING_DOWN: +		/* +		 * Card is shutting down. Set state to MIC_SUSPENDED +		 * to prevent further boot of the card. +		 */ +		mic_set_state(mdev, MIC_SUSPENDED); +		mutex_unlock(&mdev->mic_mutex); +		rc = wait_for_completion_timeout(&mdev->reset_wait, +						MIC_SUSPEND_TIMEOUT); +		/* Force reset the card if the shutdown completion timed out */ +		if (!rc) +			mic_stop(mdev, true); +		break; +	default: +		mutex_unlock(&mdev->mic_mutex); +		break; +	} +} + +/** + * mic_suspend - Initiate MIC suspend. Suspend merely issues card shutdown. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. 
+ */ +void mic_suspend(struct mic_device *mdev) +{ +	struct mic_bootparam *bootparam = mdev->dp; +	s8 db = bootparam->h2c_shutdown_db; + +	mutex_lock(&mdev->mic_mutex); +	if (MIC_SUSPENDING == mdev->state && db != -1) { +		bootparam->shutdown_card = 1; +		mdev->ops->send_intr(mdev, db); +		mic_set_state(mdev, MIC_SUSPENDED); +	} +	mutex_unlock(&mdev->mic_mutex); +} diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c new file mode 100644 index 00000000000..028ba5d6fd1 --- /dev/null +++ b/drivers/misc/mic/host/mic_debugfs.c @@ -0,0 +1,491 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/debugfs.h> +#include <linux/pci.h> +#include <linux/seq_file.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_smpt.h" +#include "mic_virtio.h" + +/* Debugfs parent dir */ +static struct dentry *mic_dbg; + +/** + * mic_log_buf_show - Display MIC kernel log buffer. + * + * log_buf addr/len is read from System.map by user space + * and populated in sysfs entries. 
+ */ +static int mic_log_buf_show(struct seq_file *s, void *unused) +{ +	void __iomem *log_buf_va; +	int __iomem *log_buf_len_va; +	struct mic_device *mdev = s->private; +	void *kva; +	int size; +	unsigned long aper_offset; + +	if (!mdev || !mdev->log_buf_addr || !mdev->log_buf_len) +		goto done; +	/* +	 * Card kernel will never be relocated and any kernel text/data mapping +	 * can be translated to phys address by subtracting __START_KERNEL_map. +	 */ +	aper_offset = (unsigned long)mdev->log_buf_len - __START_KERNEL_map; +	log_buf_len_va = mdev->aper.va + aper_offset; +	aper_offset = (unsigned long)mdev->log_buf_addr - __START_KERNEL_map; +	log_buf_va = mdev->aper.va + aper_offset; +	size = ioread32(log_buf_len_va); + +	kva = kmalloc(size, GFP_KERNEL); +	if (!kva) +		goto done; +	mutex_lock(&mdev->mic_mutex); +	memcpy_fromio(kva, log_buf_va, size); +	switch (mdev->state) { +	case MIC_ONLINE: +		/* Fall through */ +	case MIC_SHUTTING_DOWN: +		seq_write(s, kva, size); +		break; +	default: +		break; +	} +	mutex_unlock(&mdev->mic_mutex); +	kfree(kva); +done: +	return 0; +} + +static int mic_log_buf_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_log_buf_show, inode->i_private); +} + +static int mic_log_buf_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static const struct file_operations log_buf_ops = { +	.owner   = THIS_MODULE, +	.open    = mic_log_buf_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_log_buf_release +}; + +static int mic_smpt_show(struct seq_file *s, void *pos) +{ +	int i; +	struct mic_device *mdev = s->private; +	unsigned long flags; + +	seq_printf(s, "MIC %-2d |%-10s| %-14s %-10s\n", +		   mdev->id, "SMPT entry", "SW DMA addr", "RefCount"); +	seq_puts(s, "====================================================\n"); + +	if (mdev->smpt) { +		struct mic_smpt_info *smpt_info = mdev->smpt; +		spin_lock_irqsave(&smpt_info->smpt_lock, flags); +		for (i = 0; i 
< smpt_info->info.num_reg; i++) { +			seq_printf(s, "%9s|%-10d| %-#14llx %-10lld\n", +				   " ",  i, smpt_info->entry[i].dma_addr, +				   smpt_info->entry[i].ref_count); +		} +		spin_unlock_irqrestore(&smpt_info->smpt_lock, flags); +	} +	seq_puts(s, "====================================================\n"); +	return 0; +} + +static int mic_smpt_debug_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_smpt_show, inode->i_private); +} + +static int mic_smpt_debug_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static const struct file_operations smpt_file_ops = { +	.owner   = THIS_MODULE, +	.open    = mic_smpt_debug_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_smpt_debug_release +}; + +static int mic_soft_reset_show(struct seq_file *s, void *pos) +{ +	struct mic_device *mdev = s->private; + +	mic_stop(mdev, true); +	return 0; +} + +static int mic_soft_reset_debug_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_soft_reset_show, inode->i_private); +} + +static int mic_soft_reset_debug_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static const struct file_operations soft_reset_ops = { +	.owner   = THIS_MODULE, +	.open    = mic_soft_reset_debug_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_soft_reset_debug_release +}; + +static int mic_post_code_show(struct seq_file *s, void *pos) +{ +	struct mic_device *mdev = s->private; +	u32 reg = mdev->ops->get_postcode(mdev); + +	seq_printf(s, "%c%c", reg & 0xff, (reg >> 8) & 0xff); +	return 0; +} + +static int mic_post_code_debug_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_post_code_show, inode->i_private); +} + +static int mic_post_code_debug_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static const struct file_operations post_code_ops = { +	
.owner   = THIS_MODULE, +	.open    = mic_post_code_debug_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_post_code_debug_release +}; + +static int mic_dp_show(struct seq_file *s, void *pos) +{ +	struct mic_device *mdev = s->private; +	struct mic_device_desc *d; +	struct mic_device_ctrl *dc; +	struct mic_vqconfig *vqconfig; +	__u32 *features; +	__u8 *config; +	struct mic_bootparam *bootparam = mdev->dp; +	int i, j; + +	seq_printf(s, "Bootparam: magic 0x%x\n", +		   bootparam->magic); +	seq_printf(s, "Bootparam: h2c_shutdown_db %d\n", +		   bootparam->h2c_shutdown_db); +	seq_printf(s, "Bootparam: h2c_config_db %d\n", +		   bootparam->h2c_config_db); +	seq_printf(s, "Bootparam: c2h_shutdown_db %d\n", +		   bootparam->c2h_shutdown_db); +	seq_printf(s, "Bootparam: shutdown_status %d\n", +		   bootparam->shutdown_status); +	seq_printf(s, "Bootparam: shutdown_card %d\n", +		   bootparam->shutdown_card); + +	for (i = sizeof(*bootparam); i < MIC_DP_SIZE; +	     i += mic_total_desc_size(d)) { +		d = mdev->dp + i; +		dc = (void *)d + mic_aligned_desc_size(d); + +		/* end of list */ +		if (d->type == 0) +			break; + +		if (d->type == -1) +			continue; + +		seq_printf(s, "Type %d ", d->type); +		seq_printf(s, "Num VQ %d ", d->num_vq); +		seq_printf(s, "Feature Len %d\n", d->feature_len); +		seq_printf(s, "Config Len %d ", d->config_len); +		seq_printf(s, "Shutdown Status %d\n", d->status); + +		for (j = 0; j < d->num_vq; j++) { +			vqconfig = mic_vq_config(d) + j; +			seq_printf(s, "vqconfig[%d]: ", j); +			seq_printf(s, "address 0x%llx ", vqconfig->address); +			seq_printf(s, "num %d ", vqconfig->num); +			seq_printf(s, "used address 0x%llx\n", +				   vqconfig->used_address); +		} + +		features = (__u32 *)mic_vq_features(d); +		seq_printf(s, "Features: Host 0x%x ", features[0]); +		seq_printf(s, "Guest 0x%x\n", features[1]); + +		config = mic_vq_configspace(d); +		for (j = 0; j < d->config_len; j++) +			seq_printf(s, "config[%d]=%d\n", j, config[j]); + +	
	seq_puts(s, "Device control:\n"); +		seq_printf(s, "Config Change %d ", dc->config_change); +		seq_printf(s, "Vdev reset %d\n", dc->vdev_reset); +		seq_printf(s, "Guest Ack %d ", dc->guest_ack); +		seq_printf(s, "Host ack %d\n", dc->host_ack); +		seq_printf(s, "Used address updated %d ", +			   dc->used_address_updated); +		seq_printf(s, "Vdev 0x%llx\n", dc->vdev); +		seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db); +		seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db); +	} + +	return 0; +} + +static int mic_dp_debug_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_dp_show, inode->i_private); +} + +static int mic_dp_debug_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static const struct file_operations dp_ops = { +	.owner   = THIS_MODULE, +	.open    = mic_dp_debug_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_dp_debug_release +}; + +static int mic_vdev_info_show(struct seq_file *s, void *unused) +{ +	struct mic_device *mdev = s->private; +	struct list_head *pos, *tmp; +	struct mic_vdev *mvdev; +	int i, j; + +	mutex_lock(&mdev->mic_mutex); +	list_for_each_safe(pos, tmp, &mdev->vdev_list) { +		mvdev = list_entry(pos, struct mic_vdev, list); +		seq_printf(s, "VDEV type %d state %s in %ld out %ld\n", +			   mvdev->virtio_id, +			   mic_vdevup(mvdev) ? 
"UP" : "DOWN", +			   mvdev->in_bytes, +			   mvdev->out_bytes); +		for (i = 0; i < MIC_MAX_VRINGS; i++) { +			struct vring_desc *desc; +			struct vring_avail *avail; +			struct vring_used *used; +			struct mic_vringh *mvr = &mvdev->mvr[i]; +			struct vringh *vrh = &mvr->vrh; +			int num = vrh->vring.num; +			if (!num) +				continue; +			desc = vrh->vring.desc; +			seq_printf(s, "vring i %d avail_idx %d", +				   i, mvr->vring.info->avail_idx & (num - 1)); +			seq_printf(s, " vring i %d avail_idx %d\n", +				   i, mvr->vring.info->avail_idx); +			seq_printf(s, "vrh i %d weak_barriers %d", +				   i, vrh->weak_barriers); +			seq_printf(s, " last_avail_idx %d last_used_idx %d", +				   vrh->last_avail_idx, vrh->last_used_idx); +			seq_printf(s, " completed %d\n", vrh->completed); +			for (j = 0; j < num; j++) { +				seq_printf(s, "desc[%d] addr 0x%llx len %d", +					   j, desc->addr, desc->len); +				seq_printf(s, " flags 0x%x next %d\n", +					   desc->flags, desc->next); +				desc++; +			} +			avail = vrh->vring.avail; +			seq_printf(s, "avail flags 0x%x idx %d\n", +				   avail->flags, avail->idx & (num - 1)); +			seq_printf(s, "avail flags 0x%x idx %d\n", +				   avail->flags, avail->idx); +			for (j = 0; j < num; j++) +				seq_printf(s, "avail ring[%d] %d\n", +					   j, avail->ring[j]); +			used = vrh->vring.used; +			seq_printf(s, "used flags 0x%x idx %d\n", +				   used->flags, used->idx & (num - 1)); +			seq_printf(s, "used flags 0x%x idx %d\n", +				   used->flags, used->idx); +			for (j = 0; j < num; j++) +				seq_printf(s, "used ring[%d] id %d len %d\n", +					   j, used->ring[j].id, +					   used->ring[j].len); +		} +	} +	mutex_unlock(&mdev->mic_mutex); + +	return 0; +} + +static int mic_vdev_info_debug_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_vdev_info_show, inode->i_private); +} + +static int mic_vdev_info_debug_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static 
const struct file_operations vdev_info_ops = { +	.owner   = THIS_MODULE, +	.open    = mic_vdev_info_debug_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_vdev_info_debug_release +}; + +static int mic_msi_irq_info_show(struct seq_file *s, void *pos) +{ +	struct mic_device *mdev  = s->private; +	int reg; +	int i, j; +	u16 entry; +	u16 vector; +	struct pci_dev *pdev = container_of(mdev->sdev->parent, +		struct pci_dev, dev); + +	if (pci_dev_msi_enabled(pdev)) { +		for (i = 0; i < mdev->irq_info.num_vectors; i++) { +			if (pdev->msix_enabled) { +				entry = mdev->irq_info.msix_entries[i].entry; +				vector = mdev->irq_info.msix_entries[i].vector; +			} else { +				entry = 0; +				vector = pdev->irq; +			} + +			reg = mdev->intr_ops->read_msi_to_src_map(mdev, entry); + +			seq_printf(s, "%s %-10d %s %-10d MXAR[%d]: %08X\n", +				   "IRQ:", vector, "Entry:", entry, i, reg); + +			seq_printf(s, "%-10s", "offset:"); +			for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--) +				seq_printf(s, "%4d ", j); +			seq_puts(s, "\n"); + + +			seq_printf(s, "%-10s", "count:"); +			for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--) +				seq_printf(s, "%4d ", +					   (mdev->irq_info.mic_msi_map[i] & +					   BIT(j)) ? 1 : 0); +			seq_puts(s, "\n\n"); +		} +	} else { +		seq_puts(s, "MSI/MSIx interrupts not enabled\n"); +	} + +	return 0; +} + +static int mic_msi_irq_info_debug_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mic_msi_irq_info_show, inode->i_private); +} + +static int +mic_msi_irq_info_debug_release(struct inode *inode, struct file *file) +{ +	return single_release(inode, file); +} + +static const struct file_operations msi_irq_info_ops = { +	.owner   = THIS_MODULE, +	.open    = mic_msi_irq_info_debug_open, +	.read    = seq_read, +	.llseek  = seq_lseek, +	.release = mic_msi_irq_info_debug_release +}; + +/** + * mic_create_debug_dir - Initialize MIC debugfs entries. 
+ */ +void mic_create_debug_dir(struct mic_device *mdev) +{ +	if (!mic_dbg) +		return; + +	mdev->dbg_dir = debugfs_create_dir(dev_name(mdev->sdev), mic_dbg); +	if (!mdev->dbg_dir) +		return; + +	debugfs_create_file("log_buf", 0444, mdev->dbg_dir, mdev, &log_buf_ops); + +	debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev, &smpt_file_ops); + +	debugfs_create_file("soft_reset", 0444, mdev->dbg_dir, mdev, +			    &soft_reset_ops); + +	debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, +			    &post_code_ops); + +	debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops); + +	debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev, +			    &vdev_info_ops); + +	debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev, +			    &msi_irq_info_ops); +} + +/** + * mic_delete_debug_dir - Uninitialize MIC debugfs entries. + */ +void mic_delete_debug_dir(struct mic_device *mdev) +{ +	if (!mdev->dbg_dir) +		return; + +	debugfs_remove_recursive(mdev->dbg_dir); +} + +/** + * mic_init_debugfs - Initialize global debugfs entry. + */ +void __init mic_init_debugfs(void) +{ +	mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); +	if (!mic_dbg) +		pr_err("can't create debugfs dir\n"); +} + +/** + * mic_exit_debugfs - Uninitialize global debugfs entry + */ +void mic_exit_debugfs(void) +{ +	debugfs_remove(mic_dbg); +} diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h new file mode 100644 index 00000000000..0398c696d25 --- /dev/null +++ b/drivers/misc/mic/host/mic_device.h @@ -0,0 +1,207 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef _MIC_DEVICE_H_ +#define _MIC_DEVICE_H_ + +#include <linux/cdev.h> +#include <linux/idr.h> +#include <linux/notifier.h> +#include <linux/irqreturn.h> + +#include "mic_intr.h" + +/* The maximum number of MIC devices supported in a single host system. */ +#define MIC_MAX_NUM_DEVS 256 + +/** + * enum mic_hw_family - The hardware family to which a device belongs. + */ +enum mic_hw_family { +	MIC_FAMILY_X100 = 0, +	MIC_FAMILY_UNKNOWN +}; + +/** + * enum mic_stepping - MIC stepping ids. + */ +enum mic_stepping { +	MIC_A0_STEP = 0x0, +	MIC_B0_STEP = 0x10, +	MIC_B1_STEP = 0x11, +	MIC_C0_STEP = 0x20, +}; + +/** + * struct mic_device -  MIC device information for each card. + * + * @mmio: MMIO bar information. + * @aper: Aperture bar information. + * @family: The MIC family to which this device belongs. + * @ops: MIC HW specific operations. + * @id: The unique device id for this MIC device. + * @stepping: Stepping ID. + * @attr_group: Pointer to list of sysfs attribute groups. + * @sdev: Device for sysfs entries. + * @mic_mutex: Mutex for synchronizing access to mic_device. + * @intr_ops: HW specific interrupt operations. + * @smpt_ops: Hardware specific SMPT operations. + * @smpt: MIC SMPT information. + * @intr_info: H/W specific interrupt information. + * @irq_info: The OS specific irq information + * @dbg_dir: debugfs directory of this MIC device. + * @cmdline: Kernel command line. + * @firmware: Firmware file name. + * @ramdisk: Ramdisk file name. + * @bootmode: Boot mode i.e. "linux" or "elf" for flash updates. + * @bootaddr: MIC boot address. 
+ * @reset_trigger_work: Work for triggering reset requests. + * @shutdown_work: Work for handling shutdown interrupts. + * @state: MIC state. + * @shutdown_status: MIC status reported by card for shutdown/crashes. + * @state_sysfs: Sysfs dirent for notifying ring 3 about MIC state changes. + * @reset_wait: Waitqueue for sleeping while reset completes. + * @log_buf_addr: Log buffer address for MIC. + * @log_buf_len: Log buffer length address for MIC. + * @dp: virtio device page + * @dp_dma_addr: virtio device page DMA address. + * @shutdown_db: shutdown doorbell. + * @shutdown_cookie: shutdown cookie. + * @cdev: Character device for MIC. + * @vdev_list: list of virtio devices. + * @pm_notifier: Handles PM notifications from the OS. + */ +struct mic_device { +	struct mic_mw mmio; +	struct mic_mw aper; +	enum mic_hw_family family; +	struct mic_hw_ops *ops; +	int id; +	enum mic_stepping stepping; +	const struct attribute_group **attr_group; +	struct device *sdev; +	struct mutex mic_mutex; +	struct mic_hw_intr_ops *intr_ops; +	struct mic_smpt_ops *smpt_ops; +	struct mic_smpt_info *smpt; +	struct mic_intr_info *intr_info; +	struct mic_irq_info irq_info; +	struct dentry *dbg_dir; +	char *cmdline; +	char *firmware; +	char *ramdisk; +	char *bootmode; +	u32 bootaddr; +	struct work_struct reset_trigger_work; +	struct work_struct shutdown_work; +	u8 state; +	u8 shutdown_status; +	struct kernfs_node *state_sysfs; +	struct completion reset_wait; +	void *log_buf_addr; +	int *log_buf_len; +	void *dp; +	dma_addr_t dp_dma_addr; +	int shutdown_db; +	struct mic_irq *shutdown_cookie; +	struct cdev cdev; +	struct list_head vdev_list; +	struct notifier_block pm_notifier; +}; + +/** + * struct mic_hw_ops - MIC HW specific operations. + * @aper_bar: Aperture bar resource number. + * @mmio_bar: MMIO bar resource number. + * @read_spad: Read from scratch pad register. + * @write_spad: Write to scratch pad register. + * @send_intr: Send an interrupt for a particular doorbell on the card. 
+ * @ack_interrupt: Hardware specific operations to ack the h/w on + * receipt of an interrupt. + * @intr_workarounds: Hardware specific workarounds needed after + * handling an interrupt. + * @reset: Reset the remote processor. + * @reset_fw_ready: Reset firmware ready field. + * @is_fw_ready: Check if firmware is ready for OS download. + * @send_firmware_intr: Send an interrupt to the card firmware. + * @load_mic_fw: Load firmware segments required to boot the card + * into card memory. This includes the kernel, command line, ramdisk etc. + * @get_postcode: Get post code status from firmware. + */ +struct mic_hw_ops { +	u8 aper_bar; +	u8 mmio_bar; +	u32 (*read_spad)(struct mic_device *mdev, unsigned int idx); +	void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val); +	void (*send_intr)(struct mic_device *mdev, int doorbell); +	u32 (*ack_interrupt)(struct mic_device *mdev); +	void (*intr_workarounds)(struct mic_device *mdev); +	void (*reset)(struct mic_device *mdev); +	void (*reset_fw_ready)(struct mic_device *mdev); +	bool (*is_fw_ready)(struct mic_device *mdev); +	void (*send_firmware_intr)(struct mic_device *mdev); +	int (*load_mic_fw)(struct mic_device *mdev, const char *buf); +	u32 (*get_postcode)(struct mic_device *mdev); +}; + +/** + * mic_mmio_read - read from an MMIO register. + * @mw: MMIO register base virtual address. + * @offset: register offset. + * + * RETURNS: register value. + */ +static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset) +{ +	return ioread32(mw->va + offset); +} + +/** + * mic_mmio_write - write to an MMIO register. + * @mw: MMIO register base virtual address. + * @val: the data value to put into the register + * @offset: register offset. + * + * RETURNS: none. 
+ */ +static inline void +mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset) +{ +	iowrite32(val, mw->va + offset); +} + +void mic_sysfs_init(struct mic_device *mdev); +int mic_start(struct mic_device *mdev, const char *buf); +void mic_stop(struct mic_device *mdev, bool force); +void mic_shutdown(struct mic_device *mdev); +void mic_reset_delayed_work(struct work_struct *work); +void mic_reset_trigger_work(struct work_struct *work); +void mic_shutdown_work(struct work_struct *work); +void mic_bootparam_init(struct mic_device *mdev); +void mic_set_state(struct mic_device *mdev, u8 state); +void mic_set_shutdown_status(struct mic_device *mdev, u8 status); +void mic_create_debug_dir(struct mic_device *dev); +void mic_delete_debug_dir(struct mic_device *dev); +void __init mic_init_debugfs(void); +void mic_exit_debugfs(void); +void mic_prepare_suspend(struct mic_device *mdev); +void mic_complete_resume(struct mic_device *mdev); +void mic_suspend(struct mic_device *mdev); +#endif diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c new file mode 100644 index 00000000000..85776d7327f --- /dev/null +++ b/drivers/misc/mic/host/mic_fops.c @@ -0,0 +1,222 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ *
+ */
+#include <linux/poll.h>
+#include <linux/pci.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_fops.h"
+#include "mic_virtio.h"
+
+int mic_open(struct inode *inode, struct file *f)
+{
+	struct mic_vdev *mvdev;
+	struct mic_device *mdev = container_of(inode->i_cdev,
+		struct mic_device, cdev);
+
+	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
+	if (!mvdev)
+		return -ENOMEM;
+
+	init_waitqueue_head(&mvdev->waitq);
+	INIT_LIST_HEAD(&mvdev->list);
+	mvdev->mdev = mdev;
+	mvdev->virtio_id = -1;
+
+	f->private_data = mvdev;
+	return 0;
+}
+
+int mic_release(struct inode *inode, struct file *f)
+{
+	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+
+	if (-1 != mvdev->virtio_id)
+		mic_virtio_del_device(mvdev);
+	f->private_data = NULL;
+	kfree(mvdev);
+	return 0;
+}
+
+long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+	void __user *argp = (void __user *)arg;
+	int ret;
+
+	switch (cmd) {
+	case MIC_VIRTIO_ADD_DEVICE:
+	{
+		ret = mic_virtio_add_device(mvdev, argp);
+		if (ret < 0) {
+			dev_err(mic_dev(mvdev),
+				"%s %d errno ret %d\n",
+				__func__, __LINE__, ret);
+			return ret;
+		}
+		break;
+	}
+	case MIC_VIRTIO_COPY_DESC:
+	{
+		struct mic_copy_desc copy;
+
+		ret = mic_vdev_inited(mvdev);
+		if (ret)
+			return ret;
+
+		if (copy_from_user(&copy, argp, sizeof(copy)))
+			return -EFAULT;
+
+		dev_dbg(mic_dev(mvdev),
+			"%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n",
+			__func__, __LINE__, copy.iovcnt, copy.vr_idx,
+			copy.update_used);
+
+		ret = mic_virtio_copy_desc(mvdev, &copy);
+		if (ret < 0) {
+			dev_err(mic_dev(mvdev),
+				"%s %d errno ret %d\n",
+				__func__, __LINE__, ret);
+			return ret;
+		}
+		if (copy_to_user(
+			&((struct mic_copy_desc __user *)argp)->out_len,
+			&copy.out_len, sizeof(copy.out_len))) {
+			dev_err(mic_dev(mvdev), "%s %d errno ret %d\n",
+				__func__, __LINE__, 
-EFAULT); +			return -EFAULT; +		} +		break; +	} +	case MIC_VIRTIO_CONFIG_CHANGE: +	{ +		ret = mic_vdev_inited(mvdev); +		if (ret) +			return ret; + +		ret = mic_virtio_config_change(mvdev, argp); +		if (ret < 0) { +			dev_err(mic_dev(mvdev), +				"%s %d errno ret %d\n", +				__func__, __LINE__, ret); +			return ret; +		} +		break; +	} +	default: +		return -ENOIOCTLCMD; +	}; +	return 0; +} + +/* + * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and + * not when previously enqueued buffers may be available. This means that + * in the card->host (TX) path, when userspace is unblocked by poll it + * must drain all available descriptors or it can stall. + */ +unsigned int mic_poll(struct file *f, poll_table *wait) +{ +	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; +	int mask = 0; + +	poll_wait(f, &mvdev->waitq, wait); + +	if (mic_vdev_inited(mvdev)) { +		mask = POLLERR; +	} else if (mvdev->poll_wake) { +		mvdev->poll_wake = 0; +		mask = POLLIN | POLLOUT; +	} + +	return mask; +} + +static inline int +mic_query_offset(struct mic_vdev *mvdev, unsigned long offset, +		 unsigned long *size, unsigned long *pa) +{ +	struct mic_device *mdev = mvdev->mdev; +	unsigned long start = MIC_DP_SIZE; +	int i; + +	/* +	 * MMAP interface is as follows: +	 * offset				region +	 * 0x0					virtio device_page +	 * 0x1000				first vring +	 * 0x1000 + size of 1st vring		second vring +	 * .... +	 */ +	if (!offset) { +		*pa = virt_to_phys(mdev->dp); +		*size = MIC_DP_SIZE; +		return 0; +	} + +	for (i = 0; i < mvdev->dd->num_vq; i++) { +		struct mic_vringh *mvr = &mvdev->mvr[i]; +		if (offset == start) { +			*pa = virt_to_phys(mvr->vring.va); +			*size = mvr->vring.len; +			return 0; +		} +		start += mvr->vring.len; +	} +	return -1; +} + +/* + * Maps the device page and virtio rings to user space for readonly access. 
+ */ +int +mic_mmap(struct file *f, struct vm_area_struct *vma) +{ +	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; +	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; +	unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; +	int i, err; + +	err = mic_vdev_inited(mvdev); +	if (err) +		return err; + +	if (vma->vm_flags & VM_WRITE) +		return -EACCES; + +	while (size_rem) { +		i = mic_query_offset(mvdev, offset, &size, &pa); +		if (i < 0) +			return -EINVAL; +		err = remap_pfn_range(vma, vma->vm_start + offset, +			pa >> PAGE_SHIFT, size, vma->vm_page_prot); +		if (err) +			return err; +		dev_dbg(mic_dev(mvdev), +			"%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n", +			__func__, __LINE__, mvdev->virtio_id, size, offset, +			pa, vma->vm_start + offset); +		size_rem -= size; +		offset += size; +	} +	return 0; +} diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h new file mode 100644 index 00000000000..dc3893dff66 --- /dev/null +++ b/drivers/misc/mic/host/mic_fops.h @@ -0,0 +1,32 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ * + */ +#ifndef _MIC_FOPS_H_ +#define _MIC_FOPS_H_ + +int mic_open(struct inode *inode, struct file *filp); +int mic_release(struct inode *inode, struct file *filp); +ssize_t mic_read(struct file *filp, char __user *buf, +			size_t count, loff_t *pos); +long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +int mic_mmap(struct file *f, struct vm_area_struct *vma); +unsigned int mic_poll(struct file *f, poll_table *wait); + +#endif diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c new file mode 100644 index 00000000000..dbc5afde139 --- /dev/null +++ b/drivers/misc/mic/host/mic_intr.c @@ -0,0 +1,630 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/pci.h> +#include <linux/interrupt.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" + +/* + * mic_invoke_callback - Invoke callback functions registered for + * the corresponding source id. + * + * @mdev: pointer to the mic_device instance + * @idx: The interrupt source id. + * + * Returns none. 
+ */ +static inline void mic_invoke_callback(struct mic_device *mdev, int idx) +{ +	struct mic_intr_cb *intr_cb; +	struct pci_dev *pdev = container_of(mdev->sdev->parent, +		struct pci_dev, dev); + +	spin_lock(&mdev->irq_info.mic_intr_lock); +	list_for_each_entry(intr_cb, &mdev->irq_info.cb_list[idx], list) +		if (intr_cb->func) +			intr_cb->func(pdev->irq, intr_cb->data); +	spin_unlock(&mdev->irq_info.mic_intr_lock); +} + +/** + * mic_interrupt - Generic interrupt handler for + * MSI and INTx based interrupts. + */ +static irqreturn_t mic_interrupt(int irq, void *dev) +{ +	struct mic_device *mdev = dev; +	struct mic_intr_info *info = mdev->intr_info; +	u32 mask; +	int i; + +	mask = mdev->ops->ack_interrupt(mdev); +	if (!mask) +		return IRQ_NONE; + +	for (i = info->intr_start_idx[MIC_INTR_DB]; +			i < info->intr_len[MIC_INTR_DB]; i++) +		if (mask & BIT(i)) +			mic_invoke_callback(mdev, i); + +	return IRQ_HANDLED; +} + +/* Return the interrupt offset from the index. Index is 0 based. */ +static u16 mic_map_src_to_offset(struct mic_device *mdev, +		int intr_src, enum mic_intr_type type) +{ +	if (type >= MIC_NUM_INTR_TYPES) +		return MIC_NUM_OFFSETS; +	if (intr_src >= mdev->intr_info->intr_len[type]) +		return MIC_NUM_OFFSETS; + +	return mdev->intr_info->intr_start_idx[type] + intr_src; +} + +/* Return next available msix_entry. */ +static struct msix_entry *mic_get_available_vector(struct mic_device *mdev) +{ +	int i; +	struct mic_irq_info *info = &mdev->irq_info; + +	for (i = 0; i < info->num_vectors; i++) +		if (!info->mic_msi_map[i]) +			return &info->msix_entries[i]; +	return NULL; +} + +/** + * mic_register_intr_callback - Register a callback handler for the + * given source id. + * + * @mdev: pointer to the mic_device instance + * @idx: The source id to be registered. + * @func: The function to be called when the source id receives + * the interrupt. + * @data: Private data of the requester. 
+ * Return the callback structure that was registered or an + * appropriate error on failure. + */ +static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev, +			u8 idx, irqreturn_t (*func) (int irq, void *dev), +			void *data) +{ +	struct mic_intr_cb *intr_cb; +	unsigned long flags; +	int rc; +	intr_cb = kmalloc(sizeof(*intr_cb), GFP_KERNEL); + +	if (!intr_cb) +		return ERR_PTR(-ENOMEM); + +	intr_cb->func = func; +	intr_cb->data = data; +	intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida, +		0, 0, GFP_KERNEL); +	if (intr_cb->cb_id < 0) { +		rc = intr_cb->cb_id; +		goto ida_fail; +	} + +	spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags); +	list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]); +	spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags); + +	return intr_cb; +ida_fail: +	kfree(intr_cb); +	return ERR_PTR(rc); +} + +/** + * mic_unregister_intr_callback - Unregister the callback handler + * identified by its callback id. + * + * @mdev: pointer to the mic_device instance + * @idx: The callback structure id to be unregistered. + * Return the source id that was unregistered or MIC_NUM_OFFSETS if no + * such callback handler was found. 
+ */ +static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx) +{ +	struct list_head *pos, *tmp; +	struct mic_intr_cb *intr_cb; +	unsigned long flags; +	int i; + +	for (i = 0;  i < MIC_NUM_OFFSETS; i++) { +		spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags); +		list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) { +			intr_cb = list_entry(pos, struct mic_intr_cb, list); +			if (intr_cb->cb_id == idx) { +				list_del(pos); +				ida_simple_remove(&mdev->irq_info.cb_ida, +						  intr_cb->cb_id); +				kfree(intr_cb); +				spin_unlock_irqrestore( +					&mdev->irq_info.mic_intr_lock, flags); +				return i; +			} +		} +		spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags); +	} +	return MIC_NUM_OFFSETS; +} + +/** + * mic_setup_msix - Initializes MSIx interrupts. + * + * @mdev: pointer to mic_device instance + * + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev) +{ +	int rc, i; +	int entry_size = sizeof(*mdev->irq_info.msix_entries); + +	mdev->irq_info.msix_entries = kmalloc_array(MIC_MIN_MSIX, +						    entry_size, GFP_KERNEL); +	if (!mdev->irq_info.msix_entries) { +		rc = -ENOMEM; +		goto err_nomem1; +	} + +	for (i = 0; i < MIC_MIN_MSIX; i++) +		mdev->irq_info.msix_entries[i].entry = i; + +	rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries, +		MIC_MIN_MSIX); +	if (rc) { +		dev_dbg(&pdev->dev, "Error enabling MSIx. 
rc = %d\n", rc); +		goto err_enable_msix; +	} + +	mdev->irq_info.num_vectors = MIC_MIN_MSIX; +	mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) * +		mdev->irq_info.num_vectors), GFP_KERNEL); + +	if (!mdev->irq_info.mic_msi_map) { +		rc = -ENOMEM; +		goto err_nomem2; +	} + +	dev_dbg(mdev->sdev->parent, +		"%d MSIx irqs setup\n", mdev->irq_info.num_vectors); +	return 0; +err_nomem2: +	pci_disable_msix(pdev); +err_enable_msix: +	kfree(mdev->irq_info.msix_entries); +err_nomem1: +	mdev->irq_info.num_vectors = 0; +	return rc; +} + +/** + * mic_setup_callbacks - Initialize data structures needed + * to handle callbacks. + * + * @mdev: pointer to mic_device instance + */ +static int mic_setup_callbacks(struct mic_device *mdev) +{ +	int i; + +	mdev->irq_info.cb_list = kmalloc_array(MIC_NUM_OFFSETS, +					       sizeof(*mdev->irq_info.cb_list), +					       GFP_KERNEL); +	if (!mdev->irq_info.cb_list) +		return -ENOMEM; + +	for (i = 0; i < MIC_NUM_OFFSETS; i++) +		INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]); +	ida_init(&mdev->irq_info.cb_ida); +	spin_lock_init(&mdev->irq_info.mic_intr_lock); +	return 0; +} + +/** + * mic_release_callbacks - Uninitialize data structures needed + * to handle callbacks. 
+ * + * @mdev: pointer to mic_device instance + */ +static void mic_release_callbacks(struct mic_device *mdev) +{ +	unsigned long flags; +	struct list_head *pos, *tmp; +	struct mic_intr_cb *intr_cb; +	int i; + +	for (i = 0; i < MIC_NUM_OFFSETS; i++) { +		spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags); + +		if (list_empty(&mdev->irq_info.cb_list[i])) { +			spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, +					       flags); +			break; +		} + +		list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) { +			intr_cb = list_entry(pos, struct mic_intr_cb, list); +			list_del(pos); +			ida_simple_remove(&mdev->irq_info.cb_ida, +					  intr_cb->cb_id); +			kfree(intr_cb); +		} +		spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags); +	} +	ida_destroy(&mdev->irq_info.cb_ida); +	kfree(mdev->irq_info.cb_list); +} + +/** + * mic_setup_msi - Initializes MSI interrupts. + * + * @mdev: pointer to mic_device instance + * @pdev: PCI device structure + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev) +{ +	int rc; + +	rc = pci_enable_msi(pdev); +	if (rc) { +		dev_dbg(&pdev->dev, "Error enabling MSI. 
rc = %d\n", rc); +		return rc; +	} + +	mdev->irq_info.num_vectors = 1; +	mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) * +		mdev->irq_info.num_vectors), GFP_KERNEL); + +	if (!mdev->irq_info.mic_msi_map) { +		rc = -ENOMEM; +		goto err_nomem1; +	} + +	rc = mic_setup_callbacks(mdev); +	if (rc) { +		dev_err(&pdev->dev, "Error setting up callbacks\n"); +		goto err_nomem2; +	} + +	rc = request_irq(pdev->irq, mic_interrupt, 0 , "mic-msi", mdev); +	if (rc) { +		dev_err(&pdev->dev, "Error allocating MSI interrupt\n"); +		goto err_irq_req_fail; +	} + +	dev_dbg(&pdev->dev, "%d MSI irqs setup\n", mdev->irq_info.num_vectors); +	return 0; +err_irq_req_fail: +	mic_release_callbacks(mdev); +err_nomem2: +	kfree(mdev->irq_info.mic_msi_map); +err_nomem1: +	pci_disable_msi(pdev); +	mdev->irq_info.num_vectors = 0; +	return rc; +} + +/** + * mic_setup_intx - Initializes legacy interrupts. + * + * @mdev: pointer to mic_device instance + * @pdev: PCI device structure + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev) +{ +	int rc; + +	pci_msi_off(pdev); + +	/* Enable intx */ +	pci_intx(pdev, 1); +	rc = mic_setup_callbacks(mdev); +	if (rc) { +		dev_err(&pdev->dev, "Error setting up callbacks\n"); +		goto err_nomem; +	} + +	rc = request_irq(pdev->irq, mic_interrupt, +		IRQF_SHARED, "mic-intx", mdev); +	if (rc) +		goto err; + +	dev_dbg(&pdev->dev, "intx irq setup\n"); +	return 0; +err: +	mic_release_callbacks(mdev); +err_nomem: +	return rc; +} + +/** + * mic_next_db - Retrieve the next doorbell interrupt source id. + * The id is picked sequentially from the available pool of + * doorlbell ids. + * + * @mdev: pointer to the mic_device instance. + * + * Returns the next doorbell interrupt source. 
+ */ +int mic_next_db(struct mic_device *mdev) +{ +	int next_db; + +	next_db = mdev->irq_info.next_avail_src % +		mdev->intr_info->intr_len[MIC_INTR_DB]; +	mdev->irq_info.next_avail_src++; +	return next_db; +} + +#define COOKIE_ID_SHIFT 16 +#define GET_ENTRY(cookie) ((cookie) & 0xFFFF) +#define GET_OFFSET(cookie) ((cookie) >> COOKIE_ID_SHIFT) +#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT) + +/** + * mic_request_irq - request an irq. mic_mutex needs + * to be held before calling this function. + * + * @mdev: pointer to mic_device instance + * @func: The callback function that handles the interrupt. + * The function needs to call ack_interrupts + * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts. + * @name: The ASCII name of the callee requesting the irq. + * @data: private data that is returned back when calling the + * function handler. + * @intr_src: The source id of the requester. Its the doorbell id + * for Doorbell interrupts and DMA channel id for DMA interrupts. + * @type: The type of interrupt. Values defined in mic_intr_type + * + * returns: The cookie that is transparent to the caller. Passed + * back when calling mic_free_irq. An appropriate error code + * is returned on failure. Caller needs to use IS_ERR(return_val) + * to check for failure and PTR_ERR(return_val) to obtained the + * error code. 
+ * + */ +struct mic_irq *mic_request_irq(struct mic_device *mdev, +	irqreturn_t (*func)(int irq, void *dev), +	const char *name, void *data, int intr_src, +	enum mic_intr_type type) +{ +	u16 offset; +	int rc = 0; +	struct msix_entry *msix = NULL; +	unsigned long cookie = 0; +	u16 entry; +	struct mic_intr_cb *intr_cb; +	struct pci_dev *pdev = container_of(mdev->sdev->parent, +		struct pci_dev, dev); + +	offset = mic_map_src_to_offset(mdev, intr_src, type); +	if (offset >= MIC_NUM_OFFSETS) { +		dev_err(mdev->sdev->parent, +			"Error mapping index %d to a valid source id.\n", +			intr_src); +		rc = -EINVAL; +		goto err; +	} + +	if (mdev->irq_info.num_vectors > 1) { +		msix = mic_get_available_vector(mdev); +		if (!msix) { +			dev_err(mdev->sdev->parent, +				"No MSIx vectors available for use.\n"); +			rc = -ENOSPC; +			goto err; +		} + +		rc = request_irq(msix->vector, func, 0, name, data); +		if (rc) { +			dev_dbg(mdev->sdev->parent, +				"request irq failed rc = %d\n", rc); +			goto err; +		} +		entry = msix->entry; +		mdev->irq_info.mic_msi_map[entry] |= BIT(offset); +		mdev->intr_ops->program_msi_to_src_map(mdev, +				entry, offset, true); +		cookie = MK_COOKIE(entry, offset); +		dev_dbg(mdev->sdev->parent, "irq: %d assigned for src: %d\n", +			msix->vector, intr_src); +	} else { +		intr_cb = mic_register_intr_callback(mdev, +				offset, func, data); +		if (IS_ERR(intr_cb)) { +			dev_err(mdev->sdev->parent, +				"No available callback entries for use\n"); +			rc = PTR_ERR(intr_cb); +			goto err; +		} + +		entry = 0; +		if (pci_dev_msi_enabled(pdev)) { +			mdev->irq_info.mic_msi_map[entry] |= (1 << offset); +			mdev->intr_ops->program_msi_to_src_map(mdev, +				entry, offset, true); +		} +		cookie = MK_COOKIE(entry, intr_cb->cb_id); +		dev_dbg(mdev->sdev->parent, "callback %d registered for src: %d\n", +			intr_cb->cb_id, intr_src); +	} +	return (struct mic_irq *)cookie; +err: +	return ERR_PTR(rc); +} + +/** + * mic_free_irq - free irq. 
mic_mutex + *  needs to be held before calling this function. + * + * @mdev: pointer to mic_device instance + * @cookie: cookie obtained during a successful call to mic_request_irq + * @data: private data specified by the calling function during the + * mic_request_irq + * + * returns: none. + */ +void mic_free_irq(struct mic_device *mdev, +	struct mic_irq *cookie, void *data) +{ +	u32 offset; +	u32 entry; +	u8 src_id; +	unsigned int irq; +	struct pci_dev *pdev = container_of(mdev->sdev->parent, +		struct pci_dev, dev); + +	entry = GET_ENTRY((unsigned long)cookie); +	offset = GET_OFFSET((unsigned long)cookie); +	if (mdev->irq_info.num_vectors > 1) { +		if (entry >= mdev->irq_info.num_vectors) { +			dev_warn(mdev->sdev->parent, +				 "entry %d should be < num_irq %d\n", +				entry, mdev->irq_info.num_vectors); +			return; +		} +		irq = mdev->irq_info.msix_entries[entry].vector; +		free_irq(irq, data); +		mdev->irq_info.mic_msi_map[entry] &= ~(BIT(offset)); +		mdev->intr_ops->program_msi_to_src_map(mdev, +			entry, offset, false); + +		dev_dbg(mdev->sdev->parent, "irq: %d freed\n", irq); +	} else { +		irq = pdev->irq; +		src_id = mic_unregister_intr_callback(mdev, offset); +		if (src_id >= MIC_NUM_OFFSETS) { +			dev_warn(mdev->sdev->parent, "Error unregistering callback\n"); +			return; +		} +		if (pci_dev_msi_enabled(pdev)) { +			mdev->irq_info.mic_msi_map[entry] &= ~(BIT(src_id)); +			mdev->intr_ops->program_msi_to_src_map(mdev, +				entry, src_id, false); +		} +		dev_dbg(mdev->sdev->parent, "callback %d unregistered for src: %d\n", +			offset, src_id); +	} +} + +/** + * mic_setup_interrupts - Initializes interrupts. + * + * @mdev: pointer to mic_device instance + * @pdev: PCI device structure + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
+ */ +int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev) +{ +	int rc; + +	rc = mic_setup_msix(mdev, pdev); +	if (!rc) +		goto done; + +	rc = mic_setup_msi(mdev, pdev); +	if (!rc) +		goto done; + +	rc = mic_setup_intx(mdev, pdev); +	if (rc) { +		dev_err(mdev->sdev->parent, "no usable interrupts\n"); +		return rc; +	} +done: +	mdev->intr_ops->enable_interrupts(mdev); +	return 0; +} + +/** + * mic_free_interrupts - Frees interrupts setup by mic_setup_interrupts + * + * @mdev: pointer to mic_device instance + * @pdev: PCI device structure + * + * returns none. + */ +void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev) +{ +	int i; + +	mdev->intr_ops->disable_interrupts(mdev); +	if (mdev->irq_info.num_vectors > 1) { +		for (i = 0; i < mdev->irq_info.num_vectors; i++) { +			if (mdev->irq_info.mic_msi_map[i]) +				dev_warn(&pdev->dev, "irq %d may still be in use.\n", +					 mdev->irq_info.msix_entries[i].vector); +		} +		kfree(mdev->irq_info.mic_msi_map); +		kfree(mdev->irq_info.msix_entries); +		pci_disable_msix(pdev); +	} else { +		if (pci_dev_msi_enabled(pdev)) { +			free_irq(pdev->irq, mdev); +			kfree(mdev->irq_info.mic_msi_map); +			pci_disable_msi(pdev); +		} else { +			free_irq(pdev->irq, mdev); +		} +		mic_release_callbacks(mdev); +	} +} + +/** + * mic_intr_restore - Restore MIC interrupt registers. + * + * @mdev: pointer to mic_device instance. + * + * Restore the interrupt registers to values previously + * stored in the SW data structures. mic_mutex needs to + * be held before calling this function. + * + * returns None. 
+ */ +void mic_intr_restore(struct mic_device *mdev) +{ +	int entry, offset; +	struct pci_dev *pdev = container_of(mdev->sdev->parent, +		struct pci_dev, dev); + +	if (!pci_dev_msi_enabled(pdev)) +		return; + +	for (entry = 0; entry < mdev->irq_info.num_vectors; entry++) { +		for (offset = 0; offset < MIC_NUM_OFFSETS; offset++) { +			if (mdev->irq_info.mic_msi_map[entry] & BIT(offset)) +				mdev->intr_ops->program_msi_to_src_map(mdev, +					entry, offset, true); +		} +	} +} diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h new file mode 100644 index 00000000000..6091aa97e11 --- /dev/null +++ b/drivers/misc/mic/host/mic_intr.h @@ -0,0 +1,137 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef _MIC_INTR_H_ +#define _MIC_INTR_H_ + +/* + * The minimum number of msix vectors required for normal operation. + * 3 for virtio network, console and block devices. + * 1 for card shutdown notifications. + */ +#define MIC_MIN_MSIX 4 +#define MIC_NUM_OFFSETS 32 + +/** + * mic_intr_source - The type of source that will generate + * the interrupt.The number of types needs to be in sync with + * MIC_NUM_INTR_TYPES + * + * MIC_INTR_DB: The source is a doorbell + * MIC_INTR_DMA: The source is a DMA channel + * MIC_INTR_ERR: The source is an error interrupt e.g. 
SBOX ERR + * MIC_NUM_INTR_TYPES: Total number of interrupt sources. + */ +enum mic_intr_type { +	MIC_INTR_DB = 0, +	MIC_INTR_DMA, +	MIC_INTR_ERR, +	MIC_NUM_INTR_TYPES +}; + +/** + * struct mic_intr_info - Contains h/w specific interrupt sources + * information. + * + * @intr_start_idx: Contains the starting indexes of the + * interrupt types. + * @intr_len: Contains the length of the interrupt types. + */ +struct mic_intr_info { +	u16 intr_start_idx[MIC_NUM_INTR_TYPES]; +	u16 intr_len[MIC_NUM_INTR_TYPES]; +}; + +/** + * struct mic_irq_info - OS specific irq information + * + * @next_avail_src: next available doorbell that can be assigned. + * @msix_entries: msix entries allocated while setting up MSI-x + * @mic_msi_map: The MSI/MSI-x mapping information. + * @num_vectors: The number of MSI/MSI-x vectors that have been allocated. + * @cb_ida: callback ID allocator to track the callbacks registered. + * @mic_intr_lock: spinlock to protect the interrupt callback list. + * @cb_list: Array of callback lists one for each source. + */ +struct mic_irq_info { +	int next_avail_src; +	struct msix_entry *msix_entries; +	u32 *mic_msi_map; +	u16 num_vectors; +	struct ida cb_ida; +	spinlock_t mic_intr_lock; +	struct list_head *cb_list; +}; + +/** + * struct mic_intr_cb - Interrupt callback structure. + * + * @func: The callback function + * @data: Private data of the requester. + * @cb_id: The callback id. Identifies this callback. + * @list: list head pointing to the next callback structure. + */ +struct mic_intr_cb { +	irqreturn_t (*func) (int irq, void *data); +	void *data; +	int cb_id; +	struct list_head list; +}; + +/** + * struct mic_irq - opaque pointer used as cookie + */ +struct mic_irq; + +/* Forward declaration */ +struct mic_device; + +/** + * struct mic_hw_intr_ops: MIC HW specific interrupt operations + * @intr_init: Initialize H/W specific interrupt information. + * @enable_interrupts: Enable interrupts from the hardware. 
+ * @disable_interrupts: Disable interrupts from the hardware. + * @program_msi_to_src_map: Update MSI mapping registers with + * irq information. + * @read_msi_to_src_map: Read MSI mapping registers containing + * irq information. + */ +struct mic_hw_intr_ops { +	void (*intr_init)(struct mic_device *mdev); +	void (*enable_interrupts)(struct mic_device *mdev); +	void (*disable_interrupts)(struct mic_device *mdev); +	void (*program_msi_to_src_map) (struct mic_device *mdev, +			int idx, int intr_src, bool set); +	u32 (*read_msi_to_src_map) (struct mic_device *mdev, +			int idx); +}; + +int mic_next_db(struct mic_device *mdev); +struct mic_irq *mic_request_irq(struct mic_device *mdev, +	irqreturn_t (*func)(int irq, void *data), +	const char *name, void *data, int intr_src, +	enum mic_intr_type type); + +void mic_free_irq(struct mic_device *mdev, +		struct mic_irq *cookie, void *data); +int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev); +void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev); +void mic_intr_restore(struct mic_device *mdev); +#endif diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c new file mode 100644 index 00000000000..c04a021e20c --- /dev/null +++ b/drivers/misc/mic/host/mic_main.c @@ -0,0 +1,536 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ * + * Global TODO's across the driver to be added after initial base + * patches are accepted upstream: + * 1) Enable DMA support. + * 2) Enable per vring interrupt support. + */ +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/suspend.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_x100.h" +#include "mic_smpt.h" +#include "mic_fops.h" +#include "mic_virtio.h" + +static const char mic_driver_name[] = "mic"; + +static DEFINE_PCI_DEVICE_TABLE(mic_pci_tbl) = { +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2253)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2254)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2255)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2256)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2257)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2258)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2259)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225a)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225b)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225c)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225d)}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225e)}, + +	/* required last entry */ +	{ 0, } +}; + +MODULE_DEVICE_TABLE(pci, mic_pci_tbl); + +/* ID allocator for MIC devices */ +static struct ida g_mic_ida; +/* Class of MIC devices for sysfs accessibility. 
*/ +static struct class *g_mic_class; +/* Base device node number for MIC devices */ +static dev_t g_mic_devno; + +static const struct file_operations mic_fops = { +	.open = mic_open, +	.release = mic_release, +	.unlocked_ioctl = mic_ioctl, +	.poll = mic_poll, +	.mmap = mic_mmap, +	.owner = THIS_MODULE, +}; + +/* Initialize the device page */ +static int mic_dp_init(struct mic_device *mdev) +{ +	mdev->dp = kzalloc(MIC_DP_SIZE, GFP_KERNEL); +	if (!mdev->dp) { +		dev_err(mdev->sdev->parent, "%s %d err %d\n", +			__func__, __LINE__, -ENOMEM); +		return -ENOMEM; +	} + +	mdev->dp_dma_addr = mic_map_single(mdev, +		mdev->dp, MIC_DP_SIZE); +	if (mic_map_error(mdev->dp_dma_addr)) { +		kfree(mdev->dp); +		dev_err(mdev->sdev->parent, "%s %d err %d\n", +			__func__, __LINE__, -ENOMEM); +		return -ENOMEM; +	} +	mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr); +	mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); +	return 0; +} + +/* Uninitialize the device page */ +static void mic_dp_uninit(struct mic_device *mdev) +{ +	mic_unmap_single(mdev, mdev->dp_dma_addr, MIC_DP_SIZE); +	kfree(mdev->dp); +} + +/** + * mic_shutdown_db - Shutdown doorbell interrupt handler. + */ +static irqreturn_t mic_shutdown_db(int irq, void *data) +{ +	struct mic_device *mdev = data; +	struct mic_bootparam *bootparam = mdev->dp; + +	mdev->ops->intr_workarounds(mdev); + +	switch (bootparam->shutdown_status) { +	case MIC_HALTED: +	case MIC_POWER_OFF: +	case MIC_RESTART: +		/* Fall through */ +	case MIC_CRASHED: +		schedule_work(&mdev->shutdown_work); +		break; +	default: +		break; +	}; +	return IRQ_HANDLED; +} + +/** + * mic_ops_init: Initialize HW specific operation tables. + * + * @mdev: pointer to mic_device instance + * + * returns none. 
+ */ +static void mic_ops_init(struct mic_device *mdev) +{ +	switch (mdev->family) { +	case MIC_FAMILY_X100: +		mdev->ops = &mic_x100_ops; +		mdev->intr_ops = &mic_x100_intr_ops; +		mdev->smpt_ops = &mic_x100_smpt_ops; +		break; +	default: +		break; +	} +} + +/** + * mic_get_family - Determine hardware family to which this MIC belongs. + * + * @pdev: The pci device structure + * + * returns family. + */ +static enum mic_hw_family mic_get_family(struct pci_dev *pdev) +{ +	enum mic_hw_family family; + +	switch (pdev->device) { +	case MIC_X100_PCI_DEVICE_2250: +	case MIC_X100_PCI_DEVICE_2251: +	case MIC_X100_PCI_DEVICE_2252: +	case MIC_X100_PCI_DEVICE_2253: +	case MIC_X100_PCI_DEVICE_2254: +	case MIC_X100_PCI_DEVICE_2255: +	case MIC_X100_PCI_DEVICE_2256: +	case MIC_X100_PCI_DEVICE_2257: +	case MIC_X100_PCI_DEVICE_2258: +	case MIC_X100_PCI_DEVICE_2259: +	case MIC_X100_PCI_DEVICE_225a: +	case MIC_X100_PCI_DEVICE_225b: +	case MIC_X100_PCI_DEVICE_225c: +	case MIC_X100_PCI_DEVICE_225d: +	case MIC_X100_PCI_DEVICE_225e: +		family = MIC_FAMILY_X100; +		break; +	default: +		family = MIC_FAMILY_UNKNOWN; +		break; +	} +	return family; +} + +/** +* mic_pm_notifier: Notifier callback function that handles +* PM notifications. +* +* @notifier_block: The notifier structure. +* @pm_event: The event for which the driver was notified. +* @unused: Meaningless. Always NULL. 
+* +* returns NOTIFY_DONE +*/ +static int mic_pm_notifier(struct notifier_block *notifier, +		unsigned long pm_event, void *unused) +{ +	struct mic_device *mdev = container_of(notifier, +		struct mic_device, pm_notifier); + +	switch (pm_event) { +	case PM_HIBERNATION_PREPARE: +		/* Fall through */ +	case PM_SUSPEND_PREPARE: +		mic_prepare_suspend(mdev); +		break; +	case PM_POST_HIBERNATION: +		/* Fall through */ +	case PM_POST_SUSPEND: +		/* Fall through */ +	case PM_POST_RESTORE: +		mic_complete_resume(mdev); +		break; +	case PM_RESTORE_PREPARE: +		break; +	default: +		break; +	} +	return NOTIFY_DONE; +} + +/** + * mic_device_init - Allocates and initializes the MIC device structure + * + * @mdev: pointer to mic_device instance + * @pdev: The pci device structure + * + * returns none. + */ +static int +mic_device_init(struct mic_device *mdev, struct pci_dev *pdev) +{ +	int rc; + +	mdev->family = mic_get_family(pdev); +	mdev->stepping = pdev->revision; +	mic_ops_init(mdev); +	mic_sysfs_init(mdev); +	mutex_init(&mdev->mic_mutex); +	mdev->irq_info.next_avail_src = 0; +	INIT_WORK(&mdev->reset_trigger_work, mic_reset_trigger_work); +	INIT_WORK(&mdev->shutdown_work, mic_shutdown_work); +	init_completion(&mdev->reset_wait); +	INIT_LIST_HEAD(&mdev->vdev_list); +	mdev->pm_notifier.notifier_call = mic_pm_notifier; +	rc = register_pm_notifier(&mdev->pm_notifier); +	if (rc) { +		dev_err(&pdev->dev, "register_pm_notifier failed rc %d\n", +			rc); +		goto register_pm_notifier_fail; +	} +	return 0; +register_pm_notifier_fail: +	flush_work(&mdev->shutdown_work); +	flush_work(&mdev->reset_trigger_work); +	return rc; +} + +/** + * mic_device_uninit - Frees resources allocated during mic_device_init(..) 
+ * + * @mdev: pointer to mic_device instance + * + * returns none + */ +static void mic_device_uninit(struct mic_device *mdev) +{ +	/* The cmdline sysfs entry might have allocated cmdline */ +	kfree(mdev->cmdline); +	kfree(mdev->firmware); +	kfree(mdev->ramdisk); +	kfree(mdev->bootmode); +	flush_work(&mdev->reset_trigger_work); +	flush_work(&mdev->shutdown_work); +	unregister_pm_notifier(&mdev->pm_notifier); +} + +/** + * mic_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mic_pci_tbl + * + * returns 0 on success, < 0 on failure. + */ +static int mic_probe(struct pci_dev *pdev, +		const struct pci_device_id *ent) +{ +	int rc; +	struct mic_device *mdev; + +	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); +	if (!mdev) { +		rc = -ENOMEM; +		dev_err(&pdev->dev, "mdev kmalloc failed rc %d\n", rc); +		goto mdev_alloc_fail; +	} +	mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL); +	if (mdev->id < 0) { +		rc = mdev->id; +		dev_err(&pdev->dev, "ida_simple_get failed rc %d\n", rc); +		goto ida_fail; +	} + +	rc = mic_device_init(mdev, pdev); +	if (rc) { +		dev_err(&pdev->dev, "mic_device_init failed rc %d\n", rc); +		goto device_init_fail; +	} + +	rc = pci_enable_device(pdev); +	if (rc) { +		dev_err(&pdev->dev, "failed to enable pci device.\n"); +		goto uninit_device; +	} + +	pci_set_master(pdev); + +	rc = pci_request_regions(pdev, mic_driver_name); +	if (rc) { +		dev_err(&pdev->dev, "failed to get pci regions.\n"); +		goto disable_device; +	} + +	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); +	if (rc) { +		dev_err(&pdev->dev, "Cannot set DMA mask\n"); +		goto release_regions; +	} + +	mdev->mmio.pa = pci_resource_start(pdev, mdev->ops->mmio_bar); +	mdev->mmio.len = pci_resource_len(pdev, mdev->ops->mmio_bar); +	mdev->mmio.va = pci_ioremap_bar(pdev, mdev->ops->mmio_bar); +	if (!mdev->mmio.va) { +		dev_err(&pdev->dev, "Cannot remap MMIO BAR\n"); +		rc = -EIO; +		goto release_regions; +	} + +	mdev->aper.pa = 
pci_resource_start(pdev, mdev->ops->aper_bar); +	mdev->aper.len = pci_resource_len(pdev, mdev->ops->aper_bar); +	mdev->aper.va = ioremap_wc(mdev->aper.pa, mdev->aper.len); +	if (!mdev->aper.va) { +		dev_err(&pdev->dev, "Cannot remap Aperture BAR\n"); +		rc = -EIO; +		goto unmap_mmio; +	} + +	mdev->intr_ops->intr_init(mdev); +	rc = mic_setup_interrupts(mdev, pdev); +	if (rc) { +		dev_err(&pdev->dev, "mic_setup_interrupts failed %d\n", rc); +		goto unmap_aper; +	} +	rc = mic_smpt_init(mdev); +	if (rc) { +		dev_err(&pdev->dev, "smpt_init failed %d\n", rc); +		goto free_interrupts; +	} + +	pci_set_drvdata(pdev, mdev); + +	mdev->sdev = device_create_with_groups(g_mic_class, &pdev->dev, +		MKDEV(MAJOR(g_mic_devno), mdev->id), NULL, +		mdev->attr_group, "mic%d", mdev->id); +	if (IS_ERR(mdev->sdev)) { +		rc = PTR_ERR(mdev->sdev); +		dev_err(&pdev->dev, +			"device_create_with_groups failed rc %d\n", rc); +		goto smpt_uninit; +	} +	mdev->state_sysfs = sysfs_get_dirent(mdev->sdev->kobj.sd, "state"); +	if (!mdev->state_sysfs) { +		rc = -ENODEV; +		dev_err(&pdev->dev, "sysfs_get_dirent failed rc %d\n", rc); +		goto destroy_device; +	} + +	rc = mic_dp_init(mdev); +	if (rc) { +		dev_err(&pdev->dev, "mic_dp_init failed rc %d\n", rc); +		goto sysfs_put; +	} +	mutex_lock(&mdev->mic_mutex); + +	mdev->shutdown_db = mic_next_db(mdev); +	mdev->shutdown_cookie = mic_request_irq(mdev, mic_shutdown_db, +		"shutdown-interrupt", mdev, mdev->shutdown_db, MIC_INTR_DB); +	if (IS_ERR(mdev->shutdown_cookie)) { +		rc = PTR_ERR(mdev->shutdown_cookie); +		mutex_unlock(&mdev->mic_mutex); +		goto dp_uninit; +	} +	mutex_unlock(&mdev->mic_mutex); +	mic_bootparam_init(mdev); + +	mic_create_debug_dir(mdev); +	cdev_init(&mdev->cdev, &mic_fops); +	mdev->cdev.owner = THIS_MODULE; +	rc = cdev_add(&mdev->cdev, MKDEV(MAJOR(g_mic_devno), mdev->id), 1); +	if (rc) { +		dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc); +		goto cleanup_debug_dir; +	} +	return 0; +cleanup_debug_dir: +	
mic_delete_debug_dir(mdev); +	mutex_lock(&mdev->mic_mutex); +	mic_free_irq(mdev, mdev->shutdown_cookie, mdev); +	mutex_unlock(&mdev->mic_mutex); +dp_uninit: +	mic_dp_uninit(mdev); +sysfs_put: +	sysfs_put(mdev->state_sysfs); +destroy_device: +	device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id)); +smpt_uninit: +	mic_smpt_uninit(mdev); +free_interrupts: +	mic_free_interrupts(mdev, pdev); +unmap_aper: +	iounmap(mdev->aper.va); +unmap_mmio: +	iounmap(mdev->mmio.va); +release_regions: +	pci_release_regions(pdev); +disable_device: +	pci_disable_device(pdev); +uninit_device: +	mic_device_uninit(mdev); +device_init_fail: +	ida_simple_remove(&g_mic_ida, mdev->id); +ida_fail: +	kfree(mdev); +mdev_alloc_fail: +	dev_err(&pdev->dev, "Probe failed rc %d\n", rc); +	return rc; +} + +/** + * mic_remove - Device Removal Routine + * mic_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. + * + * @pdev: PCI device structure + */ +static void mic_remove(struct pci_dev *pdev) +{ +	struct mic_device *mdev; + +	mdev = pci_get_drvdata(pdev); +	if (!mdev) +		return; + +	mic_stop(mdev, false); +	cdev_del(&mdev->cdev); +	mic_delete_debug_dir(mdev); +	mutex_lock(&mdev->mic_mutex); +	mic_free_irq(mdev, mdev->shutdown_cookie, mdev); +	mutex_unlock(&mdev->mic_mutex); +	flush_work(&mdev->shutdown_work); +	mic_dp_uninit(mdev); +	sysfs_put(mdev->state_sysfs); +	device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id)); +	mic_smpt_uninit(mdev); +	mic_free_interrupts(mdev, pdev); +	iounmap(mdev->mmio.va); +	iounmap(mdev->aper.va); +	mic_device_uninit(mdev); +	pci_release_regions(pdev); +	pci_disable_device(pdev); +	ida_simple_remove(&g_mic_ida, mdev->id); +	kfree(mdev); +} +static struct pci_driver mic_driver = { +	.name = mic_driver_name, +	.id_table = mic_pci_tbl, +	.probe = mic_probe, +	.remove = mic_remove +}; + +static int __init mic_init(void) +{ +	int ret; + +	ret = alloc_chrdev_region(&g_mic_devno, 0, +		MIC_MAX_NUM_DEVS, 
mic_driver_name); +	if (ret) { +		pr_err("alloc_chrdev_region failed ret %d\n", ret); +		goto error; +	} + +	g_mic_class = class_create(THIS_MODULE, mic_driver_name); +	if (IS_ERR(g_mic_class)) { +		ret = PTR_ERR(g_mic_class); +		pr_err("class_create failed ret %d\n", ret); +		goto cleanup_chrdev; +	} + +	mic_init_debugfs(); +	ida_init(&g_mic_ida); +	ret = pci_register_driver(&mic_driver); +	if (ret) { +		pr_err("pci_register_driver failed ret %d\n", ret); +		goto cleanup_debugfs; +	} +	return ret; +cleanup_debugfs: +	mic_exit_debugfs(); +	class_destroy(g_mic_class); +cleanup_chrdev: +	unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); +error: +	return ret; +} + +static void __exit mic_exit(void) +{ +	pci_unregister_driver(&mic_driver); +	ida_destroy(&g_mic_ida); +	mic_exit_debugfs(); +	class_destroy(g_mic_class); +	unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); +} + +module_init(mic_init); +module_exit(mic_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) MIC X100 Host driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c new file mode 100644 index 00000000000..fae474c4899 --- /dev/null +++ b/drivers/misc/mic/host/mic_smpt.c @@ -0,0 +1,442 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ * + */ +#include <linux/pci.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_smpt.h" + +static inline u64 mic_system_page_mask(struct mic_device *mdev) +{ +	return (1ULL << mdev->smpt->info.page_shift) - 1ULL; +} + +static inline u8 mic_sys_addr_to_smpt(struct mic_device *mdev, dma_addr_t pa) +{ +	return (pa - mdev->smpt->info.base) >> mdev->smpt->info.page_shift; +} + +static inline u64 mic_smpt_to_pa(struct mic_device *mdev, u8 index) +{ +	return mdev->smpt->info.base + (index * mdev->smpt->info.page_size); +} + +static inline u64 mic_smpt_offset(struct mic_device *mdev, dma_addr_t pa) +{ +	return pa & mic_system_page_mask(mdev); +} + +static inline u64 mic_smpt_align_low(struct mic_device *mdev, dma_addr_t pa) +{ +	return ALIGN(pa - mic_system_page_mask(mdev), +		mdev->smpt->info.page_size); +} + +static inline u64 mic_smpt_align_high(struct mic_device *mdev, dma_addr_t pa) +{ +	return ALIGN(pa, mdev->smpt->info.page_size); +} + +/* Total Cumulative system memory accessible by MIC across all SMPT entries */ +static inline u64 mic_max_system_memory(struct mic_device *mdev) +{ +	return mdev->smpt->info.num_reg * mdev->smpt->info.page_size; +} + +/* Maximum system memory address accessible by MIC */ +static inline u64 mic_max_system_addr(struct mic_device *mdev) +{ +	return mdev->smpt->info.base + mic_max_system_memory(mdev) - 1ULL; +} + +/* Check if the DMA address is a MIC system memory address */ +static inline bool +mic_is_system_addr(struct mic_device *mdev, dma_addr_t pa) +{ +	return pa >= mdev->smpt->info.base && pa <= mic_max_system_addr(mdev); +} + +/* Populate an SMPT entry and update the reference counts. 
*/ +static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr, +		int entries, struct mic_device *mdev) +{ +	struct mic_smpt_info *smpt_info = mdev->smpt; +	int i; + +	for (i = spt; i < spt + entries; i++, +		addr += smpt_info->info.page_size) { +		if (!smpt_info->entry[i].ref_count && +		    (smpt_info->entry[i].dma_addr != addr)) { +			mdev->smpt_ops->set(mdev, addr, i); +			smpt_info->entry[i].dma_addr = addr; +		} +		smpt_info->entry[i].ref_count += ref[i - spt]; +	} +} + +/* + * Find an available MIC address in MIC SMPT address space + * for a given DMA address and size. + */ +static dma_addr_t mic_smpt_op(struct mic_device *mdev, u64 dma_addr, +				int entries, s64 *ref, size_t size) +{ +	int spt; +	int ae = 0; +	int i; +	unsigned long flags; +	dma_addr_t mic_addr = 0; +	dma_addr_t addr = dma_addr; +	struct mic_smpt_info *smpt_info = mdev->smpt; + +	spin_lock_irqsave(&smpt_info->smpt_lock, flags); + +	/* find existing entries */ +	for (i = 0; i < smpt_info->info.num_reg; i++) { +		if (smpt_info->entry[i].dma_addr == addr) { +			ae++; +			addr += smpt_info->info.page_size; +		} else if (ae) /* cannot find contiguous entries */ +			goto not_found; + +		if (ae == entries) +			goto found; +	} + +	/* find free entry */ +	for (ae = 0, i = 0; i < smpt_info->info.num_reg; i++) { +		ae = (smpt_info->entry[i].ref_count == 0) ? 
ae + 1 : 0; +		if (ae == entries) +			goto found; +	} + +not_found: +	spin_unlock_irqrestore(&smpt_info->smpt_lock, flags); +	return mic_addr; + +found: +	spt = i - entries + 1; +	mic_addr = mic_smpt_to_pa(mdev, spt); +	mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev); +	smpt_info->map_count++; +	smpt_info->ref_count += (s64)size; +	spin_unlock_irqrestore(&smpt_info->smpt_lock, flags); +	return mic_addr; +} + +/* + * Returns number of smpt entries needed for dma_addr to dma_addr + size + * also returns the reference count array for each of those entries + * and the starting smpt address + */ +static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr, +				size_t size, s64 *ref,  u64 *smpt_start) +{ +	u64 start =  dma_addr; +	u64 end = dma_addr + size; +	int i = 0; + +	while (start < end) { +		ref[i++] = min(mic_smpt_align_high(mdev, start + 1), +			end) - start; +		start = mic_smpt_align_high(mdev, start + 1); +	} + +	if (smpt_start) +		*smpt_start = mic_smpt_align_low(mdev, dma_addr); + +	return i; +} + +/* + * mic_to_dma_addr - Converts a MIC address to a DMA address. + * + * @mdev: pointer to mic_device instance. + * @mic_addr: MIC address. + * + * returns a DMA address. + */ +static dma_addr_t +mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr) +{ +	struct mic_smpt_info *smpt_info = mdev->smpt; +	int spt; +	dma_addr_t dma_addr; + +	if (!mic_is_system_addr(mdev, mic_addr)) { +		dev_err(mdev->sdev->parent, +			"mic_addr is invalid. mic_addr = 0x%llx\n", mic_addr); +		return -EINVAL; +	} +	spt = mic_sys_addr_to_smpt(mdev, mic_addr); +	dma_addr = smpt_info->entry[spt].dma_addr + +		mic_smpt_offset(mdev, mic_addr); +	return dma_addr; +} + +/** + * mic_map - Maps a DMA address to a MIC physical address. + * + * @mdev: pointer to mic_device instance. + * @dma_addr: DMA address. + * @size: Size of the region to be mapped. + * + * This API converts the DMA address provided to a DMA address understood + * by MIC. 
Caller should check for errors by calling mic_map_error(..). + * + * returns DMA address as required by MIC. + */ +dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size) +{ +	dma_addr_t mic_addr = 0; +	int num_entries; +	s64 *ref; +	u64 smpt_start; + +	if (!size || size > mic_max_system_memory(mdev)) +		return mic_addr; + +	ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL); +	if (!ref) +		return mic_addr; + +	num_entries = mic_get_smpt_ref_count(mdev, dma_addr, size, +		ref, &smpt_start); + +	/* Set the smpt table appropriately and get 16G aligned mic address */ +	mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size); + +	kfree(ref); + +	/* +	 * If mic_addr is zero then its an error case +	 * since mic_addr can never be zero. +	 * else generate mic_addr by adding the 16G offset in dma_addr +	 */ +	if (!mic_addr && MIC_FAMILY_X100 == mdev->family) { +		dev_err(mdev->sdev->parent, +			"mic_map failed dma_addr 0x%llx size 0x%lx\n", +			dma_addr, size); +		return mic_addr; +	} else { +		return mic_addr + mic_smpt_offset(mdev, dma_addr); +	} +} + +/** + * mic_unmap - Unmaps a MIC physical address. + * + * @mdev: pointer to mic_device instance. + * @mic_addr: MIC physical address. + * @size: Size of the region to be unmapped. + * + * This API unmaps the mappings created by mic_map(..). + * + * returns None. 
+ */ +void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size) +{ +	struct mic_smpt_info *smpt_info = mdev->smpt; +	s64 *ref; +	int num_smpt; +	int spt; +	int i; +	unsigned long flags; + +	if (!size) +		return; + +	if (!mic_is_system_addr(mdev, mic_addr)) { +		dev_err(mdev->sdev->parent, +			"invalid address: 0x%llx\n", mic_addr); +		return; +	} + +	spt = mic_sys_addr_to_smpt(mdev, mic_addr); +	ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL); +	if (!ref) +		return; + +	/* Get number of smpt entries to be mapped, ref count array */ +	num_smpt = mic_get_smpt_ref_count(mdev, mic_addr, size, ref, NULL); + +	spin_lock_irqsave(&smpt_info->smpt_lock, flags); +	smpt_info->unmap_count++; +	smpt_info->ref_count -= (s64)size; + +	for (i = spt; i < spt + num_smpt; i++) { +		smpt_info->entry[i].ref_count -= ref[i - spt]; +		if (smpt_info->entry[i].ref_count < 0) +			dev_warn(mdev->sdev->parent, +				 "ref count for entry %d is negative\n", i); +	} +	spin_unlock_irqrestore(&smpt_info->smpt_lock, flags); +	kfree(ref); +} + +/** + * mic_map_single - Maps a virtual address to a MIC physical address. + * + * @mdev: pointer to mic_device instance. + * @va: Kernel direct mapped virtual address. + * @size: Size of the region to be mapped. + * + * This API calls pci_map_single(..) for the direct mapped virtual address + * and then converts the DMA address provided to a DMA address understood + * by MIC. Caller should check for errors by calling mic_map_error(..). + * + * returns DMA address as required by MIC. 
+ */ +dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size) +{ +	dma_addr_t mic_addr = 0; +	struct pci_dev *pdev = container_of(mdev->sdev->parent, +		struct pci_dev, dev); +	dma_addr_t dma_addr = +		pci_map_single(pdev, va, size, PCI_DMA_BIDIRECTIONAL); + +	if (!pci_dma_mapping_error(pdev, dma_addr)) { +		mic_addr = mic_map(mdev, dma_addr, size); +		if (!mic_addr) { +			dev_err(mdev->sdev->parent, +				"mic_map failed dma_addr 0x%llx size 0x%lx\n", +				dma_addr, size); +			pci_unmap_single(pdev, dma_addr, +					 size, PCI_DMA_BIDIRECTIONAL); +		} +	} +	return mic_addr; +} + +/** + * mic_unmap_single - Unmaps a MIC physical address. + * + * @mdev: pointer to mic_device instance. + * @mic_addr: MIC physical address. + * @size: Size of the region to be unmapped. + * + * This API unmaps the mappings created by mic_map_single(..). + * + * returns None. + */ +void +mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size) +{ +	struct pci_dev *pdev = container_of(mdev->sdev->parent, +		struct pci_dev, dev); +	dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr); +	mic_unmap(mdev, mic_addr, size); +	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); +} + +/** + * mic_smpt_init - Initialize MIC System Memory Page Tables. + * + * @mdev: pointer to mic_device instance. + * + * returns 0 for success and -errno for error. 
+ */ +int mic_smpt_init(struct mic_device *mdev) +{ +	int i, err = 0; +	dma_addr_t dma_addr; +	struct mic_smpt_info *smpt_info; + +	mdev->smpt = kmalloc(sizeof(*mdev->smpt), GFP_KERNEL); +	if (!mdev->smpt) +		return -ENOMEM; + +	smpt_info = mdev->smpt; +	mdev->smpt_ops->init(mdev); +	smpt_info->entry = kmalloc_array(smpt_info->info.num_reg, +					 sizeof(*smpt_info->entry), GFP_KERNEL); +	if (!smpt_info->entry) { +		err = -ENOMEM; +		goto free_smpt; +	} +	spin_lock_init(&smpt_info->smpt_lock); +	for (i = 0; i < smpt_info->info.num_reg; i++) { +		dma_addr = i * smpt_info->info.page_size; +		smpt_info->entry[i].dma_addr = dma_addr; +		smpt_info->entry[i].ref_count = 0; +		mdev->smpt_ops->set(mdev, dma_addr, i); +	} +	smpt_info->ref_count = 0; +	smpt_info->map_count = 0; +	smpt_info->unmap_count = 0; +	return 0; +free_smpt: +	kfree(smpt_info); +	return err; +} + +/** + * mic_smpt_uninit - UnInitialize MIC System Memory Page Tables. + * + * @mdev: pointer to mic_device instance. + * + * returns None. + */ +void mic_smpt_uninit(struct mic_device *mdev) +{ +	struct mic_smpt_info *smpt_info = mdev->smpt; +	int i; + +	dev_dbg(mdev->sdev->parent, +		"nodeid %d SMPT ref count %lld map %lld unmap %lld\n", +		mdev->id, smpt_info->ref_count, +		smpt_info->map_count, smpt_info->unmap_count); + +	for (i = 0; i < smpt_info->info.num_reg; i++) { +		dev_dbg(mdev->sdev->parent, +			"SMPT entry[%d] dma_addr = 0x%llx ref_count = %lld\n", +			i, smpt_info->entry[i].dma_addr, +			smpt_info->entry[i].ref_count); +		if (smpt_info->entry[i].ref_count) +			dev_warn(mdev->sdev->parent, +				 "ref count for entry %d is not zero\n", i); +	} +	kfree(smpt_info->entry); +	kfree(smpt_info); +} + +/** + * mic_smpt_restore - Restore MIC System Memory Page Tables. + * + * @mdev: pointer to mic_device instance. + * + * Restore the SMPT registers to values previously stored in the + * SW data structures. 
Some MIC steppings lose register state + * across resets and this API should be called for performing + * a restore operation if required. + * + * returns None. + */ +void mic_smpt_restore(struct mic_device *mdev) +{ +	int i; +	dma_addr_t dma_addr; + +	for (i = 0; i < mdev->smpt->info.num_reg; i++) { +		dma_addr = mdev->smpt->entry[i].dma_addr; +		mdev->smpt_ops->set(mdev, dma_addr, i); +	} +} diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h new file mode 100644 index 00000000000..51970abfe7d --- /dev/null +++ b/drivers/misc/mic/host/mic_smpt.h @@ -0,0 +1,98 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef MIC_SMPT_H +#define MIC_SMPT_H +/** + * struct mic_smpt_ops - MIC HW specific SMPT operations. + * @init: Initialize hardware specific SMPT information in mic_smpt_hw_info. + * @set: Set the value for a particular SMPT entry. + */ +struct mic_smpt_ops { +	void (*init)(struct mic_device *mdev); +	void (*set)(struct mic_device *mdev, dma_addr_t dma_addr, u8 index); +}; + +/** + * struct mic_smpt - MIC SMPT entry information. + * @dma_addr: Base DMA address for this SMPT entry. + * @ref_count: Number of active mappings for this SMPT entry in bytes. + */ +struct mic_smpt { +	dma_addr_t dma_addr; +	s64 ref_count; +}; + +/** + * struct mic_smpt_hw_info - MIC SMPT hardware specific information. 
+ * @num_reg: Number of SMPT registers. + * @page_shift: System memory page shift. + * @page_size: System memory page size. + * @base: System address base. + */ +struct mic_smpt_hw_info { +	u8 num_reg; +	u8 page_shift; +	u64 page_size; +	u64 base; +}; + +/** + * struct mic_smpt_info - MIC SMPT information. + * @entry: Array of SMPT entries. + * @smpt_lock: Spin lock protecting access to SMPT data structures. + * @info: Hardware specific SMPT information. + * @ref_count: Number of active SMPT mappings (for debug). + * @map_count: Number of SMPT mappings created (for debug). + * @unmap_count: Number of SMPT mappings destroyed (for debug). + */ +struct mic_smpt_info { +	struct mic_smpt *entry; +	spinlock_t smpt_lock; +	struct mic_smpt_hw_info info; +	s64 ref_count; +	s64 map_count; +	s64 unmap_count; +}; + +dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size); +void mic_unmap_single(struct mic_device *mdev, +	dma_addr_t mic_addr, size_t size); +dma_addr_t mic_map(struct mic_device *mdev, +	dma_addr_t dma_addr, size_t size); +void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size); + +/** + * mic_map_error - Check a MIC address for errors. + * + * @mdev: pointer to mic_device instance. + * + * returns Whether there was an error during mic_map..(..) APIs. + */ +static inline bool mic_map_error(dma_addr_t mic_addr) +{ +	return !mic_addr; +} + +int mic_smpt_init(struct mic_device *mdev); +void mic_smpt_uninit(struct mic_device *mdev); +void mic_smpt_restore(struct mic_device *mdev); + +#endif diff --git a/drivers/misc/mic/host/mic_sysfs.c b/drivers/misc/mic/host/mic_sysfs.c new file mode 100644 index 00000000000..6dd864e4a61 --- /dev/null +++ b/drivers/misc/mic/host/mic_sysfs.c @@ -0,0 +1,459 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/pci.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" + +/* + * A state-to-string lookup table, for exposing a human readable state + * via sysfs. Always keep in sync with enum mic_states + */ +static const char * const mic_state_string[] = { +	[MIC_OFFLINE] = "offline", +	[MIC_ONLINE] = "online", +	[MIC_SHUTTING_DOWN] = "shutting_down", +	[MIC_RESET_FAILED] = "reset_failed", +	[MIC_SUSPENDING] = "suspending", +	[MIC_SUSPENDED] = "suspended", +}; + +/* + * A shutdown-status-to-string lookup table, for exposing a human + * readable state via sysfs. 
Always keep in sync with enum mic_shutdown_status + */ +static const char * const mic_shutdown_status_string[] = { +	[MIC_NOP] = "nop", +	[MIC_CRASHED] = "crashed", +	[MIC_HALTED] = "halted", +	[MIC_POWER_OFF] = "poweroff", +	[MIC_RESTART] = "restart", +}; + +void mic_set_shutdown_status(struct mic_device *mdev, u8 shutdown_status) +{ +	dev_dbg(mdev->sdev->parent, "Shutdown Status %s -> %s\n", +		mic_shutdown_status_string[mdev->shutdown_status], +		mic_shutdown_status_string[shutdown_status]); +	mdev->shutdown_status = shutdown_status; +} + +void mic_set_state(struct mic_device *mdev, u8 state) +{ +	dev_dbg(mdev->sdev->parent, "State %s -> %s\n", +		mic_state_string[mdev->state], +		mic_state_string[state]); +	mdev->state = state; +	sysfs_notify_dirent(mdev->state_sysfs); +} + +static ssize_t +family_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	static const char x100[] = "x100"; +	static const char unknown[] = "Unknown"; +	const char *card = NULL; +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev) +		return -EINVAL; + +	switch (mdev->family) { +	case MIC_FAMILY_X100: +		card = x100; +		break; +	default: +		card = unknown; +		break; +	} +	return scnprintf(buf, PAGE_SIZE, "%s\n", card); +} +static DEVICE_ATTR_RO(family); + +static ssize_t +stepping_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	char *string = "??"; + +	if (!mdev) +		return -EINVAL; + +	switch (mdev->stepping) { +	case MIC_A0_STEP: +		string = "A0"; +		break; +	case MIC_B0_STEP: +		string = "B0"; +		break; +	case MIC_B1_STEP: +		string = "B1"; +		break; +	case MIC_C0_STEP: +		string = "C0"; +		break; +	default: +		break; +	} +	return scnprintf(buf, PAGE_SIZE, "%s\n", string); +} +static DEVICE_ATTR_RO(stepping); + +static ssize_t +state_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if 
(!mdev || mdev->state >= MIC_LAST) +		return -EINVAL; + +	return scnprintf(buf, PAGE_SIZE, "%s\n", +		mic_state_string[mdev->state]); +} + +static ssize_t +state_store(struct device *dev, struct device_attribute *attr, +	    const char *buf, size_t count) +{ +	int rc = 0; +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	if (!mdev) +		return -EINVAL; +	if (sysfs_streq(buf, "boot")) { +		rc = mic_start(mdev, buf); +		if (rc) { +			dev_err(mdev->sdev->parent, +				"mic_boot failed rc %d\n", rc); +			count = rc; +		} +		goto done; +	} + +	if (sysfs_streq(buf, "reset")) { +		schedule_work(&mdev->reset_trigger_work); +		goto done; +	} + +	if (sysfs_streq(buf, "shutdown")) { +		mic_shutdown(mdev); +		goto done; +	} + +	if (sysfs_streq(buf, "suspend")) { +		mic_suspend(mdev); +		goto done; +	} + +	count = -EINVAL; +done: +	return count; +} +static DEVICE_ATTR_RW(state); + +static ssize_t shutdown_status_show(struct device *dev, +				    struct device_attribute *attr, char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev || mdev->shutdown_status >= MIC_STATUS_LAST) +		return -EINVAL; + +	return scnprintf(buf, PAGE_SIZE, "%s\n", +		mic_shutdown_status_string[mdev->shutdown_status]); +} +static DEVICE_ATTR_RO(shutdown_status); + +static ssize_t +cmdline_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	char *cmdline; + +	if (!mdev) +		return -EINVAL; + +	cmdline = mdev->cmdline; + +	if (cmdline) +		return scnprintf(buf, PAGE_SIZE, "%s\n", cmdline); +	return 0; +} + +static ssize_t +cmdline_store(struct device *dev, struct device_attribute *attr, +	      const char *buf, size_t count) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev) +		return -EINVAL; + +	mutex_lock(&mdev->mic_mutex); +	kfree(mdev->cmdline); + +	mdev->cmdline = kmalloc(count + 1, GFP_KERNEL); +	if (!mdev->cmdline) { +		count = -ENOMEM; +		goto unlock; +	} + +	
strncpy(mdev->cmdline, buf, count); + +	if (mdev->cmdline[count - 1] == '\n') +		mdev->cmdline[count - 1] = '\0'; +	else +		mdev->cmdline[count] = '\0'; +unlock: +	mutex_unlock(&mdev->mic_mutex); +	return count; +} +static DEVICE_ATTR_RW(cmdline); + +static ssize_t +firmware_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	char *firmware; + +	if (!mdev) +		return -EINVAL; + +	firmware = mdev->firmware; + +	if (firmware) +		return scnprintf(buf, PAGE_SIZE, "%s\n", firmware); +	return 0; +} + +static ssize_t +firmware_store(struct device *dev, struct device_attribute *attr, +	       const char *buf, size_t count) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev) +		return -EINVAL; + +	mutex_lock(&mdev->mic_mutex); +	kfree(mdev->firmware); + +	mdev->firmware = kmalloc(count + 1, GFP_KERNEL); +	if (!mdev->firmware) { +		count = -ENOMEM; +		goto unlock; +	} +	strncpy(mdev->firmware, buf, count); + +	if (mdev->firmware[count - 1] == '\n') +		mdev->firmware[count - 1] = '\0'; +	else +		mdev->firmware[count] = '\0'; +unlock: +	mutex_unlock(&mdev->mic_mutex); +	return count; +} +static DEVICE_ATTR_RW(firmware); + +static ssize_t +ramdisk_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	char *ramdisk; + +	if (!mdev) +		return -EINVAL; + +	ramdisk = mdev->ramdisk; + +	if (ramdisk) +		return scnprintf(buf, PAGE_SIZE, "%s\n", ramdisk); +	return 0; +} + +static ssize_t +ramdisk_store(struct device *dev, struct device_attribute *attr, +	      const char *buf, size_t count) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev) +		return -EINVAL; + +	mutex_lock(&mdev->mic_mutex); +	kfree(mdev->ramdisk); + +	mdev->ramdisk = kmalloc(count + 1, GFP_KERNEL); +	if (!mdev->ramdisk) { +		count = -ENOMEM; +		goto unlock; +	} + +	strncpy(mdev->ramdisk, buf, count); + +	if 
(mdev->ramdisk[count - 1] == '\n') +		mdev->ramdisk[count - 1] = '\0'; +	else +		mdev->ramdisk[count] = '\0'; +unlock: +	mutex_unlock(&mdev->mic_mutex); +	return count; +} +static DEVICE_ATTR_RW(ramdisk); + +static ssize_t +bootmode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	char *bootmode; + +	if (!mdev) +		return -EINVAL; + +	bootmode = mdev->bootmode; + +	if (bootmode) +		return scnprintf(buf, PAGE_SIZE, "%s\n", bootmode); +	return 0; +} + +static ssize_t +bootmode_store(struct device *dev, struct device_attribute *attr, +	       const char *buf, size_t count) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev) +		return -EINVAL; + +	if (!sysfs_streq(buf, "linux") && !sysfs_streq(buf, "elf")) +		return -EINVAL; + +	mutex_lock(&mdev->mic_mutex); +	kfree(mdev->bootmode); + +	mdev->bootmode = kmalloc(count + 1, GFP_KERNEL); +	if (!mdev->bootmode) { +		count = -ENOMEM; +		goto unlock; +	} + +	strncpy(mdev->bootmode, buf, count); + +	if (mdev->bootmode[count - 1] == '\n') +		mdev->bootmode[count - 1] = '\0'; +	else +		mdev->bootmode[count] = '\0'; +unlock: +	mutex_unlock(&mdev->mic_mutex); +	return count; +} +static DEVICE_ATTR_RW(bootmode); + +static ssize_t +log_buf_addr_show(struct device *dev, struct device_attribute *attr, +		  char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev) +		return -EINVAL; + +	return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_addr); +} + +static ssize_t +log_buf_addr_store(struct device *dev, struct device_attribute *attr, +		   const char *buf, size_t count) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	int ret; +	unsigned long addr; + +	if (!mdev) +		return -EINVAL; + +	ret = kstrtoul(buf, 16, &addr); +	if (ret) +		goto exit; + +	mdev->log_buf_addr = (void *)addr; +	ret = count; +exit: +	return ret; +} +static DEVICE_ATTR_RW(log_buf_addr); + +static ssize_t 
+log_buf_len_show(struct device *dev, struct device_attribute *attr, +		 char *buf) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); + +	if (!mdev) +		return -EINVAL; + +	return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_len); +} + +static ssize_t +log_buf_len_store(struct device *dev, struct device_attribute *attr, +		  const char *buf, size_t count) +{ +	struct mic_device *mdev = dev_get_drvdata(dev->parent); +	int ret; +	unsigned long addr; + +	if (!mdev) +		return -EINVAL; + +	ret = kstrtoul(buf, 16, &addr); +	if (ret) +		goto exit; + +	mdev->log_buf_len = (int *)addr; +	ret = count; +exit: +	return ret; +} +static DEVICE_ATTR_RW(log_buf_len); + +static struct attribute *mic_default_attrs[] = { +	&dev_attr_family.attr, +	&dev_attr_stepping.attr, +	&dev_attr_state.attr, +	&dev_attr_shutdown_status.attr, +	&dev_attr_cmdline.attr, +	&dev_attr_firmware.attr, +	&dev_attr_ramdisk.attr, +	&dev_attr_bootmode.attr, +	&dev_attr_log_buf_addr.attr, +	&dev_attr_log_buf_len.attr, + +	NULL +}; + +ATTRIBUTE_GROUPS(mic_default); + +void mic_sysfs_init(struct mic_device *mdev) +{ +	mdev->attr_group = mic_default_groups; +} diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c new file mode 100644 index 00000000000..7e1ef0ebbb8 --- /dev/null +++ b/drivers/misc/mic/host/mic_virtio.c @@ -0,0 +1,701 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_smpt.h"
+#include "mic_virtio.h"
+
+/*
+ * Initiates the copies across the PCIe bus from card memory to
+ * a user space buffer.
+ */
+static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
+		void __user *ubuf, size_t len, u64 addr)
+{
+	int err;
+	void __iomem *dbuf = mvdev->mdev->aper.va + addr;
+	/*
+	 * We are copying from IO below and should ideally use something
+	 * like copy_to_user_fromio(..) if it existed.
+	 */
+	if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
+		err = -EFAULT;
+		dev_err(mic_dev(mvdev), "%s %d err %d\n",
+			__func__, __LINE__, err);
+		goto err;
+	}
+	mvdev->in_bytes += len;
+	err = 0;
+err:
+	return err;
+}
+
+/*
+ * Initiates copies across the PCIe bus from a user space
+ * buffer to card memory.
+ */
+static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
+		void __user *ubuf, size_t len, u64 addr)
+{
+	int err;
+	void __iomem *dbuf = mvdev->mdev->aper.va + addr;
+	/*
+	 * We are copying to IO below and should ideally use something
+	 * like copy_from_user_toio(..) if it existed.
+	 */ +	if (copy_from_user((void __force *)dbuf, ubuf, len)) { +		err = -EFAULT; +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, err); +		goto err; +	} +	mvdev->out_bytes += len; +	err = 0; +err: +	return err; +} + +#define MIC_VRINGH_READ true + +/* The function to call to notify the card about added buffers */ +static void mic_notify(struct vringh *vrh) +{ +	struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh); +	struct mic_vdev *mvdev = mvrh->mvdev; +	s8 db = mvdev->dc->h2c_vdev_db; + +	if (db != -1) +		mvdev->mdev->ops->send_intr(mvdev->mdev, db); +} + +/* Determine the total number of bytes consumed in a VRINGH KIOV */ +static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov) +{ +	int i; +	u32 total = iov->consumed; + +	for (i = 0; i < iov->i; i++) +		total += iov->iov[i].iov_len; +	return total; +} + +/* + * Traverse the VRINGH KIOV and issue the APIs to trigger the copies. + * This API is heavily based on the vringh_iov_xfer(..) implementation + * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..) + * and vringh_iov_push_kern(..) directly is because there is no + * way to override the VRINGH xfer(..) routines as of v3.10. 
+ */ +static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, +	void __user *ubuf, size_t len, bool read, size_t *out_len) +{ +	int ret = 0; +	size_t partlen, tot_len = 0; + +	while (len && iov->i < iov->used) { +		partlen = min(iov->iov[iov->i].iov_len, len); +		if (read) +			ret = mic_virtio_copy_to_user(mvdev, +				ubuf, partlen, +				(u64)iov->iov[iov->i].iov_base); +		else +			ret = mic_virtio_copy_from_user(mvdev, +				ubuf, partlen, +				(u64)iov->iov[iov->i].iov_base); +		if (ret) { +			dev_err(mic_dev(mvdev), "%s %d err %d\n", +				__func__, __LINE__, ret); +			break; +		} +		len -= partlen; +		ubuf += partlen; +		tot_len += partlen; +		iov->consumed += partlen; +		iov->iov[iov->i].iov_len -= partlen; +		iov->iov[iov->i].iov_base += partlen; +		if (!iov->iov[iov->i].iov_len) { +			/* Fix up old iov element then increment. */ +			iov->iov[iov->i].iov_len = iov->consumed; +			iov->iov[iov->i].iov_base -= iov->consumed; + +			iov->consumed = 0; +			iov->i++; +		} +	} +	*out_len = tot_len; +	return ret; +} + +/* + * Use the standard VRINGH infrastructure in the kernel to fetch new + * descriptors, initiate the copies and update the used ring. 
+ */ +static int _mic_virtio_copy(struct mic_vdev *mvdev, +	struct mic_copy_desc *copy) +{ +	int ret = 0; +	u32 iovcnt = copy->iovcnt; +	struct iovec iov; +	struct iovec __user *u_iov = copy->iov; +	void __user *ubuf = NULL; +	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; +	struct vringh_kiov *riov = &mvr->riov; +	struct vringh_kiov *wiov = &mvr->wiov; +	struct vringh *vrh = &mvr->vrh; +	u16 *head = &mvr->head; +	struct mic_vring *vr = &mvr->vring; +	size_t len = 0, out_len; + +	copy->out_len = 0; +	/* Fetch a new IOVEC if all previous elements have been processed */ +	if (riov->i == riov->used && wiov->i == wiov->used) { +		ret = vringh_getdesc_kern(vrh, riov, wiov, +				head, GFP_KERNEL); +		/* Check if there are available descriptors */ +		if (ret <= 0) +			return ret; +	} +	while (iovcnt) { +		if (!len) { +			/* Copy over a new iovec from user space. */ +			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov)); +			if (ret) { +				ret = -EINVAL; +				dev_err(mic_dev(mvdev), "%s %d err %d\n", +					__func__, __LINE__, ret); +				break; +			} +			len = iov.iov_len; +			ubuf = iov.iov_base; +		} +		/* Issue all the read descriptors first */ +		ret = mic_vringh_copy(mvdev, riov, ubuf, len, +			MIC_VRINGH_READ, &out_len); +		if (ret) { +			dev_err(mic_dev(mvdev), "%s %d err %d\n", +				__func__, __LINE__, ret); +			break; +		} +		len -= out_len; +		ubuf += out_len; +		copy->out_len += out_len; +		/* Issue the write descriptors next */ +		ret = mic_vringh_copy(mvdev, wiov, ubuf, len, +			!MIC_VRINGH_READ, &out_len); +		if (ret) { +			dev_err(mic_dev(mvdev), "%s %d err %d\n", +				__func__, __LINE__, ret); +			break; +		} +		len -= out_len; +		ubuf += out_len; +		copy->out_len += out_len; +		if (!len) { +			/* One user space iovec is now completed */ +			iovcnt--; +			u_iov++; +		} +		/* Exit loop if all elements in KIOVs have been processed. 
*/ +		if (riov->i == riov->used && wiov->i == wiov->used) +			break; +	} +	/* +	 * Update the used ring if a descriptor was available and some data was +	 * copied in/out and the user asked for a used ring update. +	 */ +	if (*head != USHRT_MAX && copy->out_len && copy->update_used) { +		u32 total = 0; + +		/* Determine the total data consumed */ +		total += mic_vringh_iov_consumed(riov); +		total += mic_vringh_iov_consumed(wiov); +		vringh_complete_kern(vrh, *head, total); +		*head = USHRT_MAX; +		if (vringh_need_notify_kern(vrh) > 0) +			vringh_notify(vrh); +		vringh_kiov_cleanup(riov); +		vringh_kiov_cleanup(wiov); +		/* Update avail idx for user space */ +		vr->info->avail_idx = vrh->last_avail_idx; +	} +	return ret; +} + +static inline int mic_verify_copy_args(struct mic_vdev *mvdev, +		struct mic_copy_desc *copy) +{ +	if (copy->vr_idx >= mvdev->dd->num_vq) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, -EINVAL); +		return -EINVAL; +	} +	return 0; +} + +/* Copy a specified number of virtio descriptors in a chain */ +int mic_virtio_copy_desc(struct mic_vdev *mvdev, +		struct mic_copy_desc *copy) +{ +	int err; +	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; + +	err = mic_verify_copy_args(mvdev, copy); +	if (err) +		return err; + +	mutex_lock(&mvr->vr_mutex); +	if (!mic_vdevup(mvdev)) { +		err = -ENODEV; +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, err); +		goto err; +	} +	err = _mic_virtio_copy(mvdev, copy); +	if (err) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, err); +	} +err: +	mutex_unlock(&mvr->vr_mutex); +	return err; +} + +static void mic_virtio_init_post(struct mic_vdev *mvdev) +{ +	struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd); +	int i; + +	for (i = 0; i < mvdev->dd->num_vq; i++) { +		if (!le64_to_cpu(vqconfig[i].used_address)) { +			dev_warn(mic_dev(mvdev), "used_address zero??\n"); +			continue; +		} +		mvdev->mvr[i].vrh.vring.used = +			(void __force 
*)mvdev->mdev->aper.va + +			le64_to_cpu(vqconfig[i].used_address); +	} + +	mvdev->dc->used_address_updated = 0; + +	dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n", +		__func__, mvdev->virtio_id); +} + +static inline void mic_virtio_device_reset(struct mic_vdev *mvdev) +{ +	int i; + +	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n", +		__func__, mvdev->dd->status, mvdev->virtio_id); + +	for (i = 0; i < mvdev->dd->num_vq; i++) +		/* +		 * Avoid lockdep false positive. The + 1 is for the mic +		 * mutex which is held in the reset devices code path. +		 */ +		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); + +	/* 0 status means "reset" */ +	mvdev->dd->status = 0; +	mvdev->dc->vdev_reset = 0; +	mvdev->dc->host_ack = 1; + +	for (i = 0; i < mvdev->dd->num_vq; i++) { +		struct vringh *vrh = &mvdev->mvr[i].vrh; +		mvdev->mvr[i].vring.info->avail_idx = 0; +		vrh->completed = 0; +		vrh->last_avail_idx = 0; +		vrh->last_used_idx = 0; +	} + +	for (i = 0; i < mvdev->dd->num_vq; i++) +		mutex_unlock(&mvdev->mvr[i].vr_mutex); +} + +void mic_virtio_reset_devices(struct mic_device *mdev) +{ +	struct list_head *pos, *tmp; +	struct mic_vdev *mvdev; + +	dev_dbg(mdev->sdev->parent, "%s\n",  __func__); + +	list_for_each_safe(pos, tmp, &mdev->vdev_list) { +		mvdev = list_entry(pos, struct mic_vdev, list); +		mic_virtio_device_reset(mvdev); +		mvdev->poll_wake = 1; +		wake_up(&mvdev->waitq); +	} +} + +void mic_bh_handler(struct work_struct *work) +{ +	struct mic_vdev *mvdev = container_of(work, struct mic_vdev, +			virtio_bh_work); + +	if (mvdev->dc->used_address_updated) +		mic_virtio_init_post(mvdev); + +	if (mvdev->dc->vdev_reset) +		mic_virtio_device_reset(mvdev); + +	mvdev->poll_wake = 1; +	wake_up(&mvdev->waitq); +} + +static irqreturn_t mic_virtio_intr_handler(int irq, void *data) +{ +	struct mic_vdev *mvdev = data; +	struct mic_device *mdev = mvdev->mdev; + +	mdev->ops->intr_workarounds(mdev); +	schedule_work(&mvdev->virtio_bh_work); +	return IRQ_HANDLED; 
+} + +int mic_virtio_config_change(struct mic_vdev *mvdev, +			void __user *argp) +{ +	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); +	int ret = 0, retry, i; +	struct mic_bootparam *bootparam = mvdev->mdev->dp; +	s8 db = bootparam->h2c_config_db; + +	mutex_lock(&mvdev->mdev->mic_mutex); +	for (i = 0; i < mvdev->dd->num_vq; i++) +		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); + +	if (db == -1 || mvdev->dd->type == -1) { +		ret = -EIO; +		goto exit; +	} + +	if (copy_from_user(mic_vq_configspace(mvdev->dd), +			   argp, mvdev->dd->config_len)) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, -EFAULT); +		ret = -EFAULT; +		goto exit; +	} +	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; +	mvdev->mdev->ops->send_intr(mvdev->mdev, db); + +	for (retry = 100; retry--;) { +		ret = wait_event_timeout(wake, +			mvdev->dc->guest_ack, msecs_to_jiffies(100)); +		if (ret) +			break; +	} + +	dev_dbg(mic_dev(mvdev), +		"%s %d retry: %d\n", __func__, __LINE__, retry); +	mvdev->dc->config_change = 0; +	mvdev->dc->guest_ack = 0; +exit: +	for (i = 0; i < mvdev->dd->num_vq; i++) +		mutex_unlock(&mvdev->mvr[i].vr_mutex); +	mutex_unlock(&mvdev->mdev->mic_mutex); +	return ret; +} + +static int mic_copy_dp_entry(struct mic_vdev *mvdev, +					void __user *argp, +					__u8 *type, +					struct mic_device_desc **devpage) +{ +	struct mic_device *mdev = mvdev->mdev; +	struct mic_device_desc dd, *dd_config, *devp; +	struct mic_vqconfig *vqconfig; +	int ret = 0, i; +	bool slot_found = false; + +	if (copy_from_user(&dd, argp, sizeof(dd))) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, -EFAULT); +		return -EFAULT; +	} + +	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE || +	    dd.num_vq > MIC_MAX_VRINGS) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, -EINVAL); +		return -EINVAL; +	} + +	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL); +	if (dd_config == NULL) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			
__func__, __LINE__, -ENOMEM); +		return -ENOMEM; +	} +	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) { +		ret = -EFAULT; +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, ret); +		goto exit; +	} + +	vqconfig = mic_vq_config(dd_config); +	for (i = 0; i < dd.num_vq; i++) { +		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) { +			ret =  -EINVAL; +			dev_err(mic_dev(mvdev), "%s %d err %d\n", +				__func__, __LINE__, ret); +			goto exit; +		} +	} + +	/* Find the first free device page entry */ +	for (i = sizeof(struct mic_bootparam); +		i < MIC_DP_SIZE - mic_total_desc_size(dd_config); +		i += mic_total_desc_size(devp)) { +		devp = mdev->dp + i; +		if (devp->type == 0 || devp->type == -1) { +			slot_found = true; +			break; +		} +	} +	if (!slot_found) { +		ret =  -EINVAL; +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, ret); +		goto exit; +	} +	/* +	 * Save off the type before doing the memcpy. Type will be set in the +	 * end after completing all initialization for the new device. 
+	 */ +	*type = dd_config->type; +	dd_config->type = 0; +	memcpy(devp, dd_config, mic_desc_size(dd_config)); + +	*devpage = devp; +exit: +	kfree(dd_config); +	return ret; +} + +static void mic_init_device_ctrl(struct mic_vdev *mvdev, +				struct mic_device_desc *devpage) +{ +	struct mic_device_ctrl *dc; + +	dc = (void *)devpage + mic_aligned_desc_size(devpage); + +	dc->config_change = 0; +	dc->guest_ack = 0; +	dc->vdev_reset = 0; +	dc->host_ack = 0; +	dc->used_address_updated = 0; +	dc->c2h_vdev_db = -1; +	dc->h2c_vdev_db = -1; +	mvdev->dc = dc; +} + +int mic_virtio_add_device(struct mic_vdev *mvdev, +			void __user *argp) +{ +	struct mic_device *mdev = mvdev->mdev; +	struct mic_device_desc *dd = NULL; +	struct mic_vqconfig *vqconfig; +	int vr_size, i, j, ret; +	u8 type = 0; +	s8 db; +	char irqname[10]; +	struct mic_bootparam *bootparam = mdev->dp; +	u16 num; +	dma_addr_t vr_addr; + +	mutex_lock(&mdev->mic_mutex); + +	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd); +	if (ret) { +		mutex_unlock(&mdev->mic_mutex); +		return ret; +	} + +	mic_init_device_ctrl(mvdev, dd); + +	mvdev->dd = dd; +	mvdev->virtio_id = type; +	vqconfig = mic_vq_config(dd); +	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler); + +	for (i = 0; i < dd->num_vq; i++) { +		struct mic_vringh *mvr = &mvdev->mvr[i]; +		struct mic_vring *vr = &mvdev->mvr[i].vring; +		num = le16_to_cpu(vqconfig[i].num); +		mutex_init(&mvr->vr_mutex); +		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + +			sizeof(struct _mic_vring_info)); +		vr->va = (void *) +			__get_free_pages(GFP_KERNEL | __GFP_ZERO, +					 get_order(vr_size)); +		if (!vr->va) { +			ret = -ENOMEM; +			dev_err(mic_dev(mvdev), "%s %d err %d\n", +				__func__, __LINE__, ret); +			goto err; +		} +		vr->len = vr_size; +		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); +		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); +		vr_addr = mic_map_single(mdev, vr->va, vr_size); +		if (mic_map_error(vr_addr)) { +			
free_pages((unsigned long)vr->va, get_order(vr_size)); +			ret = -ENOMEM; +			dev_err(mic_dev(mvdev), "%s %d err %d\n", +				__func__, __LINE__, ret); +			goto err; +		} +		vqconfig[i].address = cpu_to_le64(vr_addr); + +		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); +		ret = vringh_init_kern(&mvr->vrh, +			*(u32 *)mic_vq_features(mvdev->dd), num, false, +			vr->vr.desc, vr->vr.avail, vr->vr.used); +		if (ret) { +			dev_err(mic_dev(mvdev), "%s %d err %d\n", +				__func__, __LINE__, ret); +			goto err; +		} +		vringh_kiov_init(&mvr->riov, NULL, 0); +		vringh_kiov_init(&mvr->wiov, NULL, 0); +		mvr->head = USHRT_MAX; +		mvr->mvdev = mvdev; +		mvr->vrh.notify = mic_notify; +		dev_dbg(mdev->sdev->parent, +			"%s %d index %d va %p info %p vr_size 0x%x\n", +			__func__, __LINE__, i, vr->va, vr->info, vr_size); +	} + +	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id, +		 mvdev->virtio_id); +	mvdev->virtio_db = mic_next_db(mdev); +	mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler, +			irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB); +	if (IS_ERR(mvdev->virtio_cookie)) { +		ret = PTR_ERR(mvdev->virtio_cookie); +		dev_dbg(mdev->sdev->parent, "request irq failed\n"); +		goto err; +	} + +	mvdev->dc->c2h_vdev_db = mvdev->virtio_db; + +	list_add_tail(&mvdev->list, &mdev->vdev_list); +	/* +	 * Order the type update with previous stores. This write barrier +	 * is paired with the corresponding read barrier before the uncached +	 * system memory read of the type, on the card while scanning the +	 * device page. 
+	 */ +	smp_wmb(); +	dd->type = type; + +	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type); + +	db = bootparam->h2c_config_db; +	if (db != -1) +		mdev->ops->send_intr(mdev, db); +	mutex_unlock(&mdev->mic_mutex); +	return 0; +err: +	vqconfig = mic_vq_config(dd); +	for (j = 0; j < i; j++) { +		struct mic_vringh *mvr = &mvdev->mvr[j]; +		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address), +				 mvr->vring.len); +		free_pages((unsigned long)mvr->vring.va, +			   get_order(mvr->vring.len)); +	} +	mutex_unlock(&mdev->mic_mutex); +	return ret; +} + +void mic_virtio_del_device(struct mic_vdev *mvdev) +{ +	struct list_head *pos, *tmp; +	struct mic_vdev *tmp_mvdev; +	struct mic_device *mdev = mvdev->mdev; +	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); +	int i, ret, retry; +	struct mic_vqconfig *vqconfig; +	struct mic_bootparam *bootparam = mdev->dp; +	s8 db; + +	mutex_lock(&mdev->mic_mutex); +	db = bootparam->h2c_config_db; +	if (db == -1) +		goto skip_hot_remove; +	dev_dbg(mdev->sdev->parent, +		"Requesting hot remove id %d\n", mvdev->virtio_id); +	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; +	mdev->ops->send_intr(mdev, db); +	for (retry = 100; retry--;) { +		ret = wait_event_timeout(wake, +			mvdev->dc->guest_ack, msecs_to_jiffies(100)); +		if (ret) +			break; +	} +	dev_dbg(mdev->sdev->parent, +		"Device id %d config_change %d guest_ack %d retry %d\n", +		mvdev->virtio_id, mvdev->dc->config_change, +		mvdev->dc->guest_ack, retry); +	mvdev->dc->config_change = 0; +	mvdev->dc->guest_ack = 0; +skip_hot_remove: +	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev); +	flush_work(&mvdev->virtio_bh_work); +	vqconfig = mic_vq_config(mvdev->dd); +	for (i = 0; i < mvdev->dd->num_vq; i++) { +		struct mic_vringh *mvr = &mvdev->mvr[i]; +		vringh_kiov_cleanup(&mvr->riov); +		vringh_kiov_cleanup(&mvr->wiov); +		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address), +				 mvr->vring.len); +		free_pages((unsigned long)mvr->vring.va, +			   
get_order(mvr->vring.len)); +	} + +	list_for_each_safe(pos, tmp, &mdev->vdev_list) { +		tmp_mvdev = list_entry(pos, struct mic_vdev, list); +		if (tmp_mvdev == mvdev) { +			list_del(pos); +			dev_dbg(mdev->sdev->parent, +				"Removing virtio device id %d\n", +				mvdev->virtio_id); +			break; +		} +	} +	/* +	 * Order the type update with previous stores. This write barrier +	 * is paired with the corresponding read barrier before the uncached +	 * system memory read of the type, on the card while scanning the +	 * device page. +	 */ +	smp_wmb(); +	mvdev->dd->type = -1; +	mutex_unlock(&mdev->mic_mutex); +} diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/host/mic_virtio.h new file mode 100644 index 00000000000..184f3c84805 --- /dev/null +++ b/drivers/misc/mic/host/mic_virtio.h @@ -0,0 +1,138 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef MIC_VIRTIO_H +#define MIC_VIRTIO_H + +#include <linux/virtio_config.h> +#include <linux/mic_ioctl.h> + +/* + * Note on endianness. + * 1. Host can be both BE or LE + * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail + *    rings and ioreadXX/iowriteXX to access used ring. + * 3. Device page exposed by host to guest contains LE values. Guest + *    accesses these using ioreadXX/iowriteXX etc. 
This way in general we + *    obey the virtio spec according to which guest works with native + *    endianness and host is aware of guest endianness and does all + *    required endianness conversion. + * 4. Data provided from user space to guest (in ADD_DEVICE and + *    CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be + *    in guest endianness. + */ + +/** + * struct mic_vringh - Virtio ring host information. + * + * @vring: The MIC vring used for setting up user space mappings. + * @vrh: The host VRINGH used for accessing the card vrings. + * @riov: The VRINGH read kernel IOV. + * @wiov: The VRINGH write kernel IOV. + * @head: The VRINGH head index address passed to vringh_getdesc_kern(..). + * @vr_mutex: Mutex for synchronizing access to the VRING. + * @mvdev: Back pointer to MIC virtio device for vringh_notify(..). + */ +struct mic_vringh { +	struct mic_vring vring; +	struct vringh vrh; +	struct vringh_kiov riov; +	struct vringh_kiov wiov; +	u16 head; +	struct mutex vr_mutex; +	struct mic_vdev *mvdev; +}; + +/** + * struct mic_vdev - Host information for a card Virtio device. + * + * @virtio_id - Virtio device id. + * @waitq - Waitqueue to allow ring3 apps to poll. + * @mdev - Back pointer to host MIC device. + * @poll_wake - Used for waking up threads blocked in poll. + * @out_bytes - Debug stats for number of bytes copied from host to card. + * @in_bytes - Debug stats for number of bytes copied from card to host. + * @mvr - Store per VRING data structures. + * @virtio_bh_work - Work struct used to schedule virtio bottom half handling. + * @dd - Virtio device descriptor. + * @dc - Virtio device control fields. + * @list - List of Virtio devices. + * @virtio_db - The doorbell used by the card to interrupt the host. + * @virtio_cookie - The cookie returned while requesting interrupts. 
+ */ +struct mic_vdev { +	int virtio_id; +	wait_queue_head_t waitq; +	struct mic_device *mdev; +	int poll_wake; +	unsigned long out_bytes; +	unsigned long in_bytes; +	struct mic_vringh mvr[MIC_MAX_VRINGS]; +	struct work_struct virtio_bh_work; +	struct mic_device_desc *dd; +	struct mic_device_ctrl *dc; +	struct list_head list; +	int virtio_db; +	struct mic_irq *virtio_cookie; +}; + +void mic_virtio_uninit(struct mic_device *mdev); +int mic_virtio_add_device(struct mic_vdev *mvdev, +			void __user *argp); +void mic_virtio_del_device(struct mic_vdev *mvdev); +int mic_virtio_config_change(struct mic_vdev *mvdev, +			void __user *argp); +int mic_virtio_copy_desc(struct mic_vdev *mvdev, +	struct mic_copy_desc *request); +void mic_virtio_reset_devices(struct mic_device *mdev); +void mic_bh_handler(struct work_struct *work); + +/* Helper API to obtain the MIC PCIe device */ +static inline struct device *mic_dev(struct mic_vdev *mvdev) +{ +	return mvdev->mdev->sdev->parent; +} + +/* Helper API to check if a virtio device is initialized */ +static inline int mic_vdev_inited(struct mic_vdev *mvdev) +{ +	/* Device has not been created yet */ +	if (!mvdev->dd || !mvdev->dd->type) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, -EINVAL); +		return -EINVAL; +	} + +	/* Device has been removed/deleted */ +	if (mvdev->dd->type == -1) { +		dev_err(mic_dev(mvdev), "%s %d err %d\n", +			__func__, __LINE__, -ENODEV); +		return -ENODEV; +	} + +	return 0; +} + +/* Helper API to check if a virtio device is running */ +static inline bool mic_vdevup(struct mic_vdev *mvdev) +{ +	return !!mvdev->dd->status; +} +#endif diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c new file mode 100644 index 00000000000..5562fdd3ef4 --- /dev/null +++ b/drivers/misc/mic/host/mic_x100.c @@ -0,0 +1,574 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/firmware.h> +#include <linux/delay.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_x100.h" +#include "mic_smpt.h" + +/** + * mic_x100_write_spad - write to the scratchpad register + * @mdev: pointer to mic_device instance + * @idx: index to the scratchpad register, 0 based + * @val: the data value to put into the register + * + * This function allows writing of a 32bit value to the indexed scratchpad + * register. + * + * RETURNS: none. + */ +static void +mic_x100_write_spad(struct mic_device *mdev, unsigned int idx, u32 val) +{ +	dev_dbg(mdev->sdev->parent, "Writing 0x%x to scratch pad index %d\n", +		val, idx); +	mic_mmio_write(&mdev->mmio, val, +		       MIC_X100_SBOX_BASE_ADDRESS + +		       MIC_X100_SBOX_SPAD0 + idx * 4); +} + +/** + * mic_x100_read_spad - read from the scratchpad register + * @mdev: pointer to mic_device instance + * @idx: index to scratchpad register, 0 based + * + * This function allows reading of the 32bit scratchpad register. + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
+ */ +static u32 +mic_x100_read_spad(struct mic_device *mdev, unsigned int idx) +{ +	u32 val = mic_mmio_read(&mdev->mmio, +		MIC_X100_SBOX_BASE_ADDRESS + +		MIC_X100_SBOX_SPAD0 + idx * 4); + +	dev_dbg(mdev->sdev->parent, +		"Reading 0x%x from scratch pad index %d\n", val, idx); +	return val; +} + +/** + * mic_x100_enable_interrupts - Enable interrupts. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_enable_interrupts(struct mic_device *mdev) +{ +	u32 reg; +	struct mic_mw *mw = &mdev->mmio; +	u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0; +	u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0; + +	reg = mic_mmio_read(mw, sice0); +	reg |= MIC_X100_SBOX_DBR_BITS(0xf) | MIC_X100_SBOX_DMA_BITS(0xff); +	mic_mmio_write(mw, reg, sice0); + +	/* +	 * Enable auto-clear when enabling interrupts. Applicable only for +	 * MSI-x. Legacy and MSI mode cannot have auto-clear enabled. +	 */ +	if (mdev->irq_info.num_vectors > 1) { +		reg = mic_mmio_read(mw, siac0); +		reg |= MIC_X100_SBOX_DBR_BITS(0xf) | +			MIC_X100_SBOX_DMA_BITS(0xff); +		mic_mmio_write(mw, reg, siac0); +	} +} + +/** + * mic_x100_disable_interrupts - Disable interrupts. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_disable_interrupts(struct mic_device *mdev) +{ +	u32 reg; +	struct mic_mw *mw = &mdev->mmio; +	u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0; +	u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0; +	u32 sicc0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICC0; + +	reg = mic_mmio_read(mw, sice0); +	mic_mmio_write(mw, reg, sicc0); + +	if (mdev->irq_info.num_vectors > 1) { +		reg = mic_mmio_read(mw, siac0); +		reg &= ~(MIC_X100_SBOX_DBR_BITS(0xf) | +			MIC_X100_SBOX_DMA_BITS(0xff)); +		mic_mmio_write(mw, reg, siac0); +	} +} + +/** + * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC. 
+ * @mdev: pointer to mic_device instance + */ +static void mic_x100_send_sbox_intr(struct mic_device *mdev, +			int doorbell) +{ +	struct mic_mw *mw = &mdev->mmio; +	u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8; +	u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS + +					apic_icr_offset); + +	/* for MIC we need to make sure we "hit" the send_icr bit (13) */ +	apicicr_low = (apicicr_low | (1 << 13)); + +	/* Ensure that the interrupt is ordered w.r.t. previous stores. */ +	wmb(); +	mic_mmio_write(mw, apicicr_low, +		       MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset); +} + +/** + * mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_send_rdmasr_intr(struct mic_device *mdev, +			int doorbell) +{ +	int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2); +	/* Ensure that the interrupt is ordered w.r.t. previous stores. */ +	wmb(); +	mic_mmio_write(&mdev->mmio, 0, +		       MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset); +} + +/** + * __mic_x100_send_intr - Send interrupt to MIC. + * @mdev: pointer to mic_device instance + * @doorbell: doorbell number. + */ +static void mic_x100_send_intr(struct mic_device *mdev, int doorbell) +{ +	int rdmasr_db; +	if (doorbell < MIC_X100_NUM_SBOX_IRQ) { +		mic_x100_send_sbox_intr(mdev, doorbell); +	} else { +		rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ + +			MIC_X100_RDMASR_IRQ_BASE; +		mic_x100_send_rdmasr_intr(mdev, rdmasr_db); +	} +} + +/** + * mic_x100_ack_interrupt - Read the interrupt sources register and + * clear it. This function will be called in the MSI/INTx case. + * @mdev: Pointer to mic_device instance. + * + * Returns: bitmask of interrupt sources triggered. 
+ */ +static u32 mic_x100_ack_interrupt(struct mic_device *mdev) +{ +	u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0; +	u32 reg = mic_mmio_read(&mdev->mmio, sicr0); +	mic_mmio_write(&mdev->mmio, reg, sicr0); +	return reg; +} + +/** + * mic_x100_intr_workarounds - These hardware specific workarounds are + * to be invoked everytime an interrupt is handled. + * @mdev: Pointer to mic_device instance. + * + * Returns: none + */ +static void mic_x100_intr_workarounds(struct mic_device *mdev) +{ +	struct mic_mw *mw = &mdev->mmio; + +	/* Clear pending bit array. */ +	if (MIC_A0_STEP == mdev->stepping) +		mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS + +			MIC_X100_SBOX_MSIXPBACR); + +	if (mdev->stepping >= MIC_B0_STEP) +		mdev->intr_ops->enable_interrupts(mdev); +} + +/** + * mic_x100_hw_intr_init - Initialize h/w specific interrupt + * information. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_hw_intr_init(struct mic_device *mdev) +{ +	mdev->intr_info = (struct mic_intr_info *)mic_x100_intr_init; +} + +/** + * mic_x100_read_msi_to_src_map - read from the MSI mapping registers + * @mdev: pointer to mic_device instance + * @idx: index to the mapping register, 0 based + * + * This function allows reading of the 32bit MSI mapping register. + * + * RETURNS: The value in the register. + */ +static u32 +mic_x100_read_msi_to_src_map(struct mic_device *mdev, int idx) +{ +	return mic_mmio_read(&mdev->mmio, +		MIC_X100_SBOX_BASE_ADDRESS + +		MIC_X100_SBOX_MXAR0 + idx * 4); +} + +/** + * mic_x100_program_msi_to_src_map - program the MSI mapping registers + * @mdev: pointer to mic_device instance + * @idx: index to the mapping register, 0 based + * @offset: The bit offset in the register that needs to be updated. + * @set: boolean specifying if the bit in the specified offset needs + * to be set or cleared. + * + * RETURNS: None. 
+ */ +static void +mic_x100_program_msi_to_src_map(struct mic_device *mdev, +				int idx, int offset, bool set) +{ +	unsigned long reg; +	struct mic_mw *mw = &mdev->mmio; +	u32 mxar = MIC_X100_SBOX_BASE_ADDRESS + +		MIC_X100_SBOX_MXAR0 + idx * 4; + +	reg = mic_mmio_read(mw, mxar); +	if (set) +		__set_bit(offset, ®); +	else +		__clear_bit(offset, ®); +	mic_mmio_write(mw, reg, mxar); +} + +/* + * mic_x100_reset_fw_ready - Reset Firmware ready status field. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_reset_fw_ready(struct mic_device *mdev) +{ +	mdev->ops->write_spad(mdev, MIC_X100_DOWNLOAD_INFO, 0); +} + +/* + * mic_x100_is_fw_ready - Check if firmware is ready. + * @mdev: pointer to mic_device instance + */ +static bool mic_x100_is_fw_ready(struct mic_device *mdev) +{ +	u32 scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO); +	return MIC_X100_SPAD2_DOWNLOAD_STATUS(scratch2) ? true : false; +} + +/** + * mic_x100_get_apic_id - Get bootstrap APIC ID. + * @mdev: pointer to mic_device instance + */ +static u32 mic_x100_get_apic_id(struct mic_device *mdev) +{ +	u32 scratch2 = 0; + +	scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO); +	return MIC_X100_SPAD2_APIC_ID(scratch2); +} + +/** + * mic_x100_send_firmware_intr - Send an interrupt to the firmware on MIC. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_send_firmware_intr(struct mic_device *mdev) +{ +	u32 apicicr_low; +	u64 apic_icr_offset = MIC_X100_SBOX_APICICR7; +	int vector = MIC_X100_BSP_INTERRUPT_VECTOR; +	struct mic_mw *mw = &mdev->mmio; + +	/* +	 * For MIC we need to make sure we "hit" +	 * the send_icr bit (13). +	 */ +	apicicr_low = (vector | (1 << 13)); + +	mic_mmio_write(mw, mic_x100_get_apic_id(mdev), +		       MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset + 4); + +	/* Ensure that the interrupt is ordered w.r.t. previous stores. 
*/ +	wmb(); +	mic_mmio_write(mw, apicicr_low, +		       MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset); +} + +/** + * mic_x100_hw_reset - Reset the MIC device. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_hw_reset(struct mic_device *mdev) +{ +	u32 reset_reg; +	u32 rgcr = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_RGCR; +	struct mic_mw *mw = &mdev->mmio; + +	/* Ensure that the reset is ordered w.r.t. previous loads and stores */ +	mb(); +	/* Trigger reset */ +	reset_reg = mic_mmio_read(mw, rgcr); +	reset_reg |= 0x1; +	mic_mmio_write(mw, reset_reg, rgcr); +	/* +	 * It seems we really want to delay at least 1 second +	 * after touching reset to prevent a lot of problems. +	 */ +	msleep(1000); +} + +/** + * mic_x100_load_command_line - Load command line to MIC. + * @mdev: pointer to mic_device instance + * @fw: the firmware image + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int +mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw) +{ +	u32 len = 0; +	u32 boot_mem; +	char *buf; +	void __iomem *cmd_line_va = mdev->aper.va + mdev->bootaddr + fw->size; +#define CMDLINE_SIZE 2048 + +	boot_mem = mdev->aper.len >> 20; +	buf = kzalloc(CMDLINE_SIZE, GFP_KERNEL); +	if (!buf) { +		dev_err(mdev->sdev->parent, +			"%s %d allocation failed\n", __func__, __LINE__); +		return -ENOMEM; +	} +	len += snprintf(buf, CMDLINE_SIZE - len, +		" mem=%dM", boot_mem); +	if (mdev->cmdline) +		snprintf(buf + len, CMDLINE_SIZE - len, " %s", mdev->cmdline); +	memcpy_toio(cmd_line_va, buf, strlen(buf) + 1); +	kfree(buf); +	return 0; +} + +/** + * mic_x100_load_ramdisk - Load ramdisk to MIC. + * @mdev: pointer to mic_device instance + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
+ */ +static int +mic_x100_load_ramdisk(struct mic_device *mdev) +{ +	const struct firmware *fw; +	int rc; +	struct boot_params __iomem *bp = mdev->aper.va + mdev->bootaddr; + +	rc = request_firmware(&fw, +			mdev->ramdisk, mdev->sdev->parent); +	if (rc < 0) { +		dev_err(mdev->sdev->parent, +			"ramdisk request_firmware failed: %d %s\n", +			rc, mdev->ramdisk); +		goto error; +	} +	/* +	 * Typically the bootaddr for card OS is 64M +	 * so copy over the ramdisk @ 128M. +	 */ +	memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); +	iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image); +	iowrite32(fw->size, &bp->hdr.ramdisk_size); +	release_firmware(fw); +error: +	return rc; +} + +/** + * mic_x100_get_boot_addr - Get MIC boot address. + * @mdev: pointer to mic_device instance + * + * This function is called during firmware load to determine + * the address at which the OS should be downloaded in card + * memory i.e. GDDR. + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int +mic_x100_get_boot_addr(struct mic_device *mdev) +{ +	u32 scratch2, boot_addr; +	int rc = 0; + +	scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO); +	boot_addr = MIC_X100_SPAD2_DOWNLOAD_ADDR(scratch2); +	dev_dbg(mdev->sdev->parent, "%s %d boot_addr 0x%x\n", +		__func__, __LINE__, boot_addr); +	if (boot_addr > (1 << 31)) { +		dev_err(mdev->sdev->parent, +			"incorrect bootaddr 0x%x\n", +			boot_addr); +		rc = -EINVAL; +		goto error; +	} +	mdev->bootaddr = boot_addr; +error: +	return rc; +} + +/** + * mic_x100_load_firmware - Load firmware to MIC. + * @mdev: pointer to mic_device instance + * @buf: buffer containing boot string including firmware/ramdisk path. + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
+ */ +static int +mic_x100_load_firmware(struct mic_device *mdev, const char *buf) +{ +	int rc; +	const struct firmware *fw; + +	rc = mic_x100_get_boot_addr(mdev); +	if (rc) +		goto error; +	/* load OS */ +	rc = request_firmware(&fw, mdev->firmware, mdev->sdev->parent); +	if (rc < 0) { +		dev_err(mdev->sdev->parent, +			"ramdisk request_firmware failed: %d %s\n", +			rc, mdev->firmware); +		goto error; +	} +	if (mdev->bootaddr > mdev->aper.len - fw->size) { +		rc = -EINVAL; +		dev_err(mdev->sdev->parent, "%s %d rc %d bootaddr 0x%x\n", +			__func__, __LINE__, rc, mdev->bootaddr); +		release_firmware(fw); +		goto error; +	} +	memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size); +	mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size); +	if (!strcmp(mdev->bootmode, "elf")) +		goto done; +	/* load command line */ +	rc = mic_x100_load_command_line(mdev, fw); +	if (rc) { +		dev_err(mdev->sdev->parent, "%s %d rc %d\n", +			__func__, __LINE__, rc); +		goto error; +	} +	release_firmware(fw); +	/* load ramdisk */ +	if (mdev->ramdisk) +		rc = mic_x100_load_ramdisk(mdev); +error: +	dev_dbg(mdev->sdev->parent, "%s %d rc %d\n", __func__, __LINE__, rc); +done: +	return rc; +} + +/** + * mic_x100_get_postcode - Get postcode status from firmware. + * @mdev: pointer to mic_device instance + * + * RETURNS: postcode. + */ +static u32 mic_x100_get_postcode(struct mic_device *mdev) +{ +	return mic_mmio_read(&mdev->mmio, MIC_X100_POSTCODE); +} + +/** + * mic_x100_smpt_set - Update an SMPT entry with a DMA address. + * @mdev: pointer to mic_device instance + * + * RETURNS: none. 
+ */ +static void +mic_x100_smpt_set(struct mic_device *mdev, dma_addr_t dma_addr, u8 index) +{ +#define SNOOP_ON	(0 << 0) +#define SNOOP_OFF	(1 << 0) +/* + * Sbox Smpt Reg Bits: + * Bits	31:2	Host address + * Bits	1	RSVD + * Bits	0	No snoop + */ +#define BUILD_SMPT(NO_SNOOP, HOST_ADDR)  \ +	(u32)(((HOST_ADDR) << 2) | ((NO_SNOOP) & 0x01)) + +	uint32_t smpt_reg_val = BUILD_SMPT(SNOOP_ON, +			dma_addr >> mdev->smpt->info.page_shift); +	mic_mmio_write(&mdev->mmio, smpt_reg_val, +		       MIC_X100_SBOX_BASE_ADDRESS + +		       MIC_X100_SBOX_SMPT00 + (4 * index)); +} + +/** + * mic_x100_smpt_hw_init - Initialize SMPT X100 specific fields. + * @mdev: pointer to mic_device instance + * + * RETURNS: none. + */ +static void mic_x100_smpt_hw_init(struct mic_device *mdev) +{ +	struct mic_smpt_hw_info *info = &mdev->smpt->info; + +	info->num_reg = 32; +	info->page_shift = 34; +	info->page_size = (1ULL << info->page_shift); +	info->base = 0x8000000000ULL; +} + +struct mic_smpt_ops mic_x100_smpt_ops = { +	.init = mic_x100_smpt_hw_init, +	.set = mic_x100_smpt_set, +}; + +struct mic_hw_ops mic_x100_ops = { +	.aper_bar = MIC_X100_APER_BAR, +	.mmio_bar = MIC_X100_MMIO_BAR, +	.read_spad = mic_x100_read_spad, +	.write_spad = mic_x100_write_spad, +	.send_intr = mic_x100_send_intr, +	.ack_interrupt = mic_x100_ack_interrupt, +	.intr_workarounds = mic_x100_intr_workarounds, +	.reset = mic_x100_hw_reset, +	.reset_fw_ready = mic_x100_reset_fw_ready, +	.is_fw_ready = mic_x100_is_fw_ready, +	.send_firmware_intr = mic_x100_send_firmware_intr, +	.load_mic_fw = mic_x100_load_firmware, +	.get_postcode = mic_x100_get_postcode, +}; + +struct mic_hw_intr_ops mic_x100_intr_ops = { +	.intr_init = mic_x100_hw_intr_init, +	.enable_interrupts = mic_x100_enable_interrupts, +	.disable_interrupts = mic_x100_disable_interrupts, +	.program_msi_to_src_map = mic_x100_program_msi_to_src_map, +	.read_msi_to_src_map = mic_x100_read_msi_to_src_map, +}; diff --git a/drivers/misc/mic/host/mic_x100.h 
b/drivers/misc/mic/host/mic_x100.h new file mode 100644 index 00000000000..8b7daa182e5 --- /dev/null +++ b/drivers/misc/mic/host/mic_x100.h @@ -0,0 +1,98 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef _MIC_X100_HW_H_ +#define _MIC_X100_HW_H_ + +#define MIC_X100_PCI_DEVICE_2250 0x2250 +#define MIC_X100_PCI_DEVICE_2251 0x2251 +#define MIC_X100_PCI_DEVICE_2252 0x2252 +#define MIC_X100_PCI_DEVICE_2253 0x2253 +#define MIC_X100_PCI_DEVICE_2254 0x2254 +#define MIC_X100_PCI_DEVICE_2255 0x2255 +#define MIC_X100_PCI_DEVICE_2256 0x2256 +#define MIC_X100_PCI_DEVICE_2257 0x2257 +#define MIC_X100_PCI_DEVICE_2258 0x2258 +#define MIC_X100_PCI_DEVICE_2259 0x2259 +#define MIC_X100_PCI_DEVICE_225a 0x225a +#define MIC_X100_PCI_DEVICE_225b 0x225b +#define MIC_X100_PCI_DEVICE_225c 0x225c +#define MIC_X100_PCI_DEVICE_225d 0x225d +#define MIC_X100_PCI_DEVICE_225e 0x225e + +#define MIC_X100_APER_BAR 0 +#define MIC_X100_MMIO_BAR 4 + +#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000 +#define MIC_X100_SBOX_SPAD0 0x0000AB20 +#define MIC_X100_SBOX_SICR0_DBR(x) ((x) & 0xf) +#define MIC_X100_SBOX_SICR0_DMA(x) (((x) >> 8) & 0xff) +#define MIC_X100_SBOX_SICE0_DBR(x) ((x) & 0xf) +#define MIC_X100_SBOX_DBR_BITS(x) ((x) & 0xf) +#define MIC_X100_SBOX_SICE0_DMA(x) (((x) >> 8) & 0xff) +#define MIC_X100_SBOX_DMA_BITS(x) (((x) & 0xff) << 8) + +#define 
MIC_X100_SBOX_APICICR0 0x0000A9D0 +#define MIC_X100_SBOX_SICR0 0x00009004 +#define MIC_X100_SBOX_SICE0 0x0000900C +#define MIC_X100_SBOX_SICC0 0x00009010 +#define MIC_X100_SBOX_SIAC0 0x00009014 +#define MIC_X100_SBOX_MSIXPBACR 0x00009084 +#define MIC_X100_SBOX_MXAR0 0x00009044 +#define MIC_X100_SBOX_SMPT00 0x00003100 +#define MIC_X100_SBOX_RDMASR0 0x0000B180 + +#define MIC_X100_DOORBELL_IDX_START 0 +#define MIC_X100_NUM_DOORBELL 4 +#define MIC_X100_DMA_IDX_START 8 +#define MIC_X100_NUM_DMA 8 +#define MIC_X100_ERR_IDX_START 30 +#define MIC_X100_NUM_ERR 1 + +#define MIC_X100_NUM_SBOX_IRQ 8 +#define MIC_X100_NUM_RDMASR_IRQ 8 +#define MIC_X100_RDMASR_IRQ_BASE 17 +#define MIC_X100_SPAD2_DOWNLOAD_STATUS(x) ((x) & 0x1) +#define MIC_X100_SPAD2_APIC_ID(x)	(((x) >> 1) & 0x1ff) +#define MIC_X100_SPAD2_DOWNLOAD_ADDR(x) ((x) & 0xfffff000) +#define MIC_X100_SBOX_APICICR7 0x0000AA08 +#define MIC_X100_SBOX_RGCR 0x00004010 +#define MIC_X100_SBOX_SDBIC0 0x0000CC90 +#define MIC_X100_DOWNLOAD_INFO 2 +#define MIC_X100_FW_SIZE 5 +#define MIC_X100_POSTCODE 0x242c + +static const u16 mic_x100_intr_init[] = { +		MIC_X100_DOORBELL_IDX_START, +		MIC_X100_DMA_IDX_START, +		MIC_X100_ERR_IDX_START, +		MIC_X100_NUM_DOORBELL, +		MIC_X100_NUM_DMA, +		MIC_X100_NUM_ERR, +}; + +/* Host->Card(bootstrap) Interrupt Vector */ +#define MIC_X100_BSP_INTERRUPT_VECTOR 229 + +extern struct mic_hw_ops mic_x100_ops; +extern struct mic_smpt_ops mic_x100_smpt_ops; +extern struct mic_hw_intr_ops mic_x100_intr_ops; + +#endif diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index a5925f7f17f..956597321d2 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c @@ -636,6 +636,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,  	u8 mac[ETH_ALEN];  	ssize_t rom_size;  	struct pch_phub_reg *chip = dev_get_drvdata(dev); +	int ret;  	if (!mac_pton(buf, mac))  		return -EINVAL; @@ -644,8 +645,10 @@ static ssize_t store_pch_mac(struct device *dev, struct 
device_attribute *attr,  	if (!chip->pch_phub_extrom_base_address)  		return -ENOMEM; -	pch_phub_write_gbe_mac_addr(chip, mac); +	ret = pch_phub_write_gbe_mac_addr(chip, mac);  	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); +	if (ret) +		return ret;  	return count;  } diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 68b7c773d2c..30754927fd8 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -395,7 +395,7 @@ static int phantom_probe(struct pci_dev *pdev,  	iowrite32(0, pht->caddr + PHN_IRQCTL);  	ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */  	retval = request_irq(pdev->irq, phantom_isr, -			IRQF_SHARED | IRQF_DISABLED, "phantom", pht); +			IRQF_SHARED, "phantom", pht);  	if (retval) {  		dev_err(&pdev->dev, "can't establish ISR\n");  		goto err_unmo; diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index f84ff0c0603..eda38cbe853 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -892,7 +892,6 @@ static void pti_pci_remove(struct pci_dev *pdev)  	}  	iounmap(drv_data->pti_ioaddr); -	pci_set_drvdata(pdev, NULL);  	kfree(drv_data);  	pci_release_region(pdev, 1);  	pci_disable_device(pdev); diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 0535d1e0bc7..104a05f6b73 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -6,7 +6,7 @@   * This file supports the user system call for file open, close, mmap, etc.   * This also incudes the driver initialization code.   * - *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved. + *  Copyright (c) 2008-2014 Silicon Graphics, Inc.  All Rights Reserved.   
*   *  This program is free software; you can redistribute it and/or modify   *  it under the terms of the GNU General Public License as published by @@ -58,6 +58,11 @@ static int max_user_cbrs, max_user_dsr_bytes;  static struct miscdevice gru_miscdev; +static int gru_supported(void) +{ +	return is_uv_system() && +		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE); +}  /*   * gru_vma_close @@ -518,7 +523,7 @@ static int __init gru_init(void)  {  	int ret; -	if (!is_uv_system() || (is_uvx_hub() && !is_uv2_hub())) +	if (!gru_supported())  		return 0;  #if defined CONFIG_IA64 @@ -573,7 +578,7 @@ exit0:  static void __exit gru_exit(void)  { -	if (!is_uv_system()) +	if (!gru_supported())  		return;  	gru_teardown_tlb_irqs(); diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c index 9b2062d1732..a3700a56b8f 100644 --- a/drivers/misc/sgi-gru/grukdump.c +++ b/drivers/misc/sgi-gru/grukdump.c @@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,  	ubuf += sizeof(hdr);  	ubufcch = ubuf; -	if (gru_user_copy_handle(&ubuf, cch)) -		goto fail; +	if (gru_user_copy_handle(&ubuf, cch)) { +		if (cch_locked) +			unlock_cch_handle(cch); +		return -EFAULT; +	}  	if (cch_locked)  		ubufcch->delresp = 0;  	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES; @@ -175,14 +178,10 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,  	hdr.cbrcnt = cbrcnt;  	hdr.dsrcnt = dsrcnt;  	hdr.cch_locked = cch_locked; -	if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr))) -		ret = -EFAULT; - -	return ret ? 
ret : bytes; +	if (copy_to_user(uhdr, &hdr, sizeof(hdr))) +		return -EFAULT; -fail: -	unlock_cch_handle(cch); -	return -EFAULT; +	return bytes;  }  int gru_dump_chiplet_request(unsigned long arg) diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 652593fc486..128d5615c80 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -828,6 +828,7 @@ enum xp_retval  xpc_allocate_msg_wait(struct xpc_channel *ch)  {  	enum xp_retval ret; +	DEFINE_WAIT(wait);  	if (ch->flags & XPC_C_DISCONNECTING) {  		DBUG_ON(ch->reason == xpInterrupted); @@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)  	}  	atomic_inc(&ch->n_on_msg_allocate_wq); -	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); +	prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); +	ret = schedule_timeout(1); +	finish_wait(&ch->msg_allocate_wq, &wait);  	atomic_dec(&ch->n_on_msg_allocate_wq);  	if (ch->flags & XPC_C_DISCONNECTING) { diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index b9e2000969f..95c894482fd 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,  	nid = cpu_to_node(cpu);  	page = alloc_pages_exact_node(nid, -				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, +				      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,  				      pg_order);  	if (page == NULL) {  		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index afe66571ce0..21181fa243d 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -24,6 +24,9 @@  #include <linux/err.h>  #include <linux/io.h>  #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/list.h> +#include <linux/list_sort.h>  #include <linux/platform_device.h>  #include <linux/slab.h>  #include <linux/spinlock.h> @@ -36,14 +39,35 @@ struct sram_dev {  
	struct clk *clk;  }; +struct sram_reserve { +	struct list_head list; +	u32 start; +	u32 size; +}; + +static int sram_reserve_cmp(void *priv, struct list_head *a, +					struct list_head *b) +{ +	struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); +	struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); + +	return ra->start - rb->start; +} +  static int sram_probe(struct platform_device *pdev)  {  	void __iomem *virt_base;  	struct sram_dev *sram;  	struct resource *res; -	unsigned long size; +	struct device_node *np = pdev->dev.of_node, *child; +	unsigned long size, cur_start, cur_size; +	struct sram_reserve *rblocks, *block; +	struct list_head reserve_list; +	unsigned int nblocks;  	int ret; +	INIT_LIST_HEAD(&reserve_list); +  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	virt_base = devm_ioremap_resource(&pdev->dev, res);  	if (IS_ERR(virt_base)) @@ -65,19 +89,106 @@ static int sram_probe(struct platform_device *pdev)  	if (!sram->pool)  		return -ENOMEM; -	ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base, -				res->start, size, -1); -	if (ret < 0) { -		if (sram->clk) -			clk_disable_unprepare(sram->clk); -		return ret; +	/* +	 * We need an additional block to mark the end of the memory region +	 * after the reserved blocks from the dt are processed. +	 */ +	nblocks = (np) ? 
of_get_available_child_count(np) + 1 : 1; +	rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL); +	if (!rblocks) { +		ret = -ENOMEM; +		goto err_alloc; +	} + +	block = &rblocks[0]; +	for_each_available_child_of_node(np, child) { +		struct resource child_res; + +		ret = of_address_to_resource(child, 0, &child_res); +		if (ret < 0) { +			dev_err(&pdev->dev, +				"could not get address for node %s\n", +				child->full_name); +			goto err_chunks; +		} + +		if (child_res.start < res->start || child_res.end > res->end) { +			dev_err(&pdev->dev, +				"reserved block %s outside the sram area\n", +				child->full_name); +			ret = -EINVAL; +			goto err_chunks; +		} + +		block->start = child_res.start - res->start; +		block->size = resource_size(&child_res); +		list_add_tail(&block->list, &reserve_list); + +		dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n", +			block->start, +			block->start + block->size); + +		block++; +	} + +	/* the last chunk marks the end of the region */ +	rblocks[nblocks - 1].start = size; +	rblocks[nblocks - 1].size = 0; +	list_add_tail(&rblocks[nblocks - 1].list, &reserve_list); + +	list_sort(NULL, &reserve_list, sram_reserve_cmp); + +	cur_start = 0; + +	list_for_each_entry(block, &reserve_list, list) { +		/* can only happen if sections overlap */ +		if (block->start < cur_start) { +			dev_err(&pdev->dev, +				"block at 0x%x starts after current offset 0x%lx\n", +				block->start, cur_start); +			ret = -EINVAL; +			goto err_chunks; +		} + +		/* current start is in a reserved block, so continue after it */ +		if (block->start == cur_start) { +			cur_start = block->start + block->size; +			continue; +		} + +		/* +		 * allocate the space between the current starting +		 * address and the following reserved block, or the +		 * end of the region. 
+		 */ +		cur_size = block->start - cur_start; + +		dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n", +			cur_start, cur_start + cur_size); +		ret = gen_pool_add_virt(sram->pool, +				(unsigned long)virt_base + cur_start, +				res->start + cur_start, cur_size, -1); +		if (ret < 0) +			goto err_chunks; + +		/* next allocation after this reserved block */ +		cur_start = block->start + block->size;  	} +	kfree(rblocks); +  	platform_set_drvdata(pdev, sram);  	dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);  	return 0; + +err_chunks: +	kfree(rblocks); +err_alloc: +	if (sram->clk) +		clk_disable_unprepare(sram->clk); +	return ret;  }  static int sram_remove(struct platform_device *pdev) @@ -87,8 +198,6 @@ static int sram_remove(struct platform_device *pdev)  	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))  		dev_dbg(&pdev->dev, "removed while SRAM allocated\n"); -	gen_pool_destroy(sram->pool); -  	if (sram->clk)  		clk_disable_unprepare(sram->clk); diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 8d64b681dd9..1972d57aadb 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -22,7 +22,6 @@  #define pr_fmt(fmt)	"(stc): " fmt  #include <linux/module.h>  #include <linux/kernel.h> -#include <linux/init.h>  #include <linux/tty.h>  #include <linux/seq_file.h> @@ -812,7 +811,7 @@ static void st_tty_flush_buffer(struct tty_struct *tty)  	kfree_skb(st_gdata->tx_skb);  	st_gdata->tx_skb = NULL; -	tty->ops->flush_buffer(tty); +	tty_driver_flush_buffer(tty);  	return;  } diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 83907c72059..9d3dbb28734 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -218,7 +218,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)  	pr_debug("%s", __func__); -	INIT_COMPLETION(kim_gdata->kim_rcvd); +	reinit_completion(&kim_gdata->kim_rcvd);  	if (4 != 
st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {  		pr_err("kim: couldn't write 4 bytes");  		return -EIO; @@ -229,7 +229,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)  		pr_err(" waiting for ver info- timed out ");  		return -ETIMEDOUT;  	} -	INIT_COMPLETION(kim_gdata->kim_rcvd); +	reinit_completion(&kim_gdata->kim_rcvd);  	/* the positions 12 & 13 in the response buffer provide with the  	 * chip, major & minor numbers  	 */ @@ -362,7 +362,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)  			/* reinit completion before sending for the  			 * relevant wait  			 */ -			INIT_COMPLETION(kim_gdata->kim_rcvd); +			reinit_completion(&kim_gdata->kim_rcvd);  			/*  			 * Free space found in uart buffer, call st_int_write @@ -398,7 +398,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)  				release_firmware(kim_gdata->fw_entry);  				return -ETIMEDOUT;  			} -			INIT_COMPLETION(kim_gdata->kim_rcvd); +			reinit_completion(&kim_gdata->kim_rcvd);  			break;  		case ACTION_DELAY:	/* sleep */  			pr_info("sleep command in scr"); @@ -474,7 +474,7 @@ long st_kim_start(void *kim_data)  		gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);  		mdelay(100);  		/* re-initialize the completion */ -		INIT_COMPLETION(kim_gdata->ldisc_installed); +		reinit_completion(&kim_gdata->ldisc_installed);  		/* send notification to UIM */  		kim_gdata->ldisc_install = 1;  		pr_info("ldisc_install = 1"); @@ -525,13 +525,12 @@ long st_kim_stop(void *kim_data)  		kim_gdata->kim_pdev->dev.platform_data;  	struct tty_struct	*tty = kim_gdata->core_data->tty; -	INIT_COMPLETION(kim_gdata->ldisc_installed); +	reinit_completion(&kim_gdata->ldisc_installed);  	if (tty) {	/* can be called before ldisc is installed */  		/* Flush any pending characters in the driver and discipline. 
*/  		tty_ldisc_flush(tty);  		tty_driver_flush_buffer(tty); -		tty->ops->flush_buffer(tty);  	}  	/* send uninstall notification to UIM */ diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c index 9b237221bc4..cb0289b44a1 100644 --- a/drivers/misc/ti_dac7512.c +++ b/drivers/misc/ti_dac7512.c @@ -20,11 +20,8 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/spi/spi.h> - -#define DAC7512_DRV_NAME	"dac7512" -#define DRIVER_VERSION		"1.0" +#include <linux/of.h>  static ssize_t dac7512_store_val(struct device *dev,  				 struct device_attribute *attr, @@ -75,13 +72,29 @@ static int dac7512_remove(struct spi_device *spi)  	return 0;  } +static const struct spi_device_id dac7512_id_table[] = { +	{ "dac7512", 0 }, +	{ } +}; +MODULE_DEVICE_TABLE(spi, dac7512_id_table); + +#ifdef CONFIG_OF +static const struct of_device_id dac7512_of_match[] = { +	{ .compatible = "ti,dac7512", }, +	{ } +}; +MODULE_DEVICE_TABLE(of, dac7512_of_match); +#endif +  static struct spi_driver dac7512_driver = {  	.driver = { -		.name	= DAC7512_DRV_NAME, +		.name	= "dac7512",  		.owner	= THIS_MODULE, +		.of_match_table = of_match_ptr(dac7512_of_match),  	},  	.probe	= dac7512_probe,  	.remove	= dac7512_remove, +	.id_table = dac7512_id_table,  };  module_spi_driver(dac7512_driver); @@ -89,4 +102,3 @@ module_spi_driver(dac7512_driver);  MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");  MODULE_DESCRIPTION("DAC7512 16-bit DAC");  MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index f8d6654391e..a606c8901e1 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -356,8 +356,10 @@ static int tifm_7xx1_probe(struct pci_dev *dev,  	pci_set_drvdata(dev, fm);  	fm->addr = pci_ioremap_bar(dev, 0); -	if (!fm->addr) +	if (!fm->addr) { +		rc = -ENODEV;  		goto err_out_free; +	}  	rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm);  	if (rc) @@ -378,7 +380,6 @@ 
err_out_irq:  err_out_unmap:  	iounmap(fm->addr);  err_out_free: -	pci_set_drvdata(dev, NULL);  	tifm_free_adapter(fm);  err_out_int:  	pci_intx(dev, 0); @@ -405,8 +406,6 @@ static void tifm_7xx1_remove(struct pci_dev *dev)  	for (cnt = 0; cnt < fm->num_sockets; cnt++)  		tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt)); -	pci_set_drvdata(dev, NULL); -  	iounmap(fm->addr);  	pci_intx(dev, 0);  	pci_release_regions(dev); diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 0ab7c922212..a511b2a713b 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -145,15 +145,17 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,  	struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);  	return sprintf(buf, "%x", sock->type);  } +static DEVICE_ATTR_RO(type); -static struct device_attribute tifm_dev_attrs[] = { -	__ATTR(type, S_IRUGO, type_show, NULL), -	__ATTR_NULL +static struct attribute *tifm_dev_attrs[] = { +	&dev_attr_type.attr, +	NULL,  }; +ATTRIBUTE_GROUPS(tifm_dev);  static struct bus_type tifm_bus_type = {  	.name      = "tifm", -	.dev_attrs = tifm_dev_attrs, +	.dev_groups = tifm_dev_groups,  	.match     = tifm_bus_match,  	.uevent    = tifm_uevent,  	.probe     = tifm_device_probe, diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index 5bc10fa193d..b00335652e5 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -20,7 +20,6 @@   */  #include <linux/module.h> -#include <linux/init.h>  #include <linux/slab.h>  #include <linux/i2c.h>  #include <linux/mutex.h> diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c new file mode 100644 index 00000000000..3250fc1df0a --- /dev/null +++ b/drivers/misc/vexpress-syscfg.c @@ -0,0 +1,328 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/syscore_ops.h> +#include <linux/vexpress.h> + + +#define SYS_CFGDATA		0x0 + +#define SYS_CFGCTRL		0x4 +#define SYS_CFGCTRL_START	(1 << 31) +#define SYS_CFGCTRL_WRITE	(1 << 30) +#define SYS_CFGCTRL_DCC(n)	(((n) & 0xf) << 26) +#define SYS_CFGCTRL_FUNC(n)	(((n) & 0x3f) << 20) +#define SYS_CFGCTRL_SITE(n)	(((n) & 0x3) << 16) +#define SYS_CFGCTRL_POSITION(n)	(((n) & 0xf) << 12) +#define SYS_CFGCTRL_DEVICE(n)	(((n) & 0xfff) << 0) + +#define SYS_CFGSTAT		0x8 +#define SYS_CFGSTAT_ERR		(1 << 1) +#define SYS_CFGSTAT_COMPLETE	(1 << 0) + + +struct vexpress_syscfg { +	struct device *dev; +	void __iomem *base; +	struct list_head funcs; +}; + +struct vexpress_syscfg_func { +	struct list_head list; +	struct vexpress_syscfg *syscfg; +	struct regmap *regmap; +	int num_templates; +	u32 template[0]; /* Keep it last! */ +}; + + +static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func, +		int index, bool write, u32 *data) +{ +	struct vexpress_syscfg *syscfg = func->syscfg; +	u32 command, status; +	int tries; +	long timeout; + +	if (WARN_ON(index > func->num_templates)) +		return -EINVAL; + +	command = readl(syscfg->base + SYS_CFGCTRL); +	if (WARN_ON(command & SYS_CFGCTRL_START)) +		return -EBUSY; + +	command = func->template[index]; +	command |= SYS_CFGCTRL_START; +	command |= write ? 
SYS_CFGCTRL_WRITE : 0; + +	/* Use a canary for reads */ +	if (!write) +		*data = 0xdeadbeef; + +	dev_dbg(syscfg->dev, "func %p, command %x, data %x\n", +			func, command, *data); +	writel(*data, syscfg->base + SYS_CFGDATA); +	writel(0, syscfg->base + SYS_CFGSTAT); +	writel(command, syscfg->base + SYS_CFGCTRL); +	mb(); + +	/* The operation can take ages... Go to sleep, 100us initially */ +	tries = 100; +	timeout = 100; +	do { +		if (!irqs_disabled()) { +			set_current_state(TASK_INTERRUPTIBLE); +			schedule_timeout(usecs_to_jiffies(timeout)); +			if (signal_pending(current)) +				return -EINTR; +		} else { +			udelay(timeout); +		} + +		status = readl(syscfg->base + SYS_CFGSTAT); +		if (status & SYS_CFGSTAT_ERR) +			return -EFAULT; + +		if (timeout > 20) +			timeout -= 20; +	} while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); +	if (WARN_ON_ONCE(!tries)) +		return -ETIMEDOUT; + +	if (!write) { +		*data = readl(syscfg->base + SYS_CFGDATA); +		dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data); +	} + +	return 0; +} + +static int vexpress_syscfg_read(void *context, unsigned int index, +		unsigned int *val) +{ +	struct vexpress_syscfg_func *func = context; + +	return vexpress_syscfg_exec(func, index, false, val); +} + +static int vexpress_syscfg_write(void *context, unsigned int index, +		unsigned int val) +{ +	struct vexpress_syscfg_func *func = context; + +	return vexpress_syscfg_exec(func, index, true, &val); +} + +struct regmap_config vexpress_syscfg_regmap_config = { +	.lock = vexpress_config_lock, +	.unlock = vexpress_config_unlock, +	.reg_bits = 32, +	.val_bits = 32, +	.reg_read = vexpress_syscfg_read, +	.reg_write = vexpress_syscfg_write, +	.reg_format_endian = REGMAP_ENDIAN_LITTLE, +	.val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + + +static struct regmap *vexpress_syscfg_regmap_init(struct device *dev, +		void *context) +{ +	struct platform_device *pdev = to_platform_device(dev); +	struct vexpress_syscfg *syscfg = context; +	struct 
vexpress_syscfg_func *func; +	struct property *prop; +	const __be32 *val = NULL; +	__be32 energy_quirk[4]; +	int num; +	u32 site, position, dcc; +	int i; + +	if (dev->of_node) { +		int err = vexpress_config_get_topo(dev->of_node, &site, +				&position, &dcc); + +		if (err) +			return ERR_PTR(err); + +		prop = of_find_property(dev->of_node, +				"arm,vexpress-sysreg,func", NULL); +		if (!prop) +			return ERR_PTR(-EINVAL); + +		num = prop->length / sizeof(u32) / 2; +		val = prop->value; +	} else { +		if (pdev->num_resources != 1 || +				pdev->resource[0].flags != IORESOURCE_BUS) +			return ERR_PTR(-EFAULT); + +		site = pdev->resource[0].start; +		if (site == VEXPRESS_SITE_MASTER) +			site = vexpress_config_get_master(); +		position = 0; +		dcc = 0; +		num = 1; +	} + +	/* +	 * "arm,vexpress-energy" function used to be described +	 * by its first device only, now it requires both +	 */ +	if (num == 1 && of_device_is_compatible(dev->of_node, +			"arm,vexpress-energy")) { +		num = 2; +		energy_quirk[0] = *val; +		energy_quirk[2] = *val++; +		energy_quirk[1] = *val; +		energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); +		val = energy_quirk; +	} + +	func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, +			GFP_KERNEL); +	if (!func) +		return ERR_PTR(-ENOMEM); + +	func->syscfg = syscfg; +	func->num_templates = num; + +	for (i = 0; i < num; i++) { +		u32 function, device; + +		if (dev->of_node) { +			function = be32_to_cpup(val++); +			device = be32_to_cpup(val++); +		} else { +			function = pdev->resource[0].end; +			device = pdev->id; +		} + +		dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", +				func, site, position, dcc, +				function, device); + +		func->template[i] = SYS_CFGCTRL_DCC(dcc); +		func->template[i] |= SYS_CFGCTRL_SITE(site); +		func->template[i] |= SYS_CFGCTRL_POSITION(position); +		func->template[i] |= SYS_CFGCTRL_FUNC(function); +		func->template[i] |= SYS_CFGCTRL_DEVICE(device); +	} + +	vexpress_syscfg_regmap_config.max_register = num - 1; + +	
func->regmap = regmap_init(dev, NULL, func, +			&vexpress_syscfg_regmap_config); + +	if (IS_ERR(func->regmap)) { +		void *err = func->regmap; + +		kfree(func); +		return err; +	} + +	list_add(&func->list, &syscfg->funcs); + +	return func->regmap; +} + +static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context) +{ +	struct vexpress_syscfg *syscfg = context; +	struct vexpress_syscfg_func *func, *tmp; + +	regmap_exit(regmap); + +	list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) { +		if (func->regmap == regmap) { +			list_del(&syscfg->funcs); +			kfree(func); +			break; +		} +	} +} + +static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = { +	.regmap_init = vexpress_syscfg_regmap_init, +	.regmap_exit = vexpress_syscfg_regmap_exit, +}; + + +/* Non-DT hack, to be gone... */ +static struct device *vexpress_syscfg_bridge; + +int vexpress_syscfg_device_register(struct platform_device *pdev) +{ +	pdev->dev.parent = vexpress_syscfg_bridge; + +	return platform_device_register(pdev); +} + + +int vexpress_syscfg_probe(struct platform_device *pdev) +{ +	struct vexpress_syscfg *syscfg; +	struct resource *res; +	struct device *bridge; + +	syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL); +	if (!syscfg) +		return -ENOMEM; +	syscfg->dev = &pdev->dev; +	INIT_LIST_HEAD(&syscfg->funcs); + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	if (!devm_request_mem_region(&pdev->dev, res->start, +			resource_size(res), pdev->name)) +		return -EBUSY; + +	syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); +	if (!syscfg->base) +		return -EFAULT; + +	/* Must use dev.parent (MFD), as that's where DT phandle points at... 
*/ +	bridge = vexpress_config_bridge_register(pdev->dev.parent, +			&vexpress_syscfg_bridge_ops, syscfg); +	if (IS_ERR(bridge)) +		return PTR_ERR(bridge); + +	/* Non-DT case */ +	if (!pdev->dev.of_node) +		vexpress_syscfg_bridge = bridge; + +	return 0; +} + +static const struct platform_device_id vexpress_syscfg_id_table[] = { +	{ "vexpress-syscfg", }, +	{}, +}; + +static struct platform_driver vexpress_syscfg_driver = { +	.driver.name = "vexpress-syscfg", +	.id_table = vexpress_syscfg_id_table, +	.probe = vexpress_syscfg_probe, +}; + +static int __init vexpress_syscfg_init(void) +{ +	return platform_driver_register(&vexpress_syscfg_driver); +} +core_initcall(vexpress_syscfg_init); diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 2421835d5da..19161749218 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -17,7 +17,8 @@   * along with this program; if not, write to the Free Software   * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.   * - * Maintained by: Dmitry Torokhov <dtor@vmware.com> + * Maintained by:	Xavier Deguillard <xdeguillard@vmware.com> + *			Philip Moltmann <moltmann@vmware.com>   */  /* diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index b3a2b763ecf..e0d5017785e 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -165,7 +165,7 @@ static void vmci_guest_cid_update(u32 sub_id,   * true if required hypercalls (or fallback hypercalls) are   * supported by the host, false otherwise.   
*/ -static bool vmci_check_host_caps(struct pci_dev *pdev) +static int vmci_check_host_caps(struct pci_dev *pdev)  {  	bool result;  	struct vmci_resource_query_msg *msg; @@ -176,7 +176,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)  	check_msg = kmalloc(msg_size, GFP_KERNEL);  	if (!check_msg) {  		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); -		return false; +		return -ENOMEM;  	}  	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, @@ -196,7 +196,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)  		__func__, result ? "PASSED" : "FAILED");  	/* We need the vector. There are no fallbacks. */ -	return result; +	return result ? 0 : -ENXIO;  }  /* @@ -383,11 +383,12 @@ static int vmci_enable_msix(struct pci_dev *pdev,  		vmci_dev->msix_entries[i].vector = i;  	} -	result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS); +	result = pci_enable_msix_exact(pdev, +				       vmci_dev->msix_entries, VMCI_MAX_INTRS);  	if (result == 0)  		vmci_dev->exclusive_vectors = true; -	else if (result > 0) -		result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1); +	else if (result == -ENOSPC) +		result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);  	return result;  } @@ -564,12 +565,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,  			dev_warn(&pdev->dev,  				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",  				 (u32) bitmap_ppn); +			error = -ENXIO;  			goto err_remove_vmci_dev_g;  		}  	}  	/* Check host capabilities. */ -	if (!vmci_check_host_caps(pdev)) +	error = vmci_check_host_caps(pdev); +	if (error)  		goto err_remove_bitmap;  	/* Enable device. 
*/ @@ -649,7 +652,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,  	return 0;  err_free_irq: -	free_irq(vmci_dev->irq, &vmci_dev); +	free_irq(vmci_dev->irq, vmci_dev);  	tasklet_kill(&vmci_dev->datagram_tasklet);  	tasklet_kill(&vmci_dev->bm_tasklet); diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index d4722b3dc8e..1723a6e4f2e 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -243,11 +243,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,  	/*  	 * Lock physical page backing a given user VA.  	 */ -	down_read(¤t->mm->mmap_sem); -	retval = get_user_pages(current, current->mm, -				PAGE_ALIGN(uva), -				1, 1, 0, &page, NULL); -	up_read(¤t->mm->mmap_sem); +	retval = get_user_pages_fast(PAGE_ALIGN(uva), 1, 1, &page);  	if (retval != 1)  		return VMCI_ERROR_GENERIC; diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index a0515a6d6eb..1b7b303085d 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -732,13 +732,9 @@ static int qp_host_get_user_memory(u64 produce_uva,  	int retval;  	int err = VMCI_SUCCESS; -	down_write(¤t->mm->mmap_sem); -	retval = get_user_pages(current, -				current->mm, -				(uintptr_t) produce_uva, -				produce_q->kernel_if->num_pages, -				1, 0, -				produce_q->kernel_if->u.h.header_page, NULL); +	retval = get_user_pages_fast((uintptr_t) produce_uva, +				     produce_q->kernel_if->num_pages, 1, +				     produce_q->kernel_if->u.h.header_page);  	if (retval < produce_q->kernel_if->num_pages) {  		pr_warn("get_user_pages(produce) failed (retval=%d)", retval);  		qp_release_pages(produce_q->kernel_if->u.h.header_page, @@ -747,12 +743,9 @@ static int qp_host_get_user_memory(u64 produce_uva,  		goto out;  	} -	retval = get_user_pages(current, -				current->mm, -				(uintptr_t) consume_uva, -				consume_q->kernel_if->num_pages, -				1, 0, -				
consume_q->kernel_if->u.h.header_page, NULL); +	retval = get_user_pages_fast((uintptr_t) consume_uva, +				     consume_q->kernel_if->num_pages, 1, +				     consume_q->kernel_if->u.h.header_page);  	if (retval < consume_q->kernel_if->num_pages) {  		pr_warn("get_user_pages(consume) failed (retval=%d)", retval);  		qp_release_pages(consume_q->kernel_if->u.h.header_page, @@ -763,8 +756,6 @@ static int qp_host_get_user_memory(u64 produce_uva,  	}   out: -	up_write(¤t->mm->mmap_sem); -  	return err;  }  | 
