Diffstat (limited to 'drivers/misc')
219 files changed, 59673 insertions, 4431 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e3551d20464..ee9402324a2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -2,23 +2,27 @@
 # Misc strange devices
 #
-menuconfig MISC_DEVICES
-	bool "Misc devices"
-	default y
-	---help---
-	  Say Y here to get to see options for device drivers from various
-	  different categories. This option alone does not add any kernel code.
-
-	  If you say N, all options in this submenu will be skipped and disabled.
+menu "Misc devices"
 
-if MISC_DEVICES
+config SENSORS_LIS3LV02D
+	tristate
+	depends on INPUT
+	select INPUT_POLLDEV
+	default n
 
 config AD525X_DPOT
-	tristate "Analog Devices AD525x Digital Potentiometers"
-	depends on I2C && SYSFS
+	tristate "Analog Devices Digital Potentiometers"
+	depends on (I2C || SPI) && SYSFS
 	help
 	  If you say yes here, you get support for the Analog Devices
-	  AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255
+	  AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255
+	  AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203,
+	  AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235,
+	  AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
+	  AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
+	  AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
+	  ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270,
+	  AD5271, AD5272, AD5274 digital potentiometer chips.
 
 	  See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -27,9 +31,30 @@ config AD525X_DPOT
 	  This driver can also be built as a module.  If so, the module
 	  will be called ad525x_dpot.
 
+config AD525X_DPOT_I2C
+	tristate "support I2C bus connection"
+	depends on AD525X_DPOT && I2C
+	help
+	  Say Y here if you have a digital potentiometer hooked to an I2C bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad525x_dpot-i2c.
+
+config AD525X_DPOT_SPI
+	tristate "support SPI bus connection"
+	depends on AD525X_DPOT && SPI_MASTER
+	help
+	  Say Y here if you have a digital potentiometer hooked to an SPI bus.
+
+	  If unsure, say N (but it's safe to say "Y").
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad525x_dpot-spi.
+
 config ATMEL_PWM
 	tristate "Atmel AT32/AT91 PWM support"
-	depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
+	depends on HAVE_CLK
+	depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
 	help
 	  This option enables device driver support for the PWM channels
 	  on certain Atmel processors.  Pulse Width Modulation is used for
@@ -46,7 +71,7 @@ config ATMEL_TCLIB
 
 config ATMEL_TCB_CLKSRC
 	bool "TC Block Clocksource"
-	depends on ATMEL_TCLIB && GENERIC_TIME
+	depends on ATMEL_TCLIB
 	default y
 	help
 	  Select this to get a high precision clocksource based on a
@@ -69,9 +94,17 @@ config ATMEL_TCB_CLKSRC_BLOCK
 	  TC can be used for other purposes, such as PWM generation and
 	  interval timing.
 
+config DUMMY_IRQ
+	tristate "Dummy IRQ handler"
+	default n
+	---help---
+	  This module accepts a single 'irq' parameter, which it registers.
+	  Its sole purpose is to help with debugging of systems on which
+	  spurious IRQs occur on a disabled IRQ vector.
+
 config IBM_ASM
 	tristate "Device driver for IBM RSA service processor"
-	depends on X86 && PCI && INPUT && EXPERIMENTAL
+	depends on X86 && PCI && INPUT
 	---help---
 	  This option enables device driver support for in-band access to the
 	  IBM RSA (Condor) service processor in eServer xSeries systems.
@@ -86,8 +119,8 @@ config IBM_ASM
 
 	  WARNING: This software may not be supported or function
 	  correctly on your IBM server. Please consult the IBM ServerProven
-	  website <http://www.pc.ibm.com/ww/eserver/xseries/serverproven> for
-	  information on the specific driver level and support statement
+	  website <http://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
+	  for information on the specific driver level and support statement
 	  for your IBM server.
 
 config PHANTOM
@@ -101,6 +134,20 @@ config PHANTOM
 	  If you choose to build module, its name will be phantom. If unsure,
 	  say N here.
 
+config INTEL_MID_PTI
+	tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
+	depends on PCI && TTY && (X86_INTEL_MID || COMPILE_TEST)
+	default n
+	help
+	  The PTI (Parallel Trace Interface) driver directs
+	  trace data routed from various parts in the system out
+	  through an Intel Penwell PTI port and out of the mobile
+	  device for analysis with a debugging tool (Lauterbach or Fido).
+
+	  You should select this driver if the target kernel is meant for
+	  an Intel Atom (non-netbook) mobile device containing a MIPI
+	  P1149.7 standard implementation.
+
 config SGI_IOC4
 	tristate "SGI IOC4 Base IO support"
 	depends on PCI
@@ -114,8 +161,8 @@ config SGI_IOC4
 	  Otherwise say N.
 
 config TIFM_CORE
-	tristate "TI Flash Media interface support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && PCI
+	tristate "TI Flash Media interface support"
+	depends on PCI
 	help
 	  If you want support for Texas Instruments(R) Flash Media adapters
 	  you should select this option and then also choose an appropriate
@@ -130,8 +177,8 @@ config TIFM_CORE
 	  be called tifm_core.
 
 config TIFM_7XX1
-	tristate "TI Flash Media PCI74xx/PCI76xx host adapter support (EXPERIMENTAL)"
-	depends on PCI && TIFM_CORE && EXPERIMENTAL
+	tristate "TI Flash Media PCI74xx/PCI76xx host adapter support"
+	depends on PCI && TIFM_CORE
 	default TIFM_CORE
 	help
 	  This option enables support for Texas Instruments(R) PCI74xx and
@@ -144,7 +191,7 @@ config TIFM_7XX1
 
 config ICS932S401
 	tristate "Integrated Circuits ICS932S401"
-	depends on I2C && EXPERIMENTAL
+	depends on I2C
 	help
 	  If you say yes here you get support for the Integrated Circuits
 	  ICS932S401 clock control chips.
@@ -154,7 +201,7 @@ config ICS932S401
 
 config ATMEL_SSC
 	tristate "Device driver for Atmel SSC peripheral"
-	depends on AVR32 || ARCH_AT91
+	depends on HAS_IOMEM && (AVR32 || ARCH_AT91 || COMPILE_TEST)
 	---help---
 	  This option enables device driver support for Atmel Synchronized
 	  Serial Communication peripheral (SSC).
@@ -189,8 +236,7 @@ config SGI_XP
 
 config CS5535_MFGPT
 	tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
-	depends on PCI
-	depends on X86
+	depends on MFD_CS5535
 	default n
 	help
 	  This driver provides access to MFGPT functionality for other
@@ -212,16 +258,25 @@ config CS5535_MFGPT_DEFAULT_IRQ
 	  want to use a different IRQ by default.  This is here for
 	  architectures to set as necessary.
 
+config CS5535_CLOCK_EVENT_SRC
+	tristate "CS5535/CS5536 high-res timer (MFGPT) events"
+	depends on GENERIC_CLOCKEVENTS && CS5535_MFGPT
+	help
+	  This driver provides a clock event source based on the MFGPT
+	  timer(s) in the CS5535 and CS5536 companion chips.
+	  MFGPTs have a better resolution and max interval than the
+	  generic PIT, and are suitable for use as high-res timers.
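[Editor's note, not part of the diff: the CS5535_MFGPT entry above describes an in-kernel allocation API that other drivers (such as the clock event source just added) consume. A minimal consumer sketch follows; the function and constant names are recalled from <linux/cs5535.h> as exported by cs5535-mfgpt.c and should be treated as assumptions rather than verified signatures.]

    /*
     * Hedged sketch of a CS5535 MFGPT consumer. Names assumed from
     * <linux/cs5535.h>; verify against your tree before relying on them.
     */
    #include <linux/cs5535.h>
    #include <linux/errno.h>

    static struct cs5535_mfgpt_timer *timer;

    static int example_grab_mfgpt(void)
    {
            int irq = 0;

            /* Ask the MFGPT core for any free timer in the working domain */
            timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
            if (!timer)
                    return -ENODEV;

            /* Route the comparator-2 event to an IRQ (0 = let the core pick) */
            if (cs5535_mfgpt_set_irq(timer, MFGPT_CMP2, &irq, 1)) {
                    cs5535_mfgpt_free_timer(timer);
                    return -EIO;
            }
            return irq;
    }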
+
 config HP_ILO
-	tristate "Channel interface driver for HP iLO/iLO2 processor"
+	tristate "Channel interface driver for the HP iLO processor"
 	depends on PCI
 	default n
 	help
 	  The channel interface driver allows applications to communicate
-	  with iLO/iLO2 management processors present on HP ProLiant
-	  servers.  Upon loading, the driver creates /dev/hpilo/dXccbN files,
-	  which can be used to gather data from the management processor,
-	  via read and write system calls.
+	  with iLO management processors present on HP ProLiant servers.
+	  Upon loading, the driver creates /dev/hpilo/dXccbN files, which
+	  can be used to gather data from the management processor, via
+	  read and write system calls.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called hpilo.
@@ -246,8 +301,18 @@ config SGI_GRU_DEBUG
 	depends on SGI_GRU
 	default n
 	---help---
-	This option enables addition debugging code for the SGI GRU driver. If
-	you are unsure, say N.
+	This option enables additional debugging code for the SGI GRU driver.
+	If you are unsure, say N.
+
+config APDS9802ALS
+	tristate "Medfield Avago APDS9802 ALS Sensor module"
+	depends on I2C
+	help
+	  If you say yes here you get support for the ALS APDS9802 ambient
+	  light sensor.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called apds9802als.
 
 config ISL29003
 	tristate "Intersil ISL29003 ambient light sensor"
@@ -259,22 +324,67 @@ config ISL29003
 	  This driver can also be built as a module.  If so, the module
 	  will be called isl29003.
 
-config EP93XX_PWM
-	tristate "EP93xx PWM support"
-	depends on ARCH_EP93XX
+config ISL29020
+	tristate "Intersil ISL29020 ambient light sensor"
+	depends on I2C
 	help
-	  This option enables device driver support for the PWM channels
-	  on the Cirrus EP93xx processors.  The EP9307 chip only has one
-	  PWM channel all the others have two, the second channel is an
-	  alternate function of the EGPIO14 pin.  A sysfs interface is
-	  provided to control the PWM channels.
+	  If you say yes here you get support for the Intersil ISL29020
+	  ambient light sensor.
 
-	  To compile this driver as a module, choose M here: the module will
-	  be called ep93xx_pwm.
+	  This driver can also be built as a module.  If so, the module
+	  will be called isl29020.
+
+config SENSORS_TSL2550
+	tristate "Taos TSL2550 ambient light sensor"
+	depends on I2C && SYSFS
+	help
+	  If you say yes here you get support for the Taos TSL2550
+	  ambient light sensor.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called tsl2550.
+
+config SENSORS_BH1780
+	tristate "ROHM BH1780GLI ambient light sensor"
+	depends on I2C && SYSFS
+	help
+	  If you say yes here you get support for the ROHM BH1780GLI
+	  ambient light sensor.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called bh1780gli.
+
+config SENSORS_BH1770
+	tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+	depends on I2C
+	---help---
+	  Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+	  SFH7770 (Osram) combined ambient light and proximity sensor chip.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bh1770glc. If unsure, say N here.
+
+config SENSORS_APDS990X
+	tristate "APDS990X combined ALS and proximity sensors"
+	depends on I2C
+	default n
+	---help---
+	  Say Y here if you want to build a driver for Avago APDS990x
+	  combined ambient light and proximity sensor chip.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called apds990x. If unsure, say N here.
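[Editor's note, not part of the diff: the HP_ILO entry above states that the driver exposes /dev/hpilo/dXccbN channel files driven by ordinary read()/write() system calls. A minimal userspace sketch follows; /dev/hpilo/d0ccb0 is a hypothetical device instance, and the on-channel message format is iLO-specific and not shown here.]

    /* Hedged userspace sketch for the hpilo channel interface.
     * The device path is a hypothetical example instance; real use
     * requires the iLO CCB request/response protocol, not shown.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[512];
            int fd = open("/dev/hpilo/d0ccb0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* A real client would write() an iLO request packet first,
             * then read() back the management processor's response. */
            ssize_t n = read(fd, buf, sizeof(buf));
            if (n >= 0)
                    printf("read %zd bytes from iLO channel\n", n);
            close(fd);
            return 0;
    }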
+
+config HMC6352
+	tristate "Honeywell HMC6352 compass"
+	depends on I2C
+	help
+	  This driver provides support for the Honeywell HMC6352 compass,
+	  providing configuration and heading data via sysfs.
 
 config DS1682
 	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
-	depends on I2C && EXPERIMENTAL
+	depends on I2C
 	help
 	  If you say yes here you get support for Dallas Semiconductor
 	  DS1682 Total Elapsed Time Recorder.
@@ -282,6 +392,16 @@ config DS1682
 	  This driver can also be built as a module.  If so, the module
 	  will be called ds1682.
 
+config SPEAR13XX_PCIE_GADGET
+	bool "PCIe gadget support for SPEAr13XX platform"
+	depends on ARCH_SPEAR13XX && BROKEN
+	default n
+	help
+	  This option enables gadget support for the PCIe controller. If
+	  the board file defines any controller as a PCIe endpoint, a sysfs
+	  entry will be created for that controller. Users can use these
+	  sysfs nodes to configure the PCIe endpoint as required.
+
 config TI_DAC7512
 	tristate "Texas Instruments DAC7512"
 	depends on SPI && SYSFS
@@ -290,11 +410,131 @@ config TI_DAC7512
 	  DAC7512 16-bit digital-to-analog converter.
 
 	  This driver can also be built as a module. If so, the module
-	  will be calles ti_dac7512.
+	  will be called ti_dac7512.
+
+config VMWARE_BALLOON
+	tristate "VMware Balloon Driver"
+	depends on X86 && HYPERVISOR_GUEST
+	help
+	  This is the VMware physical memory management driver, which acts
+	  like a "balloon" that can be inflated to reclaim physical pages
+	  by reserving them in the guest and invalidating them in the
+	  monitor, freeing up the underlying machine pages so they can
+	  be allocated to other guests. The balloon can also be deflated
+	  to allow the guest to use more physical memory.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmw_balloon.
+
+config ARM_CHARLCD
+	bool "ARM Ltd. Character LCD Driver"
+	depends on PLAT_VERSATILE
+	help
+	  This is a driver for the character LCD found on the ARM Ltd.
+	  Versatile and RealView Platform Baseboards. It doesn't do
+	  very much more than display the text "ARM Linux" on the first
+	  line and the Linux version on the second line, but that's
+	  still useful.
+
+config BMP085
+	bool
+	depends on SYSFS
+
+config BMP085_I2C
+	tristate "BMP085 digital pressure sensor on I2C"
+	select BMP085
+	select REGMAP_I2C
+	depends on I2C && SYSFS
+	help
+	  Say Y here if you want to support Bosch Sensortec's digital pressure
+	  sensor hooked to an I2C bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bmp085-i2c.
+
+config BMP085_SPI
+	tristate "BMP085 digital pressure sensor on SPI"
+	select BMP085
+	select REGMAP_SPI
+	depends on SPI_MASTER && SYSFS
+	help
+	  Say Y here if you want to support Bosch Sensortec's digital pressure
+	  sensor hooked to an SPI bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bmp085-spi.
+
+config PCH_PHUB
+	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
+	select GENERIC_NET_UTILS
+	depends on PCI && (X86_32 || COMPILE_TEST)
+	help
+	  This driver is for the PHUB (Packet Hub) of the Intel Topcliff PCH
+	  (Platform Controller Hub), an IOH (Input/Output Hub) for x86
+	  embedded processors. The Topcliff holds its MAC address and Option
+	  ROM data in SROM, and this driver provides access to both.
+
+	  This driver can also be used for LAPIS Semiconductor's IOHs
+	  ML7213/ML7223/ML7831: the ML7213 is for IVI (In-Vehicle
+	  Infotainment) use, the ML7223 IOH is for MP (Media Phone) use,
+	  and the ML7831 IOH is for general-purpose use. All three are
+	  companion chips for the Intel Atom E6xx series and are fully
+	  compatible with the Intel EG20T PCH.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called pch_phub.
+
+config USB_SWITCH_FSA9480
+	tristate "FSA9480 USB Switch"
+	depends on I2C
+	help
+	  The FSA9480 is a USB port accessory detector and switch.
+	  The FSA9480 is fully controlled using I2C and enables USB data,
+	  stereo and mono audio, video, microphone and UART data to use
+	  a common connector port.
+
+config LATTICE_ECP3_CONFIG
+	tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
+	depends on SPI && SYSFS
+	select FW_LOADER
+	default n
+	help
+	  This option enables support for bitstream configuration (programming
+	  or loading) of the Lattice ECP3 FPGA family via SPI.
+
+	  If unsure, say N.
+
+config SRAM
+	bool "Generic on-chip SRAM driver"
+	depends on HAS_IOMEM
+	select GENERIC_ALLOCATOR
+	help
+	  This driver allows you to declare a memory region to be managed by
+	  the genalloc API. It is supposed to be used for small on-chip SRAM
+	  areas found on many SoCs.
+
+config VEXPRESS_SYSCFG
+	bool "Versatile Express System Configuration driver"
+	depends on VEXPRESS_CONFIG
+	default y
+	help
+	  ARM Ltd. Versatile Express uses a specialised platform configuration
+	  bus. The System Configuration interface is one of the possible means
+	  of generating transactions on this bus.
 
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
-source "drivers/misc/iwmc3200top/Kconfig"
-
-endif # MISC_DEVICES
+source "drivers/misc/ti-st/Kconfig"
+source "drivers/misc/lis3lv02d/Kconfig"
+source "drivers/misc/carma/Kconfig"
+source "drivers/misc/altera-stapl/Kconfig"
+source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vmw_vmci/Kconfig"
+source "drivers/misc/mic/Kconfig"
+source "drivers/misc/genwqe/Kconfig"
+source "drivers/misc/echo/Kconfig"
+endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 049ff2482f3..d59ce1261b3 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -3,16 +3,25 @@
 #
 
 obj-$(CONFIG_IBM_ASM)		+= ibmasm/
-obj-$(CONFIG_HDPU_FEATURES)	+= hdpuftrs/
 obj-$(CONFIG_AD525X_DPOT)	+= ad525x_dpot.o
+obj-$(CONFIG_AD525X_DPOT_I2C)	+= ad525x_dpot-i2c.o
+obj-$(CONFIG_AD525X_DPOT_SPI)	+= ad525x_dpot-spi.o
+obj-$(CONFIG_INTEL_MID_PTI)	+= pti.o
 obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
 obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
+obj-$(CONFIG_BMP085)		+= bmp085.o
+obj-$(CONFIG_BMP085_I2C)	+= bmp085-i2c.o
+obj-$(CONFIG_BMP085_SPI)	+= bmp085-spi.o
+obj-$(CONFIG_DUMMY_IRQ)		+= dummy-irq.o
 obj-$(CONFIG_ICS932S401)	+= ics932s401.o
 obj-$(CONFIG_LKDTM)		+= lkdtm.o
 obj-$(CONFIG_TIFM_CORE)		+= tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)		+= tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)		+= phantom.o
+obj-$(CONFIG_SENSORS_BH1780)	+= bh1780gli.o
+obj-$(CONFIG_SENSORS_BH1770)	+= bh1770glc.o
+obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
@@ -20,11 +29,30 @@ obj-$(CONFIG_SGI_XP)		+= sgi-xp/
 obj-$(CONFIG_SGI_GRU)		+= sgi-gru/
 obj-$(CONFIG_CS5535_MFGPT)	+= cs5535-mfgpt.o
 obj-$(CONFIG_HP_ILO)		+= hpilo.o
+obj-$(CONFIG_APDS9802ALS)	+= apds9802als.o
 obj-$(CONFIG_ISL29003)		+= isl29003.o
-obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
+obj-$(CONFIG_ISL29020)		+= isl29020.o
+obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
 obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o
 obj-$(CONFIG_C2PORT)		+= c2port/
-obj-$(CONFIG_IWMC3200TOP)	+= iwmc3200top/
+obj-$(CONFIG_HMC6352)		+= hmc6352.o
 obj-y				+= eeprom/
 obj-y				+= cb710/
+obj-$(CONFIG_SPEAR13XX_PCIE_GADGET)	+= spear13xx_pcie_gadget.o
+obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
+obj-$(CONFIG_ARM_CHARLCD)	+= arm-charlcd.o
+obj-$(CONFIG_PCH_PHUB)		+= pch_phub.o
+obj-y				+= ti-st/
+obj-y				+= lis3lv02d/
+obj-y				+= carma/
+obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
+obj-$(CONFIG_ALTERA_STAPL)	+= altera-stapl/
+obj-$(CONFIG_INTEL_MEI)		+= mei/
+obj-$(CONFIG_VMWARE_VMCI)	+= vmw_vmci/
+obj-$(CONFIG_LATTICE_ECP3_CONFIG)	+= lattice-ecp3-config.o
+obj-$(CONFIG_SRAM)		+= sram.o
+obj-y				+= mic/
+obj-$(CONFIG_GENWQE)		+= genwqe/
+obj-$(CONFIG_ECHO)		+= echo/
+obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
new file mode 100644
index 00000000000..705b881e186
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -0,0 +1,121 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (I2C bus)
+ *
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+/* I2C bus functions */
+static int write_d8(void *client, u8 val)
+{
+	return i2c_smbus_write_byte(client, val);
+}
+
+static int write_r8d8(void *client, u8 reg, u8 val)
+{
+	return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int write_r8d16(void *client, u8 reg, u16 val)
+{
+	return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int read_d8(void *client)
+{
+	return i2c_smbus_read_byte(client);
+}
+
+static int read_r8d8(void *client, u8 reg)
+{
+	return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int read_r8d16(void *client, u8 reg)
+{
+	return i2c_smbus_read_word_data(client, reg);
+}
+
+static const struct ad_dpot_bus_ops bops = {
+	.read_d8	= read_d8,
+	.read_r8d8	= read_r8d8,
+	.read_r8d16	= read_r8d16,
+	.write_d8	= write_d8,
+	.write_r8d8	= write_r8d8,
+	.write_r8d16	= write_r8d16,
+};
+
+static int ad_dpot_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct ad_dpot_bus_data bdata = {
+		.client = client,
+		.bops = &bops,
+	};
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_WORD_DATA)) {
+		dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+		return -EIO;
+	}
+
+	return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
+}
+
+static int ad_dpot_i2c_remove(struct i2c_client *client)
+{
+	return ad_dpot_remove(&client->dev);
+}
+
+static const struct i2c_device_id ad_dpot_id[] = {
+	{"ad5258", AD5258_ID},
+	{"ad5259", AD5259_ID},
+	{"ad5251", AD5251_ID},
+	{"ad5252", AD5252_ID},
+	{"ad5253", AD5253_ID},
+	{"ad5254", AD5254_ID},
+	{"ad5255", AD5255_ID},
+	{"ad5241", AD5241_ID},
+	{"ad5242", AD5242_ID},
+	{"ad5243", AD5243_ID},
+	{"ad5245", AD5245_ID},
+	{"ad5246", AD5246_ID},
+	{"ad5247", AD5247_ID},
+	{"ad5248", AD5248_ID},
+	{"ad5280", AD5280_ID},
+	{"ad5282", AD5282_ID},
+	{"adn2860", ADN2860_ID},
+	{"ad5273", AD5273_ID},
+	{"ad5161", AD5161_ID},
+	{"ad5171", AD5171_ID},
+	{"ad5170", AD5170_ID},
+	{"ad5172", AD5172_ID},
+	{"ad5173", AD5173_ID},
+	{"ad5272", AD5272_ID},
+	{"ad5274", AD5274_ID},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
+
+static struct i2c_driver ad_dpot_i2c_driver = {
+	.driver = {
+		.name	= "ad_dpot",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad_dpot_i2c_probe,
+	.remove		= ad_dpot_i2c_remove,
+	.id_table	= ad_dpot_id,
+};
+
+module_i2c_driver(ad_dpot_i2c_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
new file mode 100644
index 00000000000..9da04ede04f
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -0,0 +1,143 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (SPI bus)
+ *
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+/* SPI bus functions */
+static int write8(void *client, u8 val)
+{
+	u8 data = val;
+	return spi_write(client, &data, 1);
+}
+
+static int write16(void *client, u8 reg, u8 val)
+{
+	u8 data[2] = {reg, val};
+	return spi_write(client, data, 2);
+}
+
+static int write24(void *client, u8 reg, u16 val)
+{
+	u8 data[3] = {reg, val >> 8, val};
+	return spi_write(client, data, 3);
+}
+
+static int read8(void *client)
+{
+	int ret;
+	u8 data;
+	ret = spi_read(client, &data, 1);
+	if (ret < 0)
+		return ret;
+
+	return data;
+}
+
+static int read16(void *client, u8 reg)
+{
+	int ret;
+	u8 buf_rx[2];
+
+	write16(client, reg, 0);
+	ret = spi_read(client, buf_rx, 2);
+	if (ret < 0)
+		return ret;
+
+	return (buf_rx[0] << 8) | buf_rx[1];
+}
+
+static int read24(void *client, u8 reg)
+{
+	int ret;
+	u8 buf_rx[3];
+
+	write24(client, reg, 0);
+	ret = spi_read(client, buf_rx, 3);
+	if (ret < 0)
+		return ret;
+
+	return (buf_rx[1] << 8) | buf_rx[2];
+}
+
+static const struct ad_dpot_bus_ops bops = {
+	.read_d8	= read8,
+	.read_r8d8	= read16,
+	.read_r8d16	= read24,
+	.write_d8	= write8,
+	.write_r8d8	= write16,
+	.write_r8d16	= write24,
+};
+
+static int ad_dpot_spi_probe(struct spi_device *spi)
+{
+	struct ad_dpot_bus_data bdata = {
+		.client = spi,
+		.bops = &bops,
+	};
+
+	return ad_dpot_probe(&spi->dev, &bdata,
+			     spi_get_device_id(spi)->driver_data,
+			     spi_get_device_id(spi)->name);
+}
+
+static int ad_dpot_spi_remove(struct spi_device *spi)
+{
+	return ad_dpot_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad_dpot_spi_id[] = {
+	{"ad5160", AD5160_ID},
+	{"ad5161", AD5161_ID},
+	{"ad5162", AD5162_ID},
+	{"ad5165", AD5165_ID},
+	{"ad5200", AD5200_ID},
+	{"ad5201", AD5201_ID},
+	{"ad5203", AD5203_ID},
+	{"ad5204", AD5204_ID},
+	{"ad5206", AD5206_ID},
+	{"ad5207", AD5207_ID},
+	{"ad5231", AD5231_ID},
+	{"ad5232", AD5232_ID},
+	{"ad5233", AD5233_ID},
+	{"ad5235", AD5235_ID},
+	{"ad5260", AD5260_ID},
+	{"ad5262", AD5262_ID},
+	{"ad5263", AD5263_ID},
+	{"ad5290", AD5290_ID},
+	{"ad5291", AD5291_ID},
+	{"ad5292", AD5292_ID},
+	{"ad5293", AD5293_ID},
+	{"ad7376", AD7376_ID},
+	{"ad8400", AD8400_ID},
+	{"ad8402", AD8402_ID},
+	{"ad8403", AD8403_ID},
+	{"adn2850", ADN2850_ID},
+	{"ad5270", AD5270_ID},
+	{"ad5271", AD5271_ID},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
+
+static struct spi_driver ad_dpot_spi_driver = {
+	.driver = {
+		.name	= "ad_dpot",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad_dpot_spi_probe,
+	.remove		= ad_dpot_spi_remove,
+	.id_table	= ad_dpot_spi_id,
+};
+
+module_spi_driver(ad_dpot_spi_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 30a59f2bacd..a43053daad0 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,6 +1,6 @@
 /*
- * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers
- * Copyright (c) 2009 Analog Devices, Inc.
+ * ad525x_dpot: Driver for the Analog Devices digital potentiometers
+ * Copyright (c) 2009-2010 Analog Devices, Inc.
  * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
  *
  * DEVID	#Wipers	#Positions	Resistor Options (kOhm)
@@ -11,6 +11,51 @@
  * AD5255	3	512		25, 250
  * AD5253	4	64		1, 10, 50, 100
  * AD5254	4	256		1, 10, 50, 100
+ * AD5160	1	256		5, 10, 50, 100
+ * AD5161	1	256		5, 10, 50, 100
+ * AD5162	2	256		2.5, 10, 50, 100
+ * AD5165	1	256		100
+ * AD5200	1	256		10, 50
+ * AD5201	1	33		10, 50
+ * AD5203	4	64		10, 100
+ * AD5204	4	256		10, 50, 100
+ * AD5206	6	256		10, 50, 100
+ * AD5207	2	256		10, 50, 100
+ * AD5231	1	1024		10, 50, 100
+ * AD5232	2	256		10, 50, 100
+ * AD5233	4	64		10, 50, 100
+ * AD5235	2	1024		25, 250
+ * AD5260	1	256		20, 50, 200
+ * AD5262	2	256		20, 50, 200
+ * AD5263	4	256		20, 50, 200
+ * AD5290	1	256		10, 50, 100
+ * AD5291	1	256		20, 50, 100 (20-TP)
+ * AD5292	1	1024		20, 50, 100 (20-TP)
+ * AD5293	1	1024		20, 50, 100
+ * AD7376	1	128		10, 50, 100, 1M
+ * AD8400	1	256		1, 10, 50, 100
+ * AD8402	2	256		1, 10, 50, 100
+ * AD8403	4	256		1, 10, 50, 100
+ * ADN2850	3	512		25, 250
+ * AD5241	1	256		10, 100, 1M
+ * AD5246	1	128		5, 10, 50, 100
+ * AD5247	1	128		5, 10, 50, 100
+ * AD5245	1	256		5, 10, 50, 100
+ * AD5243	2	256		2.5, 10, 50, 100
+ * AD5248	2	256		2.5, 10, 50, 100
+ * AD5242	2	256		20, 50, 200
+ * AD5280	1	256		20, 50, 200
+ * AD5282	2	256		20, 50, 200
+ * ADN2860	3	512		25, 250
+ * AD5273	1	64		1, 10, 50, 100 (OTP)
+ * AD5171	1	64		5, 10, 50, 100 (OTP)
+ * AD5170	1	256		2.5, 10, 50, 100 (OTP)
+ * AD5172	2	256		2.5, 10, 50, 100 (OTP)
+ * AD5173	2	256		2.5, 10, 50, 100 (OTP)
+ * AD5270	1	1024		20, 50, 100 (50-TP)
+ * AD5271	1	256		20, 50, 100 (50-TP)
+ * AD5272	1	1024		20, 50, 100 (50-TP)
+ * AD5274	1	256		20, 50, 100 (50-TP)
  *
  * See Documentation/misc-devices/ad525x_dpot.txt for more info.
  *
@@ -19,7 +64,7 @@
  * Author: Chris Verges <chrisv@cyberswitching.com>
  *
  * derived from ad5252.c
- * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
  *
  * Licensed under the GPL-2 or later.
  */
@@ -27,375 +72,561 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 
-#define DRIVER_NAME	"ad525x_dpot"
-#define DRIVER_VERSION	"0.1"
-
-enum dpot_devid {
-	AD5258_ID,
-	AD5259_ID,
-	AD5251_ID,
-	AD5252_ID,
-	AD5253_ID,
-	AD5254_ID,
-	AD5255_ID,
-};
-
-#define AD5258_MAX_POSITION	64
-#define AD5259_MAX_POSITION	256
-#define AD5251_MAX_POSITION	64
-#define AD5252_MAX_POSITION	256
-#define AD5253_MAX_POSITION	64
-#define AD5254_MAX_POSITION	256
-#define AD5255_MAX_POSITION	512
-
-#define AD525X_RDAC0		0
-#define AD525X_RDAC1		1
-#define AD525X_RDAC2		2
-#define AD525X_RDAC3		3
-
-#define AD525X_REG_TOL		0x18
-#define AD525X_TOL_RDAC0	(AD525X_REG_TOL | AD525X_RDAC0)
-#define AD525X_TOL_RDAC1	(AD525X_REG_TOL | AD525X_RDAC1)
-#define AD525X_TOL_RDAC2	(AD525X_REG_TOL | AD525X_RDAC2)
-#define AD525X_TOL_RDAC3	(AD525X_REG_TOL | AD525X_RDAC3)
-
-/* RDAC-to-EEPROM Interface Commands */
-#define AD525X_I2C_RDAC		(0x00 << 5)
-#define AD525X_I2C_EEPROM	(0x01 << 5)
-#define AD525X_I2C_CMD		(0x80)
-
-#define AD525X_DEC_ALL_6DB	(AD525X_I2C_CMD | (0x4 << 3))
-#define AD525X_INC_ALL_6DB	(AD525X_I2C_CMD | (0x9 << 3))
-#define AD525X_DEC_ALL		(AD525X_I2C_CMD | (0x6 << 3))
-#define AD525X_INC_ALL		(AD525X_I2C_CMD | (0xB << 3))
-
-static s32 ad525x_read(struct i2c_client *client, u8 reg);
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
+#include "ad525x_dpot.h"
 
 /*
  * Client data (each client gets its own)
  */
 struct dpot_data {
+	struct ad_dpot_bus_data	bdata;
 	struct mutex update_lock;
 	unsigned rdac_mask;
 	unsigned max_pos;
-	unsigned devid;
+	unsigned long devid;
+	unsigned uid;
+	unsigned feat;
+	unsigned wipers;
+	u16 rdac_cache[MAX_RDACS];
+	DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
 };
 
-/* sysfs functions */
-
-static ssize_t sysfs_show_reg(struct device *dev,
-			      struct device_attribute *attr, char *buf, u32 reg)
+static inline int dpot_read_d8(struct dpot_data *dpot)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dpot_data *data = i2c_get_clientdata(client);
-	s32 value;
-
-	mutex_lock(&data->update_lock);
-	value = ad525x_read(client, reg);
-	mutex_unlock(&data->update_lock);
-
-	if (value < 0)
-		return -EINVAL;
-	/*
-	 * Let someone else deal with converting this ...
-	 * the tolerance is a two-byte value where the MSB
-	 * is a sign + integer value, and the LSB is a
-	 * decimal value.  See page 18 of the AD5258
-	 * datasheet (Rev. A) for more details.
-	 */
-
-	if (reg & AD525X_REG_TOL)
-		return sprintf(buf, "0x%04x\n", value & 0xFFFF);
-	else
-		return sprintf(buf, "%u\n", value & data->rdac_mask);
+	return dpot->bdata.bops->read_d8(dpot->bdata.client);
 }
 
-static ssize_t sysfs_set_reg(struct device *dev,
-			     struct device_attribute *attr,
-			     const char *buf, size_t count, u32 reg)
+static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dpot_data *data = i2c_get_clientdata(client);
-	unsigned long value;
-	int err;
-
-	err = strict_strtoul(buf, 10, &value);
-	if (err)
-		return err;
-
-	if (value > data->rdac_mask)
-		value = data->rdac_mask;
-
-	mutex_lock(&data->update_lock);
-	ad525x_write(client, reg, value);
-	if (reg & AD525X_I2C_EEPROM)
-		msleep(26);	/* Sleep while the EEPROM updates */
-	mutex_unlock(&data->update_lock);
-
-	return count;
+	return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
 }
 
-static ssize_t sysfs_do_cmd(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count, u32 reg)
+static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
 {
-	struct i2c_client *client = to_i2c_client(dev);
-	struct dpot_data *data = i2c_get_clientdata(client);
-
-	mutex_lock(&data->update_lock);
-	ad525x_write(client, reg, 0);
-	mutex_unlock(&data->update_lock);
-
-	return count;
+	return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
 }
 
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac0(struct device *dev,
-			  struct device_attribute *attr, char *buf)
+static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
 {
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0);
+	return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
 }
 
-static ssize_t set_rdac0(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
+static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
 {
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC0);
+	return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
 }
 
-static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
-
-static ssize_t show_eeprom0(struct device *dev,
-			    struct device_attribute *attr, char *buf)
+static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
 {
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0);
+	return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
 }
 
-static ssize_t set_eeprom0(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
+static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
 {
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC0);
-}
+	unsigned ctrl = 0;
+	int value;
 
-static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0);
+	if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
 
-static ssize_t show_tolerance0(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC0);
-}
+		if (dpot->feat & F_RDACS_WONLY)
+			return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
+		if (dpot->uid == DPOT_UID(AD5291_ID) ||
+		    dpot->uid == DPOT_UID(AD5292_ID) ||
+		    dpot->uid == DPOT_UID(AD5293_ID)) {
 
-static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL);
+			value = dpot_read_r8d8(dpot,
+				DPOT_AD5291_READ_RDAC << 2);
 
-/* ------------------------------------------------------------------------- */
+			if (dpot->uid == DPOT_UID(AD5291_ID))
+				value = value >> 2;
 
-static ssize_t show_rdac1(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1);
-}
+			return value;
+		} else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+			   dpot->uid == DPOT_UID(AD5271_ID)) {
 
-static ssize_t set_rdac1(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC1);
-}
+			value = dpot_read_r8d8(dpot,
+				DPOT_AD5270_1_2_4_READ_RDAC << 2);
 
-static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1);
+			if (value < 0)
+				return value;
 
-static ssize_t show_eeprom1(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
+			if (dpot->uid == DPOT_UID(AD5271_ID))
+				value = value >> 2;
 
-static ssize_t set_eeprom1(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
+			return value;
+		}
 
-static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
+		ctrl = DPOT_SPI_READ_RDAC;
+	} else if (reg & DPOT_ADDR_EEPROM) {
+		ctrl = DPOT_SPI_READ_EEPROM;
+	}
 
-static ssize_t show_tolerance1(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
+	if (dpot->feat & F_SPI_16BIT)
+		return dpot_read_r8d8(dpot, ctrl);
+	else if (dpot->feat & F_SPI_24BIT)
+		return dpot_read_r8d16(dpot, ctrl);
+
+	return -EFAULT;
+}
+
+static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+{
+	int value;
+	unsigned ctrl = 0;
+	switch (dpot->uid) {
+	case DPOT_UID(AD5246_ID):
+	case DPOT_UID(AD5247_ID):
+		return dpot_read_d8(dpot);
+	case DPOT_UID(AD5245_ID):
+	case DPOT_UID(AD5241_ID):
+	case DPOT_UID(AD5242_ID):
+	case DPOT_UID(AD5243_ID):
+	case DPOT_UID(AD5248_ID):
+	case DPOT_UID(AD5280_ID):
+	case DPOT_UID(AD5282_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5282_RDAC_AB;
+		return dpot_read_r8d8(dpot, ctrl);
+	case DPOT_UID(AD5170_ID):
+	case DPOT_UID(AD5171_ID):
+	case DPOT_UID(AD5273_ID):
+		return dpot_read_d8(dpot);
+	case DPOT_UID(AD5172_ID):
+	case DPOT_UID(AD5173_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5172_3_A0;
+		return dpot_read_r8d8(dpot, ctrl);
+	case DPOT_UID(AD5272_ID):
+	case DPOT_UID(AD5274_ID):
+		dpot_write_r8d8(dpot,
+				(DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
+
+		value = dpot_read_r8d16(dpot,
+					DPOT_AD5270_1_2_4_RDAC << 2);
+
+		if (value < 0)
+			return value;
+		/*
+		 * AD5272/AD5274 returns high byte first, however
+		 * the underlying smbus expects low byte first.
+		 */
+		value = swab16(value);
+
+		if (dpot->uid == DPOT_UID(AD5271_ID))
+			value = value >> 2;
+		return value;
+	default:
+		if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
+			return dpot_read_r8d16(dpot, (reg & 0xF8) |
+					       ((reg & 0x7) << 1));
+		else
+			return dpot_read_r8d8(dpot, reg);
+	}
 }
 
-static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
+static s32 dpot_read(struct dpot_data *dpot, u8 reg)
+{
+	if (dpot->feat & F_SPI)
+		return dpot_read_spi(dpot, reg);
+	else
+		return dpot_read_i2c(dpot, reg);
+}
+
+static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
+{
+	unsigned val = 0;
+
+	if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
+		if (dpot->feat & F_RDACS_WONLY)
+			dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
+
+		if (dpot->feat & F_AD_APPDATA) {
+			if (dpot->feat & F_SPI_8BIT) {
+				val = ((reg & DPOT_RDAC_MASK) <<
+				       DPOT_MAX_POS(dpot->devid)) |
+				      value;
+				return dpot_write_d8(dpot, val);
+			} else if (dpot->feat & F_SPI_16BIT) {
+				val = ((reg & DPOT_RDAC_MASK) <<
+				       DPOT_MAX_POS(dpot->devid)) |
+				      value;
+				return dpot_write_r8d8(dpot, val >> 8,
+						       val & 0xFF);
+			} else
+				BUG();
+		} else {
+			if (dpot->uid == DPOT_UID(AD5291_ID) ||
+			    dpot->uid == DPOT_UID(AD5292_ID) ||
+			    dpot->uid == DPOT_UID(AD5293_ID)) {
+
+				dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
+						DPOT_AD5291_UNLOCK_CMD);
+
+				if (dpot->uid == DPOT_UID(AD5291_ID))
+					value = value << 2;
+
+				return dpot_write_r8d8(dpot,
+					(DPOT_AD5291_RDAC << 2) |
+					(value >> 8), value & 0xFF);
+			} else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+				   dpot->uid == DPOT_UID(AD5271_ID)) {
+				dpot_write_r8d8(dpot,
+						DPOT_AD5270_1_2_4_CTRLREG << 2,
+						DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+				if (dpot->uid == DPOT_UID(AD5271_ID))
+					value = value << 2;
+
+				return dpot_write_r8d8(dpot,
+					(DPOT_AD5270_1_2_4_RDAC << 2) |
+					(value >> 8), value & 0xFF);
+			}
+			val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
+		}
+	} else if (reg & DPOT_ADDR_EEPROM) {
+		val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
+	} else if (reg & DPOT_ADDR_CMD) {
+		switch (reg) {
+		case DPOT_DEC_ALL_6DB:
+			val = DPOT_SPI_DEC_ALL_6DB;
+			break;
+		case DPOT_INC_ALL_6DB:
+			val = DPOT_SPI_INC_ALL_6DB;
+			break;
+		case DPOT_DEC_ALL:
+			val = DPOT_SPI_DEC_ALL;
+			break;
+		case DPOT_INC_ALL:
+			val = DPOT_SPI_INC_ALL;
+			break;
+		}
+	} else if (reg & DPOT_ADDR_OTP) {
+		if (dpot->uid == DPOT_UID(AD5291_ID) ||
+		    dpot->uid == DPOT_UID(AD5292_ID)) {
+			return dpot_write_r8d8(dpot,
+				DPOT_AD5291_STORE_XTPM << 2, 0);
+		} else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+			   dpot->uid == DPOT_UID(AD5271_ID)) {
+			return dpot_write_r8d8(dpot,
+				DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+		}
+	} else
+		BUG();
+
+	if (dpot->feat & F_SPI_16BIT)
+		return dpot_write_r8d8(dpot, val, value);
+	else if (dpot->feat & F_SPI_24BIT)
+		return dpot_write_r8d16(dpot, val, value);
+
+	return -EFAULT;
+}
+
+static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
+{
+	/* Only write the instruction byte for certain commands */
+	unsigned tmp = 0, ctrl = 0;
 
-/* ------------------------------------------------------------------------- */
+	switch (dpot->uid) {
+	case DPOT_UID(AD5246_ID):
+	case DPOT_UID(AD5247_ID):
+		return dpot_write_d8(dpot, value);
+		break;
 
-static ssize_t show_rdac2(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
-}
+	case DPOT_UID(AD5245_ID):
+	case DPOT_UID(AD5241_ID):
+	case DPOT_UID(AD5242_ID):
+	case DPOT_UID(AD5243_ID):
+	case DPOT_UID(AD5248_ID):
+	case DPOT_UID(AD5280_ID):
+	case DPOT_UID(AD5282_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5282_RDAC_AB;
+		return dpot_write_r8d8(dpot, ctrl, value);
+		break;
+	case DPOT_UID(AD5171_ID):
+	case DPOT_UID(AD5273_ID):
+		if (reg & DPOT_ADDR_OTP) {
+			tmp = dpot_read_d8(dpot);
+			if (tmp >> 6) /* Ready to Program? */
+				return -EFAULT;
+			ctrl = DPOT_AD5273_FUSE;
+		}
+		return dpot_write_r8d8(dpot, ctrl, value);
+		break;
+	case DPOT_UID(AD5172_ID):
+	case DPOT_UID(AD5173_ID):
+		ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+			0 : DPOT_AD5172_3_A0;
+		if (reg & DPOT_ADDR_OTP) {
+			tmp = dpot_read_r8d16(dpot, ctrl);
+			if (tmp >> 14) /* Ready to Program? */
+				return -EFAULT;
+			ctrl |= DPOT_AD5170_2_3_FUSE;
+		}
+		return dpot_write_r8d8(dpot, ctrl, value);
+		break;
+	case DPOT_UID(AD5170_ID):
+		if (reg & DPOT_ADDR_OTP) {
+			tmp = dpot_read_r8d16(dpot, tmp);
+			if (tmp >> 14) /* Ready to Program? */
+				return -EFAULT;
+			ctrl = DPOT_AD5170_2_3_FUSE;
+		}
+		return dpot_write_r8d8(dpot, ctrl, value);
+		break;
+	case DPOT_UID(AD5272_ID):
+	case DPOT_UID(AD5274_ID):
+		dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
+				DPOT_AD5270_1_2_4_UNLOCK_CMD);
 
-static ssize_t set_rdac2(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC2);
-}
+		if (reg & DPOT_ADDR_OTP)
+			return dpot_write_r8d8(dpot,
+				DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
 
-static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
+		if (dpot->uid == DPOT_UID(AD5274_ID))
+			value = value << 2;
 
-static ssize_t show_eeprom2(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
+		return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
+				       (value >> 8), value & 0xFF);
+		break;
+	default:
+		if (reg & DPOT_ADDR_CMD)
+			return dpot_write_d8(dpot, reg);
+
+		if (dpot->max_pos > 256)
+			return dpot_write_r8d16(dpot, (reg & 0xF8) |
+						((reg & 0x7) << 1), value);
+		else
+			/* All other registers require instruction + data bytes */
+			return dpot_write_r8d8(dpot, reg, value);
+	}
 }
 
-static ssize_t set_eeprom2(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
+static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
 {
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC2);
+	if (dpot->feat & F_SPI)
+		return dpot_write_spi(dpot, reg, value);
+	else
+		return dpot_write_i2c(dpot, reg, value);
 }
 
-static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
+/* sysfs functions */
 
-static ssize_t show_tolerance2(struct device *dev,
-			       struct device_attribute *attr, char *buf)
+static ssize_t sysfs_show_reg(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf, u32 reg)
 {
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
-}
+	struct dpot_data *data = dev_get_drvdata(dev);
+	s32 value;
 
-static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
+	if (reg & DPOT_ADDR_OTP_EN)
+		return sprintf(buf, "%s\n",
+			test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
+			"enabled" : "disabled");
 
-/* ------------------------------------------------------------------------- */
-static ssize_t show_rdac3(struct device *dev,
-			  struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
+	mutex_lock(&data->update_lock);
+	value = dpot_read(data, reg);
+	mutex_unlock(&data->update_lock);
+
+	if (value < 0)
+		return -EINVAL;
+	/*
+	 * Let someone else deal with converting this ...
+	 * the tolerance is a two-byte value where the MSB
+	 * is a sign + integer value, and the LSB is a
+	 * decimal value.  See page 18 of the AD5258
+	 * datasheet (Rev. A) for more details.
+	 */
+
+	if (reg & DPOT_REG_TOL)
+		return sprintf(buf, "0x%04x\n", value & 0xFFFF);
+	else
+		return sprintf(buf, "%u\n", value & data->rdac_mask);
 }
 
-static ssize_t set_rdac3(struct device *dev,
-			 struct device_attribute *attr,
-			 const char *buf, size_t count)
+static ssize_t sysfs_set_reg(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count, u32 reg)
 {
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_RDAC | AD525X_RDAC3);
-}
+	struct dpot_data *data = dev_get_drvdata(dev);
+	unsigned long value;
+	int err;
 
-static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
+	if (reg & DPOT_ADDR_OTP_EN) {
+		if (!strncmp(buf, "enabled", sizeof("enabled")))
+			set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+		else
+			clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
 
-static ssize_t show_eeprom3(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
+		return count;
+	}
 
-static ssize_t set_eeprom3(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_set_reg(dev, attr, buf, count,
-			     AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
+	if ((reg & DPOT_ADDR_OTP) &&
+	    !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
+		return -EPERM;
+
+	err = kstrtoul(buf, 10, &value);
+	if (err)
+		return err;
 
-static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3);
+	if (value > data->rdac_mask)
+		value = data->rdac_mask;
 
-static ssize_t show_tolerance3(struct device *dev,
-			       struct device_attribute *attr, char *buf)
-{
-	return sysfs_show_reg(dev, attr, buf,
-			      AD525X_I2C_EEPROM | AD525X_TOL_RDAC3);
+	mutex_lock(&data->update_lock);
+	dpot_write(data, reg, value);
+	if (reg & DPOT_ADDR_EEPROM)
+		msleep(26);	/* Sleep while the EEPROM updates */
+	else if (reg & DPOT_ADDR_OTP)
+		msleep(400);	/* Sleep while the OTP updates */
+	mutex_unlock(&data->update_lock);
+
+	return count;
 }
 
-static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL);
-
-static struct attribute *ad525x_attributes_wipers[4][4] = {
-	{
-		&dev_attr_rdac0.attr,
-		&dev_attr_eeprom0.attr,
-		&dev_attr_tolerance0.attr,
-		NULL
-	}, {
-		&dev_attr_rdac1.attr,
-		&dev_attr_eeprom1.attr,
-		&dev_attr_tolerance1.attr,
-		NULL
-	}, {
-		&dev_attr_rdac2.attr,
-		&dev_attr_eeprom2.attr,
-		&dev_attr_tolerance2.attr,
-		NULL
-	}, {
-		&dev_attr_rdac3.attr,
-		&dev_attr_eeprom3.attr,
-		&dev_attr_tolerance3.attr,
-		NULL
-	}
-};
+static ssize_t sysfs_do_cmd(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count, u32 reg)
+{
+	struct dpot_data *data = dev_get_drvdata(dev);
 
-static const struct attribute_group ad525x_group_wipers[] = {
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC0]},
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC1]},
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC2]},
-	{.attrs = ad525x_attributes_wipers[AD525X_RDAC3]},
-};
+	mutex_lock(&data->update_lock);
+	dpot_write(data, reg, 0);
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
 
 /* ------------------------------------------------------------------------- */
 
-static ssize_t set_inc_all(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL);
-}
+#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
+show_##_name(struct device *dev, \
+	     struct device_attribute *attr, char *buf) \
+{ \
+	return sysfs_show_reg(dev, attr, buf, _reg); \
+}
+
+#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
+set_##_name(struct device *dev, \
+	    struct device_attribute *attr, \
+	    const char *buf, size_t count) \
+{ \
+	return sysfs_set_reg(dev, attr, buf, count, _reg); \
+}
+
+#define DPOT_DEVICE_SHOW_SET(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+DPOT_DEVICE_SET(name, reg) \
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
+
+#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL);
+
+DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
+
+DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
+
+DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
+
+DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
+
+DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
+
+DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
+
+static const struct attribute *dpot_attrib_wipers[] = {
+	&dev_attr_rdac0.attr,
+	&dev_attr_rdac1.attr,
+	&dev_attr_rdac2.attr,
+	&dev_attr_rdac3.attr,
+	&dev_attr_rdac4.attr,
+	&dev_attr_rdac5.attr,
+	NULL
+};
 
-static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all);
+static const struct attribute *dpot_attrib_eeprom[] = {
+	&dev_attr_eeprom0.attr,
+	&dev_attr_eeprom1.attr,
+	&dev_attr_eeprom2.attr,
+	&dev_attr_eeprom3.attr,
+	&dev_attr_eeprom4.attr,
+	&dev_attr_eeprom5.attr,
+	NULL
+};
 
-static ssize_t set_dec_all(struct device *dev,
-			   struct device_attribute *attr,
-			   const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
-}
+static const struct attribute *dpot_attrib_otp[] = {
+	&dev_attr_otp0.attr,
+	&dev_attr_otp1.attr,
+	&dev_attr_otp2.attr,
+	&dev_attr_otp3.attr,
+	&dev_attr_otp4.attr,
+	&dev_attr_otp5.attr,
+	NULL
+};
 
-static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
+static const struct attribute *dpot_attrib_otp_en[] = {
+	&dev_attr_otp0en.attr,
+	&dev_attr_otp1en.attr,
+	&dev_attr_otp2en.attr,
+	&dev_attr_otp3en.attr,
+	&dev_attr_otp4en.attr,
+	&dev_attr_otp5en.attr,
+	NULL
+};
 
-static ssize_t set_inc_all_6db(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
-}
+static const struct attribute *dpot_attrib_tolerance[] = {
+	&dev_attr_tolerance0.attr,
+	&dev_attr_tolerance1.attr,
+	&dev_attr_tolerance2.attr,
+	&dev_attr_tolerance3.attr,
+	&dev_attr_tolerance4.attr,
+	&dev_attr_tolerance5.attr,
+	NULL
+};
 
-static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
+/* ------------------------------------------------------------------------- */
 
-static ssize_t set_dec_all_6db(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
-}
+#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
+set_##_name(struct device *dev, \
+	    struct device_attribute *attr, \
+	    const char *buf, size_t count) \
+{ \
+	return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
+} \
+static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);
 
-static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
+DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
+DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
+DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
+DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
 
 static struct attribute *ad525x_attributes_commands[] = {
 	&dev_attr_inc_all.attr,
@@ -409,74 +640,57 @@ static const struct attribute_group ad525x_group_commands = {
 	.attrs = ad525x_attributes_commands,
 };
 
-/* ------------------------------------------------------------------------- */
-
-/* i2c device functions */
+static int ad_dpot_add_files(struct device *dev,
+			     unsigned features, unsigned rdac)
+{
+	int err = sysfs_create_file(&dev->kobj,
+				    dpot_attrib_wipers[rdac]);
+	if (features & F_CMD_EEP)
+		err |= sysfs_create_file(&dev->kobj,
+					 dpot_attrib_eeprom[rdac]);
+	if (features & F_CMD_TOL)
+		err |= sysfs_create_file(&dev->kobj,
+					 dpot_attrib_tolerance[rdac]);
+	if (features & F_CMD_OTP) {
+		err |= sysfs_create_file(&dev->kobj,
+					 dpot_attrib_otp_en[rdac]);
+		err |= sysfs_create_file(&dev->kobj,
+					 dpot_attrib_otp[rdac]);
+	}
 
-/**
- * ad525x_read - return the value contained in the specified register
- * on the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to read
- *
- * If the tolerance register is specified, 2 bytes are returned.
- * Otherwise, 1 byte is returned.  A negative value indicates an error
- * occurred while reading the register.
- */
-static s32 ad525x_read(struct i2c_client *client, u8 reg)
-{
-	struct dpot_data *data = i2c_get_clientdata(client);
+	if (err)
+		dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
+			rdac);
 
-	if ((reg & AD525X_REG_TOL) || (data->max_pos > 256))
-		return i2c_smbus_read_word_data(client, (reg & 0xF8) |
-						((reg & 0x7) << 1));
-	else
-		return i2c_smbus_read_byte_data(client, reg);
+	return err;
 }
 
-/**
- * ad525x_write - store the given value in the specified register on
- * the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to write
- * @value: the byte to store in the register
- *
- * For certain instructions that do not require a data byte, "NULL"
- * should be specified for the "value" parameter.  These instructions
- * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM.
- *
- * A negative return value indicates an error occurred while reading
- * the register.
- */
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value)
-{
-	struct dpot_data *data = i2c_get_clientdata(client);
-
-	/* Only write the instruction byte for certain commands */
-	if (reg & AD525X_I2C_CMD)
-		return i2c_smbus_write_byte(client, reg);
-
-	if (data->max_pos > 256)
-		return i2c_smbus_write_word_data(client, (reg & 0xF8) |
-						 ((reg & 0x7) << 1), value);
-	else
-		/* All other registers require instruction + data bytes */
-		return i2c_smbus_write_byte_data(client, reg, value);
+static inline void ad_dpot_remove_files(struct device *dev,
+					unsigned features, unsigned rdac)
+{
+	sysfs_remove_file(&dev->kobj,
+			  dpot_attrib_wipers[rdac]);
+	if (features & F_CMD_EEP)
+		sysfs_remove_file(&dev->kobj,
+				  dpot_attrib_eeprom[rdac]);
+	if (features & F_CMD_TOL)
+		sysfs_remove_file(&dev->kobj,
+				  dpot_attrib_tolerance[rdac]);
+	if (features & F_CMD_OTP) {
+		sysfs_remove_file(&dev->kobj,
+				  dpot_attrib_otp_en[rdac]);
+		sysfs_remove_file(&dev->kobj,
+				  dpot_attrib_otp[rdac]);
+	}
 }
 
-static int ad525x_probe(struct i2c_client *client,
-			const struct i2c_device_id *id)
+int ad_dpot_probe(struct device *dev,
+		  struct ad_dpot_bus_data *bdata, unsigned long devid,
+		  const char *name)
 {
-	struct device *dev = &client->dev;
-	struct dpot_data *data;
-	int err = 0;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
-		dev_err(dev, "missing I2C functionality for this driver\n");
-		goto exit;
-	}
+	struct dpot_data *data;
+	int i, err = 0;
 
 	data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
 	if (!data) {
@@ -484,183 +698,73 @@ static int ad525x_probe(struct i2c_client *client,
 		goto exit;
 	}
 
-	i2c_set_clientdata(client, data);
+	dev_set_drvdata(dev, data);
 	mutex_init(&data->update_lock);
 
-	switch (id->driver_data) {
-	case AD5258_ID:
-		data->max_pos = AD5258_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-					 &ad525x_group_wipers[AD525X_RDAC0]);
-		break;
-	case AD5259_ID:
-		data->max_pos = AD5259_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-					 &ad525x_group_wipers[AD525X_RDAC0]);
-		break;
-	case AD5251_ID:
-		data->max_pos = AD5251_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-					 &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5252_ID:
-		data->max_pos = AD5252_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-					 &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5253_ID:
-		data->max_pos = AD5253_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-					 &ad525x_group_wipers[AD525X_RDAC0]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC2]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5254_ID:
-		data->max_pos = AD5254_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-					 &ad525x_group_wipers[AD525X_RDAC0]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC2]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC3]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5255_ID:
-		data->max_pos = AD5255_MAX_POSITION;
-		err = sysfs_create_group(&dev->kobj,
-					 &ad525x_group_wipers[AD525X_RDAC0]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC1]);
-		err |= sysfs_create_group(&dev->kobj,
-					  &ad525x_group_wipers[AD525X_RDAC2]);
-		err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	default:
-		err = -ENODEV;
-		goto exit_free;
-	}
+	data->bdata = *bdata;
+	data->devid = devid;
+
+	data->max_pos = 1 << DPOT_MAX_POS(devid);
+	data->rdac_mask = data->max_pos - 1;
+	data->feat = DPOT_FEAT(devid);
+	data->uid = DPOT_UID(devid);
+	data->wipers = DPOT_WIPERS(devid);
+
+	for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+		if (data->wipers & (1 << i)) {
+			err = ad_dpot_add_files(dev, data->feat, i);
+			if (err)
+				goto exit_remove_files;
+			/* power-up midscale */
+			if (data->feat & F_RDACS_WONLY)
+				data->rdac_cache[i] = data->max_pos / 2;
+		}
+
+	if (data->feat & F_CMD_INC)
+		err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
 
 	if (err) {
 		dev_err(dev, "failed to register sysfs hooks\n");
 		goto exit_free;
 	}
 
-	data->devid = id->driver_data;
-	data->rdac_mask = data->max_pos - 1;
-
 	dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
-		 id->name, data->max_pos);
+		 name, data->max_pos);
 
 	return 0;
 
+exit_remove_files:
+	for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+		if (data->wipers & (1 << i))
+			ad_dpot_remove_files(dev, data->feat, i);
+
 exit_free:
 	kfree(data);
-	i2c_set_clientdata(client, NULL);
+	dev_set_drvdata(dev, NULL);
 exit:
-	dev_err(dev, "failed to create client\n");
+	dev_err(dev, "failed to create client for %s ID 0x%lX\n",
+		name, devid);
 	return err;
 }
+EXPORT_SYMBOL(ad_dpot_probe);
 
-static int __devexit ad525x_remove(struct i2c_client *client)
+int ad_dpot_remove(struct device *dev)
 {
-	struct dpot_data *data = i2c_get_clientdata(client);
-	struct device *dev = &client->dev;
-
-	switch (data->devid) {
-	case AD5258_ID:
-	case AD5259_ID:
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC0]);
-		break;
-	case AD5251_ID:
-	case AD5252_ID:
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC1]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC3]);
-		sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
-		break;
-	case AD5253_ID:
-	case AD5254_ID:
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC0]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC1]);
-		sysfs_remove_group(&dev->kobj,
-				   &ad525x_group_wipers[AD525X_RDAC2]);
-		sysfs_remove_group(&dev->kobj,
&ad525x_group_wipers[AD525X_RDAC3]); - sysfs_remove_group(&dev->kobj, &ad525x_group_commands); - break; - case AD5255_ID: - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC2]); - sysfs_remove_group(&dev->kobj, &ad525x_group_commands); - break; - } + struct dpot_data *data = dev_get_drvdata(dev); + int i; + + for (i = DPOT_RDAC0; i < MAX_RDACS; i++) + if (data->wipers & (1 << i)) + ad_dpot_remove_files(dev, data->feat, i); - i2c_set_clientdata(client, NULL); kfree(data); return 0; } +EXPORT_SYMBOL(ad_dpot_remove); -static const struct i2c_device_id ad525x_idtable[] = { - {"ad5258", AD5258_ID}, - {"ad5259", AD5259_ID}, - {"ad5251", AD5251_ID}, - {"ad5252", AD5252_ID}, - {"ad5253", AD5253_ID}, - {"ad5254", AD5254_ID}, - {"ad5255", AD5255_ID}, - {} -}; - -MODULE_DEVICE_TABLE(i2c, ad525x_idtable); - -static struct i2c_driver ad525x_driver = { - .driver = { - .owner = THIS_MODULE, - .name = DRIVER_NAME, - }, - .id_table = ad525x_idtable, - .probe = ad525x_probe, - .remove = __devexit_p(ad525x_remove), -}; - -static int __init ad525x_init(void) -{ - return i2c_add_driver(&ad525x_driver); -} - -module_init(ad525x_init); - -static void __exit ad525x_exit(void) -{ - i2c_del_driver(&ad525x_driver); -} - -module_exit(ad525x_exit); MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " - "Michael Hennerich <hennerich@blackfin.uclinux.org>, "); -MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver"); + "Michael Hennerich <hennerich@blackfin.uclinux.org>"); +MODULE_DESCRIPTION("Digital potentiometer driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h new file mode 100644 index 00000000000..6bd1eba23bc --- /dev/null +++ b/drivers/misc/ad525x_dpot.h @@ -0,0 +1,215 @@ +/* + * Driver for the Analog Devices digital potentiometers + * + * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
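+ *
+ * Each supported part is described by a single DPOT_CONF() word,
+ * defined below: feature flags above bit 17, a wiper bitmap in bits
+ * 10..17, log2 of the number of wiper positions in bits 6..9 and a
+ * unique ID in bits 0..5.  For example, AD5258_ID packs
+ * F_RDACS_RW_TOL, BRDAC0, 6 and 0: a single-wiper part with
+ * 1 << 6 = 64 positions, EEPROM and tolerance readback, UID 0.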
+ */ + +#ifndef _AD_DPOT_H_ +#define _AD_DPOT_H_ + +#include <linux/types.h> + +#define DPOT_CONF(features, wipers, max_pos, uid) \ + (((features) << 18) | (((wipers) & 0xFF) << 10) | \ + ((max_pos & 0xF) << 6) | (uid & 0x3F)) + +#define DPOT_UID(conf) (conf & 0x3F) +#define DPOT_MAX_POS(conf) ((conf >> 6) & 0xF) +#define DPOT_WIPERS(conf) ((conf >> 10) & 0xFF) +#define DPOT_FEAT(conf) (conf >> 18) + +#define BRDAC0 (1 << 0) +#define BRDAC1 (1 << 1) +#define BRDAC2 (1 << 2) +#define BRDAC3 (1 << 3) +#define BRDAC4 (1 << 4) +#define BRDAC5 (1 << 5) +#define MAX_RDACS 6 + +#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */ +#define F_CMD_EEP (1 << 1) /* Features EEPROM */ +#define F_CMD_OTP (1 << 2) /* Features OTP */ +#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */ +#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */ +#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */ +#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */ +#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */ +#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */ +#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */ + +#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL) +#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP) +#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT) + +enum dpot_devid { + AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */ + AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1), + AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC1 | BRDAC3, 6, 2), + AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC1 | BRDAC3, 8, 3), + AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4), + AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5), + AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2, 9, 6), + AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 7), /* SPI */ + AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 8), + AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 9), + AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 10), + AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 11), + AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 5, 12), + AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13), + AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14), + AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5, + 8, 15), + AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 16), + AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0, 10, 17), + AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 18), + AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19), + AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0 | BRDAC1, 10, 20), + AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 21), + AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 22), + AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 
23), + AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 24), + AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP, + BRDAC0, 8, 25), + AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP, + BRDAC0, 10, 26), + AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), + AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 7, 28), + AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0, 8, 29), + AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 30), + AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2, 8, 31), + ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0 | BRDAC1, 10, 32), + AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33), + AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34), + AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35), + AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36), + AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37), + AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38), + AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39), + AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40), + AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41), + ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2, 9, 42), + AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43), + AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44), + AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45), + AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46), + AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47), + AD5270_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT, + BRDAC0, 10, 48), + AD5271_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT, + BRDAC0, 8, 49), + AD5272_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 10, 50), + AD5274_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 51), +}; + +#define DPOT_RDAC0 0 +#define DPOT_RDAC1 1 +#define DPOT_RDAC2 2 +#define DPOT_RDAC3 3 +#define DPOT_RDAC4 4 +#define DPOT_RDAC5 5 + +#define DPOT_RDAC_MASK 0x1F + +#define DPOT_REG_TOL 0x18 +#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0) +#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1) +#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2) +#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3) +#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4) +#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5) + +/* RDAC-to-EEPROM Interface Commands */ +#define DPOT_ADDR_RDAC (0x0 << 5) +#define DPOT_ADDR_EEPROM (0x1 << 5) +#define DPOT_ADDR_OTP (0x1 << 6) +#define DPOT_ADDR_CMD (0x1 << 7) +#define DPOT_ADDR_OTP_EN (0x1 << 9) + +#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3)) +#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3)) +#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3)) +#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3)) + +#define DPOT_SPI_RDAC 0xB0 +#define DPOT_SPI_EEPROM 0x30 +#define DPOT_SPI_READ_RDAC 0xA0 +#define DPOT_SPI_READ_EEPROM 0x90 +#define DPOT_SPI_DEC_ALL_6DB 0x50 +#define DPOT_SPI_INC_ALL_6DB 0xD0 +#define DPOT_SPI_DEC_ALL 0x70 +#define DPOT_SPI_INC_ALL 0xF0 + +/* AD5291/2/3 use special commands */ +#define DPOT_AD5291_RDAC 0x01 +#define DPOT_AD5291_READ_RDAC 0x02 +#define DPOT_AD5291_STORE_XTPM 0x03 +#define DPOT_AD5291_CTRLREG 0x06 +#define DPOT_AD5291_UNLOCK_CMD 0x03 + +/* AD5270/1/2/4 use special commands */ +#define DPOT_AD5270_1_2_4_RDAC 0x01 +#define DPOT_AD5270_1_2_4_READ_RDAC 0x02 +#define 
DPOT_AD5270_1_2_4_STORE_XTPM 0x03 +#define DPOT_AD5270_1_2_4_CTRLREG 0x07 +#define DPOT_AD5270_1_2_4_UNLOCK_CMD 0x03 + +#define DPOT_AD5282_RDAC_AB 0x80 + +#define DPOT_AD5273_FUSE 0x80 +#define DPOT_AD5170_2_3_FUSE 0x20 +#define DPOT_AD5170_2_3_OW 0x08 +#define DPOT_AD5172_3_A0 0x08 +#define DPOT_AD5170_2FUSE 0x80 + +struct dpot_data; + +struct ad_dpot_bus_ops { + int (*read_d8) (void *client); + int (*read_r8d8) (void *client, u8 reg); + int (*read_r8d16) (void *client, u8 reg); + int (*write_d8) (void *client, u8 val); + int (*write_r8d8) (void *client, u8 reg, u8 val); + int (*write_r8d16) (void *client, u8 reg, u16 val); +}; + +struct ad_dpot_bus_data { + void *client; + const struct ad_dpot_bus_ops *bops; +}; + +int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, + unsigned long devid, const char *name); +int ad_dpot_remove(struct device *dev); + +#endif diff --git a/drivers/misc/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig new file mode 100644 index 00000000000..7f01d8e9399 --- /dev/null +++ b/drivers/misc/altera-stapl/Kconfig @@ -0,0 +1,8 @@ +comment "Altera FPGA firmware download module" + +config ALTERA_STAPL + tristate "Altera FPGA firmware download module" + depends on I2C + default n + help + An Altera FPGA module. Say Y when you want to support this tool. diff --git a/drivers/misc/altera-stapl/Makefile b/drivers/misc/altera-stapl/Makefile new file mode 100644 index 00000000000..055f61ee781 --- /dev/null +++ b/drivers/misc/altera-stapl/Makefile @@ -0,0 +1,3 @@ +altera-stapl-objs = altera-lpt.o altera-jtag.o altera-comp.o altera.o + +obj-$(CONFIG_ALTERA_STAPL) += altera-stapl.o diff --git a/drivers/misc/altera-stapl/altera-comp.c b/drivers/misc/altera-stapl/altera-comp.c new file mode 100644 index 00000000000..49b103bedaa --- /dev/null +++ b/drivers/misc/altera-stapl/altera-comp.c @@ -0,0 +1,142 @@ +/* + * altera-comp.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/kernel.h> +#include "altera-exprt.h" + +#define SHORT_BITS 16 +#define CHAR_BITS 8 +#define DATA_BLOB_LENGTH 3 +#define MATCH_DATA_LENGTH 8192 +#define ALTERA_REQUEST_SIZE 1024 +#define ALTERA_BUFFER_SIZE (MATCH_DATA_LENGTH + ALTERA_REQUEST_SIZE) + +static u32 altera_bits_req(u32 n) +{ + u32 result = SHORT_BITS; + + if (n == 0) + result = 1; + else { + /* Look for the highest non-zero bit position */ + while ((n & (1 << (SHORT_BITS - 1))) == 0) { + n <<= 1; + --result; + } + } + + return result; +} + +static u32 altera_read_packed(u8 *buffer, u32 bits, u32 *bits_avail, + u32 *in_index) +{ + u32 result = 0; + u32 shift = 0; + u32 databyte = 0; + + while (bits > 0) { + databyte = buffer[*in_index]; + result |= (((databyte >> (CHAR_BITS - *bits_avail)) + & (0xff >> (CHAR_BITS - *bits_avail))) << shift); + + if (bits <= *bits_avail) { + result &= (0xffff >> (SHORT_BITS - (bits + shift))); + *bits_avail -= bits; + bits = 0; + } else { + ++(*in_index); + shift += *bits_avail; + bits -= *bits_avail; + *bits_avail = CHAR_BITS; + } + } + + return result; +} + +u32 altera_shrink(u8 *in, u32 in_length, u8 *out, u32 out_length, s32 version) +{ + u32 i, j, data_length = 0L; + u32 offset, length; + u32 match_data_length = MATCH_DATA_LENGTH; + u32 bits_avail = CHAR_BITS; + u32 in_index = 0L; + + if (version > 0) + --match_data_length; + + for (i = 0; i < out_length; ++i) + out[i] = 0; + + /* Read number of bytes in data. */ + for (i = 0; i < sizeof(in_length); ++i) { + data_length = data_length | ( + altera_read_packed(in, + CHAR_BITS, + &bits_avail, + &in_index) << (i * CHAR_BITS)); + } + + if (data_length > out_length) { + data_length = 0L; + return data_length; + } + + i = 0; + while (i < data_length) { + /* A 0 bit indicates literal data. */ + if (altera_read_packed(in, 1, &bits_avail, + &in_index) == 0) { + for (j = 0; j < DATA_BLOB_LENGTH; ++j) { + if (i < data_length) { + out[i] = (u8)altera_read_packed(in, + CHAR_BITS, + &bits_avail, + &in_index); + i++; + } + } + } else { + /* A 1 bit indicates offset/length to follow. */ + offset = altera_read_packed(in, altera_bits_req((s16) + (i > match_data_length ? + match_data_length : i)), + &bits_avail, + &in_index); + length = altera_read_packed(in, CHAR_BITS, + &bits_avail, + &in_index); + for (j = 0; j < length; ++j) { + if (i < data_length) { + out[i] = out[i - offset]; + i++; + } + } + } + } + + return data_length; +} diff --git a/drivers/misc/altera-stapl/altera-exprt.h b/drivers/misc/altera-stapl/altera-exprt.h new file mode 100644 index 00000000000..39c38d84a67 --- /dev/null +++ b/drivers/misc/altera-stapl/altera-exprt.h @@ -0,0 +1,33 @@ +/* + * altera-exprt.h + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef ALTERA_EXPRT_H +#define ALTERA_EXPRT_H + + +u32 altera_shrink(u8 *in, u32 in_length, u8 *out, u32 out_length, s32 version); +int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo); + +#endif /* ALTERA_EXPRT_H */ diff --git a/drivers/misc/altera-stapl/altera-jtag.c b/drivers/misc/altera-stapl/altera-jtag.c new file mode 100644 index 00000000000..f4bf2009697 --- /dev/null +++ b/drivers/misc/altera-stapl/altera-jtag.c @@ -0,0 +1,1021 @@ +/* + * altera-jtag.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/slab.h> +#include <misc/altera.h> +#include "altera-exprt.h" +#include "altera-jtag.h" + +#define alt_jtag_io(a, b, c)\ + astate->config->jtag_io(astate->config->dev, a, b, c); + +#define alt_malloc(a) kzalloc(a, GFP_KERNEL); + +/* + * This structure shows, for each JTAG state, which state is reached after + * a single TCK clock cycle with TMS high or TMS low, respectively. This + * describes all possible state transitions in the JTAG state machine. + */ +struct altera_jtag_machine { + enum altera_jtag_state tms_high; + enum altera_jtag_state tms_low; +}; + +static const struct altera_jtag_machine altera_transitions[] = { + /* RESET */ { RESET, IDLE }, + /* IDLE */ { DRSELECT, IDLE }, + /* DRSELECT */ { IRSELECT, DRCAPTURE }, + /* DRCAPTURE */ { DREXIT1, DRSHIFT }, + /* DRSHIFT */ { DREXIT1, DRSHIFT }, + /* DREXIT1 */ { DRUPDATE, DRPAUSE }, + /* DRPAUSE */ { DREXIT2, DRPAUSE }, + /* DREXIT2 */ { DRUPDATE, DRSHIFT }, + /* DRUPDATE */ { DRSELECT, IDLE }, + /* IRSELECT */ { RESET, IRCAPTURE }, + /* IRCAPTURE */ { IREXIT1, IRSHIFT }, + /* IRSHIFT */ { IREXIT1, IRSHIFT }, + /* IREXIT1 */ { IRUPDATE, IRPAUSE }, + /* IRPAUSE */ { IREXIT2, IRPAUSE }, + /* IREXIT2 */ { IRUPDATE, IRSHIFT }, + /* IRUPDATE */ { DRSELECT, IDLE } +}; + +/* + * This table contains the TMS value to be used to take the NEXT STEP on + * the path to the desired state. The array index is the current state, + * and the bit position is the desired endstate. To find out which state + * is used as the intermediate state, look up the TMS value in the + * altera_transitions[] table. 
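+ *
+ * For example, to get from IDLE (1) to DRSHIFT (4): bit 4 of
+ * altera_jtag_path_map[IDLE] = 0xFFFD is set, so TMS is driven high
+ * and the machine steps to DRSELECT; bit 4 of path_map[DRSELECT] =
+ * 0xFE01 is clear, so TMS low steps to DRCAPTURE; bit 4 of
+ * path_map[DRCAPTURE] = 0xFFE7 is also clear, so TMS low lands in
+ * DRSHIFT.  This is the same TMS sequence (1, 0, 0) that
+ * alt_jtag_drscan() below issues explicitly when starting from IDLE.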
+ */ +static const u16 altera_jtag_path_map[16] = { + /* RST RTI SDRS CDR SDR E1DR PDR E2DR */ + 0x0001, 0xFFFD, 0xFE01, 0xFFE7, 0xFFEF, 0xFF0F, 0xFFBF, 0xFFFF, + /* UDR SIRS CIR SIR E1IR PIR E2IR UIR */ + 0xFEFD, 0x0001, 0xF3FF, 0xF7FF, 0x87FF, 0xDFFF, 0xFFFF, 0x7FFD +}; + +/* Flag bits for alt_jtag_io() function */ +#define TMS_HIGH 1 +#define TMS_LOW 0 +#define TDI_HIGH 1 +#define TDI_LOW 0 +#define READ_TDO 1 +#define IGNORE_TDO 0 + +int altera_jinit(struct altera_state *astate) +{ + struct altera_jtag *js = &astate->js; + + /* initial JTAG state is unknown */ + js->jtag_state = ILLEGAL_JTAG_STATE; + + /* initialize to default state */ + js->drstop_state = IDLE; + js->irstop_state = IDLE; + js->dr_pre = 0; + js->dr_post = 0; + js->ir_pre = 0; + js->ir_post = 0; + js->dr_length = 0; + js->ir_length = 0; + + js->dr_pre_data = NULL; + js->dr_post_data = NULL; + js->ir_pre_data = NULL; + js->ir_post_data = NULL; + js->dr_buffer = NULL; + js->ir_buffer = NULL; + + return 0; +} + +int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state) +{ + js->drstop_state = state; + + return 0; +} + +int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state) +{ + js->irstop_state = state; + + return 0; +} + +int altera_set_dr_pre(struct altera_jtag *js, + u32 count, u32 start_index, + u8 *preamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > js->dr_pre) { + kfree(js->dr_pre_data); + js->dr_pre_data = (u8 *)alt_malloc((count + 7) >> 3); + if (js->dr_pre_data == NULL) + status = -ENOMEM; + else + js->dr_pre = count; + } else + js->dr_pre = count; + + if (status == 0) { + for (i = 0; i < count; ++i) { + j = i + start_index; + + if (preamble_data == NULL) + js->dr_pre_data[i >> 3] |= (1 << (i & 7)); + else { + if (preamble_data[j >> 3] & (1 << (j & 7))) + js->dr_pre_data[i >> 3] |= + (1 << (i & 7)); + else + js->dr_pre_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + } + + return status; +} + +int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index, + u8 *preamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > js->ir_pre) { + kfree(js->ir_pre_data); + js->ir_pre_data = (u8 *)alt_malloc((count + 7) >> 3); + if (js->ir_pre_data == NULL) + status = -ENOMEM; + else + js->ir_pre = count; + + } else + js->ir_pre = count; + + if (status == 0) { + for (i = 0; i < count; ++i) { + j = i + start_index; + if (preamble_data == NULL) + js->ir_pre_data[i >> 3] |= (1 << (i & 7)); + else { + if (preamble_data[j >> 3] & (1 << (j & 7))) + js->ir_pre_data[i >> 3] |= + (1 << (i & 7)); + else + js->ir_pre_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + } + + return status; +} + +int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > js->dr_post) { + kfree(js->dr_post_data); + js->dr_post_data = (u8 *)alt_malloc((count + 7) >> 3); + + if (js->dr_post_data == NULL) + status = -ENOMEM; + else + js->dr_post = count; + + } else + js->dr_post = count; + + if (status == 0) { + for (i = 0; i < count; ++i) { + j = i + start_index; + + if (postamble_data == NULL) + js->dr_post_data[i >> 3] |= (1 << (i & 7)); + else { + if (postamble_data[j >> 3] & (1 << (j & 7))) + js->dr_post_data[i >> 3] |= + (1 << (i & 7)); + else + js->dr_post_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + } + + return status; +} + +int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data) +{ + int status = 0; + u32 i; + u32 j; + + if (count > 
js->ir_post) { + kfree(js->ir_post_data); + js->ir_post_data = (u8 *)alt_malloc((count + 7) >> 3); + if (js->ir_post_data == NULL) + status = -ENOMEM; + else + js->ir_post = count; + + } else + js->ir_post = count; + + if (status != 0) + return status; + + for (i = 0; i < count; ++i) { + j = i + start_index; + + if (postamble_data == NULL) + js->ir_post_data[i >> 3] |= (1 << (i & 7)); + else { + if (postamble_data[j >> 3] & (1 << (j & 7))) + js->ir_post_data[i >> 3] |= (1 << (i & 7)); + else + js->ir_post_data[i >> 3] &= + ~(u32)(1 << (i & 7)); + + } + } + + return status; +} + +static void altera_jreset_idle(struct altera_state *astate) +{ + struct altera_jtag *js = &astate->js; + int i; + /* Go to Test Logic Reset (no matter what the starting state may be) */ + for (i = 0; i < 5; ++i) + alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO); + + /* Now step to Run Test / Idle */ + alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO); + js->jtag_state = IDLE; +} + +int altera_goto_jstate(struct altera_state *astate, + enum altera_jtag_state state) +{ + struct altera_jtag *js = &astate->js; + int tms; + int count = 0; + int status = 0; + + if (js->jtag_state == ILLEGAL_JTAG_STATE) + /* initialize JTAG chain to known state */ + altera_jreset_idle(astate); + + if (js->jtag_state == state) { + /* + * We are already in the desired state. + * If it is a stable state, loop here. + * Otherwise do nothing (no clock cycles). + */ + if ((state == IDLE) || (state == DRSHIFT) || + (state == DRPAUSE) || (state == IRSHIFT) || + (state == IRPAUSE)) { + alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO); + } else if (state == RESET) + alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO); + + } else { + while ((js->jtag_state != state) && (count < 9)) { + /* Get TMS value to take a step toward desired state */ + tms = (altera_jtag_path_map[js->jtag_state] & + (1 << state)) + ? TMS_HIGH : TMS_LOW; + + /* Take a step */ + alt_jtag_io(tms, TDI_LOW, IGNORE_TDO); + + if (tms) + js->jtag_state = + altera_transitions[js->jtag_state].tms_high; + else + js->jtag_state = + altera_transitions[js->jtag_state].tms_low; + + ++count; + } + } + + if (js->jtag_state != state) + status = -EREMOTEIO; + + return status; +} + +int altera_wait_cycles(struct altera_state *astate, + s32 cycles, + enum altera_jtag_state wait_state) +{ + struct altera_jtag *js = &astate->js; + int tms; + s32 count; + int status = 0; + + if (js->jtag_state != wait_state) + status = altera_goto_jstate(astate, wait_state); + + if (status == 0) { + /* + * Set TMS high to loop in RESET state + * Set TMS low to loop in any other stable state + */ + tms = (wait_state == RESET) ? TMS_HIGH : TMS_LOW; + + for (count = 0L; count < cycles; count++) + alt_jtag_io(tms, TDI_LOW, IGNORE_TDO); + + } + + return status; +} + +int altera_wait_msecs(struct altera_state *astate, + s32 microseconds, enum altera_jtag_state wait_state) +/* + * Causes JTAG hardware to sit in the specified stable + * state for the specified duration of real time. If + * no JTAG operations have been performed yet, then only + * a delay is performed. This permits the WAIT USECS + * statement to be used in VECTOR programs without causing + * any JTAG operations. + * Returns 0 for success, else appropriate error code. 
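+ *
+ * Note that, despite the "msecs" in the function name, the delay
+ * argument is in microseconds: once the desired stable state has
+ * been reached the implementation simply calls udelay().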
+ */ +{ + struct altera_jtag *js = &astate->js; + int status = 0; + + if ((js->jtag_state != ILLEGAL_JTAG_STATE) && + (js->jtag_state != wait_state)) + status = altera_goto_jstate(astate, wait_state); + + if (status == 0) + /* Wait for specified time interval */ + udelay(microseconds); + + return status; +} + +static void altera_concatenate_data(u8 *buffer, + u8 *preamble_data, + u32 preamble_count, + u8 *target_data, + u32 start_index, + u32 target_count, + u8 *postamble_data, + u32 postamble_count) +/* + * Copies preamble data, target data, and postamble data + * into one buffer for IR or DR scans. + */ +{ + u32 i, j, k; + + for (i = 0L; i < preamble_count; ++i) { + if (preamble_data[i >> 3L] & (1L << (i & 7L))) + buffer[i >> 3L] |= (1L << (i & 7L)); + else + buffer[i >> 3L] &= ~(u32)(1L << (i & 7L)); + + } + + j = start_index; + k = preamble_count + target_count; + for (; i < k; ++i, ++j) { + if (target_data[j >> 3L] & (1L << (j & 7L))) + buffer[i >> 3L] |= (1L << (i & 7L)); + else + buffer[i >> 3L] &= ~(u32)(1L << (i & 7L)); + + } + + j = 0L; + k = preamble_count + target_count + postamble_count; + for (; i < k; ++i, ++j) { + if (postamble_data[j >> 3L] & (1L << (j & 7L))) + buffer[i >> 3L] |= (1L << (i & 7L)); + else + buffer[i >> 3L] &= ~(u32)(1L << (i & 7L)); + + } +} + +static int alt_jtag_drscan(struct altera_state *astate, + int start_state, + int count, + u8 *tdi, + u8 *tdo) +{ + int i = 0; + int tdo_bit = 0; + int status = 1; + + /* First go to DRSHIFT state */ + switch (start_state) { + case 0: /* IDLE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(0, 0, 0); /* DRCAPTURE */ + alt_jtag_io(0, 0, 0); /* DRSHIFT */ + break; + + case 1: /* DRPAUSE */ + alt_jtag_io(1, 0, 0); /* DREXIT2 */ + alt_jtag_io(1, 0, 0); /* DRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(0, 0, 0); /* DRCAPTURE */ + alt_jtag_io(0, 0, 0); /* DRSHIFT */ + break; + + case 2: /* IRPAUSE */ + alt_jtag_io(1, 0, 0); /* IREXIT2 */ + alt_jtag_io(1, 0, 0); /* IRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(0, 0, 0); /* DRCAPTURE */ + alt_jtag_io(0, 0, 0); /* DRSHIFT */ + break; + + default: + status = 0; + } + + if (status) { + /* loop in the SHIFT-DR state */ + for (i = 0; i < count; i++) { + tdo_bit = alt_jtag_io( + (i == count - 1), + tdi[i >> 3] & (1 << (i & 7)), + (tdo != NULL)); + + if (tdo != NULL) { + if (tdo_bit) + tdo[i >> 3] |= (1 << (i & 7)); + else + tdo[i >> 3] &= ~(u32)(1 << (i & 7)); + + } + } + + alt_jtag_io(0, 0, 0); /* DRPAUSE */ + } + + return status; +} + +static int alt_jtag_irscan(struct altera_state *astate, + int start_state, + int count, + u8 *tdi, + u8 *tdo) +{ + int i = 0; + int tdo_bit = 0; + int status = 1; + + /* First go to IRSHIFT state */ + switch (start_state) { + case 0: /* IDLE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(1, 0, 0); /* IRSELECT */ + alt_jtag_io(0, 0, 0); /* IRCAPTURE */ + alt_jtag_io(0, 0, 0); /* IRSHIFT */ + break; + + case 1: /* DRPAUSE */ + alt_jtag_io(1, 0, 0); /* DREXIT2 */ + alt_jtag_io(1, 0, 0); /* DRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(1, 0, 0); /* IRSELECT */ + alt_jtag_io(0, 0, 0); /* IRCAPTURE */ + alt_jtag_io(0, 0, 0); /* IRSHIFT */ + break; + + case 2: /* IRPAUSE */ + alt_jtag_io(1, 0, 0); /* IREXIT2 */ + alt_jtag_io(1, 0, 0); /* IRUPDATE */ + alt_jtag_io(1, 0, 0); /* DRSELECT */ + alt_jtag_io(1, 0, 0); /* IRSELECT */ + alt_jtag_io(0, 0, 0); /* IRCAPTURE */ + alt_jtag_io(0, 0, 0); /* IRSHIFT */ + break; + + default: + status = 0; + } + + if (status) { + /* loop in the 
SHIFT-IR state */ + for (i = 0; i < count; i++) { + tdo_bit = alt_jtag_io( + (i == count - 1), + tdi[i >> 3] & (1 << (i & 7)), + (tdo != NULL)); + if (tdo != NULL) { + if (tdo_bit) + tdo[i >> 3] |= (1 << (i & 7)); + else + tdo[i >> 3] &= ~(u32)(1 << (i & 7)); + + } + } + + alt_jtag_io(0, 0, 0); /* IRPAUSE */ + } + + return status; +} + +static void altera_extract_target_data(u8 *buffer, + u8 *target_data, + u32 start_index, + u32 preamble_count, + u32 target_count) +/* + * Copies target data from scan buffer, filtering out + * preamble and postamble data. + */ +{ + u32 i; + u32 j; + u32 k; + + j = preamble_count; + k = start_index + target_count; + for (i = start_index; i < k; ++i, ++j) { + if (buffer[j >> 3] & (1 << (j & 7))) + target_data[i >> 3] |= (1 << (i & 7)); + else + target_data[i >> 3] &= ~(u32)(1 << (i & 7)); + + } +} + +int altera_irscan(struct altera_state *astate, + u32 count, + u8 *tdi_data, + u32 start_index) +/* Shifts data into instruction register */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->ir_pre + count + js->ir_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state = IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->ir_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->ir_buffer); + js->ir_buffer = (u8 *)alt_malloc(alloc_chars); + if (js->ir_buffer == NULL) + status = -ENOMEM; + else + js->ir_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, IR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->ir_buffer, + js->ir_pre_data, + js->ir_pre, + tdi_data, + start_index, + count, + js->ir_post_data, + js->ir_post); + /* Do the IRSCAN */ + alt_jtag_irscan(astate, + start_code, + shift_count, + js->ir_buffer, + NULL); + + /* alt_jtag_irscan() always ends in IRPAUSE state */ + js->jtag_state = IRPAUSE; + } + + if (status == 0) + if (js->irstop_state != IRPAUSE) + status = altera_goto_jstate(astate, js->irstop_state); + + + return status; +} + +int altera_swap_ir(struct altera_state *astate, + u32 count, + u8 *in_data, + u32 in_index, + u8 *out_data, + u32 out_index) +/* Shifts data into instruction register, capturing output data */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->ir_pre + count + js->ir_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state 
= IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->ir_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->ir_buffer); + js->ir_buffer = (u8 *)alt_malloc(alloc_chars); + if (js->ir_buffer == NULL) + status = -ENOMEM; + else + js->ir_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, IR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->ir_buffer, + js->ir_pre_data, + js->ir_pre, + in_data, + in_index, + count, + js->ir_post_data, + js->ir_post); + + /* Do the IRSCAN */ + alt_jtag_irscan(astate, + start_code, + shift_count, + js->ir_buffer, + js->ir_buffer); + + /* alt_jtag_irscan() always ends in IRPAUSE state */ + js->jtag_state = IRPAUSE; + } + + if (status == 0) + if (js->irstop_state != IRPAUSE) + status = altera_goto_jstate(astate, js->irstop_state); + + + if (status == 0) + /* Now extract the returned data from the buffer */ + altera_extract_target_data(js->ir_buffer, + out_data, out_index, + js->ir_pre, count); + + return status; +} + +int altera_drscan(struct altera_state *astate, + u32 count, + u8 *tdi_data, + u32 start_index) +/* Shifts data into data register (ignoring output data) */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->dr_pre + count + js->dr_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state = IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->dr_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->dr_buffer); + js->dr_buffer = (u8 *)alt_malloc(alloc_chars); + if (js->dr_buffer == NULL) + status = -ENOMEM; + else + js->dr_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, DR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->dr_buffer, + js->dr_pre_data, + js->dr_pre, + tdi_data, + start_index, + count, + js->dr_post_data, + js->dr_post); + /* Do the DRSCAN */ + alt_jtag_drscan(astate, start_code, shift_count, + js->dr_buffer, NULL); + /* alt_jtag_drscan() always ends in DRPAUSE state */ + js->jtag_state = DRPAUSE; + } + + if (status == 0) + if (js->drstop_state != DRPAUSE) + status = altera_goto_jstate(astate, js->drstop_state); + + return status; +} + +int altera_swap_dr(struct altera_state *astate, u32 count, + u8 *in_data, u32 in_index, + u8 *out_data, u32 out_index) +/* Shifts data into data register, capturing output data */ +{ + struct altera_jtag *js = &astate->js; + int start_code = 0; + u32 alloc_chars = 0; + u32 shift_count = js->dr_pre + count + js->dr_post; + int status = 0; + enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE; + + switch (js->jtag_state) { + case ILLEGAL_JTAG_STATE: + case RESET: + case IDLE: + start_code = 
0; + start_state = IDLE; + break; + + case DRSELECT: + case DRCAPTURE: + case DRSHIFT: + case DREXIT1: + case DRPAUSE: + case DREXIT2: + case DRUPDATE: + start_code = 1; + start_state = DRPAUSE; + break; + + case IRSELECT: + case IRCAPTURE: + case IRSHIFT: + case IREXIT1: + case IRPAUSE: + case IREXIT2: + case IRUPDATE: + start_code = 2; + start_state = IRPAUSE; + break; + + default: + status = -EREMOTEIO; + break; + } + + if (status == 0) + if (js->jtag_state != start_state) + status = altera_goto_jstate(astate, start_state); + + if (status == 0) { + if (shift_count > js->dr_length) { + alloc_chars = (shift_count + 7) >> 3; + kfree(js->dr_buffer); + js->dr_buffer = (u8 *)alt_malloc(alloc_chars); + + if (js->dr_buffer == NULL) + status = -ENOMEM; + else + js->dr_length = alloc_chars * 8; + + } + } + + if (status == 0) { + /* + * Copy preamble data, DR data, + * and postamble data into a buffer + */ + altera_concatenate_data(js->dr_buffer, + js->dr_pre_data, + js->dr_pre, + in_data, + in_index, + count, + js->dr_post_data, + js->dr_post); + + /* Do the DRSCAN */ + alt_jtag_drscan(astate, + start_code, + shift_count, + js->dr_buffer, + js->dr_buffer); + + /* alt_jtag_drscan() always ends in DRPAUSE state */ + js->jtag_state = DRPAUSE; + } + + if (status == 0) + if (js->drstop_state != DRPAUSE) + status = altera_goto_jstate(astate, js->drstop_state); + + if (status == 0) + /* Now extract the returned data from the buffer */ + altera_extract_target_data(js->dr_buffer, + out_data, + out_index, + js->dr_pre, + count); + + return status; +} + +void altera_free_buffers(struct altera_state *astate) +{ + struct altera_jtag *js = &astate->js; + /* If the JTAG interface was used, reset it to TLR */ + if (js->jtag_state != ILLEGAL_JTAG_STATE) + altera_jreset_idle(astate); + + kfree(js->dr_pre_data); + js->dr_pre_data = NULL; + + kfree(js->dr_post_data); + js->dr_post_data = NULL; + + kfree(js->dr_buffer); + js->dr_buffer = NULL; + + kfree(js->ir_pre_data); + js->ir_pre_data = NULL; + + kfree(js->ir_post_data); + js->ir_post_data = NULL; + + kfree(js->ir_buffer); + js->ir_buffer = NULL; +} diff --git a/drivers/misc/altera-stapl/altera-jtag.h b/drivers/misc/altera-stapl/altera-jtag.h new file mode 100644 index 00000000000..2f97e36a2fb --- /dev/null +++ b/drivers/misc/altera-stapl/altera-jtag.h @@ -0,0 +1,113 @@ +/* + * altera-jtag.h + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef ALTERA_JTAG_H +#define ALTERA_JTAG_H + +/* Function Prototypes */ +enum altera_jtag_state { + ILLEGAL_JTAG_STATE = -1, + RESET = 0, + IDLE = 1, + DRSELECT = 2, + DRCAPTURE = 3, + DRSHIFT = 4, + DREXIT1 = 5, + DRPAUSE = 6, + DREXIT2 = 7, + DRUPDATE = 8, + IRSELECT = 9, + IRCAPTURE = 10, + IRSHIFT = 11, + IREXIT1 = 12, + IRPAUSE = 13, + IREXIT2 = 14, + IRUPDATE = 15 + +}; + +struct altera_jtag { + /* Global variable to store the current JTAG state */ + enum altera_jtag_state jtag_state; + + /* Store current stop-state for DR and IR scan commands */ + enum altera_jtag_state drstop_state; + enum altera_jtag_state irstop_state; + + /* Store current padding values */ + u32 dr_pre; + u32 dr_post; + u32 ir_pre; + u32 ir_post; + u32 dr_length; + u32 ir_length; + u8 *dr_pre_data; + u8 *dr_post_data; + u8 *ir_pre_data; + u8 *ir_post_data; + u8 *dr_buffer; + u8 *ir_buffer; +}; + +#define ALTERA_STACK_SIZE 128 +#define ALTERA_MESSAGE_LENGTH 1024 + +struct altera_state { + struct altera_config *config; + struct altera_jtag js; + char msg_buff[ALTERA_MESSAGE_LENGTH + 1]; + long stack[ALTERA_STACK_SIZE]; +}; + +int altera_jinit(struct altera_state *astate); +int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state); +int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state); +int altera_set_dr_pre(struct altera_jtag *js, u32 count, u32 start_index, + u8 *preamble_data); +int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index, + u8 *preamble_data); +int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data); +int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index, + u8 *postamble_data); +int altera_goto_jstate(struct altera_state *astate, + enum altera_jtag_state state); +int altera_wait_cycles(struct altera_state *astate, s32 cycles, + enum altera_jtag_state wait_state); +int altera_wait_msecs(struct altera_state *astate, s32 microseconds, + enum altera_jtag_state wait_state); +int altera_irscan(struct altera_state *astate, u32 count, + u8 *tdi_data, u32 start_index); +int altera_swap_ir(struct altera_state *astate, + u32 count, u8 *in_data, + u32 in_index, u8 *out_data, + u32 out_index); +int altera_drscan(struct altera_state *astate, u32 count, + u8 *tdi_data, u32 start_index); +int altera_swap_dr(struct altera_state *astate, u32 count, + u8 *in_data, u32 in_index, + u8 *out_data, u32 out_index); +void altera_free_buffers(struct altera_state *astate); +#endif /* ALTERA_JTAG_H */ diff --git a/drivers/misc/altera-stapl/altera-lpt.c b/drivers/misc/altera-stapl/altera-lpt.c new file mode 100644 index 00000000000..91456a03612 --- /dev/null +++ b/drivers/misc/altera-stapl/altera-lpt.c @@ -0,0 +1,70 @@ +/* + * altera-lpt.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010 NetUP Inc. + * Copyright (C) 2010 Abylay Ospan <aospan@netup.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/io.h> +#include <linux/kernel.h> +#include "altera-exprt.h" + +static int lpt_hardware_initialized; + +static void byteblaster_write(int port, int data) +{ + outb((u8)data, (u16)(port + 0x378)); +}; + +static int byteblaster_read(int port) +{ + int data = 0; + data = inb((u16)(port + 0x378)); + return data & 0xff; +}; + +int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo) +{ + int data = 0; + int tdo = 0; + int initial_lpt_ctrl = 0; + + if (!lpt_hardware_initialized) { + initial_lpt_ctrl = byteblaster_read(2); + byteblaster_write(2, (initial_lpt_ctrl | 0x02) & 0xdf); + lpt_hardware_initialized = 1; + } + + data = ((tdi ? 0x40 : 0) | (tms ? 0x02 : 0)); + + byteblaster_write(0, data); + + if (read_tdo) { + tdo = byteblaster_read(1); + tdo = ((tdo & 0x80) ? 0 : 1); + } + + byteblaster_write(0, data | 0x01); + + byteblaster_write(0, data); + + return tdo; +} diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c new file mode 100644 index 00000000000..24272e022be --- /dev/null +++ b/drivers/misc/altera-stapl/altera.c @@ -0,0 +1,2537 @@ +/* + * altera.c + * + * altera FPGA driver + * + * Copyright (C) Altera Corporation 1998-2001 + * Copyright (C) 2010,2011 NetUP Inc. + * Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <asm/unaligned.h> +#include <linux/ctype.h> +#include <linux/string.h> +#include <linux/firmware.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <misc/altera.h> +#include "altera-exprt.h" +#include "altera-jtag.h" + +static int debug = 1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "enable debugging information"); + +MODULE_DESCRIPTION("altera FPGA kernel module"); +MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>"); +MODULE_LICENSE("GPL"); + +#define dprintk(args...) 
\ + if (debug) { \ + printk(KERN_DEBUG args); \ + } + +enum altera_fpga_opcode { + OP_NOP = 0, + OP_DUP, + OP_SWP, + OP_ADD, + OP_SUB, + OP_MULT, + OP_DIV, + OP_MOD, + OP_SHL, + OP_SHR, + OP_NOT, + OP_AND, + OP_OR, + OP_XOR, + OP_INV, + OP_GT, + OP_LT, + OP_RET, + OP_CMPS, + OP_PINT, + OP_PRNT, + OP_DSS, + OP_DSSC, + OP_ISS, + OP_ISSC, + OP_DPR = 0x1c, + OP_DPRL, + OP_DPO, + OP_DPOL, + OP_IPR, + OP_IPRL, + OP_IPO, + OP_IPOL, + OP_PCHR, + OP_EXIT, + OP_EQU, + OP_POPT, + OP_ABS = 0x2c, + OP_BCH0, + OP_PSH0 = 0x2f, + OP_PSHL = 0x40, + OP_PSHV, + OP_JMP, + OP_CALL, + OP_NEXT, + OP_PSTR, + OP_SINT = 0x47, + OP_ST, + OP_ISTP, + OP_DSTP, + OP_SWPN, + OP_DUPN, + OP_POPV, + OP_POPE, + OP_POPA, + OP_JMPZ, + OP_DS, + OP_IS, + OP_DPRA, + OP_DPOA, + OP_IPRA, + OP_IPOA, + OP_EXPT, + OP_PSHE, + OP_PSHA, + OP_DYNA, + OP_EXPV = 0x5c, + OP_COPY = 0x80, + OP_REVA, + OP_DSC, + OP_ISC, + OP_WAIT, + OP_VS, + OP_CMPA = 0xc0, + OP_VSC, +}; + +struct altera_procinfo { + char *name; + u8 attrs; + struct altera_procinfo *next; +}; + +/* This function checks if enough parameters are available on the stack. */ +static int altera_check_stack(int stack_ptr, int count, int *status) +{ + if (stack_ptr < count) { + *status = -EOVERFLOW; + return 0; + } + + return 1; +} + +static void altera_export_int(char *key, s32 value) +{ + dprintk("Export: key = \"%s\", value = %d\n", key, value); +} + +#define HEX_LINE_CHARS 72 +#define HEX_LINE_BITS (HEX_LINE_CHARS * 4) + +static void altera_export_bool_array(char *key, u8 *data, s32 count) +{ + char string[HEX_LINE_CHARS + 1]; + s32 i, offset; + u32 size, line, lines, linebits, value, j, k; + + if (count > HEX_LINE_BITS) { + dprintk("Export: key = \"%s\", %d bits, value = HEX\n", + key, count); + lines = (count + (HEX_LINE_BITS - 1)) / HEX_LINE_BITS; + + for (line = 0; line < lines; ++line) { + if (line < (lines - 1)) { + linebits = HEX_LINE_BITS; + size = HEX_LINE_CHARS; + offset = count - ((line + 1) * HEX_LINE_BITS); + } else { + linebits = + count - ((lines - 1) * HEX_LINE_BITS); + size = (linebits + 3) / 4; + offset = 0L; + } + + string[size] = '\0'; + j = size - 1; + value = 0; + + for (k = 0; k < linebits; ++k) { + i = k + offset; + if (data[i >> 3] & (1 << (i & 7))) + value |= (1 << (i & 3)); + if ((i & 3) == 3) { + sprintf(&string[j], "%1x", value); + value = 0; + --j; + } + } + if ((k & 3) > 0) + sprintf(&string[j], "%1x", value); + + dprintk("%s\n", string); + } + + } else { + size = (count + 3) / 4; + string[size] = '\0'; + j = size - 1; + value = 0; + + for (i = 0; i < count; ++i) { + if (data[i >> 3] & (1 << (i & 7))) + value |= (1 << (i & 3)); + if ((i & 3) == 3) { + sprintf(&string[j], "%1x", value); + value = 0; + --j; + } + } + if ((i & 3) > 0) + sprintf(&string[j], "%1x", value); + + dprintk("Export: key = \"%s\", %d bits, value = HEX %s\n", + key, count, string); + } +} + +static int altera_execute(struct altera_state *astate, + u8 *p, + s32 program_size, + s32 *error_address, + int *exit_code, + int *format_version) +{ + struct altera_config *aconf = astate->config; + char *msg_buff = astate->msg_buff; + long *stack = astate->stack; + int status = 0; + u32 first_word = 0L; + u32 action_table = 0L; + u32 proc_table = 0L; + u32 str_table = 0L; + u32 sym_table = 0L; + u32 data_sect = 0L; + u32 code_sect = 0L; + u32 debug_sect = 0L; + u32 action_count = 0L; + u32 proc_count = 0L; + u32 sym_count = 0L; + long *vars = NULL; + s32 *var_size = NULL; + char *attrs = NULL; + u8 *proc_attributes = NULL; + u32 pc; + u32 opcode_address; + u32 args[3]; + u32 opcode; + u32 
name_id; + u8 charbuf[4]; + long long_tmp; + u32 variable_id; + u8 *charptr_tmp; + u8 *charptr_tmp2; + long *longptr_tmp; + int version = 0; + int delta = 0; + int stack_ptr = 0; + u32 arg_count; + int done = 0; + int bad_opcode = 0; + u32 count; + u32 index; + u32 index2; + s32 long_count; + s32 long_idx; + s32 long_idx2; + u32 i; + u32 j; + u32 uncomp_size; + u32 offset; + u32 value; + int current_proc = 0; + int reverse; + + char *name; + + dprintk("%s\n", __func__); + + /* Read header information */ + if (program_size > 52L) { + first_word = get_unaligned_be32(&p[0]); + version = (first_word & 1L); + *format_version = version + 1; + delta = version * 8; + + action_table = get_unaligned_be32(&p[4]); + proc_table = get_unaligned_be32(&p[8]); + str_table = get_unaligned_be32(&p[4 + delta]); + sym_table = get_unaligned_be32(&p[16 + delta]); + data_sect = get_unaligned_be32(&p[20 + delta]); + code_sect = get_unaligned_be32(&p[24 + delta]); + debug_sect = get_unaligned_be32(&p[28 + delta]); + action_count = get_unaligned_be32(&p[40 + delta]); + proc_count = get_unaligned_be32(&p[44 + delta]); + sym_count = get_unaligned_be32(&p[48 + (2 * delta)]); + } + + if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) { + done = 1; + status = -EIO; + goto exit_done; + } + + if (sym_count <= 0) + goto exit_done; + + vars = kzalloc(sym_count * sizeof(long), GFP_KERNEL); + + if (vars == NULL) + status = -ENOMEM; + + if (status == 0) { + var_size = kzalloc(sym_count * sizeof(s32), GFP_KERNEL); + + if (var_size == NULL) + status = -ENOMEM; + } + + if (status == 0) { + attrs = kzalloc(sym_count, GFP_KERNEL); + + if (attrs == NULL) + status = -ENOMEM; + } + + if ((status == 0) && (version > 0)) { + proc_attributes = kzalloc(proc_count, GFP_KERNEL); + + if (proc_attributes == NULL) + status = -ENOMEM; + } + + if (status != 0) + goto exit_done; + + delta = version * 2; + + for (i = 0; i < sym_count; ++i) { + offset = (sym_table + ((11 + delta) * i)); + + value = get_unaligned_be32(&p[offset + 3 + delta]); + + attrs[i] = p[offset]; + + /* + * use bit 7 of attribute byte to indicate that + * this buffer was dynamically allocated + * and should be freed later + */ + attrs[i] &= 0x7f; + + var_size[i] = get_unaligned_be32(&p[offset + 7 + delta]); + + /* + * Attribute bits: + * bit 0: 0 = read-only, 1 = read-write + * bit 1: 0 = not compressed, 1 = compressed + * bit 2: 0 = not initialized, 1 = initialized + * bit 3: 0 = scalar, 1 = array + * bit 4: 0 = Boolean, 1 = integer + * bit 5: 0 = declared variable, + * 1 = compiler created temporary variable + */ + + if ((attrs[i] & 0x0c) == 0x04) + /* initialized scalar variable */ + vars[i] = value; + else if ((attrs[i] & 0x1e) == 0x0e) { + /* initialized compressed Boolean array */ + uncomp_size = get_unaligned_le32(&p[data_sect + value]); + + /* allocate a buffer for the uncompressed data */ + vars[i] = (long)kzalloc(uncomp_size, GFP_KERNEL); + if (vars[i] == 0L) + status = -ENOMEM; + else { + /* set flag so buffer will be freed later */ + attrs[i] |= 0x80; + + /* uncompress the data */ + if (altera_shrink(&p[data_sect + value], + var_size[i], + (u8 *)vars[i], + uncomp_size, + version) != uncomp_size) + /* decompression failed */ + status = -EIO; + else + var_size[i] = uncomp_size * 8L; + + } + } else if ((attrs[i] & 0x1e) == 0x0c) { + /* initialized Boolean array */ + vars[i] = value + data_sect + (long)p; + } else if ((attrs[i] & 0x1c) == 0x1c) { + /* initialized integer array */ + vars[i] = value + data_sect; + } else if ((attrs[i] & 0x0c) == 0x08) { + /* 
uninitialized array */ + + /* flag attrs so that memory is freed */ + attrs[i] |= 0x80; + + if (var_size[i] > 0) { + u32 size; + + if (attrs[i] & 0x10) + /* integer array */ + size = (var_size[i] * sizeof(s32)); + else + /* Boolean array */ + size = ((var_size[i] + 7L) / 8L); + + vars[i] = (long)kzalloc(size, GFP_KERNEL); + + if (vars[i] == 0) { + status = -ENOMEM; + } else { + /* zero out memory */ + for (j = 0; j < size; ++j) + ((u8 *)(vars[i]))[j] = 0; + + } + } else + vars[i] = 0; + + } else + vars[i] = 0; + + } + +exit_done: + if (status != 0) + done = 1; + + altera_jinit(astate); + + pc = code_sect; + msg_buff[0] = '\0'; + + /* + * For JBC version 2, we will execute the procedures corresponding to + * the selected ACTION + */ + if (version > 0) { + if (aconf->action == NULL) { + status = -EINVAL; + done = 1; + } else { + int action_found = 0; + for (i = 0; (i < action_count) && !action_found; ++i) { + name_id = get_unaligned_be32(&p[action_table + + (12 * i)]); + + name = &p[str_table + name_id]; + + if (strnicmp(aconf->action, name, strlen(name)) == 0) { + action_found = 1; + current_proc = + get_unaligned_be32(&p[action_table + + (12 * i) + 8]); + } + } + + if (!action_found) { + status = -EINVAL; + done = 1; + } + } + + if (status == 0) { + int first_time = 1; + i = current_proc; + while ((i != 0) || first_time) { + first_time = 0; + /* check procedure attribute byte */ + proc_attributes[i] = + (p[proc_table + + (13 * i) + 8] & + 0x03); + + /* + * BIT0 - OPTIONAL + * BIT1 - RECOMMENDED + * BIT6 - FORCED OFF + * BIT7 - FORCED ON + */ + + i = get_unaligned_be32(&p[proc_table + + (13 * i) + 4]); + } + + /* + * Set current_proc to the first procedure + * to be executed + */ + i = current_proc; + while ((i != 0) && + ((proc_attributes[i] == 1) || + ((proc_attributes[i] & 0xc0) == 0x40))) { + i = get_unaligned_be32(&p[proc_table + + (13 * i) + 4]); + } + + if ((i != 0) || ((i == 0) && (current_proc == 0) && + ((proc_attributes[0] != 1) && + ((proc_attributes[0] & 0xc0) != 0x40)))) { + current_proc = i; + pc = code_sect + + get_unaligned_be32(&p[proc_table + + (13 * i) + 9]); + if ((pc < code_sect) || (pc >= debug_sect)) + status = -ERANGE; + } else + /* there are no procedures to execute! 
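+ * (every procedure on this action's list was skipped as
+ * OPTIONAL or FORCED OFF)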
*/ + done = 1; + + } + } + + msg_buff[0] = '\0'; + + while (!done) { + opcode = (p[pc] & 0xff); + opcode_address = pc; + ++pc; + + if (debug > 1) + printk("opcode: %02x\n", opcode); + + arg_count = (opcode >> 6) & 3; + for (i = 0; i < arg_count; ++i) { + args[i] = get_unaligned_be32(&p[pc]); + pc += 4; + } + + switch (opcode) { + case OP_NOP: + break; + case OP_DUP: + if (altera_check_stack(stack_ptr, 1, &status)) { + stack[stack_ptr] = stack[stack_ptr - 1]; + ++stack_ptr; + } + break; + case OP_SWP: + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + break; + case OP_ADD: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] += stack[stack_ptr]; + } + break; + case OP_SUB: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] -= stack[stack_ptr]; + } + break; + case OP_MULT: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] *= stack[stack_ptr]; + } + break; + case OP_DIV: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] /= stack[stack_ptr]; + } + break; + case OP_MOD: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] %= stack[stack_ptr]; + } + break; + case OP_SHL: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] <<= stack[stack_ptr]; + } + break; + case OP_SHR: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] >>= stack[stack_ptr]; + } + break; + case OP_NOT: + if (altera_check_stack(stack_ptr, 1, &status)) + stack[stack_ptr - 1] ^= (-1L); + + break; + case OP_AND: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] &= stack[stack_ptr]; + } + break; + case OP_OR: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] |= stack[stack_ptr]; + } + break; + case OP_XOR: + if (altera_check_stack(stack_ptr, 2, &status)) { + --stack_ptr; + stack[stack_ptr - 1] ^= stack[stack_ptr]; + } + break; + case OP_INV: + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + stack[stack_ptr - 1] = stack[stack_ptr - 1] ? 0L : 1L; + break; + case OP_GT: + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + --stack_ptr; + stack[stack_ptr - 1] = + (stack[stack_ptr - 1] > stack[stack_ptr]) ? + 1L : 0L; + + break; + case OP_LT: + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + --stack_ptr; + stack[stack_ptr - 1] = + (stack[stack_ptr - 1] < stack[stack_ptr]) ? + 1L : 0L; + + break; + case OP_RET: + if ((version > 0) && (stack_ptr == 0)) { + /* + * We completed one of the main procedures + * of an ACTION. + * Find the next procedure + * to be executed and jump to it. + * If there are no more procedures, then EXIT. + */ + i = get_unaligned_be32(&p[proc_table + + (13 * current_proc) + 4]); + while ((i != 0) && + ((proc_attributes[i] == 1) || + ((proc_attributes[i] & 0xc0) == 0x40))) + i = get_unaligned_be32(&p[proc_table + + (13 * i) + 4]); + + if (i == 0) { + /* no procedures to execute! 
*/ + done = 1; + *exit_code = 0; /* success */ + } else { + current_proc = i; + pc = code_sect + get_unaligned_be32( + &p[proc_table + + (13 * i) + 9]); + if ((pc < code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + } + + } else + if (altera_check_stack(stack_ptr, 1, &status)) { + pc = stack[--stack_ptr] + code_sect; + if ((pc <= code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + + } + + break; + case OP_CMPS: + /* + * Array short compare + * ...stack 0 is source 1 value + * ...stack 1 is source 2 value + * ...stack 2 is mask value + * ...stack 3 is count + */ + if (altera_check_stack(stack_ptr, 4, &status)) { + s32 a = stack[--stack_ptr]; + s32 b = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + count = stack[stack_ptr - 1]; + + if ((count < 1) || (count > 32)) + status = -ERANGE; + else { + long_tmp &= ((-1L) >> (32 - count)); + + stack[stack_ptr - 1] = + ((a & long_tmp) == (b & long_tmp)) + ? 1L : 0L; + } + } + break; + case OP_PINT: + /* + * PRINT add integer + * ...stack 0 is integer value + */ + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + sprintf(&msg_buff[strlen(msg_buff)], + "%ld", stack[--stack_ptr]); + break; + case OP_PRNT: + /* PRINT finish */ + if (debug) + printk("%s\n", msg_buff); + + msg_buff[0] = '\0'; + break; + case OP_DSS: + /* + * DRSCAN short + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_drscan(astate, count, charbuf, 0); + break; + case OP_DSSC: + /* + * DRSCAN short with capture + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[stack_ptr - 1]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_swap_dr(astate, count, charbuf, + 0, charbuf, 0); + stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); + break; + case OP_ISS: + /* + * IRSCAN short + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_irscan(astate, count, charbuf, 0); + break; + case OP_ISSC: + /* + * IRSCAN short with capture + * ...stack 0 is scan data + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + count = stack[stack_ptr - 1]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_swap_ir(astate, count, charbuf, + 0, charbuf, 0); + stack[stack_ptr - 1] = get_unaligned_le32(&charbuf[0]); + break; + case OP_DPR: + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + count = stack[--stack_ptr]; + status = altera_set_dr_pre(&astate->js, count, 0, NULL); + break; + case OP_DPRL: + /* + * DRPRE with literal data + * ...stack 0 is count + * ...stack 1 is literal data + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_dr_pre(&astate->js, count, 0, + charbuf); + break; + case OP_DPO: + /* + * DRPOST + * ...stack 0 is count + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + count = stack[--stack_ptr]; + status = altera_set_dr_post(&astate->js, count, + 0, NULL); + } + break; + case OP_DPOL: + /* + * DRPOST with literal data + * ...stack 0
is count + * ...stack 1 is literal data + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_dr_post(&astate->js, count, 0, + charbuf); + break; + case OP_IPR: + if (altera_check_stack(stack_ptr, 1, &status)) { + count = stack[--stack_ptr]; + status = altera_set_ir_pre(&astate->js, count, + 0, NULL); + } + break; + case OP_IPRL: + /* + * IRPRE with literal data + * ...stack 0 is count + * ...stack 1 is literal data + */ + if (altera_check_stack(stack_ptr, 2, &status)) { + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_ir_pre(&astate->js, count, + 0, charbuf); + } + break; + case OP_IPO: + /* + * IRPOST + * ...stack 0 is count + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + count = stack[--stack_ptr]; + status = altera_set_ir_post(&astate->js, count, + 0, NULL); + } + break; + case OP_IPOL: + /* + * IRPOST with literal data + * ...stack 0 is count + * ...stack 1 is literal data + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + count = stack[--stack_ptr]; + long_tmp = stack[--stack_ptr]; + put_unaligned_le32(long_tmp, &charbuf[0]); + status = altera_set_ir_post(&astate->js, count, 0, + charbuf); + break; + case OP_PCHR: + if (altera_check_stack(stack_ptr, 1, &status)) { + u8 ch; + count = strlen(msg_buff); + ch = (char) stack[--stack_ptr]; + if ((ch < 1) || (ch > 127)) { + /* + * character code out of range + * instead of flagging an error, + * force the value to 127 + */ + ch = 127; + } + msg_buff[count] = ch; + msg_buff[count + 1] = '\0'; + } + break; + case OP_EXIT: + if (altera_check_stack(stack_ptr, 1, &status)) + *exit_code = stack[--stack_ptr]; + + done = 1; + break; + case OP_EQU: + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + --stack_ptr; + stack[stack_ptr - 1] = + (stack[stack_ptr - 1] == stack[stack_ptr]) ? 
+ 1L : 0L; + break; + case OP_POPT: + if (altera_check_stack(stack_ptr, 1, &status)) + --stack_ptr; + + break; + case OP_ABS: + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + if (stack[stack_ptr - 1] < 0) + stack[stack_ptr - 1] = 0 - stack[stack_ptr - 1]; + + break; + case OP_BCH0: + /* + * Batch operation 0 + * SWP + * SWPN 7 + * SWP + * SWPN 6 + * DUPN 8 + * SWPN 2 + * SWP + * DUPN 6 + * DUPN 6 + */ + + /* SWP */ + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWPN 7 */ + index = 7 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWP */ + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWPN 6 */ + index = 6 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* DUPN 8 */ + index = 8 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + + /* SWPN 2 */ + index = 2 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* SWP */ + if (altera_check_stack(stack_ptr, 2, &status)) { + long_tmp = stack[stack_ptr - 2]; + stack[stack_ptr - 2] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + + /* DUPN 6 */ + index = 6 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + + /* DUPN 6 */ + index = 6 + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + break; + case OP_PSH0: + stack[stack_ptr++] = 0; + break; + case OP_PSHL: + stack[stack_ptr++] = (s32) args[0]; + break; + case OP_PSHV: + stack[stack_ptr++] = vars[args[0]]; + break; + case OP_JMP: + pc = args[0] + code_sect; + if ((pc < code_sect) || (pc >= debug_sect)) + status = -ERANGE; + break; + case OP_CALL: + stack[stack_ptr++] = pc; + pc = args[0] + code_sect; + if ((pc < code_sect) || (pc >= debug_sect)) + status = -ERANGE; + break; + case OP_NEXT: + /* + * Process FOR / NEXT loop + * ...argument 0 is variable ID + * ...stack 0 is step value + * ...stack 1 is end value + * ...stack 2 is top address + */ + if (altera_check_stack(stack_ptr, 3, &status)) { + s32 step = stack[stack_ptr - 1]; + s32 end = stack[stack_ptr - 2]; + s32 top = stack[stack_ptr - 3]; + s32 iterator = vars[args[0]]; + int break_out = 0; + + if (step < 0) { + if (iterator <= end) + break_out = 1; + } else if (iterator >= end) + break_out = 1; + + if (break_out) { + stack_ptr -= 3; + } else { + vars[args[0]] = iterator + step; + pc = top + code_sect; + if ((pc < code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + } + } + break; + case OP_PSTR: + /* + * PRINT add string + * ...argument 0 is string ID + */ + count = strlen(msg_buff); + strlcpy(&msg_buff[count], + &p[str_table + args[0]], + ALTERA_MESSAGE_LENGTH - count); + break; + case OP_SINT: + /* + * STATE intermediate state + * ...argument 0 is state code + */ + status = altera_goto_jstate(astate, 
args[0]); + break; + case OP_ST: + /* + * STATE final state + * ...argument 0 is state code + */ + status = altera_goto_jstate(astate, args[0]); + break; + case OP_ISTP: + /* + * IRSTOP state + * ...argument 0 is state code + */ + status = altera_set_irstop(&astate->js, args[0]); + break; + case OP_DSTP: + /* + * DRSTOP state + * ...argument 0 is state code + */ + status = altera_set_drstop(&astate->js, args[0]); + break; + + case OP_SWPN: + /* + * Exchange top with Nth stack value + * ...argument 0 is 0-based stack entry + * to swap with top element + */ + index = (args[0]) + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + long_tmp = stack[stack_ptr - index]; + stack[stack_ptr - index] = stack[stack_ptr - 1]; + stack[stack_ptr - 1] = long_tmp; + } + break; + case OP_DUPN: + /* + * Duplicate Nth stack value + * ...argument 0 is 0-based stack entry to duplicate + */ + index = (args[0]) + 1; + if (altera_check_stack(stack_ptr, index, &status)) { + stack[stack_ptr] = stack[stack_ptr - index]; + ++stack_ptr; + } + break; + case OP_POPV: + /* + * Pop stack into scalar variable + * ...argument 0 is variable ID + * ...stack 0 is value + */ + if (altera_check_stack(stack_ptr, 1, &status)) + vars[args[0]] = stack[--stack_ptr]; + + break; + case OP_POPE: + /* + * Pop stack into integer array element + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is value + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + variable_id = args[0]; + + /* + * If variable is read-only, + * convert to writable array + */ + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x1c)) { + /* Allocate a writable buffer for this array */ + count = var_size[variable_id]; + long_tmp = vars[variable_id]; + longptr_tmp = kzalloc(count * sizeof(long), + GFP_KERNEL); + vars[variable_id] = (long)longptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* copy previous contents into buffer */ + for (i = 0; i < count; ++i) { + longptr_tmp[i] = + get_unaligned_be32(&p[long_tmp]); + long_tmp += sizeof(long); + } + + /* + * set bit 7 - buffer was + * dynamically allocated + */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + + } + + /* check that variable is a writable integer array */ + if ((attrs[variable_id] & 0x1c) != 0x18) + status = -ERANGE; + else { + longptr_tmp = (long *)vars[variable_id]; + + /* pop the array index */ + index = stack[--stack_ptr]; + + /* pop the value and store it into the array */ + longptr_tmp[index] = stack[--stack_ptr]; + } + + break; + case OP_POPA: + /* + * Pop stack into Boolean array + * ...argument 0 is variable ID + * ...stack 0 is count + * ...stack 1 is array index + * ...stack 2 is value + */ + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + variable_id = args[0]; + + /* + * If variable is read-only, + * convert to writable array + */ + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x0c)) { + /* Allocate a writable buffer for this array */ + long_tmp = + (var_size[variable_id] + 7L) >> 3L; + charptr_tmp2 = (u8 *)vars[variable_id]; + charptr_tmp = + kzalloc(long_tmp, GFP_KERNEL); + vars[variable_id] = (long)charptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* zero the buffer */ + for (long_idx = 0L; + long_idx < long_tmp; + ++long_idx) { + charptr_tmp[long_idx] = 0; + } + + /* copy previous contents into buffer */ + for (long_idx = 0L; + long_idx < var_size[variable_id]; + 
++long_idx) { + long_idx2 = long_idx; + + if (charptr_tmp2[long_idx2 >> 3] & + (1 << (long_idx2 & 7))) { + charptr_tmp[long_idx >> 3] |= + (1 << (long_idx & 7)); + } + } + + /* + * set bit 7 - buffer was + * dynamically allocated + */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + + } + + /* + * check that variable is + * a writable Boolean array + */ + if ((attrs[variable_id] & 0x1c) != 0x08) { + status = -ERANGE; + break; + } + + charptr_tmp = (u8 *)vars[variable_id]; + + /* pop the count (number of bits to copy) */ + long_count = stack[--stack_ptr]; + + /* pop the array index */ + long_idx = stack[--stack_ptr]; + + reverse = 0; + + if (version > 0) { + /* + * stack 0 = array right index + * stack 1 = array left index + */ + + if (long_idx > long_count) { + reverse = 1; + long_tmp = long_count; + long_count = 1 + long_idx - + long_count; + long_idx = long_tmp; + + /* reverse POPA is not supported */ + status = -ERANGE; + break; + } else + long_count = 1 + long_count - + long_idx; + + } + + /* pop the data */ + long_tmp = stack[--stack_ptr]; + + if (long_count < 1) { + status = -ERANGE; + break; + } + + for (i = 0; i < long_count; ++i) { + if (long_tmp & (1L << (s32) i)) + charptr_tmp[long_idx >> 3L] |= + (1L << (long_idx & 7L)); + else + charptr_tmp[long_idx >> 3L] &= + ~(1L << (long_idx & 7L)); + + ++long_idx; + } + + break; + case OP_JMPZ: + /* + * Pop stack and branch if zero + * ...argument 0 is address + * ...stack 0 is condition value + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + if (stack[--stack_ptr] == 0) { + pc = args[0] + code_sect; + if ((pc < code_sect) || + (pc >= debug_sect)) + status = -ERANGE; + } + } + break; + case OP_DS: + case OP_IS: + /* + * DRSCAN + * IRSCAN + * ...argument 0 is scan data variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_idx = stack[--stack_ptr]; + long_count = stack[--stack_ptr]; + reverse = 0; + if (version > 0) { + /* + * stack 0 = array right index + * stack 1 = array left index + * stack 2 = count + */ + long_tmp = long_count; + long_count = stack[--stack_ptr]; + + if (long_idx > long_tmp) { + reverse = 1; + long_idx = long_tmp; + } + } + + charptr_tmp = (u8 *)vars[args[0]]; + + if (reverse) { + /* + * allocate a buffer + * and reverse the data order + */ + charptr_tmp2 = charptr_tmp; + charptr_tmp = kzalloc((long_count >> 3) + 1, + GFP_KERNEL); + if (charptr_tmp == NULL) { + status = -ENOMEM; + break; + } + + long_tmp = long_idx + long_count - 1; + long_idx2 = 0; + while (long_idx2 < long_count) { + if (charptr_tmp2[long_tmp >> 3] & + (1 << (long_tmp & 7))) + charptr_tmp[long_idx2 >> 3] |= + (1 << (long_idx2 & 7)); + else + charptr_tmp[long_idx2 >> 3] &= + ~(1 << (long_idx2 & 7)); + + --long_tmp; + ++long_idx2; + } + } + + if (opcode == 0x51) /* DS */ + status = altera_drscan(astate, long_count, + charptr_tmp, long_idx); + else /* IS */ + status = altera_irscan(astate, long_count, + charptr_tmp, long_idx); + + if (reverse) + kfree(charptr_tmp); + + break; + case OP_DPRA: + /* + * DRPRE with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 
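The Boolean-array opcodes here (OP_POPA and OP_DS/OP_IS above, OP_DPRA and friends below) all address packed bits the same way: bit i lives in byte i >> 3 at bit position i & 7. Equivalent helpers, purely illustrative since the driver keeps these accesses open-coded:

static inline int jbc_test_bit(const u8 *arr, s32 i)
{
	return (arr[i >> 3] >> (i & 7)) & 1;
}

static inline void jbc_assign_bit(u8 *arr, s32 i, int val)
{
	if (val)
		arr[i >> 3] |= 1 << (i & 7);
	else
		arr[i >> 3] &= ~(1 << (i & 7));
}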
*)vars[args[0]]; + status = altera_set_dr_pre(&astate->js, count, index, + charptr_tmp); + break; + case OP_DPOA: + /* + * DRPOST with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 *)vars[args[0]]; + status = altera_set_dr_post(&astate->js, count, index, + charptr_tmp); + break; + case OP_IPRA: + /* + * IRPRE with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 *)vars[args[0]]; + status = altera_set_ir_pre(&astate->js, count, index, + charptr_tmp); + + break; + case OP_IPOA: + /* + * IRPOST with array data + * ...argument 0 is variable ID + * ...stack 0 is array index + * ...stack 1 is count + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + index = stack[--stack_ptr]; + count = stack[--stack_ptr]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + charptr_tmp = (u8 *)vars[args[0]]; + status = altera_set_ir_post(&astate->js, count, index, + charptr_tmp); + + break; + case OP_EXPT: + /* + * EXPORT + * ...argument 0 is string ID + * ...stack 0 is integer expression + */ + if (altera_check_stack(stack_ptr, 1, &status)) { + name = &p[str_table + args[0]]; + long_tmp = stack[--stack_ptr]; + altera_export_int(name, long_tmp); + } + break; + case OP_PSHE: + /* + * Push integer array element + * ...argument 0 is variable ID + * ...stack 0 is array index + */ + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + variable_id = args[0]; + index = stack[stack_ptr - 1]; + + /* check variable type */ + if ((attrs[variable_id] & 0x1f) == 0x19) { + /* writable integer array */ + longptr_tmp = (long *)vars[variable_id]; + stack[stack_ptr - 1] = longptr_tmp[index]; + } else if ((attrs[variable_id] & 0x1f) == 0x1c) { + /* read-only integer array */ + long_tmp = vars[variable_id] + + (index * sizeof(long)); + stack[stack_ptr - 1] = + get_unaligned_be32(&p[long_tmp]); + } else + status = -ERANGE; + + break; + case OP_PSHA: + /* + * Push Boolean array + * ...argument 0 is variable ID + * ...stack 0 is count + * ...stack 1 is array index + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + variable_id = args[0]; + + /* check that variable is a Boolean array */ + if ((attrs[variable_id] & 0x18) != 0x08) { + status = -ERANGE; + break; + } + + charptr_tmp = (u8 *)vars[variable_id]; + + /* pop the count (number of bits to copy) */ + count = stack[--stack_ptr]; + + /* pop the array index */ + index = stack[stack_ptr - 1]; + + if (version > 0) + /* + * stack 0 = array right index + * stack 1 = array left index + */ + count = 1 + count - index; + + if ((count < 1) || (count > 32)) { + status = -ERANGE; + break; + } + + long_tmp = 0L; + + for (i = 0; i < count; ++i) + if (charptr_tmp[(i + index) >> 3] & + (1 << ((i + index) & 7))) + long_tmp |= (1L << i); + + stack[stack_ptr - 1] = long_tmp; + + break; + case OP_DYNA: + /* + * Dynamically change size of array + * ...argument 0 is 
variable ID + * ...stack 0 is new size + */ + if (!altera_check_stack(stack_ptr, 1, &status)) + break; + variable_id = args[0]; + long_tmp = stack[--stack_ptr]; + + if (long_tmp > var_size[variable_id]) { + var_size[variable_id] = long_tmp; + + if (attrs[variable_id] & 0x10) + /* allocate integer array */ + long_tmp *= sizeof(long); + else + /* allocate Boolean array */ + long_tmp = (long_tmp + 7) >> 3; + + /* + * If the buffer was previously allocated, + * free it + */ + if (attrs[variable_id] & 0x80) { + kfree((void *)vars[variable_id]); + vars[variable_id] = 0; + } + + /* + * Allocate a new buffer + * of the requested size + */ + vars[variable_id] = (long) + kzalloc(long_tmp, GFP_KERNEL); + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* + * Set the attribute bit to indicate that + * this buffer was dynamically allocated and + * should be freed later + */ + attrs[variable_id] |= 0x80; + + /* zero out memory */ + count = ((var_size[variable_id] + 7L) / + 8L); + charptr_tmp = (u8 *)(vars[variable_id]); + for (index = 0; index < count; ++index) + charptr_tmp[index] = 0; + + } + + break; + case OP_EXPV: + /* + * Export Boolean array + * ...argument 0 is string ID + * ...stack 0 is variable ID + * ...stack 1 is array right index + * ...stack 2 is array left index + */ + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + if (version == 0) { + /* EXPV is not supported in JBC 1.0 */ + bad_opcode = 1; + break; + } + name = &p[str_table + args[0]]; + variable_id = stack[--stack_ptr]; + long_idx = stack[--stack_ptr];/* right indx */ + long_idx2 = stack[--stack_ptr];/* left indx */ + + if (long_idx > long_idx2) { + /* reverse indices not supported */ + status = -ERANGE; + break; + } + + long_count = 1 + long_idx2 - long_idx; + + charptr_tmp = (u8 *)vars[variable_id]; + charptr_tmp2 = NULL; + + if ((long_idx & 7L) != 0) { + s32 k = long_idx; + charptr_tmp2 = + kzalloc(((long_count + 7L) / 8L), + GFP_KERNEL); + if (charptr_tmp2 == NULL) { + status = -ENOMEM; + break; + } + + for (i = 0; i < long_count; ++i) { + if (charptr_tmp[k >> 3] & + (1 << (k & 7))) + charptr_tmp2[i >> 3] |= + (1 << (i & 7)); + else + charptr_tmp2[i >> 3] &= + ~(1 << (i & 7)); + + ++k; + } + charptr_tmp = charptr_tmp2; + + } else if (long_idx != 0) + charptr_tmp = &charptr_tmp[long_idx >> 3]; + + altera_export_bool_array(name, charptr_tmp, + long_count); + + /* free allocated buffer */ + if ((long_idx & 7L) != 0) + kfree(charptr_tmp2); + + break; + case OP_COPY: { + /* + * Array copy + * ...argument 0 is dest ID + * ...argument 1 is source ID + * ...stack 0 is count + * ...stack 1 is dest index + * ...stack 2 is source index + */ + s32 copy_count; + s32 copy_index; + s32 copy_index2; + s32 destleft; + s32 src_count; + s32 dest_count; + int src_reverse = 0; + int dest_reverse = 0; + + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + + copy_count = stack[--stack_ptr]; + copy_index = stack[--stack_ptr]; + copy_index2 = stack[--stack_ptr]; + reverse = 0; + + if (version > 0) { + /* + * stack 0 = source right index + * stack 1 = source left index + * stack 2 = destination right index + * stack 3 = destination left index + */ + destleft = stack[--stack_ptr]; + + if (copy_count > copy_index) { + src_reverse = 1; + reverse = 1; + src_count = 1 + copy_count - copy_index; + /* copy_index = source start index */ + } else { + src_count = 1 + copy_index - copy_count; + /* source start index */ + copy_index = copy_count; + } + + if (copy_index2 > destleft) { + dest_reverse = 1; + reverse = !reverse; + 
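This left/right bookkeeping repeats across OP_POPA, OP_DS/OP_IS, OP_DPRA and OP_COPY: JBC 2.0 (version > 0) passes array ranges as (left, right) index pairs, the element count is 1 + left - right, and right > left flags a reversed range. Distilled into a hypothetical helper (the driver open-codes each instance):

static void jbc_norm_range(s32 left, s32 right,
			   s32 *start, s32 *count, int *reverse)
{
	*reverse = right > left;	/* reversed range */
	if (*reverse) {
		*count = 1 + right - left;
		*start = left;
	} else {
		*count = 1 + left - right;
		*start = right;
	}
}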
dest_count = 1 + copy_index2 - destleft; + /* destination start index */ + copy_index2 = destleft; + } else + dest_count = 1 + destleft - copy_index2; + + copy_count = (src_count < dest_count) ? + src_count : dest_count; + + if ((src_reverse || dest_reverse) && + (src_count != dest_count)) + /* + * If either the source or destination + * is reversed, we can't tolerate + * a length mismatch, because we + * "left justify" arrays when copying. + * This won't work correctly + * with reversed arrays. + */ + status = -ERANGE; + + } + + count = copy_count; + index = copy_index; + index2 = copy_index2; + + /* + * If destination is a read-only array, + * allocate a buffer and convert it to a writable array + */ + variable_id = args[1]; + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x0c)) { + /* Allocate a writable buffer for this array */ + long_tmp = + (var_size[variable_id] + 7L) >> 3L; + charptr_tmp2 = (u8 *)vars[variable_id]; + charptr_tmp = + kzalloc(long_tmp, GFP_KERNEL); + vars[variable_id] = (long)charptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* zero the buffer */ + for (long_idx = 0L; long_idx < long_tmp; + ++long_idx) + charptr_tmp[long_idx] = 0; + + /* copy previous contents into buffer */ + for (long_idx = 0L; + long_idx < var_size[variable_id]; + ++long_idx) { + long_idx2 = long_idx; + + if (charptr_tmp2[long_idx2 >> 3] & + (1 << (long_idx2 & 7))) + charptr_tmp[long_idx >> 3] |= + (1 << (long_idx & 7)); + + } + + /* + set bit 7 - buffer was dynamically allocated */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + } + + charptr_tmp = (u8 *)vars[args[1]]; + charptr_tmp2 = (u8 *)vars[args[0]]; + + /* check if destination is a writable Boolean array */ + if ((attrs[args[1]] & 0x1c) != 0x08) { + status = -ERANGE; + break; + } + + if (count < 1) { + status = -ERANGE; + break; + } + + if (reverse) + index2 += (count - 1); + + for (i = 0; i < count; ++i) { + if (charptr_tmp2[index >> 3] & + (1 << (index & 7))) + charptr_tmp[index2 >> 3] |= + (1 << (index2 & 7)); + else + charptr_tmp[index2 >> 3] &= + ~(1 << (index2 & 7)); + + ++index; + if (reverse) + --index2; + else + ++index2; + } + + break; + } + case OP_DSC: + case OP_ISC: { + /* + * DRSCAN with capture + * IRSCAN with capture + * ...argument 0 is scan data variable ID + * ...argument 1 is capture variable ID + * ...stack 0 is capture index + * ...stack 1 is scan data index + * ...stack 2 is count + */ + s32 scan_right, scan_left; + s32 capture_count = 0; + s32 scan_count = 0; + s32 capture_index; + s32 scan_index; + + if (!altera_check_stack(stack_ptr, 3, &status)) + break; + + capture_index = stack[--stack_ptr]; + scan_index = stack[--stack_ptr]; + + if (version > 0) { + /* + * stack 0 = capture right index + * stack 1 = capture left index + * stack 2 = scan right index + * stack 3 = scan left index + * stack 4 = count + */ + scan_right = stack[--stack_ptr]; + scan_left = stack[--stack_ptr]; + capture_count = 1 + scan_index - capture_index; + scan_count = 1 + scan_left - scan_right; + scan_index = scan_right; + } + + long_count = stack[--stack_ptr]; + /* + * If capture array is read-only, allocate a buffer + * and convert it to a writable array + */ + variable_id = args[1]; + if ((version > 0) && + ((attrs[variable_id] & 0x9c) == 0x0c)) { + /* Allocate a writable buffer for this array */ + long_tmp = + (var_size[variable_id] + 7L) >> 3L; + charptr_tmp2 = (u8 *)vars[variable_id]; + charptr_tmp = + 
kzalloc(long_tmp, GFP_KERNEL); + vars[variable_id] = (long)charptr_tmp; + + if (vars[variable_id] == 0) { + status = -ENOMEM; + break; + } + + /* zero the buffer */ + for (long_idx = 0L; long_idx < long_tmp; + ++long_idx) + charptr_tmp[long_idx] = 0; + + /* copy previous contents into buffer */ + for (long_idx = 0L; + long_idx < var_size[variable_id]; + ++long_idx) { + long_idx2 = long_idx; + + if (charptr_tmp2[long_idx2 >> 3] & + (1 << (long_idx2 & 7))) + charptr_tmp[long_idx >> 3] |= + (1 << (long_idx & 7)); + + } + + /* + * set bit 7 - buffer was + * dynamically allocated + */ + attrs[variable_id] |= 0x80; + + /* clear bit 2 - variable is writable */ + attrs[variable_id] &= ~0x04; + attrs[variable_id] |= 0x01; + + } + + charptr_tmp = (u8 *)vars[args[0]]; + charptr_tmp2 = (u8 *)vars[args[1]]; + + if ((version > 0) && + ((long_count > capture_count) || + (long_count > scan_count))) { + status = -ERANGE; + break; + } + + /* + * check that capture array + * is a writable Boolean array + */ + if ((attrs[args[1]] & 0x1c) != 0x08) { + status = -ERANGE; + break; + } + + if (status == 0) { + if (opcode == 0x82) /* DSC */ + status = altera_swap_dr(astate, + long_count, + charptr_tmp, + scan_index, + charptr_tmp2, + capture_index); + else /* ISC */ + status = altera_swap_ir(astate, + long_count, + charptr_tmp, + scan_index, + charptr_tmp2, + capture_index); + + } + + break; + } + case OP_WAIT: + /* + * WAIT + * ...argument 0 is wait state + * ...argument 1 is end state + * ...stack 0 is cycles + * ...stack 1 is microseconds + */ + if (!altera_check_stack(stack_ptr, 2, &status)) + break; + long_tmp = stack[--stack_ptr]; + + if (long_tmp != 0L) + status = altera_wait_cycles(astate, long_tmp, + args[0]); + + long_tmp = stack[--stack_ptr]; + + if ((status == 0) && (long_tmp != 0L)) + status = altera_wait_msecs(astate, + long_tmp, + args[0]); + + if ((status == 0) && (args[1] != args[0])) + status = altera_goto_jstate(astate, + args[1]); + + if (version > 0) { + --stack_ptr; /* throw away MAX cycles */ + --stack_ptr; /* throw away MAX microseconds */ + } + break; + case OP_CMPA: { + /* + * Array compare + * ...argument 0 is source 1 ID + * ...argument 1 is source 2 ID + * ...argument 2 is mask ID + * ...stack 0 is source 1 index + * ...stack 1 is source 2 index + * ...stack 2 is mask index + * ...stack 3 is count + */ + s32 a, b; + u8 *source1 = (u8 *)vars[args[0]]; + u8 *source2 = (u8 *)vars[args[1]]; + u8 *mask = (u8 *)vars[args[2]]; + u32 index1; + u32 index2; + u32 mask_index; + + if (!altera_check_stack(stack_ptr, 4, &status)) + break; + + index1 = stack[--stack_ptr]; + index2 = stack[--stack_ptr]; + mask_index = stack[--stack_ptr]; + long_count = stack[--stack_ptr]; + + if (version > 0) { + /* + * stack 0 = source 1 right index + * stack 1 = source 1 left index + * stack 2 = source 2 right index + * stack 3 = source 2 left index + * stack 4 = mask right index + * stack 5 = mask left index + */ + s32 mask_right = stack[--stack_ptr]; + s32 mask_left = stack[--stack_ptr]; + /* source 1 count */ + a = 1 + index2 - index1; + /* source 2 count */ + b = 1 + long_count - mask_index; + a = (a < b) ? a : b; + /* mask count */ + b = 1 + mask_left - mask_right; + a = (a < b) ? 
a : b; + /* source 2 start index */ + index2 = mask_index; + /* mask start index */ + mask_index = mask_right; + long_count = a; + } + + long_tmp = 1L; + + if (long_count < 1) + status = -ERANGE; + else { + count = long_count; + + for (i = 0; i < count; ++i) { + if (mask[mask_index >> 3] & + (1 << (mask_index & 7))) { + a = source1[index1 >> 3] & + (1 << (index1 & 7)) + ? 1 : 0; + b = source2[index2 >> 3] & + (1 << (index2 & 7)) + ? 1 : 0; + + if (a != b) /* failure */ + long_tmp = 0L; + } + ++index1; + ++index2; + ++mask_index; + } + } + + stack[stack_ptr++] = long_tmp; + + break; + } + default: + /* Unrecognized opcode -- ERROR! */ + bad_opcode = 1; + break; + } + + if (bad_opcode) + status = -ENOSYS; + + if ((stack_ptr < 0) || (stack_ptr >= ALTERA_STACK_SIZE)) + status = -EOVERFLOW; + + if (status != 0) { + done = 1; + *error_address = (s32)(opcode_address - code_sect); + } + } + + altera_free_buffers(astate); + + /* Free all dynamically allocated arrays */ + if ((attrs != NULL) && (vars != NULL)) + for (i = 0; i < sym_count; ++i) + if (attrs[i] & 0x80) + kfree((void *)vars[i]); + + kfree(vars); + kfree(var_size); + kfree(attrs); + kfree(proc_attributes); + + return status; +} + +static int altera_get_note(u8 *p, s32 program_size, + s32 *offset, char *key, char *value, int length) +/* + * Gets key and value of NOTE fields in the JBC file. + * Can be called in two modes: if offset pointer is NULL, + * then the function searches for note fields which match + * the key string provided. If offset is not NULL, then + * the function finds the next note field of any key, + * starting at the offset specified by the offset pointer. + * Returns 0 for success, else appropriate error code + */ +{ + int status = -ENODATA; + u32 note_strings = 0L; + u32 note_table = 0L; + u32 note_count = 0L; + u32 first_word = 0L; + int version = 0; + int delta = 0; + char *key_ptr; + char *value_ptr; + int i; + + /* Read header information */ + if (program_size > 52L) { + first_word = get_unaligned_be32(&p[0]); + version = (first_word & 1L); + delta = version * 8; + + note_strings = get_unaligned_be32(&p[8 + delta]); + note_table = get_unaligned_be32(&p[12 + delta]); + note_count = get_unaligned_be32(&p[44 + (2 * delta)]); + } + + if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) + return -EIO; + + if (note_count <= 0L) + return status; + + if (offset == NULL) { + /* + * We will search for the first note with a specific key, + * and return only the value + */ + for (i = 0; (i < note_count) && + (status != 0); ++i) { + key_ptr = &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i)])]; + if ((key != NULL) && + (strnicmp(key, key_ptr, strlen(key_ptr)) == 0)) { + status = 0; + + value_ptr = &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i) + 4])]; + + if (value != NULL) + strlcpy(value, value_ptr, length); + + } + } + } else { + /* + * We will search for the next note, regardless of the key, + * and return both the value and the key + */ + + i = *offset; + + if ((i >= 0) && (i < note_count)) { + status = 0; + + if (key != NULL) + strlcpy(key, &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i)])], + length); + + if (value != NULL) + strlcpy(value, &p[note_strings + + get_unaligned_be32( + &p[note_table + (8 * i) + 4])], + length); + + *offset = i + 1; + } + } + + return status; +} + +static int altera_check_crc(u8 *p, s32 program_size) +{ + int status = 0; + u16 local_expected = 0, + local_actual = 0, + shift_reg = 0xffff; + int bit, feedback; + u8 databyte; +
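The loop that follows is the bit-reflected CRC-16/CCITT: polynomial 0x8408 (the reversed form of 0x1021), seed 0xffff, final one's complement, run over every byte preceding the CRC section, with the expected value stored big-endian at crc_section. The kernel already carries this algorithm in lib/crc-ccitt.c, so an equivalent check could lean on it -- a sketch only, the driver keeps its open-coded loop:

#include <linux/crc-ccitt.h>

/* Sketch: crc_ccitt() implements the same LSB-first 0x8408 CRC */
u16 actual = ~crc_ccitt(0xffff, p, crc_section);
if (get_unaligned_be16(&p[crc_section]) != actual)
	return -EILSEQ;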
u32 i; + u32 crc_section = 0L; + u32 first_word = 0L; + int version = 0; + int delta = 0; + + if (program_size > 52L) { + first_word = get_unaligned_be32(&p[0]); + version = (first_word & 1L); + delta = version * 8; + + crc_section = get_unaligned_be32(&p[32 + delta]); + } + + if ((first_word != 0x4A414D00L) && (first_word != 0x4A414D01L)) + status = -EIO; + + if (crc_section >= program_size) + status = -EIO; + + if (status == 0) { + local_expected = (u16)get_unaligned_be16(&p[crc_section]); + + for (i = 0; i < crc_section; ++i) { + databyte = p[i]; + for (bit = 0; bit < 8; bit++) { + feedback = (databyte ^ shift_reg) & 0x01; + shift_reg >>= 1; + if (feedback) + shift_reg ^= 0x8408; + + databyte >>= 1; + } + } + + local_actual = (u16)~shift_reg; + + if (local_expected != local_actual) + status = -EILSEQ; + + } + + if (debug || status) { + switch (status) { + case 0: + printk(KERN_INFO "%s: CRC matched: %04x\n", __func__, + local_actual); + break; + case -EILSEQ: + printk(KERN_ERR "%s: CRC mismatch: expected %04x, " + "actual %04x\n", __func__, local_expected, + local_actual); + break; + case -ENODATA: + printk(KERN_ERR "%s: expected CRC not found, " + "actual CRC = %04x\n", __func__, + local_actual); + break; + case -EIO: + printk(KERN_ERR "%s: error: format isn't " + "recognized.\n", __func__); + break; + default: + printk(KERN_ERR "%s: CRC function returned error " + "code %d\n", __func__, status); + break; + } + } + + return status; +} + +static int altera_get_file_info(u8 *p, + s32 program_size, + int *format_version, + int *action_count, + int *procedure_count) +{ + int status = -EIO; + u32 first_word = 0; + int version = 0; + + if (program_size <= 52L) + return status; + + first_word = get_unaligned_be32(&p[0]); + + if ((first_word == 0x4A414D00L) || (first_word == 0x4A414D01L)) { + status = 0; + + version = (first_word & 1L); + *format_version = version + 1; + + if (version > 0) { + *action_count = get_unaligned_be32(&p[48]); + *procedure_count = get_unaligned_be32(&p[52]); + } + } + + return status; +} + +static int altera_get_act_info(u8 *p, + s32 program_size, + int index, + char **name, + char **description, + struct altera_procinfo **proc_list) +{ + int status = -EIO; + struct altera_procinfo *procptr = NULL; + struct altera_procinfo *tmpptr = NULL; + u32 first_word = 0L; + u32 action_table = 0L; + u32 proc_table = 0L; + u32 str_table = 0L; + u32 note_strings = 0L; + u32 action_count = 0L; + u32 proc_count = 0L; + u32 act_name_id = 0L; + u32 act_desc_id = 0L; + u32 act_proc_id = 0L; + u32 act_proc_name = 0L; + u8 act_proc_attribute = 0; + + if (program_size <= 52L) + return status; + /* Read header information */ + first_word = get_unaligned_be32(&p[0]); + + if (first_word != 0x4A414D01L) + return status; + + action_table = get_unaligned_be32(&p[4]); + proc_table = get_unaligned_be32(&p[8]); + str_table = get_unaligned_be32(&p[12]); + note_strings = get_unaligned_be32(&p[16]); + action_count = get_unaligned_be32(&p[48]); + proc_count = get_unaligned_be32(&p[52]); + + if (index >= action_count) + return status; + + act_name_id = get_unaligned_be32(&p[action_table + (12 * index)]); + act_desc_id = get_unaligned_be32(&p[action_table + (12 * index) + 4]); + act_proc_id = get_unaligned_be32(&p[action_table + (12 * index) + 8]); + + *name = &p[str_table + act_name_id]; + + if (act_desc_id < (note_strings - str_table)) + *description = &p[str_table + act_desc_id]; + + do { + act_proc_name = get_unaligned_be32( + &p[proc_table + (13 * act_proc_id)]); + act_proc_attribute = + 
(p[proc_table + (13 * act_proc_id) + 8] & 0x03); + + procptr = + kzalloc(sizeof(struct altera_procinfo), + GFP_KERNEL); + + if (procptr == NULL) + status = -ENOMEM; + else { + procptr->name = &p[str_table + act_proc_name]; + procptr->attrs = act_proc_attribute; + procptr->next = NULL; + + /* add record to end of linked list */ + if (*proc_list == NULL) + *proc_list = procptr; + else { + tmpptr = *proc_list; + while (tmpptr->next != NULL) + tmpptr = tmpptr->next; + tmpptr->next = procptr; + } + } + + act_proc_id = get_unaligned_be32( + &p[proc_table + (13 * act_proc_id) + 4]); + } while ((act_proc_id != 0) && (act_proc_id < proc_count)); + + return status; +} + +int altera_init(struct altera_config *config, const struct firmware *fw) +{ + struct altera_state *astate = NULL; + struct altera_procinfo *proc_list = NULL; + struct altera_procinfo *procptr = NULL; + char *key = NULL; + char *value = NULL; + char *action_name = NULL; + char *description = NULL; + int exec_result = 0; + int exit_code = 0; + int format_version = 0; + int action_count = 0; + int procedure_count = 0; + int index = 0; + s32 offset = 0L; + s32 error_address = 0L; + int retval = 0; + + key = kzalloc(33, GFP_KERNEL); + if (!key) { + retval = -ENOMEM; + goto out; + } + value = kzalloc(257, GFP_KERNEL); + if (!value) { + retval = -ENOMEM; + goto free_key; + } + astate = kzalloc(sizeof(struct altera_state), GFP_KERNEL); + if (!astate) { + retval = -ENOMEM; + goto free_value; + } + + astate->config = config; + if (!astate->config->jtag_io) { + dprintk(KERN_INFO "%s: using byteblaster!\n", __func__); + astate->config->jtag_io = netup_jtag_io_lpt; + } + + altera_check_crc((u8 *)fw->data, fw->size); + + if (debug) { + altera_get_file_info((u8 *)fw->data, fw->size, &format_version, + &action_count, &procedure_count); + printk(KERN_INFO "%s: File format is %s ByteCode format\n", + __func__, (format_version == 2) ? "Jam STAPL" : + "pre-standardized Jam 1.1"); + while (altera_get_note((u8 *)fw->data, fw->size, + &offset, key, value, 256) == 0) + printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n", + __func__, key, value); + } + + if (debug && (format_version == 2) && (action_count > 0)) { + printk(KERN_INFO "%s: Actions available:\n", __func__); + for (index = 0; index < action_count; ++index) { + altera_get_act_info((u8 *)fw->data, fw->size, + index, &action_name, + &description, + &proc_list); + + if (description == NULL) + printk(KERN_INFO "%s: %s\n", + __func__, + action_name); + else + printk(KERN_INFO "%s: %s \"%s\"\n", + __func__, + action_name, + description); + + procptr = proc_list; + while (procptr != NULL) { + if (procptr->attrs != 0) + printk(KERN_INFO "%s: %s (%s)\n", + __func__, + procptr->name, + (procptr->attrs == 1) ? 
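altera_init(), being defined here, is the module's exported entry point: a client driver hands it a struct altera_config plus STAPL bytecode obtained from request_firmware(). A hypothetical caller, assuming only the jtag_io and action members this file actually dereferences (my_jtag_io, dev and the firmware name are placeholders):

/* Hypothetical caller; my_jtag_io and "fpga.stapl" are placeholders */
const struct firmware *fw;
struct altera_config cfg = {
	.jtag_io = my_jtag_io,	/* board-specific TMS/TDI/TDO hook */
	.action  = "PROGRAM",	/* ACTION name inside the STAPL file */
};
int err = request_firmware(&fw, "fpga.stapl", dev);
if (!err) {
	err = altera_init(&cfg, fw);
	release_firmware(fw);
}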
+ "optional" : "recommended"); + + proc_list = procptr->next; + kfree(procptr); + procptr = proc_list; + } + } + + printk(KERN_INFO "\n"); + } + + exec_result = altera_execute(astate, (u8 *)fw->data, fw->size, + &error_address, &exit_code, &format_version); + + if (exit_code) + exec_result = -EREMOTEIO; + + if ((format_version == 2) && (exec_result == -EINVAL)) { + if (astate->config->action == NULL) + printk(KERN_ERR "%s: error: no action specified for " + "Jam STAPL file.\nprogram terminated.\n", + __func__); + else + printk(KERN_ERR "%s: error: action \"%s\"" + " is not supported " + "for this Jam STAPL file.\n" + "Program terminated.\n", __func__, + astate->config->action); + + } else if (exec_result) + printk(KERN_ERR "%s: error %d\n", __func__, exec_result); + + kfree(astate); +free_value: + kfree(value); +free_key: + kfree(key); +out: + return retval; +} +EXPORT_SYMBOL(altera_init); diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c new file mode 100644 index 00000000000..c6cc3dc8ae1 --- /dev/null +++ b/drivers/misc/apds9802als.c @@ -0,0 +1,322 @@ +/* + * apds9802als.c - apds9802 ALS Driver + * + * Copyright (C) 2009 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/sysfs.h> +#include <linux/pm_runtime.h> + +#define ALS_MIN_RANGE_VAL 1 +#define ALS_MAX_RANGE_VAL 2 +#define POWER_STA_ENABLE 1 +#define POWER_STA_DISABLE 0 + +#define DRIVER_NAME "apds9802als" + +struct als_data { + struct mutex mutex; +}; + +static ssize_t als_sensing_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + int val; + + val = i2c_smbus_read_byte_data(client, 0x81); + if (val < 0) + return val; + if (val & 1) + return sprintf(buf, "4095\n"); + else + return sprintf(buf, "65535\n"); +} + +static int als_wait_for_data_ready(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int ret; + int retry = 10; + + do { + msleep(30); + ret = i2c_smbus_read_byte_data(client, 0x86); + } while (!(ret & 0x80) && retry--); + + if (retry < 0) { + dev_warn(dev, "timeout waiting for data ready\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static ssize_t als_lux0_input_data_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct als_data *data = i2c_get_clientdata(client); + int ret_val; + int temp; + + /* Protect against parallel reads */ + pm_runtime_get_sync(dev); + mutex_lock(&data->mutex); + + /* clear EOC interrupt status */ + i2c_smbus_write_byte(client, 0x40); + /* start measurement */ + temp = i2c_smbus_read_byte_data(client, 0x81); + i2c_smbus_write_byte_data(client, 0x81, temp | 0x08); + + ret_val = als_wait_for_data_ready(dev); + if (ret_val < 0) + goto failed; + + temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */ + if (temp < 0) { + ret_val = temp; + goto failed; + } + ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */ + if (ret_val < 0) + goto failed; + + mutex_unlock(&data->mutex); + pm_runtime_put_sync(dev); + + temp = (ret_val << 8) | temp; + return sprintf(buf, "%d\n", temp); +failed: + mutex_unlock(&data->mutex); + pm_runtime_put_sync(dev); + return ret_val; +} + +static ssize_t als_sensing_range_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct als_data *data = i2c_get_clientdata(client); + int ret_val; + unsigned long val; + + ret_val = kstrtoul(buf, 10, &val); + if (ret_val) + return ret_val; + + if (val < 4096) + val = 1; + else if (val < 65536) + val = 2; + else + return -ERANGE; + + pm_runtime_get_sync(dev); + + /* Make sure nobody else reads/modifies/writes 0x81 while we + are active */ + mutex_lock(&data->mutex); + + ret_val = i2c_smbus_read_byte_data(client, 0x81); + if (ret_val < 0) + goto fail; + + /* Reset the bits before setting them */ + ret_val = ret_val & 0xFA; + + if (val == 1) /* Setting detection range up to 4k LUX */ + ret_val = (ret_val | 0x01); + else /* Setting detection range up to 64k LUX*/ + ret_val = (ret_val | 0x00); + + ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val); + + if (ret_val >= 0) { + /* All OK */ + mutex_unlock(&data->mutex); + pm_runtime_put_sync(dev); + return count; + } +fail: + mutex_unlock(&data->mutex); + pm_runtime_put_sync(dev); + return ret_val; +} + +static int als_set_power_state(struct i2c_client *client, bool on_off) +{ + int ret_val; + struct als_data *data = 
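The show/store handlers above are published through an attribute group named "apds9802als" (registered from probe() further down), so they surface in a subdirectory of the I2C client's sysfs node. A hypothetical userspace reader -- the 1-0029 bus/address segment is an invented example and varies per board:

#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/bus/i2c/devices/1-0029/apds9802als/lux0_input", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("lux: %s", buf);	/* each read triggers one measurement */
	fclose(f);
	return 0;
}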
i2c_get_clientdata(client); + + mutex_lock(&data->mutex); + ret_val = i2c_smbus_read_byte_data(client, 0x80); + if (ret_val < 0) + goto fail; + if (on_off) + ret_val = ret_val | 0x01; + else + ret_val = ret_val & 0xFE; + ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val); +fail: + mutex_unlock(&data->mutex); + return ret_val; +} + +static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR, + als_sensing_range_show, als_sensing_range_store); +static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL); + +static struct attribute *mid_att_als[] = { + &dev_attr_lux0_sensor_range.attr, + &dev_attr_lux0_input.attr, + NULL +}; + +static struct attribute_group m_als_gr = { + .name = "apds9802als", + .attrs = mid_att_als +}; + +static int als_set_default_config(struct i2c_client *client) +{ + int ret_val; + /* Write the command and then switch on */ + ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01); + if (ret_val < 0) { + dev_err(&client->dev, "failed default switch on write\n"); + return ret_val; + } + /* detection range: 1~64K Lux, manual measurement */ + ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08); + if (ret_val < 0) + dev_err(&client->dev, "failed default LUX on write\n"); + + /* We always get 0 for the 1st measurement after system power on, + * so make sure it is finished before user asks for data. + */ + als_wait_for_data_ready(&client->dev); + + return ret_val; +} + +static int apds9802als_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + struct als_data *data; + + data = kzalloc(sizeof(struct als_data), GFP_KERNEL); + if (data == NULL) { + dev_err(&client->dev, "Memory allocation failed\n"); + return -ENOMEM; + } + i2c_set_clientdata(client, data); + res = sysfs_create_group(&client->dev.kobj, &m_als_gr); + if (res) { + dev_err(&client->dev, "device create file failed\n"); + goto als_error1; + } + dev_info(&client->dev, "ALS chip found\n"); + als_set_default_config(client); + mutex_init(&data->mutex); + + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + + return res; +als_error1: + kfree(data); + return res; +} + +static int apds9802als_remove(struct i2c_client *client) +{ + struct als_data *data = i2c_get_clientdata(client); + + pm_runtime_get_sync(&client->dev); + + als_set_power_state(client, false); + sysfs_remove_group(&client->dev.kobj, &m_als_gr); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + pm_runtime_put_noidle(&client->dev); + + kfree(data); + return 0; +} + +#ifdef CONFIG_PM + +static int apds9802als_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + als_set_power_state(client, false); + return 0; +} + +static int apds9802als_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + als_set_power_state(client, true); + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(apds9802als_pm_ops, apds9802als_suspend, + apds9802als_resume, NULL); + +#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops) + +#else /* CONFIG_PM */ +#define APDS9802ALS_PM_OPS NULL +#endif /* CONFIG_PM */ + +static struct i2c_device_id apds9802als_id[] = { + { DRIVER_NAME, 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, apds9802als_id); + +static struct i2c_driver apds9802als_driver = { + .driver = { + .name = DRIVER_NAME, + .pm = APDS9802ALS_PM_OPS, + }, + .probe = apds9802als_probe, + .remove = apds9802als_remove, + .id_table = apds9802als_id, +}; + +module_i2c_driver(apds9802als_driver); + +MODULE_AUTHOR("Anantha Narayanan
<Anantha.Narayanan@intel.com"); +MODULE_DESCRIPTION("Avago apds9802als ALS Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c new file mode 100644 index 00000000000..868a30a1b41 --- /dev/null +++ b/drivers/misc/apds990x.c @@ -0,0 +1,1290 @@ +/* + * This file is part of the APDS990x sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/slab.h> +#include <linux/i2c/apds990x.h> + +/* Register map */ +#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */ +#define APDS990X_ATIME 0x01 /* ALS ADC time */ +#define APDS990X_PTIME 0x02 /* Proximity ADC time */ +#define APDS990X_WTIME 0x03 /* Wait time */ +#define APDS990X_AILTL 0x04 /* ALS interrupt low threshold low byte */ +#define APDS990X_AILTH 0x05 /* ALS interrupt low threshold hi byte */ +#define APDS990X_AIHTL 0x06 /* ALS interrupt hi threshold low byte */ +#define APDS990X_AIHTH 0x07 /* ALS interrupt hi threshold hi byte */ +#define APDS990X_PILTL 0x08 /* Proximity interrupt low threshold low byte */ +#define APDS990X_PILTH 0x09 /* Proximity interrupt low threshold hi byte */ +#define APDS990X_PIHTL 0x0a /* Proximity interrupt hi threshold low byte */ +#define APDS990X_PIHTH 0x0b /* Proximity interrupt hi threshold hi byte */ +#define APDS990X_PERS 0x0c /* Interrupt persistence filters */ +#define APDS990X_CONFIG 0x0d /* Configuration */ +#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */ +#define APDS990X_CONTROL 0x0f /* Gain control register */ +#define APDS990X_REV 0x11 /* Revision Number */ +#define APDS990X_ID 0x12 /* Device ID */ +#define APDS990X_STATUS 0x13 /* Device status */ +#define APDS990X_CDATAL 0x14 /* Clear ADC low data register */ +#define APDS990X_CDATAH 0x15 /* Clear ADC high data register */ +#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */ +#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */ +#define APDS990X_PDATAL 0x18 /* Proximity ADC low data register */ +#define APDS990X_PDATAH 0x19 /* Proximity ADC high data register */ + +/* Control */ +#define APDS990X_MAX_AGAIN 3 + +/* Enable register */ +#define APDS990X_EN_PIEN (0x1 << 5) +#define APDS990X_EN_AIEN (0x1 << 4) +#define APDS990X_EN_WEN (0x1 << 3) +#define APDS990X_EN_PEN (0x1 << 2) +#define APDS990X_EN_AEN (0x1 << 1) +#define APDS990X_EN_PON (0x1 << 0) +#define APDS990X_EN_DISABLE_ALL 0 + +/* Status register */ +#define APDS990X_ST_PINT (0x1 << 5) +#define APDS990X_ST_AINT (0x1 << 4) + +/* I2C 
access types */ +#define APDS990x_CMD_TYPE_MASK (0x03 << 5) +#define APDS990x_CMD_TYPE_RB (0x00 << 5) /* Repeated byte */ +#define APDS990x_CMD_TYPE_INC (0x01 << 5) /* Auto increment */ +#define APDS990x_CMD_TYPE_SPE (0x03 << 5) /* Special function */ + +#define APDS990x_ADDR_SHIFT 0 +#define APDS990x_CMD 0x80 + +/* Interrupt ack commands */ +#define APDS990X_INT_ACK_ALS 0x6 +#define APDS990X_INT_ACK_PS 0x5 +#define APDS990X_INT_ACK_BOTH 0x7 + +/* ptime */ +#define APDS990X_PTIME_DEFAULT 0xff /* Recommended conversion time 2.7ms */ + +/* wtime */ +#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */ + +#define APDS990X_TIME_TO_ADC 1024 /* One timetick as ADC count value */ + +/* Persistence */ +#define APDS990X_APERS_SHIFT 0 +#define APDS990X_PPERS_SHIFT 4 + +/* Supported IDs */ +#define APDS990X_ID_0 0x0 +#define APDS990X_ID_4 0x4 +#define APDS990X_ID_29 0x29 + +/* pgain and pdiode settings */ +#define APDS_PGAIN_1X 0x0 +#define APDS_PDIODE_IR 0x2 + +#define APDS990X_LUX_OUTPUT_SCALE 10 + +/* Reverse chip factors for threshold calculation */ +struct reverse_factors { + u32 afactor; + int cf1; + int irf1; + int cf2; + int irf2; +}; + +struct apds990x_chip { + struct apds990x_platform_data *pdata; + struct i2c_client *client; + struct mutex mutex; /* avoid parallel access */ + struct regulator_bulk_data regs[2]; + wait_queue_head_t wait; + + int prox_en; + bool prox_continuous_mode; + bool lux_wait_fresh_res; + + /* Chip parameters */ + struct apds990x_chip_factors cf; + struct reverse_factors rcf; + u16 atime; /* als integration time */ + u16 arate; /* als reporting rate */ + u16 a_max_result; /* Max possible ADC value with current atime */ + u8 again_meas; /* Gain used in last measurement */ + u8 again_next; /* Next calculated gain */ + u8 pgain; + u8 pdiode; + u8 pdrive; + u8 lux_persistence; + u8 prox_persistence; + + u32 lux_raw; + u32 lux; + u16 lux_clear; + u16 lux_ir; + u16 lux_calib; + u32 lux_thres_hi; + u32 lux_thres_lo; + + u32 prox_thres; + u16 prox_data; + u16 prox_calib; + + char chipname[10]; + u8 revision; +}; + +#define APDS_CALIB_SCALER 8192 +#define APDS_LUX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER) +#define APDS_PROX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER) + +#define APDS_PROX_DEF_THRES 600 +#define APDS_PROX_HYSTERESIS 50 +#define APDS_LUX_DEF_THRES_HI 101 +#define APDS_LUX_DEF_THRES_LO 100 +#define APDS_DEFAULT_PROX_PERS 1 + +#define APDS_TIMEOUT 2000 +#define APDS_STARTUP_DELAY 25000 /* us */ +#define APDS_RANGE 65535 +#define APDS_PROX_RANGE 1023 +#define APDS_LUX_GAIN_LO_LIMIT 100 +#define APDS_LUX_GAIN_LO_LIMIT_STRICT 25 + +#define TIMESTEP 87 /* 2.7ms is about 87 / 32 */ +#define TIME_STEP_SCALER 32 + +#define APDS_LUX_AVERAGING_TIME 50 /* tolerates 50/60Hz ripple */ +#define APDS_LUX_DEFAULT_RATE 200 + +static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */ +static const u8 ir_currents[] = {100, 50, 25, 12}; /* IR LED currents in mA */ + +/* The following two tables must match, i.e. a 10Hz rate means a persistence value of 1 */ +static const u16 arates_hz[] = {10, 5, 2, 1}; +static const u8 apersis[] = {1, 2, 4, 5}; + +/* Regulators */ +static const char reg_vcc[] = "Vdd"; +static const char reg_vled[] = "Vled"; + +static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB; + + ret = i2c_smbus_read_byte_data(client, reg); + *data = ret; + return (int)ret; +} + +static int apds990x_read_word(struct
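Every access in apds990x_read_byte() above (and the helpers that follow) folds the register address into a command byte: bit 7 (APDS990x_CMD) selects command mode, and bits 6:5 pick repeated-byte, auto-increment or special-function addressing. Spelled out for a word read of the clear-channel data register:

/* Worked example of the command-byte encoding used by the helpers */
u8 cmd = APDS990X_CDATAL;		/* register 0x14 */
cmd &= ~APDS990x_CMD_TYPE_MASK;		/* clear the type field */
cmd |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
/* cmd == 0xb4: low byte from 0x14, auto-increment to 0x15 */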
apds990x_chip *chip, u8 reg, u16 *data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC; + + ret = i2c_smbus_read_word_data(client, reg); + *data = ret; + return (int)ret; +} + +static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB; + + ret = i2c_smbus_write_byte_data(client, reg, data); + return (int)ret; +} + +static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data) +{ + struct i2c_client *client = chip->client; + s32 ret; + + reg &= ~APDS990x_CMD_TYPE_MASK; + reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC; + + ret = i2c_smbus_write_word_data(client, reg, data); + return (int)ret; +} + +static int apds990x_mode_on(struct apds990x_chip *chip) +{ + /* ALS is mandatory, proximity optional */ + u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN | + APDS990X_EN_WEN; + + if (chip->prox_en) + reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN; + + return apds990x_write_byte(chip, APDS990X_ENABLE, reg); +} + +static u16 apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux) +{ + u32 thres; + u32 cpl; + u32 ir; + + if (lux == 0) + return 0; + else if (lux == APDS_RANGE) + return APDS_RANGE; + + /* + * Reported LUX value is a combination of the IR and CLEAR channel + * values. However, interrupt threshold is only for clear channel. + * This function approximates the needed HW threshold value for a given + * LUX value in the current lighting type. + * IR level compared to visible light varies heavily depending on the + * source of the light. + * + * Calculate threshold value for the next measurement period. + * Math: threshold = lux * cpl where + * cpl = atime * again / (glass_attenuation * device_factor) + * (count-per-lux) + * + * First remove calibration. Division by four is to avoid overflow + */ + lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4); + + /* Multiplication by 64 is to increase accuracy */ + cpl = ((u32)chip->atime * (u32)again[chip->again_next] * + APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df); + + thres = lux * cpl / 64; + /* + * Convert IR light from the latest result to match with + * new gain step. This helps to adapt with the current + * source of light.
+ */ + ir = (u32)chip->lux_ir * (u32)again[chip->again_next] / + (u32)again[chip->again_meas]; + + /* + * Compensate count with IR light impact + * IAC1 > IAC2 (see apds990x_get_lux for formulas) + */ + if (chip->lux_clear * APDS_PARAM_SCALE >= + chip->rcf.afactor * chip->lux_ir) + thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) / + APDS_PARAM_SCALE; + else + thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) / + APDS_PARAM_SCALE; + + if (thres >= chip->a_max_result) + thres = chip->a_max_result - 1; + return thres; +} + +static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms) +{ + u8 reg_value; + + chip->atime = time_ms; + /* Formula is specified in the data sheet */ + reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP); + /* Calculate max ADC value for given integration time */ + chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC; + return apds990x_write_byte(chip, APDS990X_ATIME, reg_value); +} + +/* Called always with mutex locked */ +static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data) +{ + int ret, lo, hi; + + /* If the chip is not in use, don't try to access it */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + if (data < chip->prox_thres) { + lo = 0; + hi = chip->prox_thres; + } else { + lo = chip->prox_thres - APDS_PROX_HYSTERESIS; + if (chip->prox_continuous_mode) + hi = chip->prox_thres; + else + hi = APDS_RANGE; + } + + ret = apds990x_write_word(chip, APDS990X_PILTL, lo); + ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi); + return ret; +} + +/* Called always with mutex locked */ +static int apds990x_refresh_athres(struct apds990x_chip *chip) +{ + int ret; + /* If the chip is not in use, don't try to access it */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + ret = apds990x_write_word(chip, APDS990X_AILTL, + apds990x_lux_to_threshold(chip, chip->lux_thres_lo)); + ret |= apds990x_write_word(chip, APDS990X_AIHTL, + apds990x_lux_to_threshold(chip, chip->lux_thres_hi)); + + return ret; +} + +/* Called always with mutex locked */ +static void apds990x_force_a_refresh(struct apds990x_chip *chip) +{ + /* This will force ALS interrupt after the next measurement. */ + apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO); + apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI); +} + +/* Called always with mutex locked */ +static void apds990x_force_p_refresh(struct apds990x_chip *chip) +{ + /* This will force proximity interrupt after the next measurement. */ + apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1); + apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES); +} + +/* Called always with mutex locked */ +static int apds990x_calc_again(struct apds990x_chip *chip) +{ + int curr_again = chip->again_meas; + int next_again = chip->again_meas; + int ret = 0; + + /* Calculate suitable als gain */ + if (chip->lux_clear == chip->a_max_result) + next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */ + else if (chip->lux_clear > chip->a_max_result / 2) + next_again--; + else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT) + next_again += 2; /* Too dark. 
Increase gain by 2 steps */
+	else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
+		next_again++;
+
+	/* Limit gain to available range */
+	if (next_again < 0)
+		next_again = 0;
+	else if (next_again > APDS990X_MAX_AGAIN)
+		next_again = APDS990X_MAX_AGAIN;
+
+	/* Let's check whether we can trust the measured result */
+	if (chip->lux_clear == chip->a_max_result)
+		/* Result can be totally garbage due to saturation */
+		ret = -ERANGE;
+	else if (next_again != curr_again &&
+		chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+		/*
+		 * Gain is changed and the measurement result is very small.
+		 * The result can be totally garbage due to underflow.
+		 */
+		ret = -ERANGE;
+
+	chip->again_next = next_again;
+	apds990x_write_byte(chip, APDS990X_CONTROL,
+			(chip->pdrive << 6) |
+			(chip->pdiode << 4) |
+			(chip->pgain << 2) |
+			(chip->again_next << 0));
+
+	/*
+	 * An error means a bad result -> re-measurement is needed. The
+	 * forced refresh uses the fastest possible persistence setting to
+	 * get the result as soon as possible.
+	 */
+	if (ret < 0)
+		apds990x_force_a_refresh(chip);
+	else
+		apds990x_refresh_athres(chip);
+
+	return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
+{
+	int iac, iac1, iac2; /* IR adjusted counts */
+	u32 lpc; /* Lux per count */
+
+	/* Formulas:
+	 * iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
+	 * iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
+	 */
+	iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
+	iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
+
+	iac = max(iac1, iac2);
+	iac = max(iac, 0);
+
+	lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
+		(u32)(again[chip->again_meas] * (u32)chip->atime);
+
+	return (iac * lpc) / APDS_PARAM_SCALE;
+}
+
+static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
+{
+	struct i2c_client *client = chip->client;
+	s32 ret;
+	u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
+
+	switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
+	case APDS990X_ST_AINT:
+		reg |= APDS990X_INT_ACK_ALS;
+		break;
+	case APDS990X_ST_PINT:
+		reg |= APDS990X_INT_ACK_PS;
+		break;
+	default:
+		reg |= APDS990X_INT_ACK_BOTH;
+		break;
+	}
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	return (int)ret;
+}
+
+static irqreturn_t apds990x_irq(int irq, void *data)
+{
+	struct apds990x_chip *chip = data;
+	u8 status;
+
+	apds990x_read_byte(chip, APDS990X_STATUS, &status);
+	apds990x_ack_int(chip, status);
+
+	mutex_lock(&chip->mutex);
+	if (!pm_runtime_suspended(&chip->client->dev)) {
+		if (status & APDS990X_ST_AINT) {
+			apds990x_read_word(chip, APDS990X_CDATAL,
+					&chip->lux_clear);
+			apds990x_read_word(chip, APDS990X_IRDATAL,
+					&chip->lux_ir);
+			/* Store used gain for calculations */
+			chip->again_meas = chip->again_next;
+
+			chip->lux_raw = apds990x_get_lux(chip,
+							chip->lux_clear,
+							chip->lux_ir);
+
+			if (apds990x_calc_again(chip) == 0) {
+				/* Result is valid */
+				chip->lux = chip->lux_raw;
+				chip->lux_wait_fresh_res = false;
+				wake_up(&chip->wait);
+				sysfs_notify(&chip->client->dev.kobj,
+					NULL, "lux0_input");
+			}
+		}
+
+		if ((status & APDS990X_ST_PINT) && chip->prox_en) {
+			u16 clr_ch;
+
+			apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
+			/*
+			 * If the ALS channel is saturated at minimum gain,
+			 * proximity gives false positive values.
+			 * Just ignore them.
+			 */
+			if (chip->again_meas == 0 &&
+			    clr_ch == chip->a_max_result)
+				chip->prox_data = 0;
+			else
+				apds990x_read_word(chip,
+						APDS990X_PDATAL,
+						&chip->prox_data);
+
+			apds990x_refresh_pthres(chip, chip->prox_data);
+			if (chip->prox_data < chip->prox_thres)
+				chip->prox_data = 0;
+			else if (!chip->prox_continuous_mode)
+				chip->prox_data = APDS_PROX_RANGE;
+			sysfs_notify(&chip->client->dev.kobj,
+				NULL, "prox0_raw");
+		}
+	}
+	mutex_unlock(&chip->mutex);
+	return IRQ_HANDLED;
+}
+
+static int apds990x_configure(struct apds990x_chip *chip)
+{
+	/* It is recommended to use disabled mode during these operations */
+	apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+
+	/* Conversion and wait times for the different state machine states */
+	apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
+	apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
+	apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
+
+	apds990x_write_byte(chip, APDS990X_CONFIG, 0);
+
+	/* Persistence levels */
+	apds990x_write_byte(chip, APDS990X_PERS,
+			(chip->lux_persistence << APDS990X_APERS_SHIFT) |
+			(chip->prox_persistence << APDS990X_PPERS_SHIFT));
+
+	apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
+
+	/* Start with a relatively small gain */
+	chip->again_meas = 1;
+	chip->again_next = 1;
+	apds990x_write_byte(chip, APDS990X_CONTROL,
+			(chip->pdrive << 6) |
+			(chip->pdiode << 4) |
+			(chip->pgain << 2) |
+			(chip->again_next << 0));
+	return 0;
+}
+
+static int apds990x_detect(struct apds990x_chip *chip)
+{
+	struct i2c_client *client = chip->client;
+	int ret;
+	u8 id;
+
+	ret = apds990x_read_byte(chip, APDS990X_ID, &id);
+	if (ret < 0) {
+		dev_err(&client->dev, "ID read failed\n");
+		return ret;
+	}
+
+	ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
+	if (ret < 0) {
+		dev_err(&client->dev, "REV read failed\n");
+		return ret;
+	}
+
+	switch (id) {
+	case APDS990X_ID_0:
+	case APDS990X_ID_4:
+	case APDS990X_ID_29:
+		snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
+		break;
+	default:
+		ret = -ENODEV;
+		break;
+	}
+	return ret;
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
+static int apds990x_chip_on(struct apds990x_chip *chip)
+{
+	int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+					chip->regs);
+	if (err < 0)
+		return err;
+
+	usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+	/* Refresh all configs in case the regulators were off */
+	chip->prox_data = 0;
+	apds990x_configure(chip);
+	apds990x_mode_on(chip);
+	return 0;
+}
+#endif
+
+static int apds990x_chip_off(struct apds990x_chip *chip)
+{
+	apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+	regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+	return 0;
+}
+
+static ssize_t apds990x_lux_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 result;
+	long timeout;
+
+	if (pm_runtime_suspended(dev))
+		return -EIO;
+
+	timeout = wait_event_interruptible_timeout(chip->wait,
+					!chip->lux_wait_fresh_res,
+					msecs_to_jiffies(APDS_TIMEOUT));
+	if (!timeout)
+		return -EIO;
+
+	mutex_lock(&chip->mutex);
+	result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
+	if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
+		result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
+
+	ret = sprintf(buf, "%d.%d\n",
+		result / APDS990X_LUX_OUTPUT_SCALE,
+		result % APDS990X_LUX_OUTPUT_SCALE);
+	mutex_unlock(&chip->mutex);
+	return ret;
+}
+
+static
DEVICE_ATTR(lux0_input, S_IRUGO, apds990x_lux_show, NULL); + +static ssize_t apds990x_lux_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", APDS_RANGE); +} + +static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL); + +static ssize_t apds990x_lux_calib_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", APDS_CALIB_SCALER); +} + +static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO, + apds990x_lux_calib_format_show, NULL); + +static ssize_t apds990x_lux_calib_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", chip->lux_calib); +} + +static ssize_t apds990x_lux_calib_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + chip->lux_calib = value; + + return len; +} + +static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show, + apds990x_lux_calib_store); + +static ssize_t apds990x_rate_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + int pos = 0; + for (i = 0; i < ARRAY_SIZE(arates_hz); i++) + pos += sprintf(buf + pos, "%d ", arates_hz[i]); + sprintf(buf + pos - 1, "\n"); + return pos; +} + +static ssize_t apds990x_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", chip->arate); +} + +static int apds990x_set_arate(struct apds990x_chip *chip, int rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(arates_hz); i++) + if (rate >= arates_hz[i]) + break; + + if (i == ARRAY_SIZE(arates_hz)) + return -EINVAL; + + /* Pick up corresponding persistence value */ + chip->lux_persistence = apersis[i]; + chip->arate = arates_hz[i]; + + /* If the chip is not in use, don't try to access it */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + /* Persistence levels */ + return apds990x_write_byte(chip, APDS990X_PERS, + (chip->lux_persistence << APDS990X_APERS_SHIFT) | + (chip->prox_persistence << APDS990X_PPERS_SHIFT)); +} + +static ssize_t apds990x_rate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct apds990x_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + ret = apds990x_set_arate(chip, value); + mutex_unlock(&chip->mutex); + + if (ret < 0) + return ret; + return len; +} + +static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL); + +static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show, + apds990x_rate_store); + +static ssize_t apds990x_prox_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret; + struct apds990x_chip *chip = dev_get_drvdata(dev); + if (pm_runtime_suspended(dev) || !chip->prox_en) + return -EIO; + + mutex_lock(&chip->mutex); + ret = sprintf(buf, "%d\n", chip->prox_data); + mutex_unlock(&chip->mutex); + return ret; +} + +static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL); + +static ssize_t apds990x_prox_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", APDS_PROX_RANGE); +} + +static 
DEVICE_ATTR(prox0_sensor_range, S_IRUGO, apds990x_prox_range_show, NULL);
+
+static ssize_t apds990x_prox_enable_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", chip->prox_en);
+}
+
+static ssize_t apds990x_prox_enable_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	mutex_lock(&chip->mutex);
+
+	if (!chip->prox_en)
+		chip->prox_data = 0;
+
+	if (value)
+		chip->prox_en++;
+	else if (chip->prox_en > 0)
+		chip->prox_en--;
+
+	if (!pm_runtime_suspended(dev))
+		apds990x_mode_on(chip);
+	mutex_unlock(&chip->mutex);
+	return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
+						   apds990x_prox_enable_store);
+
+static const char reporting_modes[][9] = {"trigger", "periodic"};
+
+static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n",
+		reporting_modes[!!chip->prox_continuous_mode]);
+}
+
+static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+	if (sysfs_streq(buf, reporting_modes[0]))
+		chip->prox_continuous_mode = 0;
+	else if (sysfs_streq(buf, reporting_modes[1]))
+		chip->prox_continuous_mode = 1;
+	else
+		return -EINVAL;
+	return len;
+}
+
+static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR,
+		apds990x_prox_reporting_mode_show,
+		apds990x_prox_reporting_mode_store);
+
+static ssize_t apds990x_prox_reporting_avail_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]);
+}
+
+/* No store callback, so expose the attribute as read-only */
+static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO,
+		apds990x_prox_reporting_avail_show, NULL);
+
+
+static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", chip->lux_thres_hi);
+}
+
+static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", chip->lux_thres_lo);
+}
+
+static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target,
+				const char *buf)
+{
+	unsigned long thresh;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &thresh);
+	if (ret)
+		return ret;
+
+	if (thresh > APDS_RANGE)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	*target = thresh;
+	/*
+	 * Don't update values in HW if we are still waiting for the
+	 * first interrupt to come after a device handle open call.
+	 */
+	if (!chip->lux_wait_fresh_res)
+		apds990x_refresh_athres(chip);
+	mutex_unlock(&chip->mutex);
+	return ret;
+}
+
+static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+	if (ret < 0)
+		return ret;
+	return len;
+}
+
+static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+	if (ret < 0)
+		return ret;
+	return len;
+}
+
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+		apds990x_lux_thresh_above_show,
+		apds990x_lux_thresh_above_store);
+
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+		apds990x_lux_thresh_below_show,
+		apds990x_lux_thresh_below_store);
+
+static ssize_t apds990x_prox_threshold_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", chip->prox_thres);
+}
+
+static ssize_t apds990x_prox_threshold_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	if ((value > APDS_RANGE) || (value == 0) ||
+	    (value < APDS_PROX_HYSTERESIS))
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	chip->prox_thres = value;
+
+	apds990x_force_p_refresh(chip);
+	mutex_unlock(&chip->mutex);
+	return len;
+}
+
+static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
+		apds990x_prox_threshold_show,
+		apds990x_prox_threshold_store);
+
+static ssize_t apds990x_power_state_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t apds990x_power_state_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	if (value) {
+		pm_runtime_get_sync(dev);
+		mutex_lock(&chip->mutex);
+		chip->lux_wait_fresh_res = true;
+		apds990x_force_a_refresh(chip);
+		apds990x_force_p_refresh(chip);
+		mutex_unlock(&chip->mutex);
+	} else {
+		if (!pm_runtime_suspended(dev))
+			pm_runtime_put(dev);
+	}
+	return len;
+}
+
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+		apds990x_power_state_show,
+		apds990x_power_state_store);
+
+static ssize_t apds990x_chip_id_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct apds990x_chip *chip = dev_get_drvdata(dev);
+	return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+	&dev_attr_lux0_calibscale.attr,
+	&dev_attr_lux0_calibscale_default.attr,
+	&dev_attr_lux0_input.attr,
+	&dev_attr_lux0_sensor_range.attr,
+	&dev_attr_lux0_rate.attr,
+	&dev_attr_lux0_rate_avail.attr,
+	&dev_attr_lux0_thresh_above_value.attr,
+	&dev_attr_lux0_thresh_below_value.attr,
+	&dev_attr_prox0_raw_en.attr,
+	&dev_attr_prox0_raw.attr,
+	&dev_attr_prox0_sensor_range.attr,
+	&dev_attr_prox0_thresh_above_value.attr,
+
&dev_attr_prox0_reporting_mode.attr, + &dev_attr_prox0_reporting_mode_avail.attr, + &dev_attr_chip_id.attr, + &dev_attr_power_state.attr, + NULL +}; + +static struct attribute_group apds990x_attribute_group[] = { + {.attrs = sysfs_attrs_ctrl }, +}; + +static int apds990x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct apds990x_chip *chip; + int err; + + chip = kzalloc(sizeof *chip, GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + chip->client = client; + + init_waitqueue_head(&chip->wait); + mutex_init(&chip->mutex); + chip->pdata = client->dev.platform_data; + + if (chip->pdata == NULL) { + dev_err(&client->dev, "platform data is mandatory\n"); + err = -EINVAL; + goto fail1; + } + + if (chip->pdata->cf.ga == 0) { + /* set uncovered sensor default parameters */ + chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */ + chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */ + chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */ + chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */ + chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */ + chip->cf.df = 52; + } else { + chip->cf = chip->pdata->cf; + } + + /* precalculate inverse chip factors for threshold control */ + chip->rcf.afactor = + (chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE / + (chip->cf.cf1 - chip->cf.cf2); + chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE / + chip->cf.cf1; + chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE / + chip->cf.cf1; + chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE / + chip->cf.cf2; + chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE / + chip->cf.cf2; + + /* Set something to start with */ + chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI; + chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO; + chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE; + + chip->prox_thres = APDS_PROX_DEF_THRES; + chip->pdrive = chip->pdata->pdrive; + chip->pdiode = APDS_PDIODE_IR; + chip->pgain = APDS_PGAIN_1X; + chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE; + chip->prox_persistence = APDS_DEFAULT_PROX_PERS; + chip->prox_continuous_mode = false; + + chip->regs[0].supply = reg_vcc; + chip->regs[1].supply = reg_vled; + + err = regulator_bulk_get(&client->dev, + ARRAY_SIZE(chip->regs), chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot get regulators\n"); + goto fail1; + } + + err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot enable regulators\n"); + goto fail2; + } + + usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY); + + err = apds990x_detect(chip); + if (err < 0) { + dev_err(&client->dev, "APDS990X not found\n"); + goto fail3; + } + + pm_runtime_set_active(&client->dev); + + apds990x_configure(chip); + apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE); + apds990x_mode_on(chip); + + pm_runtime_enable(&client->dev); + + if (chip->pdata->setup_resources) { + err = chip->pdata->setup_resources(); + if (err) { + err = -EINVAL; + goto fail3; + } + } + + err = sysfs_create_group(&chip->client->dev.kobj, + apds990x_attribute_group); + if (err < 0) { + dev_err(&chip->client->dev, "Sysfs registration failed\n"); + goto fail4; + } + + err = request_threaded_irq(client->irq, NULL, + apds990x_irq, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW | + IRQF_ONESHOT, + "apds990x", chip); + if (err) { + dev_err(&client->dev, "could not get IRQ %d\n", + client->irq); + goto fail5; + } + return err; +fail5: + sysfs_remove_group(&chip->client->dev.kobj, + &apds990x_attribute_group[0]); +fail4: + if (chip->pdata && 
chip->pdata->release_resources)
+		chip->pdata->release_resources();
+fail3:
+	regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+	regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+	kfree(chip);
+	return err;
+}
+
+static int apds990x_remove(struct i2c_client *client)
+{
+	struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+	free_irq(client->irq, chip);
+	sysfs_remove_group(&chip->client->dev.kobj,
+			apds990x_attribute_group);
+
+	if (chip->pdata && chip->pdata->release_resources)
+		chip->pdata->release_resources();
+
+	if (!pm_runtime_suspended(&client->dev))
+		apds990x_chip_off(chip);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+
+	regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+
+	kfree(chip);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int apds990x_suspend(struct device *dev)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+	apds990x_chip_off(chip);
+	return 0;
+}
+
+static int apds990x_resume(struct device *dev)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+	/*
+	 * If we were enabled at suspend time, it is expected that
+	 * everything works nicely and smoothly. Chip_on is enough.
+	 */
+	apds990x_chip_on(chip);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int apds990x_runtime_suspend(struct device *dev)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+	apds990x_chip_off(chip);
+	return 0;
+}
+
+static int apds990x_runtime_resume(struct device *dev)
+{
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+	struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+	apds990x_chip_on(chip);
+	return 0;
+}
+
+#endif
+
+static const struct i2c_device_id apds990x_id[] = {
+	{"apds990x", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, apds990x_id);
+
+static const struct dev_pm_ops apds990x_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume)
+	SET_RUNTIME_PM_OPS(apds990x_runtime_suspend,
+			apds990x_runtime_resume,
+			NULL)
+};
+
+static struct i2c_driver apds990x_driver = {
+	.driver	 = {
+		.name	= "apds990x",
+		.owner	= THIS_MODULE,
+		.pm	= &apds990x_pm_ops,
+	},
+	.probe	  = apds990x_probe,
+	.remove	  = apds990x_remove,
+	.id_table = apds990x_id,
+};
+
+module_i2c_driver(apds990x_driver);
+
+MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
new file mode 100644
index 00000000000..c72e96b523e
--- /dev/null
+++ b/drivers/misc/arm-charlcd.c
@@ -0,0 +1,389 @@
+/*
+ * Driver for the on-board character LCD found on some ARM reference boards.
+ * This is basically a Hitachi HD44780 LCD with a custom IP block to drive it.
+ * http://en.wikipedia.org/wiki/HD44780_Character_LCD
+ * Currently it will just display the text "ARM Linux" and the Linux version.
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Linus Walleij <triad@df.lth.se>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include
<generated/utsrelease.h>
+
+#define DRIVERNAME "arm-charlcd"
+#define CHARLCD_TIMEOUT (msecs_to_jiffies(1000))
+
+/* Offsets to registers */
+#define CHAR_COM	0x00U
+#define CHAR_DAT	0x04U
+#define CHAR_RD		0x08U
+#define CHAR_RAW	0x0CU
+#define CHAR_MASK	0x10U
+#define CHAR_STAT	0x14U
+
+#define CHAR_RAW_CLEAR	0x00000000U
+#define CHAR_RAW_VALID	0x00000100U
+
+/* Hitachi HD44780 display commands */
+#define HD_CLEAR			0x01U
+#define HD_HOME				0x02U
+#define HD_ENTRYMODE			0x04U
+#define HD_ENTRYMODE_INCREMENT		0x02U
+#define HD_ENTRYMODE_SHIFT		0x01U
+#define HD_DISPCTRL			0x08U
+#define HD_DISPCTRL_ON			0x04U
+#define HD_DISPCTRL_CURSOR_ON		0x02U
+#define HD_DISPCTRL_CURSOR_BLINK	0x01U
+#define HD_CRSR_SHIFT			0x10U
+#define HD_CRSR_SHIFT_DISPLAY		0x08U
+#define HD_CRSR_SHIFT_DISPLAY_RIGHT	0x04U
+#define HD_FUNCSET			0x20U
+#define HD_FUNCSET_8BIT			0x10U
+#define HD_FUNCSET_2_LINES		0x08U
+#define HD_FUNCSET_FONT_5X10		0x04U
+#define HD_SET_CGRAM			0x40U
+#define HD_SET_DDRAM			0x80U
+#define HD_BUSY_FLAG			0x80U
+
+/**
+ * struct charlcd - private data for the character LCD
+ * @dev: a pointer back to containing device
+ * @phybase: the offset to the controller in physical memory
+ * @physize: the size of the physical page
+ * @virtbase: the offset to the controller in virtual memory
+ * @irq: reserved interrupt number
+ * @complete: completion structure for the last LCD command
+ * @init_work: delayed work used to initialize the display
+ */
+struct charlcd {
+	struct device *dev;
+	u32 phybase;
+	u32 physize;
+	void __iomem *virtbase;
+	int irq;
+	struct completion complete;
+	struct delayed_work init_work;
+};
+
+static irqreturn_t charlcd_interrupt(int irq, void *data)
+{
+	struct charlcd *lcd = data;
+	u8 status;
+
+	status = readl(lcd->virtbase + CHAR_STAT) & 0x01;
+	/* Clear IRQ */
+	writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
+	if (status)
+		complete(&lcd->complete);
+	else
+		dev_info(lcd->dev, "Spurious IRQ (%02x)\n", status);
+	return IRQ_HANDLED;
+}
+
+
+static void charlcd_wait_complete_irq(struct charlcd *lcd)
+{
+	int ret;
+
+	ret = wait_for_completion_interruptible_timeout(&lcd->complete,
+			CHARLCD_TIMEOUT);
+	/* Disable IRQ after completion */
+	writel(0x00, lcd->virtbase + CHAR_MASK);
+
+	if (ret < 0) {
+		dev_err(lcd->dev,
+			"wait_for_completion_interruptible_timeout() "
+			"returned %d waiting for ready\n", ret);
+		return;
+	}
+
+	if (ret == 0) {
+		dev_err(lcd->dev, "charlcd controller timed out "
+			"waiting for ready\n");
+		return;
+	}
+}
+
+static u8 charlcd_4bit_read_char(struct charlcd *lcd)
+{
+	u8 data;
+	u32 val;
+	int i;
+
+	/* If we can, use an IRQ to wait for the data, else poll */
+	if (lcd->irq >= 0)
+		charlcd_wait_complete_irq(lcd);
+	else {
+		i = 0;
+		val = 0;
+		while (!(val & CHAR_RAW_VALID) && i < 10) {
+			udelay(100);
+			val = readl(lcd->virtbase + CHAR_RAW);
+			i++;
+		}
+
+		writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
+	}
+	msleep(1);
+
+	/* Read the 4 high bits of the data */
+	data = readl(lcd->virtbase + CHAR_RD) & 0xf0;
+
+	/*
+	 * The second read for the low bits does not trigger an IRQ,
+	 * so in this case we have to poll for the 4 lower bits
+	 */
+	i = 0;
+	val = 0;
+	while (!(val & CHAR_RAW_VALID) && i < 10) {
+		udelay(100);
+		val = readl(lcd->virtbase + CHAR_RAW);
+		i++;
+	}
+	writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW);
+	msleep(1);
+
+	/* Read the 4 low bits of the data */
+	data |= (readl(lcd->virtbase + CHAR_RD) >> 4) & 0x0f;
+
+	return data;
+}
+
+static bool charlcd_4bit_read_bf(struct charlcd *lcd)
+{
+	if (lcd->irq >= 0) {
+		/*
+		 * If we'll use IRQs to wait for the busyflag, clear any
+		 * pending flag and enable IRQ
+		 */
+		writel(CHAR_RAW_CLEAR, lcd->virtbase +
CHAR_RAW);
+		init_completion(&lcd->complete);
+		writel(0x01, lcd->virtbase + CHAR_MASK);
+	}
+	readl(lcd->virtbase + CHAR_COM);
+	return charlcd_4bit_read_char(lcd) & HD_BUSY_FLAG ? true : false;
+}
+
+static void charlcd_4bit_wait_busy(struct charlcd *lcd)
+{
+	int retries = 50;
+
+	udelay(100);
+	while (charlcd_4bit_read_bf(lcd) && retries)
+		retries--;
+	if (!retries)
+		dev_err(lcd->dev, "timeout waiting for busyflag\n");
+}
+
+static void charlcd_4bit_command(struct charlcd *lcd, u8 cmd)
+{
+	u32 cmdlo = (cmd << 4) & 0xf0;
+	u32 cmdhi = (cmd & 0xf0);
+
+	writel(cmdhi, lcd->virtbase + CHAR_COM);
+	udelay(10);
+	writel(cmdlo, lcd->virtbase + CHAR_COM);
+	charlcd_4bit_wait_busy(lcd);
+}
+
+static void charlcd_4bit_char(struct charlcd *lcd, u8 ch)
+{
+	u32 chlo = (ch << 4) & 0xf0;
+	u32 chhi = (ch & 0xf0);
+
+	writel(chhi, lcd->virtbase + CHAR_DAT);
+	udelay(10);
+	writel(chlo, lcd->virtbase + CHAR_DAT);
+	charlcd_4bit_wait_busy(lcd);
+}
+
+static void charlcd_4bit_print(struct charlcd *lcd, int line, const char *str)
+{
+	u8 offset;
+	int i;
+
+	/*
+	 * We support lines 0 and 1:
+	 * line 0 runs from DDRAM address 0x00..0x27,
+	 * line 1 runs from DDRAM address 0x28..0x4f.
+	 */
+	if (line == 0)
+		offset = 0;
+	else if (line == 1)
+		offset = 0x28;
+	else
+		return;
+
+	/* Set offset */
+	charlcd_4bit_command(lcd, HD_SET_DDRAM | offset);
+
+	/* Send string */
+	for (i = 0; i < strlen(str) && i < 0x28; i++)
+		charlcd_4bit_char(lcd, str[i]);
+}
+
+static void charlcd_4bit_init(struct charlcd *lcd)
+{
+	/* These commands cannot be checked with the busy flag */
+	writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
+	msleep(5);
+	writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
+	udelay(100);
+	writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM);
+	udelay(100);
+	/* Go to 4bit mode */
+	writel(HD_FUNCSET, lcd->virtbase + CHAR_COM);
+	udelay(100);
+	/*
+	 * 4bit mode, 2 lines, 5x8 font; after this the number of lines
+	 * and the font cannot be changed until the next initialization
+	 * sequence.
+	 */
+	charlcd_4bit_command(lcd, HD_FUNCSET | HD_FUNCSET_2_LINES);
+	charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
+	charlcd_4bit_command(lcd, HD_ENTRYMODE | HD_ENTRYMODE_INCREMENT);
+	charlcd_4bit_command(lcd, HD_CLEAR);
+	charlcd_4bit_command(lcd, HD_HOME);
+	/* Put something useful in the display */
+	charlcd_4bit_print(lcd, 0, "ARM Linux");
+	charlcd_4bit_print(lcd, 1, UTS_RELEASE);
+}
+
+static void charlcd_init_work(struct work_struct *work)
+{
+	struct charlcd *lcd =
+		container_of(work, struct charlcd, init_work.work);
+
+	charlcd_4bit_init(lcd);
+}
+
+static int __init charlcd_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct charlcd *lcd;
+	struct resource *res;
+
+	lcd = kzalloc(sizeof(struct charlcd), GFP_KERNEL);
+	if (!lcd)
+		return -ENOMEM;
+
+	lcd->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENOENT;
+		goto out_no_resource;
+	}
+	lcd->phybase = res->start;
+	lcd->physize = resource_size(res);
+
+	if (request_mem_region(lcd->phybase, lcd->physize,
+			DRIVERNAME) == NULL) {
+		ret = -EBUSY;
+		goto out_no_memregion;
+	}
+
+	lcd->virtbase = ioremap(lcd->phybase, lcd->physize);
+	if (!lcd->virtbase) {
+		ret = -ENOMEM;
+		goto out_no_memregion;
+	}
+
+	lcd->irq = platform_get_irq(pdev, 0);
+	/* If no IRQ is supplied, we'll survive without it */
+	if (lcd->irq >= 0) {
+		if (request_irq(lcd->irq, charlcd_interrupt, 0,
+				DRIVERNAME, lcd)) {
+			ret = -EIO;
+			goto out_no_irq;
+		}
+	}
+
+	platform_set_drvdata(pdev, lcd);
+
+	/*
+	 *
Initialize the display in a delayed work, because
+	 * it is VERY slow and would slow down the boot of the system.
+	 */
+	INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work);
+	schedule_delayed_work(&lcd->init_work, 0);
+
+	dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n",
+		lcd->phybase);
+
+	return 0;
+
+out_no_irq:
+	iounmap(lcd->virtbase);
+out_no_memregion:
+	/* Release exactly what was requested above */
+	release_mem_region(lcd->phybase, lcd->physize);
+out_no_resource:
+	kfree(lcd);
+	return ret;
+}
+
+static int __exit charlcd_remove(struct platform_device *pdev)
+{
+	struct charlcd *lcd = platform_get_drvdata(pdev);
+
+	if (lcd) {
+		free_irq(lcd->irq, lcd);
+		iounmap(lcd->virtbase);
+		release_mem_region(lcd->phybase, lcd->physize);
+		kfree(lcd);
+	}
+
+	return 0;
+}
+
+static int charlcd_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct charlcd *lcd = platform_get_drvdata(pdev);
+
+	/* Power the display off */
+	charlcd_4bit_command(lcd, HD_DISPCTRL);
+	return 0;
+}
+
+static int charlcd_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct charlcd *lcd = platform_get_drvdata(pdev);
+
+	/* Turn the display back on */
+	charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON);
+	return 0;
+}
+
+static const struct dev_pm_ops charlcd_pm_ops = {
+	.suspend = charlcd_suspend,
+	.resume = charlcd_resume,
+};
+
+static const struct of_device_id charlcd_match[] = {
+	{ .compatible = "arm,versatile-lcd", },
+	{}
+};
+
+static struct platform_driver charlcd_driver = {
+	.driver = {
+		.name = DRIVERNAME,
+		.owner = THIS_MODULE,
+		.pm = &charlcd_pm_ops,
+		.of_match_table = of_match_ptr(charlcd_match),
+	},
+	.remove = __exit_p(charlcd_remove),
+};
+
+module_platform_driver_probe(charlcd_driver, charlcd_probe);
+
+MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
+MODULE_DESCRIPTION("ARM Character LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 558bf3f2c27..22de1372764 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -15,6 +15,10 @@
 #include <linux/io.h>
 #include <linux/spinlock.h>
 #include <linux/atmel-ssc.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/of.h>
 
 /* Serialize access to ssc_list and user count */
 static DEFINE_SPINLOCK(user_lock);
@@ -27,7 +31,13 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
 
 	spin_lock(&user_lock);
 	list_for_each_entry(ssc, &ssc_list, list) {
-		if (ssc->pdev->id == ssc_num) {
+		if (ssc->pdev->dev.of_node) {
+			if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
+				== ssc_num) {
+				ssc_valid = 1;
+				break;
+			}
+		} else if (ssc->pdev->id == ssc_num) {
 			ssc_valid = 1;
 			break;
 		}
@@ -47,7 +57,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
 	ssc->user++;
 	spin_unlock(&user_lock);
 
-	clk_enable(ssc->clk);
+	clk_prepare_enable(ssc->clk);
 
 	return ssc;
 }
@@ -55,63 +65,120 @@ EXPORT_SYMBOL(ssc_request);
 
 void ssc_free(struct ssc_device *ssc)
 {
+	bool disable_clk = true;
+
 	spin_lock(&user_lock);
-	if (ssc->user) {
+	if (ssc->user)
 		ssc->user--;
-		clk_disable(ssc->clk);
-	} else {
+	else {
+		disable_clk = false;
 		dev_dbg(&ssc->pdev->dev, "device already free\n");
 	}
 	spin_unlock(&user_lock);
+
+	if (disable_clk)
+		clk_disable_unprepare(ssc->clk);
 }
 EXPORT_SYMBOL(ssc_free);
 
-static int __init ssc_probe(struct platform_device *pdev)
+static struct atmel_ssc_platform_data at91rm9200_config = {
+	.use_dma = 0,
+};
+
+static struct atmel_ssc_platform_data at91sam9g45_config = {
+	.use_dma = 1,
+};
+
+static const
struct platform_device_id atmel_ssc_devtypes[] = { + { + .name = "at91rm9200_ssc", + .driver_data = (unsigned long) &at91rm9200_config, + }, { + .name = "at91sam9g45_ssc", + .driver_data = (unsigned long) &at91sam9g45_config, + }, { + /* sentinel */ + } +}; + +#ifdef CONFIG_OF +static const struct of_device_id atmel_ssc_dt_ids[] = { + { + .compatible = "atmel,at91rm9200-ssc", + .data = &at91rm9200_config, + }, { + .compatible = "atmel,at91sam9g45-ssc", + .data = &at91sam9g45_config, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); +#endif + +static inline const struct atmel_ssc_platform_data * __init + atmel_ssc_get_driver_data(struct platform_device *pdev) +{ + if (pdev->dev.of_node) { + const struct of_device_id *match; + match = of_match_node(atmel_ssc_dt_ids, pdev->dev.of_node); + if (match == NULL) + return NULL; + return match->data; + } + + return (struct atmel_ssc_platform_data *) + platform_get_device_id(pdev)->driver_data; +} + +static int ssc_probe(struct platform_device *pdev) { - int retval = 0; struct resource *regs; struct ssc_device *ssc; + const struct atmel_ssc_platform_data *plat_dat; - ssc = kzalloc(sizeof(struct ssc_device), GFP_KERNEL); + ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL); if (!ssc) { dev_dbg(&pdev->dev, "out of memory\n"); - retval = -ENOMEM; - goto out; + return -ENOMEM; } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_dbg(&pdev->dev, "no mmio resource defined\n"); - retval = -ENXIO; - goto out_free; + ssc->pdev = pdev; + + plat_dat = atmel_ssc_get_driver_data(pdev); + if (!plat_dat) + return -ENODEV; + ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; + + if (pdev->dev.of_node) { + struct device_node *np = pdev->dev.of_node; + ssc->clk_from_rk_pin = + of_property_read_bool(np, "atmel,clk-from-rk-pin"); } - ssc->clk = clk_get(&pdev->dev, "pclk"); + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ssc->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(ssc->regs)) + return PTR_ERR(ssc->regs); + + ssc->phybase = regs->start; + + ssc->clk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(ssc->clk)) { dev_dbg(&pdev->dev, "no pclk clock defined\n"); - retval = -ENXIO; - goto out_free; - } - - ssc->pdev = pdev; - ssc->regs = ioremap(regs->start, regs->end - regs->start + 1); - if (!ssc->regs) { - dev_dbg(&pdev->dev, "ioremap failed\n"); - retval = -EINVAL; - goto out_clk; + return -ENXIO; } /* disable all interrupts */ - clk_enable(ssc->clk); - ssc_writel(ssc->regs, IDR, ~0UL); + clk_prepare_enable(ssc->clk); + ssc_writel(ssc->regs, IDR, -1); ssc_readl(ssc->regs, SR); - clk_disable(ssc->clk); + clk_disable_unprepare(ssc->clk); ssc->irq = platform_get_irq(pdev, 0); if (!ssc->irq) { dev_dbg(&pdev->dev, "could not get irq\n"); - retval = -ENXIO; - goto out_unmap; + return -ENXIO; } spin_lock(&user_lock); @@ -123,51 +190,31 @@ static int __init ssc_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n", ssc->regs, ssc->irq); - goto out; - -out_unmap: - iounmap(ssc->regs); -out_clk: - clk_put(ssc->clk); -out_free: - kfree(ssc); -out: - return retval; + return 0; } -static int __devexit ssc_remove(struct platform_device *pdev) +static int ssc_remove(struct platform_device *pdev) { struct ssc_device *ssc = platform_get_drvdata(pdev); spin_lock(&user_lock); - iounmap(ssc->regs); - clk_put(ssc->clk); list_del(&ssc->list); - kfree(ssc); spin_unlock(&user_lock); return 0; } static struct platform_driver ssc_driver = { - .remove = 
__devexit_p(ssc_remove), .driver = { .name = "ssc", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(atmel_ssc_dt_ids), }, + .id_table = atmel_ssc_devtypes, + .probe = ssc_probe, + .remove = ssc_remove, }; - -static int __init ssc_init(void) -{ - return platform_driver_probe(&ssc_driver, ssc_probe); -} -module_init(ssc_init); - -static void __exit ssc_exit(void) -{ - platform_driver_unregister(&ssc_driver); -} -module_exit(ssc_exit); +module_platform_driver(ssc_driver); MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91"); diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c index 6aa5294dfec..a6dc56e1bc5 100644 --- a/drivers/misc/atmel_pwm.c +++ b/drivers/misc/atmel_pwm.c @@ -1,6 +1,7 @@ #include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> +#include <linux/slab.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/platform_device.h> @@ -89,8 +90,10 @@ int pwm_channel_alloc(int index, struct pwm_channel *ch) unsigned long flags; int status = 0; - /* insist on PWM init, with this signal pinned out */ - if (!pwm || !(pwm->mask & 1 << index)) + if (!pwm) + return -EPROBE_DEFER; + + if (!(pwm->mask & 1 << index)) return -ENODEV; if (index < 0 || index >= PWM_NCHAN || !ch) @@ -328,7 +331,7 @@ static int __init pwm_probe(struct platform_device *pdev) p->pdev = pdev; p->mask = *mp; p->irq = irq; - p->base = ioremap(r->start, r->end - r->start + 1); + p->base = ioremap(r->start, resource_size(r)); if (!p->base) goto fail; p->clk = clk_get(&pdev->dev, "pwm_clk"); @@ -392,17 +395,7 @@ static struct platform_driver atmel_pwm_driver = { */ }; -static int __init pwm_init(void) -{ - return platform_driver_probe(&atmel_pwm_driver, pwm_probe); -} -module_init(pwm_init); - -static void __exit pwm_exit(void) -{ - platform_driver_unregister(&atmel_pwm_driver); -} -module_exit(pwm_exit); +module_platform_driver_probe(atmel_pwm_driver, pwm_probe); MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module"); MODULE_LICENSE("GPL"); diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c index 05dc8a31f28..c8d8e38d0d8 100644 --- a/drivers/misc/atmel_tclib.c +++ b/drivers/misc/atmel_tclib.c @@ -6,10 +6,10 @@ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/platform_device.h> - -/* Number of bytes to reserve for the iomem resource */ -#define ATMEL_TC_IOMEM_SIZE 256 - +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/of.h> /* * This is a thin library to solve the problem of how to portably allocate @@ -46,10 +46,17 @@ struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name) struct atmel_tc *tc; struct platform_device *pdev = NULL; struct resource *r; + size_t size; spin_lock(&tc_list_lock); list_for_each_entry(tc, &tc_list, node) { - if (tc->pdev->id == block) { + if (tc->pdev->dev.of_node) { + if (of_alias_get_id(tc->pdev->dev.of_node, "tcb") + == block) { + pdev = tc->pdev; + break; + } + } else if (tc->pdev->id == block) { pdev = tc->pdev; break; } @@ -59,11 +66,15 @@ struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name) goto fail; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - r = request_mem_region(r->start, ATMEL_TC_IOMEM_SIZE, name); if (!r) goto fail; - tc->regs = ioremap(r->start, ATMEL_TC_IOMEM_SIZE); + size = resource_size(r); + r = request_mem_region(r->start, size, name); + if (!r) + goto fail; + + tc->regs = ioremap(r->start, size); if (!tc->regs) goto fail_ioremap; @@ -74,7 +85,7 @@ 
out: return tc; fail_ioremap: - release_resource(r); + release_mem_region(r->start, size); fail: tc = NULL; goto out; @@ -94,7 +105,7 @@ void atmel_tc_free(struct atmel_tc *tc) spin_lock(&tc_list_lock); if (tc->regs) { iounmap(tc->regs); - release_resource(tc->iomem); + release_mem_region(tc->iomem->start, resource_size(tc->iomem)); tc->regs = NULL; tc->iomem = NULL; } @@ -102,6 +113,30 @@ void atmel_tc_free(struct atmel_tc *tc) } EXPORT_SYMBOL_GPL(atmel_tc_free); +#if defined(CONFIG_OF) +static struct atmel_tcb_config tcb_rm9200_config = { + .counter_width = 16, +}; + +static struct atmel_tcb_config tcb_sam9x5_config = { + .counter_width = 32, +}; + +static const struct of_device_id atmel_tcb_dt_ids[] = { + { + .compatible = "atmel,at91rm9200-tcb", + .data = &tcb_rm9200_config, + }, { + .compatible = "atmel,at91sam9x5-tcb", + .data = &tcb_sam9x5_config, + }, { + /* sentinel */ + } +}; + +MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids); +#endif + static int __init tc_probe(struct platform_device *pdev) { struct atmel_tc *tc; @@ -127,6 +162,14 @@ static int __init tc_probe(struct platform_device *pdev) return -EINVAL; } + /* Now take SoC information if available */ + if (pdev->dev.of_node) { + const struct of_device_id *match; + match = of_match_node(atmel_tcb_dt_ids, pdev->dev.of_node); + if (match) + tc->tcb_config = match->data; + } + tc->clk[0] = clk; tc->clk[1] = clk_get(&pdev->dev, "t1_clk"); if (IS_ERR(tc->clk[1])) @@ -151,7 +194,10 @@ static int __init tc_probe(struct platform_device *pdev) } static struct platform_driver tc_driver = { - .driver.name = "atmel_tcb", + .driver = { + .name = "atmel_tcb", + .of_match_table = of_match_ptr(atmel_tcb_dt_ids), + }, }; static int __init tc_init(void) diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c new file mode 100644 index 00000000000..99a04686e45 --- /dev/null +++ b/drivers/misc/bh1770glc.c @@ -0,0 +1,1418 @@ +/* + * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/i2c/bh1770glc.h> +#include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/slab.h> + +#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */ +#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */ +#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */ +#define BH1770_I_LED3 0x83 /* LED3 current setting */ +#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */ +#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. 
rate at stand alone mode */
+#define BH1770_ALS_MEAS_RATE	0x86 /* ALS meas. rate at stand alone mode */
+#define BH1770_PART_ID		0x8a /* Part number and revision ID */
+#define BH1770_MANUFACT_ID	0x8b /* Manufacturer ID */
+#define BH1770_ALS_DATA_0	0x8c /* ALS DATA low byte */
+#define BH1770_ALS_DATA_1	0x8d /* ALS DATA high byte */
+#define BH1770_ALS_PS_STATUS	0x8e /* Measurement data and int status */
+#define BH1770_PS_DATA_LED1	0x8f /* PS data from LED1 */
+#define BH1770_PS_DATA_LED2	0x90 /* PS data from LED2 */
+#define BH1770_PS_DATA_LED3	0x91 /* PS data from LED3 */
+#define BH1770_INTERRUPT	0x92 /* Interrupt setting */
+#define BH1770_PS_TH_LED1	0x93 /* PS interrupt threshold for LED1 */
+#define BH1770_PS_TH_LED2	0x94 /* PS interrupt threshold for LED2 */
+#define BH1770_PS_TH_LED3	0x95 /* PS interrupt threshold for LED3 */
+#define BH1770_ALS_TH_UP_0	0x96 /* ALS upper threshold low byte */
+#define BH1770_ALS_TH_UP_1	0x97 /* ALS upper threshold high byte */
+#define BH1770_ALS_TH_LOW_0	0x98 /* ALS lower threshold low byte */
+#define BH1770_ALS_TH_LOW_1	0x99 /* ALS lower threshold high byte */
+
+/* MANUFACT_ID */
+#define BH1770_MANUFACT_ROHM	0x01
+#define BH1770_MANUFACT_OSRAM	0x03
+
+/* PART_ID */
+#define BH1770_PART		0x90
+#define BH1770_PART_MASK	0xf0
+#define BH1770_REV_MASK		0x0f
+#define BH1770_REV_SHIFT	0
+#define BH1770_REV_0		0x00
+#define BH1770_REV_1		0x01
+
+/* Operating modes for both */
+#define BH1770_STANDBY		0x00
+#define BH1770_FORCED		0x02
+#define BH1770_STANDALONE	0x03
+#define BH1770_SWRESET		(0x01 << 2)
+
+#define BH1770_PS_TRIG_MEAS	(1 << 0)
+#define BH1770_ALS_TRIG_MEAS	(1 << 1)
+
+/* Interrupt control */
+#define BH1770_INT_OUTPUT_MODE	(1 << 3) /* 0 = latched */
+#define BH1770_INT_POLARITY	(1 << 2) /* 1 = active high */
+#define BH1770_INT_ALS_ENA	(1 << 1)
+#define BH1770_INT_PS_ENA	(1 << 0)
+
+/* Interrupt status */
+#define BH1770_INT_LED1_DATA	(1 << 0)
+#define BH1770_INT_LED1_INT	(1 << 1)
+#define BH1770_INT_LED2_DATA	(1 << 2)
+#define BH1770_INT_LED2_INT	(1 << 3)
+#define BH1770_INT_LED3_DATA	(1 << 4)
+#define BH1770_INT_LED3_INT	(1 << 5)
+#define BH1770_INT_LEDS_INT	((1 << 1) | (1 << 3) | (1 << 5))
+#define BH1770_INT_ALS_DATA	(1 << 6)
+#define BH1770_INT_ALS_INT	(1 << 7)
+
+/* LED channels */
+#define BH1770_LED1		0x00
+
+#define BH1770_DISABLE		0
+#define BH1770_ENABLE		1
+#define BH1770_PROX_CHANNELS	1
+
+#define BH1770_LUX_DEFAULT_RATE	1 /* Index to lux rate table */
+#define BH1770_PROX_DEFAULT_RATE	1 /* Direct HW value =~ 50 Hz */
+#define BH1770_PROX_DEF_RATE_THRESH	6 /* Direct HW value =~ 5 Hz */
+#define BH1770_STARTUP_DELAY	50
+#define BH1770_RESET_TIME	10
+#define BH1770_TIMEOUT		2100 /* Timeout in 2.1 seconds */
+
+#define BH1770_LUX_RANGE	65535
+#define BH1770_PROX_RANGE	255
+#define BH1770_COEF_SCALER	1024
+#define BH1770_CALIB_SCALER	8192
+#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
+#define BH1770_LUX_DEF_THRES	1000
+#define BH1770_PROX_DEF_THRES	70
+#define BH1770_PROX_DEF_ABS_THRES	100
+#define BH1770_DEFAULT_PERSISTENCE	10
+#define BH1770_PROX_MAX_PERSISTENCE	50
+#define BH1770_LUX_GA_SCALE	16384
+#define BH1770_LUX_CF_SCALE	2048 /* CF ChipFactor */
+#define BH1770_NEUTRAL_CF	BH1770_LUX_CF_SCALE
+#define BH1770_LUX_CORR_SCALE	4096
+
+#define PROX_ABOVE_THRESHOLD	1
+#define PROX_BELOW_THRESHOLD	0
+
+#define PROX_IGNORE_LUX_LIMIT	500
+
+struct bh1770_chip {
+	struct bh1770_platform_data	*pdata;
+	char				chipname[10];
+	u8				revision;
+	struct i2c_client		*client;
+	struct regulator_bulk_data	regs[2];
+	struct mutex
mutex; /* avoid parallel access */
+	wait_queue_head_t		wait;
+
+	bool			int_mode_prox;
+	bool			int_mode_lux;
+	struct delayed_work	prox_work;
+	u32	lux_cf; /* Chip specific factor */
+	u32	lux_ga;
+	u32	lux_calib;
+	int	lux_rate_index;
+	u32	lux_corr;
+	u16	lux_data_raw;
+	u16	lux_threshold_hi;
+	u16	lux_threshold_lo;
+	u16	lux_thres_hi_onchip;
+	u16	lux_thres_lo_onchip;
+	bool	lux_wait_result;
+
+	int	prox_enable_count;
+	u16	prox_coef;
+	u16	prox_const;
+	int	prox_rate;
+	int	prox_rate_threshold;
+	u8	prox_persistence;
+	u8	prox_persistence_counter;
+	u8	prox_data;
+	u8	prox_threshold;
+	u8	prox_threshold_hw;
+	bool	prox_force_update;
+	u8	prox_abs_thres;
+	u8	prox_led;
+};
+
+static const char reg_vcc[] = "Vcc";
+static const char reg_vleds[] = "Vleds";
+
+/*
+ * Supported stand alone rates in ms from chip data sheet
+ * {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
+ */
+static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
+static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
+
+/* Supported IR-led currents in mA */
+static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
+
+/*
+ * Supported stand alone rates in ms from chip data sheet
+ * {100, 200, 500, 1000, 2000};
+ */
+static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
+
+/*
+ * The interrupt control functions are called while holding chip->mutex,
+ * except during module probe / remove.
+ */
+static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
+					int lux)
+{
+	chip->int_mode_lux = lux;
+	/* Set interrupt modes, interrupt active low, latched */
+	return i2c_smbus_write_byte_data(chip->client,
+					BH1770_INTERRUPT,
+					(lux << 1) | chip->int_mode_prox);
+}
+
+static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
+					int ps)
+{
+	chip->int_mode_prox = ps;
+	return i2c_smbus_write_byte_data(chip->client,
+					BH1770_INTERRUPT,
+					(chip->int_mode_lux << 1) | (ps << 0));
+}
+
+/* chip->mutex is always held here */
+static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
+{
+	/* sysfs may call this when the chip is powered off */
+	if (pm_runtime_suspended(&chip->client->dev))
+		return 0;
+
+	/* Proper proximity response needs the fastest lux rate (100 ms) */
+	if (chip->prox_enable_count)
+		rate_index = 0;
+
+	return i2c_smbus_write_byte_data(chip->client,
+					BH1770_ALS_MEAS_RATE,
+					rate_index);
+}
+
+static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
+{
+	int rate;
+
+	rate = (mode == PROX_ABOVE_THRESHOLD) ?
+		chip->prox_rate_threshold : chip->prox_rate;
+
+	return i2c_smbus_write_byte_data(chip->client,
+					BH1770_PS_MEAS_RATE,
+					rate);
+}
+
+/* The infrared LED is controlled by the chip during proximity scanning */
+static inline int bh1770_led_cfg(struct bh1770_chip *chip)
+{
+	/* LED cfg, current for LEDs 1 and 2 */
+	return i2c_smbus_write_byte_data(chip->client,
+					BH1770_I_LED,
+					(BH1770_LED1 << 6) |
+					(BH1770_LED_5mA << 3) |
+					chip->prox_led);
+}
+
+/*
+ * The following two functions convert raw PS values from the HW to
+ * normalized values. The purpose is to compensate for differences between
+ * sensor versions and variants so that the result means about the same
+ * across versions.
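+ *
+ * (Editor's illustration, not part of the original patch, with
+ * hypothetical platform values: given prox_coef = 1536 and
+ * prox_const = 10, a raw reading of 50 is adjusted to
+ * (50 + 10) * 1536 / 1024 = 90; with the neutral values
+ * prox_coef = BH1770_COEF_SCALER and prox_const = 0 the mapping is
+ * the identity.)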
+ */
+static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
+{
+	u16 adjusted;
+	adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
+		BH1770_COEF_SCALER);
+	if (adjusted > BH1770_PROX_RANGE)
+		adjusted = BH1770_PROX_RANGE;
+	return adjusted;
+}
+
+static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
+{
+	u16 raw;
+
+	raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
+	if (raw > chip->prox_const)
+		raw = raw - chip->prox_const;
+	else
+		raw = 0;
+	return raw;
+}
+
+/*
+ * The following two functions convert raw lux values from the HW to
+ * normalized values. The purpose is to compensate for differences between
+ * sensor versions and variants so that the result means about the same
+ * across versions. chip->mutex is held when this is called.
+ */
+static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
+{
+	u8 tmp = 0;
+
+	/* sysfs may call this when the chip is powered off */
+	if (pm_runtime_suspended(&chip->client->dev))
+		return 0;
+
+	tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
+	chip->prox_threshold_hw = tmp;
+
+	return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
+					tmp);
+}
+
+static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
+{
+	u32 lux;
+	lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
+	return min(lux, (u32)BH1770_LUX_RANGE);
+}
+
+static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
+					u16 adjusted)
+{
+	return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
+}
+
+/* chip->mutex is held when this is called */
+static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
+					u16 threshold_hi, u16 threshold_lo)
+{
+	u8 data[4];
+	int ret;
+
+	/* sysfs may call this when the chip is powered off */
+	if (pm_runtime_suspended(&chip->client->dev))
+		return 0;
+
+	/*
+	 * Compensate the threshold values with the correction factors unless
+	 * they are set to the minimum or maximum.
+	 * Min & max values disable interrupts.
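+	 *
+	 * (Editor's illustration, not part of the original patch: if
+	 * lux_corr works out to 2 * BH1770_LUX_CORR_SCALE, i.e. readings
+	 * are scaled up by two, a requested threshold of 1000 is written
+	 * to the chip as 1000 * 4096 / 8192 = 500 raw counts.)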
+ +/* + * The following two functions convert raw lux values from HW to normalized + * values. The purpose is to compensate for differences between sensor + * versions and variants so that results mean about the same across + * versions. Chip->mutex is kept when this is called. + */ +static int bh1770_prox_set_threshold(struct bh1770_chip *chip) +{ + u8 tmp = 0; + + /* sysfs may call this when the chip is powered off */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold); + chip->prox_threshold_hw = tmp; + + return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1, + tmp); +} + +static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw) +{ + u32 lux; + lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE; + return min(lux, (u32)BH1770_LUX_RANGE); +} + +static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip, + u16 adjusted) +{ + return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr; +} + +/* chip->mutex is kept when this is called */ +static int bh1770_lux_update_thresholds(struct bh1770_chip *chip, + u16 threshold_hi, u16 threshold_lo) +{ + u8 data[4]; + int ret; + + /* sysfs may call this when the chip is powered off */ + if (pm_runtime_suspended(&chip->client->dev)) + return 0; + + /* + * Compensate threshold values with the correction factors if not + * set to minimum or maximum. + * Min & max values disable interrupts. + */ + if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0) + threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi); + + if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0) + threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo); + + if (chip->lux_thres_hi_onchip == threshold_hi && + chip->lux_thres_lo_onchip == threshold_lo) + return 0; + + chip->lux_thres_hi_onchip = threshold_hi; + chip->lux_thres_lo_onchip = threshold_lo; + + data[0] = threshold_hi; + data[1] = threshold_hi >> 8; + data[2] = threshold_lo; + data[3] = threshold_lo >> 8; + + ret = i2c_smbus_write_i2c_block_data(chip->client, + BH1770_ALS_TH_UP_0, + ARRAY_SIZE(data), + data); + return ret; +} + +static int bh1770_lux_get_result(struct bh1770_chip *chip) +{ + u16 data; + int ret; + + ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0); + if (ret < 0) + return ret; + + data = ret & 0xff; + ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1); + if (ret < 0) + return ret; + + chip->lux_data_raw = data | ((ret & 0xff) << 8); + + return 0; +} + +/* Calculate correction value which contains chip and device specific parts */ +static u32 bh1770_get_corr_value(struct bh1770_chip *chip) +{ + u32 tmp; + /* Impact of glass attenuation correction */ + tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE; + /* Impact of chip factor correction */ + tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE; + /* Impact of device specific calibration correction */ + tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER; + return tmp; +} + +static int bh1770_lux_read_result(struct bh1770_chip *chip) +{ + bh1770_lux_get_result(chip); + return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw); +} + +/* + * Chip on / off functions are called while keeping the mutex, except + * during the probe or remove phase + */ +static int bh1770_chip_on(struct bh1770_chip *chip) +{ + int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs), + chip->regs); + if (ret < 0) + return ret; + + usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2); + + /* Reset the chip */ + i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL, + BH1770_SWRESET); + usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2); + + /* + * ALS is always started since proximity needs ALS results + * for reliability estimation. + * Let's assume dark until the first ALS measurement is ready.
+ */ + chip->lux_data_raw = 0; + chip->prox_data = 0; + ret = i2c_smbus_write_byte_data(chip->client, + BH1770_ALS_CONTROL, BH1770_STANDALONE); + + /* Assume reset defaults */ + chip->lux_thres_hi_onchip = BH1770_LUX_RANGE; + chip->lux_thres_lo_onchip = 0; + + return ret; +} + +static void bh1770_chip_off(struct bh1770_chip *chip) +{ + i2c_smbus_write_byte_data(chip->client, + BH1770_INTERRUPT, BH1770_DISABLE); + i2c_smbus_write_byte_data(chip->client, + BH1770_ALS_CONTROL, BH1770_STANDBY); + i2c_smbus_write_byte_data(chip->client, + BH1770_PS_CONTROL, BH1770_STANDBY); + regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); +} + +/* chip->mutex is kept when this is called */ +static int bh1770_prox_mode_control(struct bh1770_chip *chip) +{ + if (chip->prox_enable_count) { + chip->prox_force_update = true; /* Force immediate update */ + + bh1770_lux_rate(chip, chip->lux_rate_index); + bh1770_prox_set_threshold(chip); + bh1770_led_cfg(chip); + bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD); + bh1770_prox_interrupt_control(chip, BH1770_ENABLE); + i2c_smbus_write_byte_data(chip->client, + BH1770_PS_CONTROL, BH1770_STANDALONE); + } else { + chip->prox_data = 0; + bh1770_lux_rate(chip, chip->lux_rate_index); + bh1770_prox_interrupt_control(chip, BH1770_DISABLE); + i2c_smbus_write_byte_data(chip->client, + BH1770_PS_CONTROL, BH1770_STANDBY); + } + return 0; +} + +/* chip->mutex is kept when this is called */ +static int bh1770_prox_read_result(struct bh1770_chip *chip) +{ + int ret; + bool above; + u8 mode; + + ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1); + if (ret < 0) + goto out; + + if (ret > chip->prox_threshold_hw) + above = true; + else + above = false; + + /* + * When ALS levels go above the limit, the proximity result may be + * a false detection, so ignore it. With real proximity there is + * a shadow causing low ALS levels.
+ */ + if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT) + ret = 0; + + chip->prox_data = bh1770_psraw_to_adjusted(chip, ret); + + /* Strong proximity level or force mode requires immediate response */ + if (chip->prox_data >= chip->prox_abs_thres || + chip->prox_force_update) + chip->prox_persistence_counter = chip->prox_persistence; + + chip->prox_force_update = false; + + /* Persistence filtering to reduce false proximity events */ + if (likely(above)) { + if (chip->prox_persistence_counter < chip->prox_persistence) { + chip->prox_persistence_counter++; + ret = -ENODATA; + } else { + mode = PROX_ABOVE_THRESHOLD; + ret = 0; + } + } else { + chip->prox_persistence_counter = 0; + mode = PROX_BELOW_THRESHOLD; + chip->prox_data = 0; + ret = 0; + } + + /* Set proximity detection rate based on above or below value */ + if (ret == 0) { + bh1770_prox_rate(chip, mode); + sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw"); + } +out: + return ret; +} + +static int bh1770_detect(struct bh1770_chip *chip) +{ + struct i2c_client *client = chip->client; + s32 ret; + u8 manu, part; + + ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID); + if (ret < 0) + goto error; + manu = (u8)ret; + + ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID); + if (ret < 0) + goto error; + part = (u8)ret; + + chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT; + chip->prox_coef = BH1770_COEF_SCALER; + chip->prox_const = 0; + chip->lux_cf = BH1770_NEUTRAL_CF; + + if ((manu == BH1770_MANUFACT_ROHM) && + ((part & BH1770_PART_MASK) == BH1770_PART)) { + snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC"); + return 0; + } + + if ((manu == BH1770_MANUFACT_OSRAM) && + ((part & BH1770_PART_MASK) == BH1770_PART)) { + snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770"); + /* Values selected by comparing different versions */ + chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */ + chip->prox_const = 40; + return 0; + } + + ret = -ENODEV; +error: + dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n"); + + return ret; +} + +/* + * This work is re-scheduled at every proximity interrupt. + * If this work is running, it means that there hasn't been any + * proximity interrupt in time. The situation is handled as no-proximity. + * It would be nice to have a low-threshold interrupt, or an interrupt + * when the measurement and the hi-threshold are both 0, but neither exists. + * This is a workaround for a missing HW feature. + */ + +static void bh1770_prox_work(struct work_struct *work) +{ + struct bh1770_chip *chip = + container_of(work, struct bh1770_chip, prox_work.work); + + mutex_lock(&chip->mutex); + bh1770_prox_read_result(chip); + mutex_unlock(&chip->mutex); +} + +/* This is the threaded irq handler */ +static irqreturn_t bh1770_irq(int irq, void *data) +{ + struct bh1770_chip *chip = data; + int status; + int rate = 0; + + mutex_lock(&chip->mutex); + status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS); + + /* Acknowledge interrupt by reading this register */ + i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT); + + /* + * Check if there is fresh data available for ALS. + * If this is the very first data, update thresholds after that.
+ */ + if (status & BH1770_INT_ALS_DATA) { + bh1770_lux_get_result(chip); + if (unlikely(chip->lux_wait_result)) { + chip->lux_wait_result = false; + wake_up(&chip->wait); + bh1770_lux_update_thresholds(chip, + chip->lux_threshold_hi, + chip->lux_threshold_lo); + } + } + + /* Disable interrupt logic to guarantee acknowledgement */ + i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT, + (0 << 1) | (0 << 0)); + + if ((status & BH1770_INT_ALS_INT)) + sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input"); + + if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) { + rate = prox_rates_ms[chip->prox_rate_threshold]; + bh1770_prox_read_result(chip); + } + + /* Re-enable interrupt logic */ + i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT, + (chip->int_mode_lux << 1) | + (chip->int_mode_prox << 0)); + mutex_unlock(&chip->mutex); + + /* + * Can't cancel work while keeping mutex since the work uses the + * same mutex. + */ + if (rate) { + /* + * Simulate missing no-proximity interrupt 50ms after the + * next expected interrupt time. + */ + cancel_delayed_work_sync(&chip->prox_work); + schedule_delayed_work(&chip->prox_work, + msecs_to_jiffies(rate + 50)); + } + return IRQ_HANDLED; +} + +static ssize_t bh1770_power_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + ssize_t ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + if (value) { + pm_runtime_get_sync(dev); + + ret = bh1770_lux_rate(chip, chip->lux_rate_index); + if (ret < 0) { + pm_runtime_put(dev); + goto leave; + } + + ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE); + if (ret < 0) { + pm_runtime_put(dev); + goto leave; + } + + /* This causes interrupt after the next measurement cycle */ + bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES, + BH1770_LUX_DEF_THRES); + /* Inform that we are waiting for a result from ALS */ + chip->lux_wait_result = true; + bh1770_prox_mode_control(chip); + } else if (!pm_runtime_suspended(dev)) { + pm_runtime_put(dev); + } + ret = count; +leave: + mutex_unlock(&chip->mutex); + return ret; +} + +static ssize_t bh1770_power_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", !pm_runtime_suspended(dev)); +} + +static ssize_t bh1770_lux_result_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t ret; + long timeout; + + if (pm_runtime_suspended(dev)) + return -EIO; /* Chip is not enabled at all */ + + timeout = wait_event_interruptible_timeout(chip->wait, + !chip->lux_wait_result, + msecs_to_jiffies(BH1770_TIMEOUT)); + if (!timeout) + return -EIO; + + mutex_lock(&chip->mutex); + ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip)); + mutex_unlock(&chip->mutex); + + return ret; +} + +static ssize_t bh1770_lux_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", BH1770_LUX_RANGE); +} + +static ssize_t bh1770_prox_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + /* Assume no proximity. 
Sensor will tell real state soon */ + if (!chip->prox_enable_count) + chip->prox_data = 0; + + if (value) + chip->prox_enable_count++; + else if (chip->prox_enable_count > 0) + chip->prox_enable_count--; + else + goto leave; + + /* Run control only when chip is powered on */ + if (!pm_runtime_suspended(dev)) + bh1770_prox_mode_control(chip); +leave: + mutex_unlock(&chip->mutex); + return count; +} + +static ssize_t bh1770_prox_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t len; + + mutex_lock(&chip->mutex); + len = sprintf(buf, "%d\n", chip->prox_enable_count); + mutex_unlock(&chip->mutex); + return len; +} + +static ssize_t bh1770_prox_result_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t ret; + + mutex_lock(&chip->mutex); + if (chip->prox_enable_count && !pm_runtime_suspended(dev)) + ret = sprintf(buf, "%d\n", chip->prox_data); + else + ret = -EIO; + mutex_unlock(&chip->mutex); + return ret; +} + +static ssize_t bh1770_prox_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", BH1770_PROX_RANGE); +} + +static ssize_t bh1770_get_prox_rate_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + int pos = 0; + for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++) + pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]); + sprintf(buf + pos - 1, "\n"); + return pos; +} + +static ssize_t bh1770_get_prox_rate_above(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]); +} + +static ssize_t bh1770_get_prox_rate_below(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]); +} + +static int bh1770_prox_rate_validate(int rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++) + if (rate >= prox_rates_hz[i]) + break; + return i; +} + +static ssize_t bh1770_set_prox_rate_above(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + chip->prox_rate_threshold = bh1770_prox_rate_validate(value); + mutex_unlock(&chip->mutex); + return count; +} + +static ssize_t bh1770_set_prox_rate_below(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + chip->prox_rate = bh1770_prox_rate_validate(value); + mutex_unlock(&chip->mutex); + return count; +} + +static ssize_t bh1770_get_prox_thres(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", chip->prox_threshold); +} + +static ssize_t bh1770_set_prox_thres(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + if (value > BH1770_PROX_RANGE) + return 
-EINVAL; + + mutex_lock(&chip->mutex); + chip->prox_threshold = value; + ret = bh1770_prox_set_threshold(chip); + mutex_unlock(&chip->mutex); + if (ret < 0) + return ret; + return count; +} + +static ssize_t bh1770_prox_persistence_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", chip->prox_persistence); +} + +static ssize_t bh1770_prox_persistence_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + if (value > BH1770_PROX_MAX_PERSISTENCE) + return -EINVAL; + + chip->prox_persistence = value; + + return len; +} + +static ssize_t bh1770_prox_abs_thres_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", chip->prox_abs_thres); +} + +static ssize_t bh1770_prox_abs_thres_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + if (value > BH1770_PROX_RANGE) + return -EINVAL; + + chip->prox_abs_thres = value; + + return len; +} + +static ssize_t bh1770_chip_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision); +} + +static ssize_t bh1770_lux_calib_default_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", BH1770_CALIB_SCALER); +} + +static ssize_t bh1770_lux_calib_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + ssize_t len; + + mutex_lock(&chip->mutex); + len = sprintf(buf, "%u\n", chip->lux_calib); + mutex_unlock(&chip->mutex); + return len; +} + +static ssize_t bh1770_lux_calib_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + unsigned long value; + u32 old_calib; + u32 new_corr; + int ret; + + ret = kstrtoul(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&chip->mutex); + old_calib = chip->lux_calib; + chip->lux_calib = value; + new_corr = bh1770_get_corr_value(chip); + if (new_corr == 0) { + chip->lux_calib = old_calib; + mutex_unlock(&chip->mutex); + return -EINVAL; + } + chip->lux_corr = new_corr; + /* Refresh thresholds on HW after changing correction value */ + bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi, + chip->lux_threshold_lo); + + mutex_unlock(&chip->mutex); + + return len; +} + +static ssize_t bh1770_get_lux_rate_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + int pos = 0; + for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++) + pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]); + sprintf(buf + pos - 1, "\n"); + return pos; +} + +static ssize_t bh1770_get_lux_rate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]); +} + +static ssize_t bh1770_set_lux_rate(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bh1770_chip 
*chip = dev_get_drvdata(dev); + unsigned long rate_hz; + int ret, i; + + ret = kstrtoul(buf, 0, &rate_hz); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++) + if (rate_hz >= lux_rates_hz[i]) + break; + + mutex_lock(&chip->mutex); + chip->lux_rate_index = i; + ret = bh1770_lux_rate(chip, i); + mutex_unlock(&chip->mutex); + + if (ret < 0) + return ret; + + return count; +} + +static ssize_t bh1770_get_lux_thresh_above(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", chip->lux_threshold_hi); +} + +static ssize_t bh1770_get_lux_thresh_below(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", chip->lux_threshold_lo); +} + +static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target, + const char *buf) +{ + unsigned long thresh; + int ret; + + ret = kstrtoul(buf, 0, &thresh); + if (ret) + return ret; + + if (thresh > BH1770_LUX_RANGE) + return -EINVAL; + + mutex_lock(&chip->mutex); + *target = thresh; + /* + * Don't update values in HW if we are still waiting for + * first interrupt to come after device handle open call. + */ + if (!chip->lux_wait_result) + ret = bh1770_lux_update_thresholds(chip, + chip->lux_threshold_hi, + chip->lux_threshold_lo); + mutex_unlock(&chip->mutex); + return ret; + +} + +static ssize_t bh1770_set_lux_thresh_above(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf); + if (ret < 0) + return ret; + return len; +} + +static ssize_t bh1770_set_lux_thresh_below(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct bh1770_chip *chip = dev_get_drvdata(dev); + int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf); + if (ret < 0) + return ret; + return len; +} + +static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show, + bh1770_prox_enable_store); +static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR, + bh1770_prox_abs_thres_show, + bh1770_prox_abs_thres_store); +static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR, + bh1770_get_prox_thres, + bh1770_set_prox_thres); +static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL); +static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL); +static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR, + bh1770_prox_persistence_show, + bh1770_prox_persistence_store); +static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR, + bh1770_get_prox_rate_above, + bh1770_set_prox_rate_above); +static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR, + bh1770_get_prox_rate_below, + bh1770_set_prox_rate_below); +static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL); + +static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show, + bh1770_lux_calib_store); +static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO, + bh1770_lux_calib_default_show, + NULL); +static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL); +static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL); +static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate, + bh1770_set_lux_rate); +static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL); +static 
DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR, + bh1770_get_lux_thresh_above, + bh1770_set_lux_thresh_above); +static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR, + bh1770_get_lux_thresh_below, + bh1770_set_lux_thresh_below); +static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL); +static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show, + bh1770_power_state_store); + + +static struct attribute *sysfs_attrs[] = { + &dev_attr_lux0_calibscale.attr, + &dev_attr_lux0_calibscale_default.attr, + &dev_attr_lux0_input.attr, + &dev_attr_lux0_sensor_range.attr, + &dev_attr_lux0_rate.attr, + &dev_attr_lux0_rate_avail.attr, + &dev_attr_lux0_thresh_above_value.attr, + &dev_attr_lux0_thresh_below_value.attr, + &dev_attr_prox0_raw.attr, + &dev_attr_prox0_sensor_range.attr, + &dev_attr_prox0_raw_en.attr, + &dev_attr_prox0_thresh_above_count.attr, + &dev_attr_prox0_rate_above.attr, + &dev_attr_prox0_rate_below.attr, + &dev_attr_prox0_rate_avail.attr, + &dev_attr_prox0_thresh_above0_value.attr, + &dev_attr_prox0_thresh_above1_value.attr, + &dev_attr_chip_id.attr, + &dev_attr_power_state.attr, + NULL +}; + +static struct attribute_group bh1770_attribute_group = { + .attrs = sysfs_attrs +}; + +static int bh1770_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct bh1770_chip *chip; + int err; + + chip = kzalloc(sizeof *chip, GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + chip->client = client; + + mutex_init(&chip->mutex); + init_waitqueue_head(&chip->wait); + INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work); + + if (client->dev.platform_data == NULL) { + dev_err(&client->dev, "platform data is mandatory\n"); + err = -EINVAL; + goto fail1; + } + + chip->pdata = client->dev.platform_data; + chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE; + chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE; + chip->lux_threshold_lo = BH1770_LUX_DEF_THRES; + chip->lux_threshold_hi = BH1770_LUX_DEF_THRES; + + if (chip->pdata->glass_attenuation == 0) + chip->lux_ga = BH1770_NEUTRAL_GA; + else + chip->lux_ga = chip->pdata->glass_attenuation; + + chip->prox_threshold = BH1770_PROX_DEF_THRES; + chip->prox_led = chip->pdata->led_def_curr; + chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES; + chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE; + chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH; + chip->prox_rate = BH1770_PROX_DEFAULT_RATE; + chip->prox_data = 0; + + chip->regs[0].supply = reg_vcc; + chip->regs[1].supply = reg_vleds; + + err = regulator_bulk_get(&client->dev, + ARRAY_SIZE(chip->regs), chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot get regulators\n"); + goto fail1; + } + + err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), + chip->regs); + if (err < 0) { + dev_err(&client->dev, "Cannot enable regulators\n"); + goto fail2; + } + + usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2); + err = bh1770_detect(chip); + if (err < 0) + goto fail3; + + /* Start chip */ + bh1770_chip_on(chip); + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + + chip->lux_corr = bh1770_get_corr_value(chip); + if (chip->lux_corr == 0) { + dev_err(&client->dev, "Improper correction values\n"); + err = -EINVAL; + goto fail3; + } + + if (chip->pdata->setup_resources) { + err = chip->pdata->setup_resources(); + if (err) { + err = -EINVAL; + goto fail3; + } + } + + err = sysfs_create_group(&chip->client->dev.kobj, + &bh1770_attribute_group); + if (err < 0) { + 
dev_err(&chip->client->dev, "Sysfs registration failed\n"); + goto fail4; + } + + /* + * The chip needs a level-triggered interrupt to work. However, + * level triggering doesn't always work correctly with power + * management, so select both trigger types. + */ + err = request_threaded_irq(client->irq, NULL, + bh1770_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT | + IRQF_TRIGGER_LOW, + "bh1770", chip); + if (err) { + dev_err(&client->dev, "could not get IRQ %d\n", + client->irq); + goto fail5; + } + regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); + return err; +fail5: + sysfs_remove_group(&chip->client->dev.kobj, + &bh1770_attribute_group); +fail4: + if (chip->pdata->release_resources) + chip->pdata->release_resources(); +fail3: + regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); +fail2: + regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs); +fail1: + kfree(chip); + return err; +} + +static int bh1770_remove(struct i2c_client *client) +{ + struct bh1770_chip *chip = i2c_get_clientdata(client); + + free_irq(client->irq, chip); + + sysfs_remove_group(&chip->client->dev.kobj, + &bh1770_attribute_group); + + if (chip->pdata->release_resources) + chip->pdata->release_resources(); + + cancel_delayed_work_sync(&chip->prox_work); + + if (!pm_runtime_suspended(&client->dev)) + bh1770_chip_off(chip); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + + regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs); + kfree(chip); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int bh1770_suspend(struct device *dev) +{ + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + + bh1770_chip_off(chip); + + return 0; +} + +static int bh1770_resume(struct device *dev) +{ + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + int ret = 0; + + bh1770_chip_on(chip); + + if (!pm_runtime_suspended(dev)) { + /* + * If we were enabled at suspend time, everything is + * expected to work smoothly + */ + ret = bh1770_lux_rate(chip, chip->lux_rate_index); + ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE); + + /* This causes an interrupt after the next measurement cycle */ + bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES, + BH1770_LUX_DEF_THRES); + /* Inform that we are waiting for a result from ALS */ + chip->lux_wait_result = true; + bh1770_prox_mode_control(chip); + } + return ret; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static int bh1770_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + + bh1770_chip_off(chip); + + return 0; +} + +static int bh1770_runtime_resume(struct device *dev) +{ + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + struct bh1770_chip *chip = i2c_get_clientdata(client); + + bh1770_chip_on(chip); + + return 0; +} +#endif + +static const struct i2c_device_id bh1770_id[] = { + {"bh1770glc", 0 }, + {"sfh7770", 0 }, + {} +}; + +MODULE_DEVICE_TABLE(i2c, bh1770_id); + +static const struct dev_pm_ops bh1770_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume) + SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL) +}; + +static struct i2c_driver bh1770_driver = { + .driver = { + .name = "bh1770glc", + .owner = THIS_MODULE, + .pm = &bh1770_pm_ops, + }, + .probe = bh1770_probe, + .remove = bh1770_remove, + .id_table = bh1770_id,
+}; + +module_i2c_driver(bh1770_driver); + +MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor"); +MODULE_AUTHOR("Samu Onkalo, Nokia Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c new file mode 100644 index 00000000000..48ea33d15a7 --- /dev/null +++ b/drivers/misc/bh1780gli.c @@ -0,0 +1,272 @@ +/* + * bh1780gli.c + * ROHM Ambient Light Sensor Driver + * + * Copyright (C) 2010 Texas Instruments + * Author: Hemanth V <hemanthv@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of.h> + +#define BH1780_REG_CONTROL 0x80 +#define BH1780_REG_PARTID 0x8A +#define BH1780_REG_MANFID 0x8B +#define BH1780_REG_DLOW 0x8C +#define BH1780_REG_DHIGH 0x8D + +#define BH1780_REVMASK (0xf) +#define BH1780_POWMASK (0x3) +#define BH1780_POFF (0x0) +#define BH1780_PON (0x3) + +/* power on settling time in ms */ +#define BH1780_PON_DELAY 2 + +struct bh1780_data { + struct i2c_client *client; + int power_state; + /* lock for sysfs operations */ + struct mutex lock; +}; + +static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg) +{ + int ret = i2c_smbus_write_byte_data(ddata->client, reg, val); + if (ret < 0) + dev_err(&ddata->client->dev, + "i2c_smbus_write_byte_data failed error %d Register (%s)\n", + ret, msg); + return ret; +} + +static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg) +{ + int ret = i2c_smbus_read_byte_data(ddata->client, reg); + if (ret < 0) + dev_err(&ddata->client->dev, + "i2c_smbus_read_byte_data failed error %d Register (%s)\n", + ret, msg); + return ret; +} + +static ssize_t bh1780_show_lux(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + int lsb, msb; + + lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW"); + if (lsb < 0) + return lsb; + + msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH"); + if (msb < 0) + return msb; + + return sprintf(buf, "%d\n", (msb << 8) | lsb); +} + +static ssize_t bh1780_show_power_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + int state; + + state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); + if (state < 0) + return state; + + return sprintf(buf, "%d\n", state & BH1780_POWMASK); +} + +static ssize_t bh1780_store_power_state(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + unsigned long val; + int error; + + error = kstrtoul(buf, 0, &val); + if (error) + return error; + + if (val < 
BH1780_POFF || val > BH1780_PON) + return -EINVAL; + + mutex_lock(&ddata->lock); + + error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL"); + if (error < 0) { + mutex_unlock(&ddata->lock); + return error; + } + + msleep(BH1780_PON_DELAY); + ddata->power_state = val; + mutex_unlock(&ddata->lock); + + return count; +} + +static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL); + +static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, + bh1780_show_power_state, bh1780_store_power_state); + +static struct attribute *bh1780_attributes[] = { + &dev_attr_power_state.attr, + &dev_attr_lux.attr, + NULL +}; + +static const struct attribute_group bh1780_attr_group = { + .attrs = bh1780_attributes, +}; + +static int bh1780_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct bh1780_data *ddata = NULL; + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) { + ret = -EIO; + goto err_op_failed; + } + + ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL); + if (ddata == NULL) { + ret = -ENOMEM; + goto err_op_failed; + } + + ddata->client = client; + i2c_set_clientdata(client, ddata); + + ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID"); + if (ret < 0) + goto err_op_failed; + + dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n", + (ret & BH1780_REVMASK)); + + mutex_init(&ddata->lock); + + ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group); + if (ret) + goto err_op_failed; + + return 0; + +err_op_failed: + kfree(ddata); + return ret; +} + +static int bh1780_remove(struct i2c_client *client) +{ + struct bh1780_data *ddata; + + ddata = i2c_get_clientdata(client); + sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); + kfree(ddata); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int bh1780_suspend(struct device *dev) +{ + struct bh1780_data *ddata; + int state, ret; + struct i2c_client *client = to_i2c_client(dev); + + ddata = i2c_get_clientdata(client); + state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); + if (state < 0) + return state; + + ddata->power_state = state & BH1780_POWMASK; + + ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, + "CONTROL"); + + if (ret < 0) + return ret; + + return 0; +} + +static int bh1780_resume(struct device *dev) +{ + struct bh1780_data *ddata; + int state, ret; + struct i2c_client *client = to_i2c_client(dev); + + ddata = i2c_get_clientdata(client); + state = ddata->power_state; + ret = bh1780_write(ddata, BH1780_REG_CONTROL, state, + "CONTROL"); + + if (ret < 0) + return ret; + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume); + +static const struct i2c_device_id bh1780_id[] = { + { "bh1780", 0 }, + { }, +}; + +#ifdef CONFIG_OF +static const struct of_device_id of_bh1780_match[] = { + { .compatible = "rohm,bh1780gli", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_bh1780_match); +#endif + +static struct i2c_driver bh1780_driver = { + .probe = bh1780_probe, + .remove = bh1780_remove, + .id_table = bh1780_id, + .driver = { + .name = "bh1780", + .pm = &bh1780_pm, + .of_match_table = of_match_ptr(of_bh1780_match), + }, +}; + +module_i2c_driver(bh1780_driver); + +MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>"); diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c new file mode 100644 index 00000000000..a7c16295b81 --- /dev/null +++ 
b/drivers/misc/bmp085-i2c.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2012 Bosch Sensortec GmbH + * Copyright (c) 2012 Unixphere AB + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include "bmp085.h" + +#define BMP085_I2C_ADDRESS 0x77 + +static const unsigned short normal_i2c[] = { BMP085_I2C_ADDRESS, + I2C_CLIENT_END }; + +static int bmp085_i2c_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + if (client->addr != BMP085_I2C_ADDRESS) + return -ENODEV; + + return bmp085_detect(&client->dev); +} + +static int bmp085_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int err; + struct regmap *regmap = devm_regmap_init_i2c(client, + &bmp085_regmap_config); + + if (IS_ERR(regmap)) { + err = PTR_ERR(regmap); + dev_err(&client->dev, "Failed to init regmap: %d\n", err); + return err; + } + + return bmp085_probe(&client->dev, regmap, client->irq); +} + +static int bmp085_i2c_remove(struct i2c_client *client) +{ + return bmp085_remove(&client->dev); +} + +static const struct i2c_device_id bmp085_id[] = { + { BMP085_NAME, 0 }, + { "bmp180", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, bmp085_id); + +static struct i2c_driver bmp085_i2c_driver = { + .driver = { + .owner = THIS_MODULE, + .name = BMP085_NAME, + }, + .id_table = bmp085_id, + .probe = bmp085_i2c_probe, + .remove = bmp085_i2c_remove, + + .detect = bmp085_i2c_detect, + .address_list = normal_i2c +}; + +module_i2c_driver(bmp085_i2c_driver); + +MODULE_AUTHOR("Eric Andersson <eric.andersson@unixphere.com>"); +MODULE_DESCRIPTION("BMP085 I2C bus driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c new file mode 100644 index 00000000000..864ecac3237 --- /dev/null +++ b/drivers/misc/bmp085-spi.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2012 Bosch Sensortec GmbH + * Copyright (c) 2012 Unixphere AB + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/err.h> +#include "bmp085.h" + +static int bmp085_spi_probe(struct spi_device *client) +{ + int err; + struct regmap *regmap; + + client->bits_per_word = 8; + err = spi_setup(client); + if (err < 0) { + dev_err(&client->dev, "spi_setup failed!\n"); + return err; + } + + regmap = devm_regmap_init_spi(client, &bmp085_regmap_config); + if (IS_ERR(regmap)) { + err = PTR_ERR(regmap); + dev_err(&client->dev, "Failed to init regmap: %d\n", err); + return err; + } + + return bmp085_probe(&client->dev, regmap, client->irq); +} + +static int bmp085_spi_remove(struct spi_device *client) +{ + return bmp085_remove(&client->dev); +} + +static const struct of_device_id bmp085_of_match[] = { + { .compatible = "bosch,bmp085", }, + { }, +}; +MODULE_DEVICE_TABLE(of, bmp085_of_match); + +static const struct spi_device_id bmp085_id[] = { + { "bmp180", 0 }, + { "bmp181", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, bmp085_id); + +static struct spi_driver bmp085_spi_driver = { + .driver = { + .owner = THIS_MODULE, + .name = BMP085_NAME, + .of_match_table = bmp085_of_match + }, + .id_table = bmp085_id, + .probe = bmp085_spi_probe, + .remove = bmp085_spi_remove +}; + +module_spi_driver(bmp085_spi_driver); + +MODULE_AUTHOR("Eric Andersson <eric.andersson@unixphere.com>"); +MODULE_DESCRIPTION("BMP085 SPI bus driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c new file mode 100644 index 00000000000..9b313f7810f --- /dev/null +++ b/drivers/misc/bmp085.c @@ -0,0 +1,506 @@ +/* Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com> + * Copyright (c) 2012 Bosch Sensortec GmbH + * Copyright (c) 2012 Unixphere AB + * + * This driver supports the bmp085 and bmp18x digital barometric pressure + * and temperature sensors from Bosch Sensortec. The datasheets + * are available from their website: + * http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP085-DS000-05.pdf + * http://www.bosch-sensortec.com/content/language1/downloads/BST-BMP180-DS000-07.pdf + * + * A pressure measurement is issued by reading from pressure0_input. + * The return value ranges from 30000 to 110000 pascal with a resolution + * of 1 pascal (0.01 millibar) which enables measurements from 9000m above + * to 500m below sea level. + * + * The temperature can be read from temp0_input. Values range from + * -400 to 850 representing the ambient temperature in degrees celsius + * multiplied by 10. The resolution is 0.1 celsius. + * + * Because ambient pressure is temperature dependent, a temperature + * measurement will be executed automatically even if the user is reading + * from pressure0_input. This happens if the last temperature measurement + * was executed more than one second ago. + * + * To decrease RMS noise from pressure measurements, the bmp085 can + * autonomously calculate the average of up to eight samples. This is + * set up by writing to the oversampling sysfs file. Accepted values + * are 0, 1, 2 and 3. 2^x, where x is the value written to this file, + * specifies the number of samples used to calculate the ambient pressure. + * RMS noise is specified as six pascal (without averaging) and decreases + * down to 3 pascal when using an oversampling setting of 3.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/of.h> +#include "bmp085.h" +#include <linux/interrupt.h> +#include <linux/completion.h> +#include <linux/gpio.h> + +#define BMP085_CHIP_ID 0x55 +#define BMP085_CALIBRATION_DATA_START 0xAA +#define BMP085_CALIBRATION_DATA_LENGTH 11 /* 16 bit values */ +#define BMP085_CHIP_ID_REG 0xD0 +#define BMP085_CTRL_REG 0xF4 +#define BMP085_TEMP_MEASUREMENT 0x2E +#define BMP085_PRESSURE_MEASUREMENT 0x34 +#define BMP085_CONVERSION_REGISTER_MSB 0xF6 +#define BMP085_CONVERSION_REGISTER_LSB 0xF7 +#define BMP085_CONVERSION_REGISTER_XLSB 0xF8 +#define BMP085_TEMP_CONVERSION_TIME 5 + +struct bmp085_calibration_data { + s16 AC1, AC2, AC3; + u16 AC4, AC5, AC6; + s16 B1, B2; + s16 MB, MC, MD; +}; + +struct bmp085_data { + struct device *dev; + struct regmap *regmap; + struct mutex lock; + struct bmp085_calibration_data calibration; + u8 oversampling_setting; + u32 raw_temperature; + u32 raw_pressure; + u32 temp_measurement_period; + unsigned long last_temp_measurement; + u8 chip_id; + s32 b6; /* calculated temperature correction coefficient */ + int irq; + struct completion done; +}; + +static irqreturn_t bmp085_eoc_isr(int irq, void *devid) +{ + struct bmp085_data *data = devid; + + complete(&data->done); + + return IRQ_HANDLED; +} + +static s32 bmp085_read_calibration_data(struct bmp085_data *data) +{ + u16 tmp[BMP085_CALIBRATION_DATA_LENGTH]; + struct bmp085_calibration_data *cali = &(data->calibration); + s32 status = regmap_bulk_read(data->regmap, + BMP085_CALIBRATION_DATA_START, (u8 *)tmp, + (BMP085_CALIBRATION_DATA_LENGTH << 1)); + if (status < 0) + return status; + + cali->AC1 = be16_to_cpu(tmp[0]); + cali->AC2 = be16_to_cpu(tmp[1]); + cali->AC3 = be16_to_cpu(tmp[2]); + cali->AC4 = be16_to_cpu(tmp[3]); + cali->AC5 = be16_to_cpu(tmp[4]); + cali->AC6 = be16_to_cpu(tmp[5]); + cali->B1 = be16_to_cpu(tmp[6]); + cali->B2 = be16_to_cpu(tmp[7]); + cali->MB = be16_to_cpu(tmp[8]); + cali->MC = be16_to_cpu(tmp[9]); + cali->MD = be16_to_cpu(tmp[10]); + return 0; +} + +static s32 bmp085_update_raw_temperature(struct bmp085_data *data) +{ + u16 tmp; + s32 status; + + mutex_lock(&data->lock); + + init_completion(&data->done); + + status = regmap_write(data->regmap, BMP085_CTRL_REG, + BMP085_TEMP_MEASUREMENT); + if (status < 0) { + dev_err(data->dev, + "Error while requesting temperature measurement.\n"); + goto exit; + } + wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( + BMP085_TEMP_CONVERSION_TIME)); + + status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, + &tmp, sizeof(tmp)); + if (status < 0) { + dev_err(data->dev, + "Error while reading temperature measurement result\n"); + goto exit; + } + data->raw_temperature = be16_to_cpu(tmp); + data->last_temp_measurement = jiffies; + 
status = 0; /* everything ok, return 0 */ + +exit: + mutex_unlock(&data->lock); + return status; +} + +static s32 bmp085_update_raw_pressure(struct bmp085_data *data) +{ + u32 tmp = 0; + s32 status; + + mutex_lock(&data->lock); + + init_completion(&data->done); + + status = regmap_write(data->regmap, BMP085_CTRL_REG, + BMP085_PRESSURE_MEASUREMENT + + (data->oversampling_setting << 6)); + if (status < 0) { + dev_err(data->dev, + "Error while requesting pressure measurement.\n"); + goto exit; + } + + /* wait for the end of conversion */ + wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies( + 2+(3 << data->oversampling_setting))); + /* copy data into a u32 (4 bytes), but skip the first byte. */ + status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB, + ((u8 *)&tmp)+1, 3); + if (status < 0) { + dev_err(data->dev, + "Error while reading pressure measurement results\n"); + goto exit; + } + data->raw_pressure = be32_to_cpu((tmp)); + data->raw_pressure >>= (8-data->oversampling_setting); + status = 0; /* everything ok, return 0 */ + +exit: + mutex_unlock(&data->lock); + return status; +} + +/* + * This function starts the temperature measurement and returns the value + * in tenths of a degree celsius. + */ +static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature) +{ + struct bmp085_calibration_data *cali = &data->calibration; + long x1, x2; + int status; + + status = bmp085_update_raw_temperature(data); + if (status < 0) + goto exit; + + x1 = ((data->raw_temperature - cali->AC6) * cali->AC5) >> 15; + x2 = (cali->MC << 11) / (x1 + cali->MD); + data->b6 = x1 + x2 - 4000; + /* if NULL just update b6. Used for pressure only measurements */ + if (temperature != NULL) + *temperature = (x1+x2+8) >> 4; + +exit: + return status; +} + +/* + * This function starts the pressure measurement and returns the value + * in pascal. Since the pressure depends on the ambient temperature, + * a temperature measurement is executed according to the given temperature + * measurement period (default is a 1 second boundary). This period could vary + * and needs to be adjusted according to the sensor environment, e.g. with + * large temperature variations the temperature needs to be read out often. + */ +static s32 bmp085_get_pressure(struct bmp085_data *data, int *pressure) +{ + struct bmp085_calibration_data *cali = &data->calibration; + s32 x1, x2, x3, b3; + u32 b4, b7; + s32 p; + int status; + + /* at least every second force an update of the ambient temperature */ + if ((data->last_temp_measurement == 0) || + time_is_before_jiffies(data->last_temp_measurement + 1*HZ)) { + status = bmp085_get_temperature(data, NULL); + if (status < 0) + return status; + } + + status = bmp085_update_raw_pressure(data); + if (status < 0) + return status; + + x1 = (data->b6 * data->b6) >> 12; + x1 *= cali->B2; + x1 >>= 11; + + x2 = cali->AC2 * data->b6; + x2 >>= 11; + + x3 = x1 + x2; + + b3 = (((((s32)cali->AC1) * 4 + x3) << data->oversampling_setting) + 2); + b3 >>= 2; + + x1 = (cali->AC3 * data->b6) >> 13; + x2 = (cali->B1 * ((data->b6 * data->b6) >> 12)) >> 16; + x3 = (x1 + x2 + 2) >> 2; + b4 = (cali->AC4 * (u32)(x3 + 32768)) >> 15; + + b7 = ((u32)data->raw_pressure - b3) * + (50000 >> data->oversampling_setting); + p = ((b7 < 0x80000000) ? ((b7 << 1) / b4) : ((b7 / b4) * 2)); + + x1 = p >> 8; + x1 *= x1; + x1 = (x1 * 3038) >> 16; + x2 = (-7357 * p) >> 16; + p += (x1 + x2 + 3791) >> 4; + + *pressure = p; + + return 0; +}
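As a side note on the code above, the oversampling setting maps to everything the driver programs via a pair of shifts. A standalone sketch (illustrative only; the 0x34 constant and the shift expressions are copied from the defines and code above) that tabulates the effect of each setting:

#include <stdio.h>

int main(void)
{
	unsigned int oss;

	for (oss = 0; oss <= 3; oss++) {
		unsigned int samples = 1u << oss;	/* 2^oss internal samples */
		unsigned int cmd = 0x34 + (oss << 6);	/* BMP085_PRESSURE_MEASUREMENT + (oss << 6) */
		unsigned int wait_ms = 2 + (3u << oss);	/* timeout used in bmp085_update_raw_pressure() */

		printf("oss=%u: %u samples, ctrl=0x%02x, wait %u ms\n",
		       oss, samples, cmd, wait_ms);
	}
	return 0;
}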
+ +/* + * This function sets the chip-internal oversampling. Valid values are 0..3. + * The chip will use 2^oversampling samples for internal averaging. + * This influences the measurement time and the accuracy; larger values + * increase both. The datasheet gives an overview on how measurement time, + * accuracy and noise correlate. + */ +static void bmp085_set_oversampling(struct bmp085_data *data, + unsigned char oversampling) +{ + if (oversampling > 3) + oversampling = 3; + data->oversampling_setting = oversampling; +} + +/* + * Returns the currently selected oversampling. Range: 0..3 + */ +static unsigned char bmp085_get_oversampling(struct bmp085_data *data) +{ + return data->oversampling_setting; +} + +/* sysfs callbacks */ +static ssize_t set_oversampling(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct bmp085_data *data = dev_get_drvdata(dev); + unsigned long oversampling; + int err = kstrtoul(buf, 10, &oversampling); + + if (err == 0) { + mutex_lock(&data->lock); + bmp085_set_oversampling(data, oversampling); + mutex_unlock(&data->lock); + return count; + } + + return err; +} + +static ssize_t show_oversampling(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bmp085_data *data = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", bmp085_get_oversampling(data)); +} +static DEVICE_ATTR(oversampling, S_IWUSR | S_IRUGO, + show_oversampling, set_oversampling); + + +static ssize_t show_temperature(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int temperature; + int status; + struct bmp085_data *data = dev_get_drvdata(dev); + + status = bmp085_get_temperature(data, &temperature); + if (status < 0) + return status; + else + return sprintf(buf, "%d\n", temperature); +} +static DEVICE_ATTR(temp0_input, S_IRUGO, show_temperature, NULL); + + +static ssize_t show_pressure(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int pressure; + int status; + struct bmp085_data *data = dev_get_drvdata(dev); + + status = bmp085_get_pressure(data, &pressure); + if (status < 0) + return status; + else + return sprintf(buf, "%d\n", pressure); +} +static DEVICE_ATTR(pressure0_input, S_IRUGO, show_pressure, NULL); + + +static struct attribute *bmp085_attributes[] = { + &dev_attr_temp0_input.attr, + &dev_attr_pressure0_input.attr, + &dev_attr_oversampling.attr, + NULL +}; + +static const struct attribute_group bmp085_attr_group = { + .attrs = bmp085_attributes, +}; + +int bmp085_detect(struct device *dev) +{ + struct bmp085_data *data = dev_get_drvdata(dev); + unsigned int id; + int ret; + + ret = regmap_read(data->regmap, BMP085_CHIP_ID_REG, &id); + if (ret < 0) + return ret; + + if (id != data->chip_id) + return -ENODEV; + + return 0; +} +EXPORT_SYMBOL_GPL(bmp085_detect); + +static void bmp085_get_of_properties(struct bmp085_data *data) +{ +#ifdef CONFIG_OF + struct device_node *np = data->dev->of_node; + u32 prop; + + if (!np) + return; + + if (!of_property_read_u32(np, "chip-id", &prop)) + data->chip_id = prop & 0xff; + + if (!of_property_read_u32(np, "temp-measurement-period", &prop)) + data->temp_measurement_period = (prop/100)*HZ; + + if (!of_property_read_u32(np, "default-oversampling", &prop)) + data->oversampling_setting = prop & 0xff; +#endif +} + +static int bmp085_init_client(struct bmp085_data *data) +{ + int status = bmp085_read_calibration_data(data); + + if (status < 0) + return status; + + /* default settings */ + data->chip_id = BMP085_CHIP_ID; + data->last_temp_measurement = 0; + data->temp_measurement_period = 1*HZ; + 
data->oversampling_setting = 3; + + bmp085_get_of_properties(data); + + mutex_init(&data->lock); + + return 0; +} + +struct regmap_config bmp085_regmap_config = { + .reg_bits = 8, + .val_bits = 8 +}; +EXPORT_SYMBOL_GPL(bmp085_regmap_config); + +int bmp085_probe(struct device *dev, struct regmap *regmap, int irq) +{ + struct bmp085_data *data; + int err = 0; + + data = kzalloc(sizeof(struct bmp085_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + dev_set_drvdata(dev, data); + data->dev = dev; + data->regmap = regmap; + data->irq = irq; + + if (data->irq > 0) { + err = devm_request_irq(dev, data->irq, bmp085_eoc_isr, + IRQF_TRIGGER_RISING, "bmp085", + data); + if (err < 0) + goto exit_free; + } + + /* Initialize the BMP085 chip */ + err = bmp085_init_client(data); + if (err < 0) + goto exit_free; + + err = bmp085_detect(dev); + if (err < 0) { + dev_err(dev, "%s: chip_id failed!\n", BMP085_NAME); + goto exit_free; + } + + /* Register sysfs hooks */ + err = sysfs_create_group(&dev->kobj, &bmp085_attr_group); + if (err) + goto exit_free; + + dev_info(dev, "Successfully initialized %s!\n", BMP085_NAME); + + return 0; + +exit_free: + kfree(data); +exit: + return err; +} +EXPORT_SYMBOL_GPL(bmp085_probe); + +int bmp085_remove(struct device *dev) +{ + struct bmp085_data *data = dev_get_drvdata(dev); + + sysfs_remove_group(&data->dev->kobj, &bmp085_attr_group); + kfree(data); + + return 0; +} +EXPORT_SYMBOL_GPL(bmp085_remove); + +MODULE_AUTHOR("Christoph Mair <christoph.mair@gmail.com>"); +MODULE_DESCRIPTION("BMP085 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h new file mode 100644 index 00000000000..8b8e3b1f5ca --- /dev/null +++ b/drivers/misc/bmp085.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2012 Bosch Sensortec GmbH + * Copyright (c) 2012 Unixphere AB + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _BMP085_H +#define _BMP085_H + +#include <linux/regmap.h> + +#define BMP085_NAME "bmp085" + +extern struct regmap_config bmp085_regmap_config; + +int bmp085_probe(struct device *dev, struct regmap *regmap, int irq); +int bmp085_remove(struct device *dev); +int bmp085_detect(struct device *dev); + +#endif diff --git a/drivers/misc/c2port/Kconfig b/drivers/misc/c2port/Kconfig index e46af9a5810..0dd690e61d3 100644 --- a/drivers/misc/c2port/Kconfig +++ b/drivers/misc/c2port/Kconfig @@ -3,9 +3,8 @@ # menuconfig C2PORT - tristate "Silicon Labs C2 port support (EXPERIMENTAL)" - depends on EXPERIMENTAL - default no + tristate "Silicon Labs C2 port support" + default n help This option enables support for Silicon Labs C2 port used to program Silicon micro controller chips (and other 8051 compatible). 
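For orientation on the C2 port changes that follow: a board driver uses this subsystem by filling a struct c2port_ops and calling c2port_device_register(), as the core.c hunks below show. A minimal sketch with the pin callbacks stubbed out; the c2d_get/c2d_set members and the 30 x 512-byte flash geometry are assumptions modeled on the in-tree duramar2150 driver, since the register path shown below only validates access, c2d_dir and c2ck_set:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/c2port.h>

static struct c2port_device *my_c2port;

/* Board-specific C2D/C2CK line handling would go in these stubs. */
static void my_access(struct c2port_device *dev, int status) { }
static void my_c2d_dir(struct c2port_device *dev, int dir) { }
static int my_c2d_get(struct c2port_device *dev) { return 0; }
static void my_c2d_set(struct c2port_device *dev, int status) { }
static void my_c2ck_set(struct c2port_device *dev, int status) { }

static struct c2port_ops my_c2port_ops = {
	.block_size	= 512,	/* sysfs flash_data size = blocks_num * block_size */
	.blocks_num	= 30,
	.access		= my_access,
	.c2d_dir	= my_c2d_dir,
	.c2d_get	= my_c2d_get,
	.c2d_set	= my_c2d_set,
	.c2ck_set	= my_c2ck_set,
};

static int __init my_c2port_init(void)
{
	my_c2port = c2port_device_register("myboard", &my_c2port_ops, NULL);
	if (IS_ERR(my_c2port))
		return PTR_ERR(my_c2port);
	return 0;
}

static void __exit my_c2port_exit(void)
{
	c2port_device_unregister(my_c2port);
}

module_init(my_c2port_init);
module_exit(my_c2port_exit);
MODULE_LICENSE("GPL");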
@@ -22,9 +21,9 @@ menuconfig C2PORT if C2PORT config C2PORT_DURAMAR_2150 - tristate "C2 port support for Eurotech's Duramar 2150 (EXPERIMENTAL)" - depends on X86 && C2PORT - default no + tristate "C2 port support for Eurotech's Duramar 2150" + depends on X86 + default n help This option enables C2 support for the Eurotech's Duramar 2150 on board micro controller. diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c index 338dcc12150..5484301d57d 100644 --- a/drivers/misc/c2port/c2port-duramar2150.c +++ b/drivers/misc/c2port/c2port-duramar2150.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/ioport.h> #include <linux/c2port.h> #define DATA_PORT 0x325 @@ -41,7 +42,7 @@ static void duramar2150_c2port_access(struct c2port_device *dev, int status) outb(v | (C2D | C2CK), DIR_PORT); else /* When access is "off" is important that both lines are set - * as inputs or hi-impedence */ + * as inputs or hi-impedance */ outb(v & ~(C2D | C2CK), DIR_PORT); mutex_unlock(&update_lock); diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index b5346b4db91..464419b3644 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c @@ -20,6 +20,7 @@ #include <linux/delay.h> #include <linux/idr.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/c2port.h> @@ -310,6 +311,7 @@ static ssize_t c2port_show_name(struct device *dev, return sprintf(buf, "%s\n", c2dev->name); } +static DEVICE_ATTR(name, 0444, c2port_show_name, NULL); static ssize_t c2port_show_flash_blocks_num(struct device *dev, struct device_attribute *attr, char *buf) @@ -319,6 +321,7 @@ static ssize_t c2port_show_flash_blocks_num(struct device *dev, return sprintf(buf, "%d\n", ops->blocks_num); } +static DEVICE_ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL); static ssize_t c2port_show_flash_block_size(struct device *dev, struct device_attribute *attr, char *buf) @@ -328,6 +331,7 @@ static ssize_t c2port_show_flash_block_size(struct device *dev, return sprintf(buf, "%d\n", ops->block_size); } +static DEVICE_ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL); static ssize_t c2port_show_flash_size(struct device *dev, struct device_attribute *attr, char *buf) @@ -337,18 +341,18 @@ static ssize_t c2port_show_flash_size(struct device *dev, return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size); } +static DEVICE_ATTR(flash_size, 0444, c2port_show_flash_size, NULL); -static ssize_t c2port_show_access(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t access_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct c2port_device *c2dev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", c2dev->access); } -static ssize_t c2port_store_access(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t access_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct c2port_device *c2dev = dev_get_drvdata(dev); struct c2port_ops *ops = c2dev->ops; @@ -374,6 +378,7 @@ static ssize_t c2port_store_access(struct device *dev, return count; } +static DEVICE_ATTR_RW(access); static ssize_t c2port_store_reset(struct device *dev, struct device_attribute *attr, @@ -394,6 +399,7 @@ static ssize_t c2port_store_reset(struct device *dev, return count; } +static DEVICE_ATTR(reset, 0200, NULL, c2port_store_reset); static ssize_t __c2port_show_dev_id(struct 
c2port_device *dev, char *buf) { @@ -430,6 +436,7 @@ static ssize_t c2port_show_dev_id(struct device *dev, return ret; } +static DEVICE_ATTR(dev_id, 0444, c2port_show_dev_id, NULL); static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf) { @@ -466,6 +473,7 @@ static ssize_t c2port_show_rev_id(struct device *dev, return ret; } +static DEVICE_ATTR(rev_id, 0444, c2port_show_rev_id, NULL); static ssize_t c2port_show_flash_access(struct device *dev, struct device_attribute *attr, char *buf) @@ -535,6 +543,8 @@ static ssize_t c2port_store_flash_access(struct device *dev, return count; } +static DEVICE_ATTR(flash_access, 0644, c2port_show_flash_access, + c2port_store_flash_access); static ssize_t __c2port_write_flash_erase(struct c2port_device *dev) { @@ -615,6 +625,7 @@ static ssize_t c2port_store_flash_erase(struct device *dev, return count; } +static DEVICE_ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase); static ssize_t __c2port_read_flash_data(struct c2port_device *dev, char *buffer, loff_t offset, size_t count) @@ -706,7 +717,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev, return nread; } -static ssize_t c2port_read_flash_data(struct kobject *kobj, +static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buffer, loff_t offset, size_t count) { @@ -823,7 +834,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev, return nwrite; } -static ssize_t c2port_write_flash_data(struct kobject *kobj, +static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buffer, loff_t offset, size_t count) { @@ -845,35 +856,40 @@ static ssize_t c2port_write_flash_data(struct kobject *kobj, return ret; } +/* size is computed at run-time */ +static BIN_ATTR(flash_data, 0644, c2port_read_flash_data, + c2port_write_flash_data, 0); /* * Class attributes */ +static struct attribute *c2port_attrs[] = { + &dev_attr_name.attr, + &dev_attr_flash_blocks_num.attr, + &dev_attr_flash_block_size.attr, + &dev_attr_flash_size.attr, + &dev_attr_access.attr, + &dev_attr_reset.attr, + &dev_attr_dev_id.attr, + &dev_attr_rev_id.attr, + &dev_attr_flash_access.attr, + &dev_attr_flash_erase.attr, + NULL, +}; -static struct device_attribute c2port_attrs[] = { - __ATTR(name, 0444, c2port_show_name, NULL), - __ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL), - __ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL), - __ATTR(flash_size, 0444, c2port_show_flash_size, NULL), - __ATTR(access, 0644, c2port_show_access, c2port_store_access), - __ATTR(reset, 0200, NULL, c2port_store_reset), - __ATTR(dev_id, 0444, c2port_show_dev_id, NULL), - __ATTR(rev_id, 0444, c2port_show_rev_id, NULL), - - __ATTR(flash_access, 0644, c2port_show_flash_access, - c2port_store_flash_access), - __ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase), - __ATTR_NULL, +static struct bin_attribute *c2port_bin_attrs[] = { + &bin_attr_flash_data, + NULL, }; -static struct bin_attribute c2port_bin_attrs = { - .attr = { - .name = "flash_data", - .mode = 0644 - }, - .read = c2port_read_flash_data, - .write = c2port_write_flash_data, - /* .size is computed at run-time */ +static const struct attribute_group c2port_group = { + .attrs = c2port_attrs, + .bin_attrs = c2port_bin_attrs, +}; + +static const struct attribute_group *c2port_groups[] = { + &c2port_group, + NULL, }; /* @@ -884,7 +900,7 @@ struct c2port_device *c2port_device_register(char *name, struct c2port_ops 
*ops, void *devdata) { struct c2port_device *c2dev; - int id, ret; + int ret; if (unlikely(!ops) || unlikely(!ops->access) || \ unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \ @@ -896,24 +912,22 @@ struct c2port_device *c2port_device_register(char *name, if (unlikely(!c2dev)) return ERR_PTR(-ENOMEM); - ret = idr_pre_get(&c2port_idr, GFP_KERNEL); - if (!ret) { - ret = -ENOMEM; - goto error_idr_get_new; - } - + idr_preload(GFP_KERNEL); spin_lock_irq(&c2port_idr_lock); - ret = idr_get_new(&c2port_idr, c2dev, &id); + ret = idr_alloc(&c2port_idr, c2dev, 0, 0, GFP_NOWAIT); spin_unlock_irq(&c2port_idr_lock); + idr_preload_end(); if (ret < 0) - goto error_idr_get_new; - c2dev->id = id; + goto error_idr_alloc; + c2dev->id = ret; + + bin_attr_flash_data.size = ops->blocks_num * ops->block_size; c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, - "c2port%d", id); - if (unlikely(!c2dev->dev)) { - ret = -ENOMEM; + "c2port%d", c2dev->id); + if (unlikely(IS_ERR(c2dev->dev))) { + ret = PTR_ERR(c2dev->dev); goto error_device_create; } dev_set_drvdata(c2dev->dev, c2dev); @@ -922,12 +936,6 @@ struct c2port_device *c2port_device_register(char *name, c2dev->ops = ops; mutex_init(&c2dev->mutex); - /* Create binary file */ - c2port_bin_attrs.size = ops->blocks_num * ops->block_size; - ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs); - if (unlikely(ret)) - goto error_device_create_bin_file; - /* By default C2 port access is off */ c2dev->access = c2dev->flash_access = 0; ops->access(c2dev, 0); @@ -940,15 +948,12 @@ struct c2port_device *c2port_device_register(char *name, return c2dev; -error_device_create_bin_file: - device_destroy(c2port_class, 0); - error_device_create: spin_lock_irq(&c2port_idr_lock); - idr_remove(&c2port_idr, id); + idr_remove(&c2port_idr, c2dev->id); spin_unlock_irq(&c2port_idr_lock); -error_idr_get_new: +error_idr_alloc: kfree(c2dev); return ERR_PTR(ret); @@ -962,7 +967,6 @@ void c2port_device_unregister(struct c2port_device *c2dev) dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name); - device_remove_bin_file(c2dev->dev, &c2port_bin_attrs); spin_lock_irq(&c2port_idr_lock); idr_remove(&c2port_idr, c2dev->id); spin_unlock_irq(&c2port_idr_lock); @@ -983,11 +987,11 @@ static int __init c2port_init(void) " - (C) 2007 Rodolfo Giometti\n"); c2port_class = class_create(THIS_MODULE, "c2port"); - if (!c2port_class) { + if (IS_ERR(c2port_class)) { printk(KERN_ERR "c2port: failed to allocate class\n"); - return -ENOMEM; + return PTR_ERR(c2port_class); } - c2port_class->dev_attrs = c2port_attrs; + c2port_class->dev_groups = c2port_groups; return 0; } diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig new file mode 100644 index 00000000000..c90370ed712 --- /dev/null +++ b/drivers/misc/carma/Kconfig @@ -0,0 +1,17 @@ +config CARMA_FPGA + tristate "CARMA DATA-FPGA Access Driver" + depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA + select VIDEOBUF_DMA_SG + default n + help + Say Y here to include support for communicating with the data + processing FPGAs on the OVRO CARMA board. + +config CARMA_FPGA_PROGRAM + tristate "CARMA DATA-FPGA Programmer" + depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA + select VIDEOBUF_DMA_SG + default n + help + Say Y here to include support for programming the data processing + FPGAs on the OVRO CARMA board. 
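Both CARMA options above are tristate, so they can be built as loadable modules. An illustrative .config fragment enabling them (CONFIG_VIDEOBUF_DMA_SG is pulled in automatically via the select, assuming the FSL_SOC/PPC_83xx platform dependencies are already satisfied):

CONFIG_CARMA_FPGA=m
CONFIG_CARMA_FPGA_PROGRAM=m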
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
new file mode 100644
index 00000000000..ff36ac2ce53
--- /dev/null
+++ b/drivers/misc/carma/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CARMA_FPGA)		+= carma-fpga.o
+obj-$(CONFIG_CARMA_FPGA_PROGRAM)	+= carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
new file mode 100644
index 00000000000..7be89832db1
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -0,0 +1,1143 @@
+/*
+ * CARMA Board DATA-FPGA Programmer
+ *
+ * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include <media/videobuf-dma-sg.h>
+
+/* MPC8349EMDS specific get_immrbase() */
+#include <sysdev/fsl_soc.h>
+
+static const char drv_name[] = "carma-fpga-program";
+
+/*
+ * Firmware images are always this exact size
+ *
+ * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
+ * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
+ */
+#define FW_SIZE_EP2S90		12849552
+#define FW_SIZE_EP2S130		18662880
+
+struct fpga_dev {
+	struct miscdevice miscdev;
+
+	/* Reference count */
+	struct kref ref;
+
+	/* Device Registers */
+	struct device *dev;
+	void __iomem *regs;
+	void __iomem *immr;
+
+	/* Freescale DMA Device */
+	struct dma_chan *chan;
+
+	/* Interrupts */
+	int irq, status;
+	struct completion completion;
+
+	/* FPGA Bitfile */
+	struct mutex lock;
+
+	struct videobuf_dmabuf vb;
+	bool vb_allocated;
+
+	/* max size and written bytes */
+	size_t fw_size;
+	size_t bytes;
+};
+
+/*
+ * FPGA Bitfile Helpers
+ */
+
+/**
+ * fpga_drop_firmware_data() - drop the bitfile image from memory
+ * @priv: the driver's private data structure
+ *
+ * LOCKING: must hold priv->lock
+ */
+static void fpga_drop_firmware_data(struct fpga_dev *priv)
+{
+	videobuf_dma_free(&priv->vb);
+	priv->vb_allocated = false;
+	priv->bytes = 0;
+}
+
+/*
+ * Private Data Reference Count
+ */
+
+static void fpga_dev_remove(struct kref *ref)
+{
+	struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
+
+	/* free any firmware image that was not programmed */
+	fpga_drop_firmware_data(priv);
+
+	mutex_destroy(&priv->lock);
+	kfree(priv);
+}
+
+/*
+ * LED Trigger (could be a separate module)
+ */
+
+/*
+ * NOTE: this whole thing does have the problem that whenever the LEDs are
+ * NOTE: first set to use the fpga trigger, they could be in the wrong state
+ */
+
+DEFINE_LED_TRIGGER(ledtrig_fpga);
+
+static void ledtrig_fpga_programmed(bool enabled)
+{
+	if (enabled)
+		led_trigger_event(ledtrig_fpga, LED_FULL);
+	else
+		led_trigger_event(ledtrig_fpga, LED_OFF);
+}
+
+/*
+ * FPGA Register Helpers
+ */
+
+/* Register Definitions */
+#define FPGA_CONFIG_CONTROL		0x40
+#define FPGA_CONFIG_STATUS		0x44
+#define
FPGA_CONFIG_FIFO_SIZE 0x48 +#define FPGA_CONFIG_FIFO_USED 0x4C +#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50 +#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54 + +#define FPGA_FIFO_ADDRESS 0x3000 + +static int fpga_fifo_size(void __iomem *regs) +{ + return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE); +} + +#define CFG_STATUS_ERR_MASK 0xfffe + +static int fpga_config_error(void __iomem *regs) +{ + return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK; +} + +static int fpga_fifo_empty(void __iomem *regs) +{ + return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0; +} + +static void fpga_fifo_write(void __iomem *regs, u32 val) +{ + iowrite32be(val, regs + FPGA_FIFO_ADDRESS); +} + +static void fpga_set_byte_count(void __iomem *regs, u32 count) +{ + iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT); +} + +#define CFG_CTL_ENABLE (1 << 0) +#define CFG_CTL_RESET (1 << 1) +#define CFG_CTL_DMA (1 << 2) + +static void fpga_programmer_enable(struct fpga_dev *priv, bool dma) +{ + u32 val; + + val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE; + iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL); +} + +static void fpga_programmer_disable(struct fpga_dev *priv) +{ + iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL); +} + +static void fpga_dump_registers(struct fpga_dev *priv) +{ + u32 control, status, size, used, total, curr; + + /* good status: do nothing */ + if (priv->status == 0) + return; + + /* Dump all status registers */ + control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL); + status = ioread32be(priv->regs + FPGA_CONFIG_STATUS); + size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE); + used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED); + total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT); + curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT); + + dev_err(priv->dev, "Configuration failed, dumping status registers\n"); + dev_err(priv->dev, "Control: 0x%.8x\n", control); + dev_err(priv->dev, "Status: 0x%.8x\n", status); + dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size); + dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used); + dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total); + dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr); +} + +/* + * FPGA Power Supply Code + */ + +#define CTL_PWR_CONTROL 0x2006 +#define CTL_PWR_STATUS 0x200A +#define CTL_PWR_FAIL 0x200B + +#define PWR_CONTROL_ENABLE 0x01 + +#define PWR_STATUS_ERROR_MASK 0x10 +#define PWR_STATUS_GOOD 0x0f + +/* + * Determine if the FPGA power is good for all supplies + */ +static bool fpga_power_good(struct fpga_dev *priv) +{ + u8 val; + + val = ioread8(priv->regs + CTL_PWR_STATUS); + if (val & PWR_STATUS_ERROR_MASK) + return false; + + return val == PWR_STATUS_GOOD; +} + +/* + * Disable the FPGA power supplies + */ +static void fpga_disable_power_supplies(struct fpga_dev *priv) +{ + unsigned long start; + u8 val; + + iowrite8(0x0, priv->regs + CTL_PWR_CONTROL); + + /* + * Wait 500ms for the power rails to discharge + * + * Without this delay, the CTL-CPLD state machine can get into a + * state where it is waiting for the power-goods to assert, but they + * never do. This only happens when enabling and disabling the + * power sequencer very rapidly. + * + * The loop below will also wait for the power goods to de-assert, + * but testing has shown that they are always disabled by the time + * the sleep completes. However, omitting the sleep and only waiting + * for the power-goods to de-assert was not sufficient to ensure + * that the power sequencer would not wedge itself. 
+ */ + msleep(500); + + start = jiffies; + while (time_before(jiffies, start + HZ)) { + val = ioread8(priv->regs + CTL_PWR_STATUS); + if (!(val & PWR_STATUS_GOOD)) + break; + + usleep_range(5000, 10000); + } + + val = ioread8(priv->regs + CTL_PWR_STATUS); + if (val & PWR_STATUS_GOOD) { + dev_err(priv->dev, "power disable failed: " + "power goods: status 0x%.2x\n", val); + } + + if (val & PWR_STATUS_ERROR_MASK) { + dev_err(priv->dev, "power disable failed: " + "alarm bit set: status 0x%.2x\n", val); + } +} + +/** + * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies + * @priv: the driver's private data structure + * + * Enable the DATA-FPGA power supplies, waiting up to 1 second for + * them to enable successfully. + * + * Returns 0 on success, -ERRNO otherwise + */ +static int fpga_enable_power_supplies(struct fpga_dev *priv) +{ + unsigned long start = jiffies; + + if (fpga_power_good(priv)) { + dev_dbg(priv->dev, "power was already good\n"); + return 0; + } + + iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL); + while (time_before(jiffies, start + HZ)) { + if (fpga_power_good(priv)) + return 0; + + usleep_range(5000, 10000); + } + + return fpga_power_good(priv) ? 0 : -ETIMEDOUT; +} + +/* + * Determine if the FPGA power supplies are all enabled + */ +static bool fpga_power_enabled(struct fpga_dev *priv) +{ + u8 val; + + val = ioread8(priv->regs + CTL_PWR_CONTROL); + if (val & PWR_CONTROL_ENABLE) + return true; + + return false; +} + +/* + * Determine if the FPGA's are programmed and running correctly + */ +static bool fpga_running(struct fpga_dev *priv) +{ + if (!fpga_power_good(priv)) + return false; + + /* Check the config done bit */ + return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18); +} + +/* + * FPGA Programming Code + */ + +/** + * fpga_program_block() - put a block of data into the programmer's FIFO + * @priv: the driver's private data structure + * @buf: the data to program + * @count: the length of data to program (must be a multiple of 4 bytes) + * + * Returns 0 on success, -ERRNO otherwise + */ +static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count) +{ + u32 *data = buf; + int size = fpga_fifo_size(priv->regs); + int i, len; + unsigned long timeout; + + /* enforce correct data length for the FIFO */ + BUG_ON(count % 4 != 0); + + while (count > 0) { + + /* Get the size of the block to write (maximum is FIFO_SIZE) */ + len = min_t(size_t, count, size); + timeout = jiffies + HZ / 4; + + /* Write the block */ + for (i = 0; i < len / 4; i++) + fpga_fifo_write(priv->regs, data[i]); + + /* Update the amounts left */ + count -= len; + data += len / 4; + + /* Wait for the fifo to empty */ + while (true) { + + if (fpga_fifo_empty(priv->regs)) { + break; + } else { + dev_dbg(priv->dev, "Fifo not empty\n"); + cpu_relax(); + } + + if (fpga_config_error(priv->regs)) { + dev_err(priv->dev, "Error detected\n"); + return -EIO; + } + + if (time_after(jiffies, timeout)) { + dev_err(priv->dev, "Fifo drain timeout\n"); + return -ETIMEDOUT; + } + + usleep_range(5000, 10000); + } + } + + return 0; +} + +/** + * fpga_program_cpu() - program the DATA-FPGA's using the CPU + * @priv: the driver's private data structure + * + * This is useful when the DMA programming method fails. It is possible to + * wedge the Freescale DMA controller such that the DMA programming method + * always fails. This method has always succeeded. 
+ * + * Returns 0 on success, -ERRNO otherwise + */ +static noinline int fpga_program_cpu(struct fpga_dev *priv) +{ + int ret; + + /* Disable the programmer */ + fpga_programmer_disable(priv); + + /* Set the total byte count */ + fpga_set_byte_count(priv->regs, priv->bytes); + dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes); + + /* Enable the controller for programming */ + fpga_programmer_enable(priv, false); + dev_dbg(priv->dev, "enabled the controller\n"); + + /* Write each chunk of the FPGA bitfile to FPGA programmer */ + ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes); + if (ret) + goto out_disable_controller; + + /* Wait for the interrupt handler to signal that programming finished */ + ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); + if (!ret) { + dev_err(priv->dev, "Timed out waiting for completion\n"); + ret = -ETIMEDOUT; + goto out_disable_controller; + } + + /* Retrieve the status from the interrupt handler */ + ret = priv->status; + +out_disable_controller: + fpga_programmer_disable(priv); + return ret; +} + +#define FIFO_DMA_ADDRESS 0xf0003000 +#define FIFO_MAX_LEN 4096 + +/** + * fpga_program_dma() - program the DATA-FPGA's using the DMA engine + * @priv: the driver's private data structure + * + * Program the DATA-FPGA's using the Freescale DMA engine. This requires that + * the engine is programmed such that the hardware DMA request lines can + * control the entire DMA transaction. The system controller FPGA then + * completely offloads the programming from the CPU. + * + * Returns 0 on success, -ERRNO otherwise + */ +static noinline int fpga_program_dma(struct fpga_dev *priv) +{ + struct videobuf_dmabuf *vb = &priv->vb; + struct dma_chan *chan = priv->chan; + struct dma_async_tx_descriptor *tx; + size_t num_pages, len, avail = 0; + struct dma_slave_config config; + struct scatterlist *sg; + struct sg_table table; + dma_cookie_t cookie; + int ret, i; + + /* Disable the programmer */ + fpga_programmer_disable(priv); + + /* Allocate a scatterlist for the DMA destination */ + num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN); + ret = sg_alloc_table(&table, num_pages, GFP_KERNEL); + if (ret) { + dev_err(priv->dev, "Unable to allocate dst scatterlist\n"); + ret = -ENOMEM; + goto out_return; + } + + /* + * This is an ugly hack + * + * We fill in a scatterlist as if it were mapped for DMA. This is + * necessary because there exists no better structure for this + * inside the kernel code. + * + * As an added bonus, we can use the DMAEngine API for all of this, + * rather than inventing another extremely similar API. 
+ */ + avail = priv->bytes; + for_each_sg(table.sgl, sg, num_pages, i) { + len = min_t(size_t, avail, FIFO_MAX_LEN); + sg_dma_address(sg) = FIFO_DMA_ADDRESS; + sg_dma_len(sg) = len; + + avail -= len; + } + + /* Map the buffer for DMA */ + ret = videobuf_dma_map(priv->dev, &priv->vb); + if (ret) { + dev_err(priv->dev, "Unable to map buffer for DMA\n"); + goto out_free_table; + } + + /* + * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per + * transaction, and then put it under external control + */ + memset(&config, 0, sizeof(config)); + config.direction = DMA_MEM_TO_DEV; + config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; + ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG, + (unsigned long)&config); + if (ret) { + dev_err(priv->dev, "DMA slave configuration failed\n"); + goto out_dma_unmap; + } + + ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1); + if (ret) { + dev_err(priv->dev, "DMA external control setup failed\n"); + goto out_dma_unmap; + } + + /* setup and submit the DMA transaction */ + tx = chan->device->device_prep_dma_sg(chan, + table.sgl, num_pages, + vb->sglist, vb->sglen, 0); + if (!tx) { + dev_err(priv->dev, "Unable to prep DMA transaction\n"); + ret = -ENOMEM; + goto out_dma_unmap; + } + + cookie = tx->tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(priv->dev, "Unable to submit DMA transaction\n"); + ret = -ENOMEM; + goto out_dma_unmap; + } + + dma_async_issue_pending(chan); + + /* Set the total byte count */ + fpga_set_byte_count(priv->regs, priv->bytes); + dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes); + + /* Enable the controller for DMA programming */ + fpga_programmer_enable(priv, true); + dev_dbg(priv->dev, "enabled the controller\n"); + + /* Wait for the interrupt handler to signal that programming finished */ + ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); + if (!ret) { + dev_err(priv->dev, "Timed out waiting for completion\n"); + ret = -ETIMEDOUT; + goto out_disable_controller; + } + + /* Retrieve the status from the interrupt handler */ + ret = priv->status; + +out_disable_controller: + fpga_programmer_disable(priv); +out_dma_unmap: + videobuf_dma_unmap(priv->dev, vb); +out_free_table: + sg_free_table(&table); +out_return: + return ret; +} + +/* + * Interrupt Handling + */ + +static irqreturn_t fpga_irq(int irq, void *dev_id) +{ + struct fpga_dev *priv = dev_id; + + /* Save the status */ + priv->status = fpga_config_error(priv->regs) ? 
-EIO : 0; + dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status); + fpga_dump_registers(priv); + + /* Disabling the programmer clears the interrupt */ + fpga_programmer_disable(priv); + + /* Notify any waiters */ + complete(&priv->completion); + + return IRQ_HANDLED; +} + +/* + * SYSFS Helpers + */ + +/** + * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's + * @priv: the driver's private data structure + * + * LOCKING: must hold priv->lock + */ +static int fpga_do_stop(struct fpga_dev *priv) +{ + u32 val; + + /* Set the led to unprogrammed */ + ledtrig_fpga_programmed(false); + + /* Pulse the config line to reset the FPGA's */ + val = CFG_CTL_ENABLE | CFG_CTL_RESET; + iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL); + iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL); + + return 0; +} + +static noinline int fpga_do_program(struct fpga_dev *priv) +{ + int ret; + + if (priv->bytes != priv->fw_size) { + dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, " + "should be %zu bytes\n", + priv->bytes, priv->fw_size); + return -EINVAL; + } + + if (!fpga_power_enabled(priv)) { + dev_err(priv->dev, "Power not enabled\n"); + return -EINVAL; + } + + if (!fpga_power_good(priv)) { + dev_err(priv->dev, "Power not good\n"); + return -EINVAL; + } + + /* Set the LED to unprogrammed */ + ledtrig_fpga_programmed(false); + + /* Try to program the FPGA's using DMA */ + ret = fpga_program_dma(priv); + + /* If DMA failed or doesn't exist, try with CPU */ + if (ret) { + dev_warn(priv->dev, "Falling back to CPU programming\n"); + ret = fpga_program_cpu(priv); + } + + if (ret) { + dev_err(priv->dev, "Unable to program FPGA's\n"); + return ret; + } + + /* Drop the firmware bitfile from memory */ + fpga_drop_firmware_data(priv); + + dev_dbg(priv->dev, "FPGA programming successful\n"); + ledtrig_fpga_programmed(true); + + return 0; +} + +/* + * File Operations + */ + +static int fpga_open(struct inode *inode, struct file *filp) +{ + /* + * The miscdevice layer puts our struct miscdevice into the + * filp->private_data field. We use this to find our private + * data and then overwrite it with our own private structure. 
+ */ + struct fpga_dev *priv = container_of(filp->private_data, + struct fpga_dev, miscdev); + unsigned int nr_pages; + int ret; + + /* We only allow one process at a time */ + ret = mutex_lock_interruptible(&priv->lock); + if (ret) + return ret; + + filp->private_data = priv; + kref_get(&priv->ref); + + /* Truncation: drop any existing data */ + if (filp->f_flags & O_TRUNC) + priv->bytes = 0; + + /* Check if we have already allocated a buffer */ + if (priv->vb_allocated) + return 0; + + /* Allocate a buffer to hold enough data for the bitfile */ + nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE); + ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages); + if (ret) { + dev_err(priv->dev, "unable to allocate data buffer\n"); + mutex_unlock(&priv->lock); + kref_put(&priv->ref, fpga_dev_remove); + return ret; + } + + priv->vb_allocated = true; + return 0; +} + +static int fpga_release(struct inode *inode, struct file *filp) +{ + struct fpga_dev *priv = filp->private_data; + + mutex_unlock(&priv->lock); + kref_put(&priv->ref, fpga_dev_remove); + return 0; +} + +static ssize_t fpga_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct fpga_dev *priv = filp->private_data; + + /* FPGA bitfiles have an exact size: disallow anything else */ + if (priv->bytes >= priv->fw_size) + return -ENOSPC; + + count = min_t(size_t, priv->fw_size - priv->bytes, count); + if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count)) + return -EFAULT; + + priv->bytes += count; + return count; +} + +static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + struct fpga_dev *priv = filp->private_data; + + count = min_t(size_t, priv->bytes - *f_pos, count); + if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count)) + return -EFAULT; + + *f_pos += count; + return count; +} + +static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin) +{ + struct fpga_dev *priv = filp->private_data; + loff_t newpos; + + /* only read-only opens are allowed to seek */ + if ((filp->f_flags & O_ACCMODE) != O_RDONLY) + return -EINVAL; + + switch (origin) { + case SEEK_SET: /* seek relative to the beginning of the file */ + newpos = offset; + break; + case SEEK_CUR: /* seek relative to current position in the file */ + newpos = filp->f_pos + offset; + break; + case SEEK_END: /* seek relative to the end of the file */ + newpos = priv->fw_size - offset; + break; + default: + return -EINVAL; + } + + /* check for sanity */ + if (newpos > priv->fw_size) + return -EINVAL; + + filp->f_pos = newpos; + return newpos; +} + +static const struct file_operations fpga_fops = { + .open = fpga_open, + .release = fpga_release, + .write = fpga_write, + .read = fpga_read, + .llseek = fpga_llseek, +}; + +/* + * Device Attributes + */ + +static ssize_t pfail_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + u8 val; + + val = ioread8(priv->regs + CTL_PWR_FAIL); + return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val); +} + +static ssize_t pgood_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv)); +} + +static ssize_t penable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv)); +} + +static ssize_t penable_store(struct device *dev, 
struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; + + if (val) { + ret = fpga_enable_power_supplies(priv); + if (ret) + return ret; + } else { + fpga_do_stop(priv); + fpga_disable_power_supplies(priv); + } + + return count; +} + +static ssize_t program_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv)); +} + +static ssize_t program_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; + + /* We can't have an image writer and be programming simultaneously */ + if (mutex_lock_interruptible(&priv->lock)) + return -ERESTARTSYS; + + /* Program or Reset the FPGA's */ + ret = val ? fpga_do_program(priv) : fpga_do_stop(priv); + if (ret) + goto out_unlock; + + /* Success */ + ret = count; + +out_unlock: + mutex_unlock(&priv->lock); + return ret; +} + +static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL); +static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL); +static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR, + penable_show, penable_store); + +static DEVICE_ATTR(program, S_IRUGO | S_IWUSR, + program_show, program_store); + +static struct attribute *fpga_attributes[] = { + &dev_attr_power_fail.attr, + &dev_attr_power_good.attr, + &dev_attr_power_enable.attr, + &dev_attr_program.attr, + NULL, +}; + +static const struct attribute_group fpga_attr_group = { + .attrs = fpga_attributes, +}; + +/* + * OpenFirmware Device Subsystem + */ + +#define SYS_REG_VERSION 0x00 +#define SYS_REG_GEOGRAPHIC 0x10 + +static bool dma_filter(struct dma_chan *chan, void *data) +{ + /* + * DMA Channel #0 is the only acceptable device + * + * This probably won't survive an unload/load cycle of the Freescale + * DMAEngine driver, but that won't be a problem + */ + return chan->chan_id == 0 && chan->device->dev_id == 0; +} + +static int fpga_of_remove(struct platform_device *op) +{ + struct fpga_dev *priv = platform_get_drvdata(op); + struct device *this_device = priv->miscdev.this_device; + + sysfs_remove_group(&this_device->kobj, &fpga_attr_group); + misc_deregister(&priv->miscdev); + + free_irq(priv->irq, priv); + irq_dispose_mapping(priv->irq); + + /* make sure the power supplies are off */ + fpga_disable_power_supplies(priv); + + /* unmap registers */ + iounmap(priv->immr); + iounmap(priv->regs); + + dma_release_channel(priv->chan); + + /* drop our reference to the private data structure */ + kref_put(&priv->ref, fpga_dev_remove); + return 0; +} + +/* CTL-CPLD Version Register */ +#define CTL_CPLD_VERSION 0x2000 + +static int fpga_of_probe(struct platform_device *op) +{ + struct device_node *of_node = op->dev.of_node; + struct device *this_device; + struct fpga_dev *priv; + dma_cap_mask_t mask; + u32 ver; + int ret; + + /* Allocate private data */ + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&op->dev, "Unable to allocate private data\n"); + ret = -ENOMEM; + goto out_return; + } + + /* Setup the miscdevice */ + priv->miscdev.minor = MISC_DYNAMIC_MINOR; + priv->miscdev.name = drv_name; + priv->miscdev.fops = &fpga_fops; + + kref_init(&priv->ref); + + platform_set_drvdata(op, priv); + priv->dev = &op->dev; + mutex_init(&priv->lock); + 
init_completion(&priv->completion); + videobuf_dma_init(&priv->vb); + + dev_set_drvdata(priv->dev, priv); + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_SG, mask); + + /* Get control of DMA channel #0 */ + priv->chan = dma_request_channel(mask, dma_filter, NULL); + if (!priv->chan) { + dev_err(&op->dev, "Unable to acquire DMA channel #0\n"); + ret = -ENODEV; + goto out_free_priv; + } + + /* Remap the registers for use */ + priv->regs = of_iomap(of_node, 0); + if (!priv->regs) { + dev_err(&op->dev, "Unable to ioremap registers\n"); + ret = -ENOMEM; + goto out_dma_release_channel; + } + + /* Remap the IMMR for use */ + priv->immr = ioremap(get_immrbase(), 0x100000); + if (!priv->immr) { + dev_err(&op->dev, "Unable to ioremap IMMR\n"); + ret = -ENOMEM; + goto out_unmap_regs; + } + + /* + * Check that external DMA is configured + * + * U-Boot does this for us, but we should check it and bail out if + * there is a problem. Failing to have this register setup correctly + * will cause the DMA controller to transfer a single cacheline + * worth of data, then wedge itself. + */ + if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) { + dev_err(&op->dev, "External DMA control not configured\n"); + ret = -ENODEV; + goto out_unmap_immr; + } + + /* + * Check the CTL-CPLD version + * + * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we + * don't want to run on any version of the CTL-CPLD that does not use + * a compatible register layout. + * + * v2: changed register layout, added power sequencer + * v3: added glitch filter on the i2c overcurrent/overtemp outputs + */ + ver = ioread8(priv->regs + CTL_CPLD_VERSION); + if (ver != 0x02 && ver != 0x03) { + dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n"); + ret = -ENODEV; + goto out_unmap_immr; + } + + /* Set the exact size that the firmware image should be */ + ver = ioread32be(priv->regs + SYS_REG_VERSION); + priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90; + + /* Find the correct IRQ number */ + priv->irq = irq_of_parse_and_map(of_node, 0); + if (priv->irq == NO_IRQ) { + dev_err(&op->dev, "Unable to find IRQ line\n"); + ret = -ENODEV; + goto out_unmap_immr; + } + + /* Request the IRQ */ + ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv); + if (ret) { + dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq); + ret = -ENODEV; + goto out_irq_dispose_mapping; + } + + /* Reset and stop the FPGA's, just in case */ + fpga_do_stop(priv); + + /* Register the miscdevice */ + ret = misc_register(&priv->miscdev); + if (ret) { + dev_err(&op->dev, "Unable to register miscdevice\n"); + goto out_free_irq; + } + + /* Create the sysfs files */ + this_device = priv->miscdev.this_device; + dev_set_drvdata(this_device, priv); + ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group); + if (ret) { + dev_err(&op->dev, "Unable to create sysfs files\n"); + goto out_misc_deregister; + } + + dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n", + (ver & (1 << 17)) ? "Correlator" : "Digitizer", + (ver & (1 << 16)) ? "B" : "A", + (ver & (1 << 18)) ? 
"EP2S130" : "EP2S90"); + + return 0; + +out_misc_deregister: + misc_deregister(&priv->miscdev); +out_free_irq: + free_irq(priv->irq, priv); +out_irq_dispose_mapping: + irq_dispose_mapping(priv->irq); +out_unmap_immr: + iounmap(priv->immr); +out_unmap_regs: + iounmap(priv->regs); +out_dma_release_channel: + dma_release_channel(priv->chan); +out_free_priv: + kref_put(&priv->ref, fpga_dev_remove); +out_return: + return ret; +} + +static struct of_device_id fpga_of_match[] = { + { .compatible = "carma,fpga-programmer", }, + {}, +}; + +static struct platform_driver fpga_of_driver = { + .probe = fpga_of_probe, + .remove = fpga_of_remove, + .driver = { + .name = drv_name, + .of_match_table = fpga_of_match, + .owner = THIS_MODULE, + }, +}; + +/* + * Module Init / Exit + */ + +static int __init fpga_init(void) +{ + led_trigger_register_simple("fpga", &ledtrig_fpga); + return platform_driver_register(&fpga_of_driver); +} + +static void __exit fpga_exit(void) +{ + platform_driver_unregister(&fpga_of_driver); + led_trigger_unregister_simple(ledtrig_fpga); +} + +MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); +MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer"); +MODULE_LICENSE("GPL"); + +module_init(fpga_init); +module_exit(fpga_exit); diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c new file mode 100644 index 00000000000..14d90eae605 --- /dev/null +++ b/drivers/misc/carma/carma-fpga.c @@ -0,0 +1,1447 @@ +/* + * CARMA DATA-FPGA Access Driver + * + * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* + * FPGA Memory Dump Format + * + * FPGA #0 control registers (32 x 32-bit words) + * FPGA #1 control registers (32 x 32-bit words) + * FPGA #2 control registers (32 x 32-bit words) + * FPGA #3 control registers (32 x 32-bit words) + * SYSFPGA control registers (32 x 32-bit words) + * FPGA #0 correlation array (NUM_CORL0 correlation blocks) + * FPGA #1 correlation array (NUM_CORL1 correlation blocks) + * FPGA #2 correlation array (NUM_CORL2 correlation blocks) + * FPGA #3 correlation array (NUM_CORL3 correlation blocks) + * + * Each correlation array consists of: + * + * Correlation Data (2 x NUM_LAGSn x 32-bit words) + * Pipeline Metadata (2 x NUM_METAn x 32-bit words) + * Quantization Counters (2 x NUM_QCNTn x 32-bit words) + * + * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from + * the FPGA configuration registers. They do not change once the FPGA's + * have been programmed, they only change on re-programming. + */ + +/* + * Basic Description: + * + * This driver is used to capture correlation spectra off of the four data + * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore + * this driver supports dynamic enable/disable of capture while the device + * remains open. + * + * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast + * capture rate, all buffers are pre-allocated to avoid any potentially long + * running memory allocations while capturing. + * + * There are two lists and one pointer which are used to keep track of the + * different states of data buffers. + * + * 1) free list + * This list holds all empty data buffers which are ready to receive data. 
+ * + * 2) inflight pointer + * This pointer holds the currently inflight data buffer. This buffer is having + * data copied into it by the DMA engine. + * + * 3) used list + * This list holds data buffers which have been filled, and are waiting to be + * read by userspace. + * + * All buffers start life on the free list, then move successively to the + * inflight pointer, and then to the used list. After they have been read by + * userspace, they are moved back to the free list. The cycle repeats as long + * as necessary. + * + * It should be noted that all buffers are mapped and ready for DMA when they + * are on any of the three lists. They are only unmapped when they are in the + * process of being read by userspace. + */ + +/* + * Notes on the IRQ masking scheme: + * + * The IRQ masking scheme here is different than most other hardware. The only + * way for the DATA-FPGAs to detect if the kernel has taken too long to copy + * the data is if the status registers are not cleared before the next + * correlation data dump is ready. + * + * The interrupt line is connected to the status registers, such that when they + * are cleared, the interrupt is de-asserted. Therein lies our problem. We need + * to schedule a long-running DMA operation and return from the interrupt + * handler quickly, but we cannot clear the status registers. + * + * To handle this, the system controller FPGA has the capability to connect the + * interrupt line to a user-controlled GPIO pin. This pin is driven high + * (unasserted) and left that way. To mask the interrupt, we change the + * interrupt source to the GPIO pin. Tada, we hid the interrupt. :) + */ + +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/dma-mapping.h> +#include <linux/miscdevice.h> +#include <linux/interrupt.h> +#include <linux/dmaengine.h> +#include <linux/seq_file.h> +#include <linux/highmem.h> +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/kref.h> +#include <linux/io.h> + +#include <media/videobuf-dma-sg.h> + +/* system controller registers */ +#define SYS_IRQ_SOURCE_CTL 0x24 +#define SYS_IRQ_OUTPUT_EN 0x28 +#define SYS_IRQ_OUTPUT_DATA 0x2C +#define SYS_IRQ_INPUT_DATA 0x30 +#define SYS_FPGA_CONFIG_STATUS 0x44 + +/* GPIO IRQ line assignment */ +#define IRQ_CORL_DONE 0x10 + +/* FPGA registers */ +#define MMAP_REG_VERSION 0x00 +#define MMAP_REG_CORL_CONF1 0x08 +#define MMAP_REG_CORL_CONF2 0x0C +#define MMAP_REG_STATUS 0x48 + +#define SYS_FPGA_BLOCK 0xF0000000 + +#define DATA_FPGA_START 0x400000 +#define DATA_FPGA_SIZE 0x80000 + +static const char drv_name[] = "carma-fpga"; + +#define NUM_FPGA 4 + +#define MIN_DATA_BUFS 8 +#define MAX_DATA_BUFS 64 + +struct fpga_info { + unsigned int num_lag_ram; + unsigned int blk_size; +}; + +struct data_buf { + struct list_head entry; + struct videobuf_dmabuf vb; + size_t size; +}; + +struct fpga_device { + /* character device */ + struct miscdevice miscdev; + struct device *dev; + struct mutex mutex; + + /* reference count */ + struct kref ref; + + /* FPGA registers and information */ + struct fpga_info info[NUM_FPGA]; + void __iomem *regs; + int irq; + + /* FPGA Physical Address/Size Information */ + resource_size_t phys_addr; + size_t phys_size; + + /* DMA structures */ + struct sg_table corl_table; + unsigned int corl_nents; + struct dma_chan *chan; + + /* Protection for all members below */ + spinlock_t lock; + + /* Device enable/disable flag */ + 
bool enabled;
+
+	/* Correlation data buffers */
+	wait_queue_head_t wait;
+	struct list_head free;
+	struct list_head used;
+	struct data_buf *inflight;
+
+	/* Information about data buffers */
+	unsigned int num_dropped;
+	unsigned int num_buffers;
+	size_t bufsize;
+	struct dentry *dbg_entry;
+};
+
+struct fpga_reader {
+	struct fpga_device *priv;
+	struct data_buf *buf;
+	off_t buf_start;
+};
+
+static void fpga_device_release(struct kref *ref)
+{
+	struct fpga_device *priv = container_of(ref, struct fpga_device, ref);
+
+	/* the last reader has exited, cleanup the last bits */
+	mutex_destroy(&priv->mutex);
+	kfree(priv);
+}
+
+/*
+ * Data Buffer Allocation Helpers
+ */
+
+/**
+ * data_free_buffer() - free a single data buffer and all allocated memory
+ * @buf: the buffer to free
+ *
+ * This will free all of the pages allocated to the given data buffer, and
+ * then free the structure itself
+ */
+static void data_free_buffer(struct data_buf *buf)
+{
+	/* It is ok to free a NULL buffer */
+	if (!buf)
+		return;
+
+	/* free all memory */
+	videobuf_dma_free(&buf->vb);
+	kfree(buf);
+}
+
+/**
+ * data_alloc_buffer() - allocate and fill a data buffer with pages
+ * @bytes: the number of bytes required
+ *
+ * This allocates all space needed for a data buffer. It must be mapped before
+ * use in a DMA transaction using videobuf_dma_map().
+ *
+ * Returns NULL on failure
+ */
+static struct data_buf *data_alloc_buffer(const size_t bytes)
+{
+	unsigned int nr_pages;
+	struct data_buf *buf;
+	int ret;
+
+	/* calculate the number of pages necessary */
+	nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
+
+	/* allocate the buffer structure */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		goto out_return;
+
+	/* initialize internal fields */
+	INIT_LIST_HEAD(&buf->entry);
+	buf->size = bytes;
+
+	/* allocate the videobuf */
+	videobuf_dma_init(&buf->vb);
+	ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages);
+	if (ret)
+		goto out_free_buf;
+
+	return buf;
+
+out_free_buf:
+	kfree(buf);
+out_return:
+	return NULL;
+}
+
+/**
+ * data_free_buffers() - free all allocated buffers
+ * @priv: the driver's private data structure
+ *
+ * Free all buffers allocated by the driver (except those currently in the
+ * process of being read by userspace).
+ *
+ * LOCKING: must hold dev->mutex
+ * CONTEXT: user
+ */
+static void data_free_buffers(struct fpga_device *priv)
+{
+	struct data_buf *buf, *tmp;
+
+	/* the device should be stopped, no DMA in progress */
+	BUG_ON(priv->inflight != NULL);
+
+	list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
+		list_del_init(&buf->entry);
+		videobuf_dma_unmap(priv->dev, &buf->vb);
+		data_free_buffer(buf);
+	}
+
+	list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
+		list_del_init(&buf->entry);
+		videobuf_dma_unmap(priv->dev, &buf->vb);
+		data_free_buffer(buf);
+	}
+
+	priv->num_buffers = 0;
+	priv->bufsize = 0;
+}
+
+/**
+ * data_alloc_buffers() - allocate one second's worth of data buffers
+ * @priv: the driver's private data structure
+ *
+ * Allocate enough buffers for a whole second's worth of data
+ *
+ * This routine will attempt to degrade nicely by succeeding even if a full
+ * second's worth of data buffers could not be allocated, as long as a minimum
+ * number were allocated. In this case, it will print a message to the kernel
+ * log.
+ *
+ * The device must not be modifying any lists when this is called.
+ *
+ * CONTEXT: user
+ * LOCKING: must hold dev->mutex
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_alloc_buffers(struct fpga_device *priv)
+{
+	struct data_buf *buf;
+	int i, ret;
+
+	for (i = 0; i < MAX_DATA_BUFS; i++) {
+
+		/* allocate a buffer */
+		buf = data_alloc_buffer(priv->bufsize);
+		if (!buf)
+			break;
+
+		/* map it for DMA */
+		ret = videobuf_dma_map(priv->dev, &buf->vb);
+		if (ret) {
+			data_free_buffer(buf);
+			break;
+		}
+
+		/* add it to the list of free buffers */
+		list_add_tail(&buf->entry, &priv->free);
+		priv->num_buffers++;
+	}
+
+	/* Make sure we allocated the minimum required number of buffers */
+	if (priv->num_buffers < MIN_DATA_BUFS) {
+		dev_err(priv->dev, "Unable to allocate enough data buffers\n");
+		data_free_buffers(priv);
+		return -ENOMEM;
+	}
+
+	/* Warn if we are running in a degraded state, but do not fail */
+	if (priv->num_buffers < MAX_DATA_BUFS) {
+		dev_warn(priv->dev,
+			 "Unable to allocate %d buffers, using %d buffers instead\n",
+			 MAX_DATA_BUFS, i);
+	}
+
+	return 0;
+}
+
+/*
+ * DMA Operations Helpers
+ */
+
+/**
+ * fpga_start_addr() - get the physical address of a DATA-FPGA
+ * @priv: the driver's private data structure
+ * @fpga: the DATA-FPGA number (zero based)
+ */
+static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
+{
+	return priv->phys_addr + 0x400000 + (0x80000 * fpga);
+}
+
+/**
+ * fpga_block_addr() - get the physical address of a correlation data block
+ * @priv: the driver's private data structure
+ * @fpga: the DATA-FPGA number (zero based)
+ * @blknum: the correlation block number (zero based)
+ */
+static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
+				  unsigned int blknum)
+{
+	return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
+}
+
+#define REG_BLOCK_SIZE	(32 * 4)
+
+/**
+ * data_setup_corl_table() - create the scatterlist for correlation dumps
+ * @priv: the driver's private data structure
+ *
+ * Create the scatterlist for transferring a correlation dump from the
+ * DATA FPGAs. This structure will be reused for each buffer that needs
+ * to be filled with correlation data.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_setup_corl_table(struct fpga_device *priv)
+{
+	struct sg_table *table = &priv->corl_table;
+	struct scatterlist *sg;
+	struct fpga_info *info;
+	int i, j, ret;
+
+	/* Calculate the number of entries needed */
+	priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
+	for (i = 0; i < NUM_FPGA; i++)
+		priv->corl_nents += priv->info[i].num_lag_ram;
+
+	/* Allocate the scatterlist table */
+	ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
+	if (ret) {
+		dev_err(priv->dev, "unable to allocate DMA table\n");
+		return ret;
+	}
+
+	/* Add the DATA FPGA registers to the scatterlist */
+	sg = table->sgl;
+	for (i = 0; i < NUM_FPGA; i++) {
+		sg_dma_address(sg) = fpga_start_addr(priv, i);
+		sg_dma_len(sg) = REG_BLOCK_SIZE;
+		sg = sg_next(sg);
+	}
+
+	/* Add the SYS-FPGA registers to the scatterlist */
+	sg_dma_address(sg) = SYS_FPGA_BLOCK;
+	sg_dma_len(sg) = REG_BLOCK_SIZE;
+	sg = sg_next(sg);
+
+	/* Add the FPGA correlation data blocks to the scatterlist */
+	for (i = 0; i < NUM_FPGA; i++) {
+		info = &priv->info[i];
+		for (j = 0; j < info->num_lag_ram; j++) {
+			sg_dma_address(sg) = fpga_block_addr(priv, i, j);
+			sg_dma_len(sg) = info->blk_size;
+			sg = sg_next(sg);
+		}
+	}
+
+	/*
+	 * All physical addresses and lengths are present in the structure
+	 * now.
It can be reused for every FPGA DATA interrupt + */ + return 0; +} + +/* + * FPGA Register Access Helpers + */ + +static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga, + unsigned int reg, u32 val) +{ + const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE); + iowrite32be(val, priv->regs + fpga_start + reg); +} + +static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga, + unsigned int reg) +{ + const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE); + return ioread32be(priv->regs + fpga_start + reg); +} + +/** + * data_calculate_bufsize() - calculate the data buffer size required + * @priv: the driver's private data structure + * + * Calculate the total buffer size needed to hold a single block + * of correlation data + * + * CONTEXT: user + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_calculate_bufsize(struct fpga_device *priv) +{ + u32 num_corl, num_lags, num_meta, num_qcnt, num_pack; + u32 conf1, conf2, version; + u32 num_lag_ram, blk_size; + int i; + + /* Each buffer starts with the 5 FPGA register areas */ + priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE; + + /* Read and store the configuration data for each FPGA */ + for (i = 0; i < NUM_FPGA; i++) { + version = fpga_read_reg(priv, i, MMAP_REG_VERSION); + conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1); + conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2); + + /* minor version 2 and later */ + if ((version & 0x000000FF) >= 2) { + num_corl = (conf1 & 0x000000F0) >> 4; + num_pack = (conf1 & 0x00000F00) >> 8; + num_lags = (conf1 & 0x00FFF000) >> 12; + num_meta = (conf1 & 0x7F000000) >> 24; + num_qcnt = (conf2 & 0x00000FFF) >> 0; + } else { + num_corl = (conf1 & 0x000000F0) >> 4; + num_pack = 1; /* implied */ + num_lags = (conf1 & 0x000FFF00) >> 8; + num_meta = (conf1 & 0x7FF00000) >> 20; + num_qcnt = (conf2 & 0x00000FFF) >> 0; + } + + num_lag_ram = (num_corl + num_pack - 1) / num_pack; + blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8; + + priv->info[i].num_lag_ram = num_lag_ram; + priv->info[i].blk_size = blk_size; + priv->bufsize += num_lag_ram * blk_size; + + dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl); + dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack); + dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags); + dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta); + dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt); + dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size); + } + + dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize); + return 0; +} + +/* + * Interrupt Handling + */ + +/** + * data_disable_interrupts() - stop the device from generating interrupts + * @priv: the driver's private data structure + * + * Hide interrupts by switching to GPIO interrupt source + * + * LOCKING: must hold dev->lock + */ +static void data_disable_interrupts(struct fpga_device *priv) +{ + /* hide the interrupt by switching the IRQ driver to GPIO */ + iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL); +} + +/** + * data_enable_interrupts() - allow the device to generate interrupts + * @priv: the driver's private data structure + * + * Unhide interrupts by switching to the FPGA interrupt source. At the + * same time, clear the DATA-FPGA status registers. 
+ * + * LOCKING: must hold dev->lock + */ +static void data_enable_interrupts(struct fpga_device *priv) +{ + /* clear the actual FPGA corl_done interrupt */ + fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0); + fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0); + fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0); + fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0); + + /* flush the writes */ + fpga_read_reg(priv, 0, MMAP_REG_STATUS); + fpga_read_reg(priv, 1, MMAP_REG_STATUS); + fpga_read_reg(priv, 2, MMAP_REG_STATUS); + fpga_read_reg(priv, 3, MMAP_REG_STATUS); + + /* switch back to the external interrupt source */ + iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL); +} + +/** + * data_dma_cb() - DMAEngine callback for DMA completion + * @data: the driver's private data structure + * + * Complete a DMA transfer from the DATA-FPGA's + * + * This is called via the DMA callback mechanism, and will handle moving the + * completed DMA transaction to the used list, and then wake any processes + * waiting for new data + * + * CONTEXT: any, softirq expected + */ +static void data_dma_cb(void *data) +{ + struct fpga_device *priv = data; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* If there is no inflight buffer, we've got a bug */ + BUG_ON(priv->inflight == NULL); + + /* Move the inflight buffer onto the used list */ + list_move_tail(&priv->inflight->entry, &priv->used); + priv->inflight = NULL; + + /* + * If data dumping is still enabled, then clear the FPGA + * status registers and re-enable FPGA interrupts + */ + if (priv->enabled) + data_enable_interrupts(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* + * We've changed both the inflight and used lists, so we need + * to wake up any processes that are blocking for those events + */ + wake_up(&priv->wait); +} + +/** + * data_submit_dma() - prepare and submit the required DMA to fill a buffer + * @priv: the driver's private data structure + * @buf: the data buffer + * + * Prepare and submit the necessary DMA transactions to fill a correlation + * data buffer. + * + * LOCKING: must hold dev->lock + * CONTEXT: hardirq only + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) +{ + struct scatterlist *dst_sg, *src_sg; + unsigned int dst_nents, src_nents; + struct dma_chan *chan = priv->chan; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + dma_addr_t dst, src; + unsigned long dma_flags = 0; + + dst_sg = buf->vb.sglist; + dst_nents = buf->vb.sglen; + + src_sg = priv->corl_table.sgl; + src_nents = priv->corl_nents; + + /* + * All buffers passed to this function should be ready and mapped + * for DMA already. 
Therefore, we don't need to do anything except + * submit it to the Freescale DMA Engine for processing + */ + + /* setup the scatterlist to scatterlist transfer */ + tx = chan->device->device_prep_dma_sg(chan, + dst_sg, dst_nents, + src_sg, src_nents, + 0); + if (!tx) { + dev_err(priv->dev, "unable to prep scatterlist DMA\n"); + return -ENOMEM; + } + + /* submit the transaction to the DMA controller */ + cookie = tx->tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(priv->dev, "unable to submit scatterlist DMA\n"); + return -ENOMEM; + } + + /* Prepare the re-read of the SYS-FPGA block */ + dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE); + src = SYS_FPGA_BLOCK; + tx = chan->device->device_prep_dma_memcpy(chan, dst, src, + REG_BLOCK_SIZE, + dma_flags); + if (!tx) { + dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n"); + return -ENOMEM; + } + + /* Setup the callback */ + tx->callback = data_dma_cb; + tx->callback_param = priv; + + /* submit the transaction to the DMA controller */ + cookie = tx->tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n"); + return -ENOMEM; + } + + return 0; +} + +#define CORL_DONE 0x1 +#define CORL_ERR 0x2 + +static irqreturn_t data_irq(int irq, void *dev_id) +{ + struct fpga_device *priv = dev_id; + bool submitted = false; + struct data_buf *buf; + u32 status; + int i; + + /* detect spurious interrupts via FPGA status */ + for (i = 0; i < 4; i++) { + status = fpga_read_reg(priv, i, MMAP_REG_STATUS); + if (!(status & (CORL_DONE | CORL_ERR))) { + dev_err(priv->dev, "spurious irq detected (FPGA)\n"); + return IRQ_NONE; + } + } + + /* detect spurious interrupts via raw IRQ pin readback */ + status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA); + if (status & IRQ_CORL_DONE) { + dev_err(priv->dev, "spurious irq detected (IRQ)\n"); + return IRQ_NONE; + } + + spin_lock(&priv->lock); + + /* + * This is an error case that should never happen. + * + * If this driver has a bug and manages to re-enable interrupts while + * a DMA is in progress, then we will hit this statement and should + * start paying attention immediately. + */ + BUG_ON(priv->inflight != NULL); + + /* hide the interrupt by switching the IRQ driver to GPIO */ + data_disable_interrupts(priv); + + /* If there are no free buffers, drop this data */ + if (list_empty(&priv->free)) { + priv->num_dropped++; + goto out; + } + + buf = list_first_entry(&priv->free, struct data_buf, entry); + list_del_init(&buf->entry); + BUG_ON(buf->size != priv->bufsize); + + /* Submit a DMA transfer to get the correlation data */ + if (data_submit_dma(priv, buf)) { + dev_err(priv->dev, "Unable to setup DMA transfer\n"); + list_move_tail(&buf->entry, &priv->free); + goto out; + } + + /* Save the buffer for the DMA callback */ + priv->inflight = buf; + submitted = true; + + /* Start the DMA Engine */ + dma_async_issue_pending(priv->chan); + +out: + /* If no DMA was submitted, re-enable interrupts */ + if (!submitted) + data_enable_interrupts(priv); + + spin_unlock(&priv->lock); + return IRQ_HANDLED; +} + +/* + * Realtime Device Enable Helpers + */ + +/** + * data_device_enable() - enable the device for buffered dumping + * @priv: the driver's private data structure + * + * Enable the device for buffered dumping. Allocates buffers and hooks up + * the interrupt handler. When this finishes, data will come pouring in. 
+ * + * LOCKING: must hold dev->mutex + * CONTEXT: user context only + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_device_enable(struct fpga_device *priv) +{ + bool enabled; + u32 val; + int ret; + + /* multiple enables are safe: they do nothing */ + spin_lock_irq(&priv->lock); + enabled = priv->enabled; + spin_unlock_irq(&priv->lock); + if (enabled) + return 0; + + /* check that the FPGAs are programmed */ + val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS); + if (!(val & (1 << 18))) { + dev_err(priv->dev, "DATA-FPGAs are not enabled\n"); + return -ENODATA; + } + + /* read the FPGAs to calculate the buffer size */ + ret = data_calculate_bufsize(priv); + if (ret) { + dev_err(priv->dev, "unable to calculate buffer size\n"); + goto out_error; + } + + /* allocate the correlation data buffers */ + ret = data_alloc_buffers(priv); + if (ret) { + dev_err(priv->dev, "unable to allocate buffers\n"); + goto out_error; + } + + /* setup the source scatterlist for dumping correlation data */ + ret = data_setup_corl_table(priv); + if (ret) { + dev_err(priv->dev, "unable to setup correlation DMA table\n"); + goto out_error; + } + + /* prevent the FPGAs from generating interrupts */ + data_disable_interrupts(priv); + + /* hookup the irq handler */ + ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv); + if (ret) { + dev_err(priv->dev, "unable to request IRQ handler\n"); + goto out_error; + } + + /* allow the DMA callback to re-enable FPGA interrupts */ + spin_lock_irq(&priv->lock); + priv->enabled = true; + spin_unlock_irq(&priv->lock); + + /* allow the FPGAs to generate interrupts */ + data_enable_interrupts(priv); + return 0; + +out_error: + sg_free_table(&priv->corl_table); + priv->corl_nents = 0; + + data_free_buffers(priv); + return ret; +} + +/** + * data_device_disable() - disable the device for buffered dumping + * @priv: the driver's private data structure + * + * Disable the device for buffered dumping. Stops new DMA transactions from + * being generated, waits for all outstanding DMA to complete, and then frees + * all buffers. 
+ * + * LOCKING: must hold dev->mutex + * CONTEXT: user only + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_device_disable(struct fpga_device *priv) +{ + spin_lock_irq(&priv->lock); + + /* allow multiple disable */ + if (!priv->enabled) { + spin_unlock_irq(&priv->lock); + return 0; + } + + /* + * Mark the device disabled + * + * This stops DMA callbacks from re-enabling interrupts + */ + priv->enabled = false; + + /* prevent the FPGAs from generating interrupts */ + data_disable_interrupts(priv); + + /* wait until all ongoing DMA has finished */ + while (priv->inflight != NULL) { + spin_unlock_irq(&priv->lock); + wait_event(priv->wait, priv->inflight == NULL); + spin_lock_irq(&priv->lock); + } + + spin_unlock_irq(&priv->lock); + + /* unhook the irq handler */ + free_irq(priv->irq, priv); + + /* free the correlation table */ + sg_free_table(&priv->corl_table); + priv->corl_nents = 0; + + /* free all buffers: the free and used lists are not being changed */ + data_free_buffers(priv); + return 0; +} + +/* + * DEBUGFS Interface + */ +#ifdef CONFIG_DEBUG_FS + +/* + * Count the number of entries in the given list + */ +static unsigned int list_num_entries(struct list_head *list) +{ + struct list_head *entry; + unsigned int ret = 0; + + list_for_each(entry, list) + ret++; + + return ret; +} + +static int data_debug_show(struct seq_file *f, void *offset) +{ + struct fpga_device *priv = f->private; + + spin_lock_irq(&priv->lock); + + seq_printf(f, "enabled: %d\n", priv->enabled); + seq_printf(f, "bufsize: %d\n", priv->bufsize); + seq_printf(f, "num_buffers: %d\n", priv->num_buffers); + seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free)); + seq_printf(f, "inflight: %d\n", priv->inflight != NULL); + seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used)); + seq_printf(f, "num_dropped: %d\n", priv->num_dropped); + + spin_unlock_irq(&priv->lock); + return 0; +} + +static int data_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, data_debug_show, inode->i_private); +} + +static const struct file_operations data_debug_fops = { + .owner = THIS_MODULE, + .open = data_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int data_debugfs_init(struct fpga_device *priv) +{ + priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv, + &data_debug_fops); + if (IS_ERR(priv->dbg_entry)) + return PTR_ERR(priv->dbg_entry); + + return 0; +} + +static void data_debugfs_exit(struct fpga_device *priv) +{ + debugfs_remove(priv->dbg_entry); +} + +#else + +static inline int data_debugfs_init(struct fpga_device *priv) +{ + return 0; +} + +static inline void data_debugfs_exit(struct fpga_device *priv) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +/* + * SYSFS Attributes + */ + +static ssize_t data_en_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_device *priv = dev_get_drvdata(dev); + int ret; + + spin_lock_irq(&priv->lock); + ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled); + spin_unlock_irq(&priv->lock); + + return ret; +} + +static ssize_t data_en_set(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fpga_device *priv = dev_get_drvdata(dev); + unsigned long enable; + int ret; + + ret = kstrtoul(buf, 0, &enable); + if (ret) { + dev_err(priv->dev, "unable to parse enable input\n"); + return ret; + } + + /* protect against concurrent enable/disable */ + ret = mutex_lock_interruptible(&priv->mutex); + if (ret) + 
return ret; + + if (enable) + ret = data_device_enable(priv); + else + ret = data_device_disable(priv); + + if (ret) { + dev_err(priv->dev, "device %s failed\n", + enable ? "enable" : "disable"); + count = ret; + goto out_unlock; + } + +out_unlock: + mutex_unlock(&priv->mutex); + return count; +} + +static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set); + +static struct attribute *data_sysfs_attrs[] = { + &dev_attr_enable.attr, + NULL, +}; + +static const struct attribute_group rt_sysfs_attr_group = { + .attrs = data_sysfs_attrs, +}; + +/* + * FPGA Realtime Data Character Device + */ + +static int data_open(struct inode *inode, struct file *filp) +{ + /* + * The miscdevice layer puts our struct miscdevice into the + * filp->private_data field. We use this to find our private + * data and then overwrite it with our own private structure. + */ + struct fpga_device *priv = container_of(filp->private_data, + struct fpga_device, miscdev); + struct fpga_reader *reader; + int ret; + + /* allocate private data */ + reader = kzalloc(sizeof(*reader), GFP_KERNEL); + if (!reader) + return -ENOMEM; + + reader->priv = priv; + reader->buf = NULL; + + filp->private_data = reader; + ret = nonseekable_open(inode, filp); + if (ret) { + dev_err(priv->dev, "nonseekable-open failed\n"); + kfree(reader); + return ret; + } + + /* + * success, increase the reference count of the private data structure + * so that it doesn't disappear if the device is unbound + */ + kref_get(&priv->ref); + return 0; +} + +static int data_release(struct inode *inode, struct file *filp) +{ + struct fpga_reader *reader = filp->private_data; + struct fpga_device *priv = reader->priv; + + /* free the per-reader structure */ + data_free_buffer(reader->buf); + kfree(reader); + filp->private_data = NULL; + + /* decrement our reference count to the private data */ + kref_put(&priv->ref, fpga_device_release); + return 0; +} + +static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count, + loff_t *f_pos) +{ + struct fpga_reader *reader = filp->private_data; + struct fpga_device *priv = reader->priv; + struct list_head *used = &priv->used; + bool drop_buffer = false; + struct data_buf *dbuf; + size_t avail; + void *data; + int ret; + + /* check if we already have a partial buffer */ + if (reader->buf) { + dbuf = reader->buf; + goto have_buffer; + } + + spin_lock_irq(&priv->lock); + + /* Block until there is at least one buffer on the used list */ + while (list_empty(used)) { + spin_unlock_irq(&priv->lock); + + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_interruptible(priv->wait, !list_empty(used)); + if (ret) + return ret; + + spin_lock_irq(&priv->lock); + } + + /* Grab the first buffer off of the used list */ + dbuf = list_first_entry(used, struct data_buf, entry); + list_del_init(&dbuf->entry); + + spin_unlock_irq(&priv->lock); + + /* Buffers are always mapped: unmap it */ + videobuf_dma_unmap(priv->dev, &dbuf->vb); + + /* save the buffer for later */ + reader->buf = dbuf; + reader->buf_start = 0; + +have_buffer: + /* Get the number of bytes available */ + avail = dbuf->size - reader->buf_start; + data = dbuf->vb.vaddr + reader->buf_start; + + /* Get the number of bytes we can transfer */ + count = min(count, avail); + + /* Copy the data to the userspace buffer */ + if (copy_to_user(ubuf, data, count)) + return -EFAULT; + + /* Update the amount of available space */ + avail -= count; + + /* + * If there is still some data available, save the buffer for the + * next userspace call 
to read() and return
+	 */
+	if (avail > 0) {
+		reader->buf_start += count;
+		reader->buf = dbuf;
+		return count;
+	}
+
+	/*
+	 * Get the buffer ready to be reused for DMA
+	 *
+	 * If it fails, we pretend that the read never happened and return
+	 * -EFAULT to userspace. The read will be retried.
+	 */
+	ret = videobuf_dma_map(priv->dev, &dbuf->vb);
+	if (ret) {
+		dev_err(priv->dev, "unable to remap buffer for DMA\n");
+		return -EFAULT;
+	}
+
+	/* Lock against concurrent enable/disable */
+	spin_lock_irq(&priv->lock);
+
+	/* the reader is finished with this buffer */
+	reader->buf = NULL;
+
+	/*
+	 * One of two things has happened, the device is disabled, or the
+	 * device has been reconfigured underneath us. In either case, we
+	 * should just throw away the buffer.
+	 *
+	 * Lockdep complains if this is done under the spinlock, so we
+	 * handle it during the unlock path.
+	 */
+	if (!priv->enabled || dbuf->size != priv->bufsize) {
+		drop_buffer = true;
+		goto out_unlock;
+	}
+
+	/* The buffer is safe to reuse, so add it back to the free list */
+	list_add_tail(&dbuf->entry, &priv->free);
+
+out_unlock:
+	spin_unlock_irq(&priv->lock);
+
+	if (drop_buffer) {
+		videobuf_dma_unmap(priv->dev, &dbuf->vb);
+		data_free_buffer(dbuf);
+	}
+
+	return count;
+}
+
+static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
+{
+	struct fpga_reader *reader = filp->private_data;
+	struct fpga_device *priv = reader->priv;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &priv->wait, tbl);
+
+	if (!list_empty(&priv->used))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+static int data_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct fpga_reader *reader = filp->private_data;
+	struct fpga_device *priv = reader->priv;
+	unsigned long offset, vsize, psize, addr;
+
+	/* VMA properties */
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+	vsize = vma->vm_end - vma->vm_start;
+	psize = priv->phys_size - offset;
+	addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
+
+	/* Check against the FPGA region's physical memory size */
+	if (vsize > psize) {
+		dev_err(priv->dev, "requested mmap mapping too large\n");
+		return -EINVAL;
+	}
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
+				  vma->vm_page_prot);
+}
+
+static const struct file_operations data_fops = {
+	.owner		= THIS_MODULE,
+	.open		= data_open,
+	.release	= data_release,
+	.read		= data_read,
+	.poll		= data_poll,
+	.mmap		= data_mmap,
+	.llseek		= no_llseek,
+};
+
+/*
+ * OpenFirmware Device Subsystem
+ */
+
+static bool dma_filter(struct dma_chan *chan, void *data)
+{
+	/*
+	 * DMA Channel #0 is used for the FPGA Programmer, so ignore it
+	 *
+	 * This probably won't survive an unload/load cycle of the Freescale
+	 * DMAEngine driver, but that won't be a problem
+	 */
+	if (chan->chan_id == 0 && chan->device->dev_id == 0)
+		return false;
+
+	return true;
+}
+
+static int data_of_probe(struct platform_device *op)
+{
+	struct device_node *of_node = op->dev.of_node;
+	struct device *this_device;
+	struct fpga_device *priv;
+	struct resource res;
+	dma_cap_mask_t mask;
+	int ret;
+
+	/* Allocate private data */
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&op->dev, "Unable to allocate device private data\n");
+		ret = -ENOMEM;
+		goto out_return;
+	}
+
+	platform_set_drvdata(op, priv);
+	priv->dev = &op->dev;
+	kref_init(&priv->ref);
+	mutex_init(&priv->mutex);
+
+	dev_set_drvdata(priv->dev, priv);
+	spin_lock_init(&priv->lock);
+
INIT_LIST_HEAD(&priv->free); + INIT_LIST_HEAD(&priv->used); + init_waitqueue_head(&priv->wait); + + /* Setup the misc device */ + priv->miscdev.minor = MISC_DYNAMIC_MINOR; + priv->miscdev.name = drv_name; + priv->miscdev.fops = &data_fops; + + /* Get the physical address of the FPGA registers */ + ret = of_address_to_resource(of_node, 0, &res); + if (ret) { + dev_err(&op->dev, "Unable to find FPGA physical address\n"); + ret = -ENODEV; + goto out_free_priv; + } + + priv->phys_addr = res.start; + priv->phys_size = resource_size(&res); + + /* ioremap the registers for use */ + priv->regs = of_iomap(of_node, 0); + if (!priv->regs) { + dev_err(&op->dev, "Unable to ioremap registers\n"); + ret = -ENOMEM; + goto out_free_priv; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + dma_cap_set(DMA_INTERRUPT, mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_SG, mask); + + /* Request a DMA channel */ + priv->chan = dma_request_channel(mask, dma_filter, NULL); + if (!priv->chan) { + dev_err(&op->dev, "Unable to request DMA channel\n"); + ret = -ENODEV; + goto out_unmap_regs; + } + + /* Find the correct IRQ number */ + priv->irq = irq_of_parse_and_map(of_node, 0); + if (priv->irq == NO_IRQ) { + dev_err(&op->dev, "Unable to find IRQ line\n"); + ret = -ENODEV; + goto out_release_dma; + } + + /* Drive the GPIO for FPGA IRQ high (no interrupt) */ + iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA); + + /* Register the miscdevice */ + ret = misc_register(&priv->miscdev); + if (ret) { + dev_err(&op->dev, "Unable to register miscdevice\n"); + goto out_irq_dispose_mapping; + } + + /* Create the debugfs files */ + ret = data_debugfs_init(priv); + if (ret) { + dev_err(&op->dev, "Unable to create debugfs files\n"); + goto out_misc_deregister; + } + + /* Create the sysfs files */ + this_device = priv->miscdev.this_device; + dev_set_drvdata(this_device, priv); + ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group); + if (ret) { + dev_err(&op->dev, "Unable to create sysfs files\n"); + goto out_data_debugfs_exit; + } + + dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n"); + return 0; + +out_data_debugfs_exit: + data_debugfs_exit(priv); +out_misc_deregister: + misc_deregister(&priv->miscdev); +out_irq_dispose_mapping: + irq_dispose_mapping(priv->irq); +out_release_dma: + dma_release_channel(priv->chan); +out_unmap_regs: + iounmap(priv->regs); +out_free_priv: + kref_put(&priv->ref, fpga_device_release); +out_return: + return ret; +} + +static int data_of_remove(struct platform_device *op) +{ + struct fpga_device *priv = platform_get_drvdata(op); + struct device *this_device = priv->miscdev.this_device; + + /* remove all sysfs files, now the device cannot be re-enabled */ + sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group); + + /* remove all debugfs files */ + data_debugfs_exit(priv); + + /* disable the device from generating data */ + data_device_disable(priv); + + /* remove the character device to stop new readers from appearing */ + misc_deregister(&priv->miscdev); + + /* cleanup everything not needed by readers */ + irq_dispose_mapping(priv->irq); + dma_release_channel(priv->chan); + iounmap(priv->regs); + + /* release our reference */ + kref_put(&priv->ref, fpga_device_release); + return 0; +} + +static struct of_device_id data_of_match[] = { + { .compatible = "carma,carma-fpga", }, + {}, +}; + +static struct platform_driver data_of_driver = { + .probe = data_of_probe, + .remove = data_of_remove, + .driver = { + .name = drv_name, + .of_match_table = 
data_of_match, + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(data_of_driver); + +MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); +MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/cb710/Makefile b/drivers/misc/cb710/Makefile index 7b80cbf1a60..467c8e9ca3c 100644 --- a/drivers/misc/cb710/Makefile +++ b/drivers/misc/cb710/Makefile @@ -1,6 +1,4 @@ -ifeq ($(CONFIG_CB710_DEBUG),y) - EXTRA_CFLAGS += -DDEBUG -endif +ccflags-$(CONFIG_CB710_DEBUG) := -DDEBUG obj-$(CONFIG_CB710_CORE) += cb710.o diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index b14eab0f2ba..fb397e7d1cc 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c @@ -9,11 +9,11 @@ */ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/idr.h> #include <linux/cb710.h> +#include <linux/gfp.h> static DEFINE_IDA(cb710_ida); static DEFINE_SPINLOCK(cb710_ida_lock); @@ -30,10 +30,10 @@ void cb710_pci_update_config_reg(struct pci_dev *pdev, EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg); /* Some magic writes based on Windows driver init code */ -static int __devinit cb710_pci_configure(struct pci_dev *pdev) +static int cb710_pci_configure(struct pci_dev *pdev) { unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); - struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn); + struct pci_dev *pdev0; u32 val; cb710_pci_update_config_reg(pdev, 0x48, @@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev) if (val & 0x80000000) return 0; + pdev0 = pci_get_slot(pdev->bus, devfn); if (!pdev0) return -ENODEV; @@ -95,7 +96,7 @@ static void cb710_release_slot(struct device *dev) #endif } -static int __devinit cb710_register_slot(struct cb710_chip *chip, +static int cb710_register_slot(struct cb710_chip *chip, unsigned slot_mask, unsigned io_offset, const char *name) { int nr = chip->slots; @@ -175,11 +176,11 @@ static int cb710_suspend(struct pci_dev *pdev, pm_message_t state) { struct cb710_chip *chip = pci_get_drvdata(pdev); - free_irq(pdev->irq, chip); + devm_free_irq(&pdev->dev, pdev->irq, chip); pci_save_state(pdev); pci_disable_device(pdev); if (state.event & PM_EVENT_SLEEP) - pci_set_power_state(pdev, PCI_D3cold); + pci_set_power_state(pdev, PCI_D3hot); return 0; } @@ -200,7 +201,7 @@ static int cb710_resume(struct pci_dev *pdev) #endif /* CONFIG_PM */ -static int __devinit cb710_probe(struct pci_dev *pdev, +static int cb710_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct cb710_chip *chip; @@ -244,6 +245,7 @@ static int __devinit cb710_probe(struct pci_dev *pdev, if (err) return err; + spin_lock_init(&chip->irq_lock); chip->pdev = pdev; chip->iobase = pcim_iomap_table(pdev)[0]; @@ -303,7 +305,7 @@ unreg_mmc: return err; } -static void __devexit cb710_remove_one(struct pci_dev *pdev) +static void cb710_remove_one(struct pci_dev *pdev) { struct cb710_chip *chip = pci_get_drvdata(pdev); unsigned long flags; @@ -330,7 +332,7 @@ static struct pci_driver cb710_driver = { .name = KBUILD_MODNAME, .id_table = cb710_pci_tbl, .probe = cb710_probe, - .remove = __devexit_p(cb710_remove_one), + .remove = cb710_remove_one, #ifdef CONFIG_PM .suspend = cb710_suspend, .resume = cb710_resume, diff --git a/drivers/misc/cb710/debug.c b/drivers/misc/cb710/debug.c index 02358d086e0..fcb3b8e30c5 100644 --- a/drivers/misc/cb710/debug.c +++ b/drivers/misc/cb710/debug.c @@ -10,7 +10,6 @@ #include <linux/cb710.h> #include 
<linux/kernel.h> #include <linux/module.h> -#include <linux/slab.h> #define CB710_REG_COUNT 0x80 diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c index d019746551f..2a40d0efdff 100644 --- a/drivers/misc/cb710/sgbuf2.c +++ b/drivers/misc/cb710/sgbuf2.c @@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter) static inline bool needs_unaligned_copy(const void *ptr) { -#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS return false; #else return ((ptr - NULL) & 3) != 0; diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c index 8110460558f..effd8c6b2b9 100644 --- a/drivers/misc/cs5535-mfgpt.c +++ b/drivers/misc/cs5535-mfgpt.c @@ -16,16 +16,19 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> -#include <linux/pci.h> +#include <linux/platform_device.h> #include <linux/cs5535.h> +#include <linux/slab.h> #define DRV_NAME "cs5535-mfgpt" -#define MFGPT_BAR 2 static int mfgpt_reset_timers; module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); -MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; " - "required by some broken BIOSes (ie, TinyBIOS < 0.99)."); +MODULE_PARM_DESC(mfgptfix, "Try to reset the MFGPT timers during init; " + "required by some broken BIOSes (ie, TinyBIOS < 0.99) or kexec " + "(1 = reset the MFGPT using an undocumented bit, " + "2 = perform a soft reset by unconfiguring all timers); " + "use what works best for you."); struct cs5535_mfgpt_timer { struct cs5535_mfgpt_chip *chip; @@ -36,7 +39,7 @@ static struct cs5535_mfgpt_chip { DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); resource_size_t base; - struct pci_dev *pdev; + struct platform_device *pdev; spinlock_t lock; int initialized; } cs5535_mfgpt_chip; @@ -174,7 +177,7 @@ struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain) timer_nr = t < max ? (int) t : -1; } else { /* check if the requested timer's available */ - if (test_bit(timer_nr, mfgpt->avail)) + if (!test_bit(timer_nr, mfgpt->avail)) timer_nr = -1; } @@ -210,6 +213,17 @@ EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer); */ void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer) { + unsigned long flags; + uint16_t val; + + /* timer can be made available again only if never set up */ + val = cs5535_mfgpt_read(timer, MFGPT_REG_SETUP); + if (!(val & MFGPT_SETUP_SETUP)) { + spin_lock_irqsave(&timer->chip->lock, flags); + __set_bit(timer->nr, timer->chip->avail); + spin_unlock_irqrestore(&timer->chip->lock, flags); + } + kfree(timer); } EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer); @@ -235,7 +249,7 @@ EXPORT_SYMBOL_GPL(cs5535_mfgpt_write); * Jordan tells me that he and Mitch once played w/ it, but it's unclear * what the results of that were (and they experienced some instability). */ -static void __init reset_all_timers(void) +static void reset_all_timers(void) { uint32_t val, dummy; @@ -245,13 +259,35 @@ static void __init reset_all_timers(void) } /* + * This is another sledgehammer to reset all MFGPT timers. + * Instead of using the undocumented bit method it clears + * IRQ, NMI and RESET settings. 
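+ *
+ * For example (illustrative invocation only, not taken from the patch
+ * itself), a system whose BIOS leaves the timers configured, or one that
+ * has just kexec'd, might request the soft reset with:
+ *
+ *	modprobe cs5535-mfgpt mfgptfix=2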
+ */ +static void soft_reset(void) +{ + int i; + struct cs5535_mfgpt_timer t; + + for (i = 0; i < MFGPT_MAX_TIMERS; i++) { + t.nr = i; + + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_RESET, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_RESET, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_NMI, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_NMI, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_IRQ, 0); + cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_IRQ, 0); + } +} + +/* * Check whether any MFGPTs are available for the kernel to use. In most * cases, firmware that uses AMD's VSA code will claim all timers during * bootup; we certainly don't want to take them if they're already in use. * In other cases (such as with VSAless OpenFirmware), the system firmware * leaves timers available for us to use. */ -static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) +static int scan_timers(struct cs5535_mfgpt_chip *mfgpt) { struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; unsigned long flags; @@ -260,15 +296,17 @@ static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) int i; /* bios workaround */ - if (mfgpt_reset_timers) + if (mfgpt_reset_timers == 1) reset_all_timers(); + else if (mfgpt_reset_timers == 2) + soft_reset(); /* just to be safe, protect this section w/ lock */ spin_lock_irqsave(&mfgpt->lock, flags); for (i = 0; i < MFGPT_MAX_TIMERS; i++) { timer.nr = i; val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP); - if (!(val & MFGPT_SETUP_SETUP)) { + if (!(val & MFGPT_SETUP_SETUP) || mfgpt_reset_timers == 2) { __set_bit(i, mfgpt->avail); timers++; } @@ -278,10 +316,16 @@ static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) return timers; } -static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_id) +static int cs5535_mfgpt_probe(struct platform_device *pdev) { - int err, t; + struct resource *res; + int err = -EIO, t; + + if (mfgpt_reset_timers < 0 || mfgpt_reset_timers > 2) { + dev_err(&pdev->dev, "Bad mfgpt_reset_timers value: %i\n", + mfgpt_reset_timers); + goto done; + } /* There are two ways to get the MFGPT base address; one is by * fetching it from MSR_LBAR_MFGPT, the other is by reading the @@ -290,29 +334,27 @@ static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, * it turns out to be unreliable in the face of crappy BIOSes, we * can always go back to using MSRs.. 
*/ - err = pci_enable_device_io(pdev); - if (err) { - dev_err(&pdev->dev, "can't enable device IO\n"); + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(&pdev->dev, "can't fetch device resource info\n"); goto done; } - err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR); + if (!request_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "can't request region\n"); goto done; } /* set up the driver-specific struct */ - cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR); + cs5535_mfgpt_chip.base = res->start; cs5535_mfgpt_chip.pdev = pdev; spin_lock_init(&cs5535_mfgpt_chip.lock); - dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR, - (unsigned long long) cs5535_mfgpt_chip.base); + dev_info(&pdev->dev, "reserved resource region %pR\n", res); /* detect the available timers */ t = scan_timers(&cs5535_mfgpt_chip); - dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t); + dev_info(&pdev->dev, "%d MFGPT timers available\n", t); cs5535_mfgpt_chip.initialized = 1; return 0; @@ -320,51 +362,23 @@ done: return err; } -static struct pci_device_id cs5535_mfgpt_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, - { 0, }, +static struct platform_driver cs5535_mfgpt_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = cs5535_mfgpt_probe, }; -MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl); - -/* - * Just like with the cs5535-gpio driver, we can't use the standard PCI driver - * registration stuff. It only allows only one driver to bind to each PCI - * device, and we want the GPIO and MFGPT drivers to be able to share a PCI - * device. Instead, we manually scan for the PCI device, request a single - * region, and keep track of the devices that we're using. 
- */
-
-static int __init cs5535_mfgpt_scan_pci(void)
-{
-	struct pci_dev *pdev;
-	int err = -ENODEV;
-	int i;
-	for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
-		pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
-				cs5535_mfgpt_pci_tbl[i].device, NULL);
-		if (pdev) {
-			err = cs5535_mfgpt_probe(pdev,
-					&cs5535_mfgpt_pci_tbl[i]);
-			if (err)
-				pci_dev_put(pdev);
-
-			/* we only support a single CS5535/6 southbridge */
-			break;
-		}
-	}
-
-	return err;
-}
 
 static int __init cs5535_mfgpt_init(void)
 {
-	return cs5535_mfgpt_scan_pci();
+	return platform_driver_register(&cs5535_mfgpt_driver);
 }
 module_init(cs5535_mfgpt_init);
 
-MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index f3ee4a1abb7..b909fb30232 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -32,8 +32,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/string.h>
 #include <linux/list.h>
@@ -87,7 +85,6 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
 {
 	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
 	struct i2c_client *client = to_i2c_client(dev);
-	char *endp;
 	u64 val;
 	__le32 val_le;
 	int rc;
@@ -95,8 +92,8 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
 	dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);
 
 	/* Decode input */
-	val = simple_strtoull(buf, &endp, 0);
-	if (buf == endp) {
+	rc = kstrtoull(buf, 0, &val);
+	if (rc < 0) {
 		dev_dbg(dev, "input string not a number\n");
 		return -EINVAL;
 	}
@@ -141,7 +138,8 @@ static const struct attribute_group ds1682_group = {
 /*
  * User data attribute
  */
-static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
+				  struct bin_attribute *attr,
 				  char *buf, loff_t off, size_t count)
 {
 	struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -164,7 +162,8 @@ static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *at
 	return count;
 }
 
-static ssize_t ds1682_eeprom_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
+				   struct bin_attribute *attr,
 				   char *buf, loff_t off, size_t count)
 {
 	struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -249,19 +248,8 @@ static struct i2c_driver ds1682_driver = {
 	.id_table = ds1682_id,
 };
 
-static int __init ds1682_init(void)
-{
-	return i2c_add_driver(&ds1682_driver);
-}
-
-static void __exit ds1682_exit(void)
-{
-	i2c_del_driver(&ds1682_driver);
-}
+module_i2c_driver(ds1682_driver);
 
 MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
 MODULE_DESCRIPTION("DS1682 Elapsed Time Indicator driver");
 MODULE_LICENSE("GPL");
-
-module_init(ds1682_init);
-module_exit(ds1682_exit);
diff --git a/drivers/misc/dummy-irq.c b/drivers/misc/dummy-irq.c
new file mode 100644
index 00000000000..4d0db15df11
--- /dev/null
+++ b/drivers/misc/dummy-irq.c
@@ -0,0 +1,63 @@
+/*
+ * Dummy IRQ handler driver.
+ *
+ * This module only registers itself as a handler for the IRQ line
+ * specified by the 'irq' parameter.
+ *
+ * The sole purpose of this module is to help with debugging of systems on
+ * which spurious IRQs would happen on a disabled IRQ vector.
+ *
+ * Copyright (C) 2013 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+static int irq = -1;
+
+static irqreturn_t dummy_interrupt(int irq, void *dev_id)
+{
+	static int count = 0;
+
+	if (count == 0) {
+		printk(KERN_INFO "dummy-irq: interrupt occurred on IRQ %d\n",
+				irq);
+		count++;
+	}
+
+	return IRQ_NONE;
+}
+
+static int __init dummy_irq_init(void)
+{
+	if (irq < 0) {
+		printk(KERN_ERR "dummy-irq: no IRQ given.  Use irq=N\n");
+		return -EIO;
+	}
+	if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) {
+		printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq);
+		return -EIO;
+	}
+	printk(KERN_INFO "dummy-irq: registered for IRQ %d\n", irq);
+	return 0;
+}
+
+static void __exit dummy_irq_exit(void)
+{
+	printk(KERN_INFO "dummy-irq unloaded\n");
+	free_irq(irq, &irq);
+}
+
+module_init(dummy_irq_init);
+module_exit(dummy_irq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jiri Kosina");
+module_param(irq, int, 0444);
+MODULE_PARM_DESC(irq, "The IRQ to register for");
diff --git a/drivers/misc/echo/Kconfig b/drivers/misc/echo/Kconfig
new file mode 100644
index 00000000000..f1d41ea9cd4
--- /dev/null
+++ b/drivers/misc/echo/Kconfig
@@ -0,0 +1,9 @@
+config ECHO
+	tristate "Line Echo Canceller support"
+	default n
+	---help---
+	  This driver provides line echo cancelling support for mISDN and
+	  Zaptel drivers.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called echo.
diff --git a/drivers/misc/echo/Makefile b/drivers/misc/echo/Makefile
new file mode 100644
index 00000000000..7d4caac12a8
--- /dev/null
+++ b/drivers/misc/echo/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ECHO) += echo.o
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
new file mode 100644
index 00000000000..9597e9523ca
--- /dev/null
+++ b/drivers/misc/echo/echo.c
@@ -0,0 +1,674 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * echo.c - A line echo canceller. This code is being developed
+ *          against and partially complies with G168.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *          and David Rowe <david_at_rowetel_dot_com>
+ *
+ * Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe
+ *
+ * Based on a bit from here, a bit from there, eye of toad, ear of
+ * bat, 15 years of failed attempts by David and a few fried brain
+ * cells.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*! \file */
+
+/* Implementation Notes
+   David Rowe
+   April 2007
+
+   This code started life as Steve's NLMS algorithm with a tap
+   rotation algorithm to handle divergence during double talk. I
+   added a Geigel Double Talk Detector (DTD) [2] and performed some
+   G168 tests. However I had trouble meeting the G168 requirements,
+   especially for double talk - there were always cases where my DTD
+   failed, for example where near end speech was under the 6dB
+   threshold required for declaring double talk.
+
+   So I tried a two path algorithm [1], which has so far given better
+   results. The original tap rotation/Geigel algorithm is available
+   in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit.
+   It's probably possible to make it work if someone wants to put some
+   serious work into it.
+
+   At present no special treatment is provided for tones, which
+   generally cause NLMS algorithms to diverge. Initial runs of a
+   subset of the G168 tests for tones (e.g. ./echo_test 6) show the
+   current algorithm is passing OK, which is kind of surprising. The
+   full set of tests needs to be performed to confirm this result.
+
+   One other interesting change is that I have managed to get the NLMS
+   code to work with 16 bit coefficients, rather than the original 32
+   bit coefficients. This reduces the MIPS and storage required.
+   I evaluated the 16 bit port using g168_tests.sh and listening tests
+   on 4 real-world samples. (See the note on coefficient rounding at
+   the end of these notes.)
+
+   I also attempted the implementation of a block based NLMS update
+   [2] but although this passes g168_tests.sh it didn't converge well
+   on the real-world samples. I have no idea why, perhaps a scaling
+   problem. The block based code is also available in SVN
+   http://svn.rowetel.com/software/oslec/tags/before_16bit. If this
+   code can be debugged, it will lead to further reduction in MIPS, as
+   the block update code maps nicely onto DSP instruction sets (it's a
+   dot product) compared to the current sample-by-sample update.
+
+   Steve also has some nice notes on echo cancellers in echo.h
+
+   References:
+
+   [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
+       Path Models", IEEE Transactions on Communications, COM-25,
+       No. 6, June 1977.
+       http://www.rowetel.com/images/echo/dual_path_paper.pdf
+
+   [2] The classic, very useful paper that tells you how to
+       actually build a real world echo canceller:
+       Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice
+       Echo Canceller with a TMS320020",
+       http://www.rowetel.com/images/echo/spra129.pdf
+
+   [3] I have written a series of blog posts on this work, here is
+       Part 1: http://www.rowetel.com/blog/?p=18
+
+   [4] The source code http://svn.rowetel.com/software/oslec/
+
+   [5] A nice reference on LMS filters:
+       http://en.wikipedia.org/wiki/Least_mean_squares_filter
+
+   Credits:
+
+   Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan
+   Muthukrishnan for their suggestions and email discussions. Thanks
+   also to those people who collected echo samples for me such as
+   Mark, Pawel, and Pavel.
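+
+   A note on the 16 bit update, for reference (the two lines below are
+   taken verbatim from lms_adapt_bg() further down): the tap correction
+   is computed in a 32 bit intermediate and rounded to nearest before
+   being folded back into the 16 bit tap:
+
+	exp = *phist++ * factor;
+	ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+
+   Adding 1 << 14 before the arithmetic shift right by 15 rounds to
+   nearest rather than truncating towards minus infinity; without it,
+   the repeated truncation would bias the taps downwards.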
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "echo.h"
+
+#define MIN_TX_POWER_FOR_ADAPTION	64
+#define MIN_RX_POWER_FOR_ADAPTION	64
+#define DTD_HANGOVER			600	/* 600 samples, or 75ms */
+#define DC_LOG2BETA			3	/* log2() of DC filter Beta */
+
+/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */
+
+#ifdef __bfin__
+static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift)
+{
+	int i;
+	int offset1;
+	int offset2;
+	int factor;
+	int exp;
+	int16_t *phist;
+	int n;
+
+	if (shift > 0)
+		factor = clean << shift;
+	else
+		factor = clean >> -shift;
+
+	/* Update the FIR taps */
+
+	offset2 = ec->curr_pos;
+	offset1 = ec->taps - offset2;
+	phist = &ec->fir_state_bg.history[offset2];
+
+	/* st: and en: help us locate the assembler in echo.s */
+
+	/* asm("st:"); */
+	n = ec->taps;
+	for (i = 0; i < n; i++) {
+		exp = *phist++ * factor;
+		ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+	}
+	/* asm("en:"); */
+
+	/* Note the asm for the inner loop above generated by Blackfin gcc
+	   4.1.1 is pretty good (note even parallel instructions used):
+
+	   R0 = W [P0++] (X);
+	   R0 *= R2;
+	   R0 = R0 + R3 (NS) ||
+	   R1 = W [P1] (X) ||
+	   nop;
+	   R0 >>>= 15;
+	   R0 = R0 + R1;
+	   W [P1++] = R0;
+
+	   A block based update algorithm would be much faster but the
+	   above can't be improved on much. Every instruction saved in
+	   the loop above is 2 MIPS/ch! The for loop above is where the
+	   Blackfin spends most of its time - about 17 MIPS/ch measured
+	   with speedtest.c with 256 taps (32ms). Write-back and
+	   Write-through cache gave about the same performance.
+	 */
+}
+
+/*
+   IDEAS for further optimisation of lms_adapt_bg():
+
+   1/ The rounding is quite costly. Could we keep as 32 bit coeffs
+   then make filter pluck the MS 16-bits of the coeffs when filtering?
+   However this would lower potential optimisation of filter, as I
+   think the dual-MAC architecture requires packed 16 bit coeffs.
+
+   2/ Block based update would be more efficient, as per comments above,
+   could use dual MAC architecture.
+
+   3/ Look for same sample Blackfin LMS code, see if we can get dual-MAC
+   packing.
+
+   4/ Execute the whole e/c in a block of say 20ms rather than sample
+   by sample. Processing a few samples every ms is inefficient.
+*/ + +#else +static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) +{ + int i; + + int offset1; + int offset2; + int factor; + int exp; + + if (shift > 0) + factor = clean << shift; + else + factor = clean >> -shift; + + /* Update the FIR taps */ + + offset2 = ec->curr_pos; + offset1 = ec->taps - offset2; + + for (i = ec->taps - 1; i >= offset1; i--) { + exp = (ec->fir_state_bg.history[i - offset1] * factor); + ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); + } + for (; i >= 0; i--) { + exp = (ec->fir_state_bg.history[i + offset2] * factor); + ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); + } +} +#endif + +static inline int top_bit(unsigned int bits) +{ + if (bits == 0) + return -1; + else + return (int)fls((int32_t) bits) - 1; +} + +struct oslec_state *oslec_create(int len, int adaption_mode) +{ + struct oslec_state *ec; + int i; + const int16_t *history; + + ec = kzalloc(sizeof(*ec), GFP_KERNEL); + if (!ec) + return NULL; + + ec->taps = len; + ec->log2taps = top_bit(len); + ec->curr_pos = ec->taps - 1; + + ec->fir_taps16[0] = + kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->fir_taps16[0]) + goto error_oom_0; + + ec->fir_taps16[1] = + kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->fir_taps16[1]) + goto error_oom_1; + + history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps); + if (!history) + goto error_state; + history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps); + if (!history) + goto error_state_bg; + + for (i = 0; i < 5; i++) + ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; + + ec->cng_level = 1000; + oslec_adaption_mode(ec, adaption_mode); + + ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->snapshot) + goto error_snap; + + ec->cond_met = 0; + ec->pstates = 0; + ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; + ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; + ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; + ec->lbgn = ec->lbgn_acc = 0; + ec->lbgn_upper = 200; + ec->lbgn_upper_acc = ec->lbgn_upper << 13; + + return ec; + +error_snap: + fir16_free(&ec->fir_state_bg); +error_state_bg: + fir16_free(&ec->fir_state); +error_state: + kfree(ec->fir_taps16[1]); +error_oom_1: + kfree(ec->fir_taps16[0]); +error_oom_0: + kfree(ec); + return NULL; +} +EXPORT_SYMBOL_GPL(oslec_create); + +void oslec_free(struct oslec_state *ec) +{ + int i; + + fir16_free(&ec->fir_state); + fir16_free(&ec->fir_state_bg); + for (i = 0; i < 2; i++) + kfree(ec->fir_taps16[i]); + kfree(ec->snapshot); + kfree(ec); +} +EXPORT_SYMBOL_GPL(oslec_free); + +void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode) +{ + ec->adaption_mode = adaption_mode; +} +EXPORT_SYMBOL_GPL(oslec_adaption_mode); + +void oslec_flush(struct oslec_state *ec) +{ + int i; + + ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; + ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; + ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; + + ec->lbgn = ec->lbgn_acc = 0; + ec->lbgn_upper = 200; + ec->lbgn_upper_acc = ec->lbgn_upper << 13; + + ec->nonupdate_dwell = 0; + + fir16_flush(&ec->fir_state); + fir16_flush(&ec->fir_state_bg); + ec->fir_state.curr_pos = ec->taps - 1; + ec->fir_state_bg.curr_pos = ec->taps - 1; + for (i = 0; i < 2; i++) + memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t)); + + ec->curr_pos = ec->taps - 1; + ec->pstates = 0; +} +EXPORT_SYMBOL_GPL(oslec_flush); + +void oslec_snapshot(struct oslec_state *ec) +{ + memcpy(ec->snapshot, 
ec->fir_taps16[0], ec->taps * sizeof(int16_t));
+}
+EXPORT_SYMBOL_GPL(oslec_snapshot);
+
+/* Dual Path Echo Canceller */
+
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
+{
+	int32_t echo_value;
+	int clean_bg;
+	int tmp;
+	int tmp1;
+
+	/*
+	 * Input scaling was found to be required to prevent problems when tx
+	 * starts clipping. Another possible way to handle this would be the
+	 * filter coefficient scaling.
+	 */
+
+	ec->tx = tx;
+	ec->rx = rx;
+	tx >>= 1;
+	rx >>= 1;
+
+	/*
+	 * Filter DC, 3dB point is 160Hz (I think), note 32 bit precision
+	 * required otherwise values do not track down to 0. Zero at DC, Pole
+	 * at (1-Beta) on real axis. Some chip sets (like Si labs) don't
+	 * need this, but something like a $10 X100P card does. Any DC really
+	 * slows down convergence.
+	 *
+	 * Note: removes some low frequency from the signal, this reduces the
+	 * speech quality when listening to samples through headphones but may
+	 * not be obvious through a telephone handset.
+	 *
+	 * Note that the 3dB frequency in radians is approx Beta, e.g. for Beta
+	 * = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
+	 */
+
+	if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
+		tmp = rx << 15;
+
+		/*
+		 * Make sure the gain of the HPF is 1.0. This can still
+		 * saturate a little under impulse conditions, and it might
+		 * roll to 32768 and need clipping on sustained peak level
+		 * signals. However, the scale of such clipping is small, and
+		 * the error due to any saturation should not markedly affect
+		 * the downstream processing.
+		 */
+		tmp -= (tmp >> 4);
+
+		ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2;
+
+		/*
+		 * hard limit filter to prevent clipping. Note that at this
+		 * stage rx should be limited to +/- 16383 due to right shift
+		 * above
+		 */
+		tmp1 = ec->rx_1 >> 15;
+		if (tmp1 > 16383)
+			tmp1 = 16383;
+		if (tmp1 < -16383)
+			tmp1 = -16383;
+		rx = tmp1;
+		ec->rx_2 = tmp;
+	}
+
+	/* Block average of power in the filter states. Used for
+	   adaption power calculation. */
+
+	{
+		int new, old;
+
+		/* efficient "out with the old and in with the new" algorithm so
+		   we don't have to recalculate over the whole block of
+		   samples. */
+		new = (int)tx * (int)tx;
+		old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
+		      (int)ec->fir_state.history[ec->fir_state.curr_pos];
+		ec->pstates +=
+		    ((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps;
+		if (ec->pstates < 0)
+			ec->pstates = 0;
+	}
+
+	/* Calculate short term average levels using simple single pole IIRs */
+
+	ec->ltxacc += abs(tx) - ec->ltx;
+	ec->ltx = (ec->ltxacc + (1 << 4)) >> 5;
+	ec->lrxacc += abs(rx) - ec->lrx;
+	ec->lrx = (ec->lrxacc + (1 << 4)) >> 5;
+
+	/* Foreground filter */
+
+	ec->fir_state.coeffs = ec->fir_taps16[0];
+	echo_value = fir16(&ec->fir_state, tx);
+	ec->clean = rx - echo_value;
+	ec->lcleanacc += abs(ec->clean) - ec->lclean;
+	ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5;
+
+	/* Background filter */
+
+	echo_value = fir16(&ec->fir_state_bg, tx);
+	clean_bg = rx - echo_value;
+	ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg;
+	ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5;
+
+	/* Background Filter adaption */
+
+	/* Almost always adapt bg filter, just simple DT and energy
+	   detection to minimise adaption in cases of strong double talk.
+	   However this is not critical for the dual path algorithm.
+	 */
+	ec->factor = 0;
+	ec->shift = 0;
+	if ((ec->nonupdate_dwell == 0)) {
+		int p, logp, shift;
+
+		/* Determine:
+
+		   f = Beta * clean_bg_rx/P ------ (1)
+
+		   where P is the total power in the filter states.
+
+		   The Boffins have shown that if we obey (1) we converge
+		   quickly and avoid instability.
+
+		   The correct factor f must be in Q30, as this is the fixed
+		   point format required by the lms_adapt_bg() function,
+		   therefore the scaled version of (1) is:
+
+		   (2^30) * f = (2^30) * Beta * clean_bg_rx/P
+		   factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
+
+		   We have chosen Beta = 0.25 by experiment, so:
+
+		   factor = (2^30) * (2^-2) * clean_bg_rx/P
+
+		   factor = clean_bg_rx * 2^(30 - 2 - log2(P)) ----- (3)
+
+		   To avoid a divide we approximate log2(P) as top_bit(P),
+		   which returns the position of the highest non-zero bit in
+		   P. This approximation introduces an error as large as a
+		   factor of 2, but the algorithm seems to handle it OK.
+
+		   As a worked example (illustrative numbers only): with 256
+		   taps (log2taps == 8) and ec->pstates == 0, so that p ==
+		   MIN_TX_POWER_FOR_ADAPTION == 64, top_bit(64) == 6, giving
+		   logp == 6 + 8 == 14 and shift == 30 - 2 - 14 == 14.
+
+		   Come to think of it a divide may not be a big deal on a
+		   modern DSP, so it's probably worth checking out the cycles
+		   for a divide versus a top_bit() implementation.
+		 */
+
+		p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates;
+		logp = top_bit(p) + ec->log2taps;
+		shift = 30 - 2 - logp;
+		ec->shift = shift;
+
+		lms_adapt_bg(ec, clean_bg, shift);
+	}
+
+	/* very simple DTD to make sure we don't try to adapt with strong
+	   near end speech */
+
+	ec->adapt = 0;
+	if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx))
+		ec->nonupdate_dwell = DTD_HANGOVER;
+	if (ec->nonupdate_dwell)
+		ec->nonupdate_dwell--;
+
+	/* Transfer logic */
+
+	/* These conditions are from the dual path paper [1], I messed with
+	   them a bit to improve performance. */
+
+	if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
+	    (ec->nonupdate_dwell == 0) &&
+	    /* (ec->Lclean_bg < 0.875*ec->Lclean) */
+	    (8 * ec->lclean_bg < 7 * ec->lclean) &&
+	    /* (ec->Lclean_bg < 0.125*ec->Ltx) */
+	    (8 * ec->lclean_bg < ec->ltx)) {
+		if (ec->cond_met == 6) {
+			/*
+			 * BG filter has had better results for 6 consecutive
+			 * samples
+			 */
+			ec->adapt = 1;
+			memcpy(ec->fir_taps16[0], ec->fir_taps16[1],
+			       ec->taps * sizeof(int16_t));
+		} else
+			ec->cond_met++;
+	} else
+		ec->cond_met = 0;
+
+	/* Non-Linear Processing */
+
+	ec->clean_nlp = ec->clean;
+	if (ec->adaption_mode & ECHO_CAN_USE_NLP) {
+		/*
+		 * Non-linear processor - a fancy way to say "zap small
+		 * signals, to avoid residual echo due to (uLaw/ALaw)
+		 * non-linearity in the channel.".
+		 */
+
+		if ((16 * ec->lclean < ec->ltx)) {
+			/*
+			 * Our e/c has improved echo by at least 24 dB (each
+			 * factor of 2 is 6dB, so 2*2*2*2=16 is the same as
+			 * 6+6+6+6=24dB)
+			 */
+			if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
+				ec->cng_level = ec->lbgn;
+
+				/*
+				 * Very elementary comfort noise generation.
+				 * Just random numbers rolled off very vaguely
+				 * Hoth-like. DR: This noise doesn't sound
+				 * quite right to me - I suspect there are some
+				 * overflow issues in the filtering as it's too
+				 * "crackly".
+				 * TODO: debug this, maybe just play noise at
+				 * high level or look at spectrum.
+				 */
+
+				ec->cng_rndnum =
+				    1664525U * ec->cng_rndnum + 1013904223U;
+				ec->cng_filter =
+				    ((ec->cng_rndnum & 0xFFFF) - 32768 +
+				     5 * ec->cng_filter) >> 3;
+				ec->clean_nlp =
+				    (ec->cng_filter * ec->cng_level * 8) >> 14;
+
+			} else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
+				/* This sounds much better than CNG */
+				if (ec->clean_nlp > ec->lbgn)
+					ec->clean_nlp = ec->lbgn;
+				if (ec->clean_nlp < -ec->lbgn)
+					ec->clean_nlp = -ec->lbgn;
+			} else {
+				/*
+				 * just mute the residual, doesn't sound very
+				 * good, used mainly in G168 tests
+				 */
+				ec->clean_nlp = 0;
+			}
+		} else {
+			/*
+			 * Background noise estimator. I tried a few
+			 * algorithms here without much luck. This very simple
+			 * one seems to work best, we just average the level
+			 * using a slow (1 sec time const) filter if the
+			 * current level is less than a (experimentally
+			 * derived) constant. This means we don't include high
+			 * level signals like near end speech. When combined
+			 * with CNG or especially CLIP seems to work OK.
+			 */
+			if (ec->lclean < 40) {
+				ec->lbgn_acc += abs(ec->clean) - ec->lbgn;
+				ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12;
+			}
+		}
+	}
+
+	/* Roll around the taps buffer */
+	if (ec->curr_pos <= 0)
+		ec->curr_pos = ec->taps;
+	ec->curr_pos--;
+
+	if (ec->adaption_mode & ECHO_CAN_DISABLE)
+		ec->clean_nlp = rx;
+
+	/* Output scaled back up again to match input scaling */
+
+	return (int16_t) ec->clean_nlp << 1;
+}
+EXPORT_SYMBOL_GPL(oslec_update);
+
+/* This function is separated from the echo canceller as it is usually called
+   as part of the tx process. See the rx HP (DC blocking) filter above, it's
+   the same design.
+
+   Some soft phones send speech signals with a lot of low frequency
+   energy, e.g. down to 20Hz. This can make the hybrid non-linear
+   which causes the echo canceller to fall over. This filter can help
+   by removing any low frequency before it gets to the tx port of the
+   hybrid.
+
+   It can also help by removing any DC in the tx signal. DC is bad
+   for LMS algorithms.
+
+   This is one of the classic DC removal filters, adjusted to provide
+   sufficient bass rolloff to meet the above requirement to protect hybrids
+   from things that upset them. The difference between successive samples
+   produces a lousy HPF, and then a suitably placed pole flattens things out.
+   The final result is a nicely rolled off bass end. The filtering is
+   implemented with extended fractional precision, which noise shapes things,
+   giving very clean DC removal.
+*/
+
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx)
+{
+	int tmp;
+	int tmp1;
+
+	if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
+		tmp = tx << 15;
+
+		/*
+		 * Make sure the gain of the HPF is 1.0. This can still
+		 * saturate a little under impulse conditions, and it might
+		 * roll to 32768 and need clipping on sustained peak level
+		 * signals. However, the scale of such clipping is small, and
+		 * the error due to any saturation should not markedly affect
+		 * the downstream processing.
+		 */
+		tmp -= (tmp >> 4);
+
+		ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
+		tmp1 = ec->tx_1 >> 15;
+		if (tmp1 > 32767)
+			tmp1 = 32767;
+		if (tmp1 < -32767)
+			tmp1 = -32767;
+		tx = tmp1;
+		ec->tx_2 = tmp;
+	}
+
+	return tx;
+}
+EXPORT_SYMBOL_GPL(oslec_hpf_tx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Rowe");
+MODULE_DESCRIPTION("Open Source Line Echo Canceller");
+MODULE_VERSION("0.3.0");
diff --git a/drivers/misc/echo/echo.h b/drivers/misc/echo/echo.h
new file mode 100644
index 00000000000..9b08c63e636
--- /dev/null
+++ b/drivers/misc/echo/echo.h
@@ -0,0 +1,187 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * echo.c - A line echo canceller. This code is being developed
+ *          against and partially complies with G168.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *          and David Rowe <david_at_rowetel_dot_com>
+ *
+ * Copyright (C) 2001 Steve Underwood and 2007 David Rowe
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ECHO_H
+#define __ECHO_H
+
+/*
+Line echo cancellation for voice
+
+What does it do?
+
+This module aims to provide G.168-2002 compliant echo cancellation, to remove
+electrical echoes (e.g. from 2-4 wire hybrids) from voice calls.
+
+How does it work?
+
+The heart of the echo canceller is a FIR filter. This is adapted to match the
+echo impulse response of the telephone line. It must be long enough to
+adequately cover the duration of that impulse response. The signal transmitted
+to the telephone line is passed through the FIR filter. Once the FIR is
+properly adapted, the resulting output is an estimate of the echo signal
+received from the line. This is subtracted from the received signal. The result
+is an estimate of the signal which originated at the far end of the line, free
+from echoes of our own transmitted signal.
+
+The least mean squares (LMS) algorithm is attributed to Widrow and Hoff, and
+was introduced in 1960. It is the commonest form of filter adaption used in
+things like modem line equalisers and line echo cancellers. There it works very
+well.  However, it only works well for signals of constant amplitude. It works
+very poorly for things like speech echo cancellation, where the signal level
+varies widely. This is quite easy to fix. If the signal level is normalised -
+similar to applying AGC - LMS can work as well for a signal of varying
+amplitude as it does for a modem signal. This normalised least mean squares
+(NLMS) algorithm is the commonest one used for speech echo cancellation. Many
+other algorithms exist - e.g. RLS (essentially the same as Kalman filtering),
+FAP, etc. Some perform significantly better than NLMS.  However, factors such
+as computational complexity and patents favour the use of NLMS.
+
+A simple refinement to NLMS can improve its performance with speech. NLMS tends
+to adapt best to the strongest parts of a signal. If the signal is white noise,
+the NLMS algorithm works very well. However, speech has more low frequency than
+high frequency content. Pre-whitening (i.e. filtering the signal to flatten its
+spectrum) the echo signal improves the adapt rate for speech, and ensures the
+final residual signal is not heavily biased towards high frequencies. A very
+low complexity filter is adequate for this, so pre-whitening adds little to the
+compute requirements of the echo canceller.
+
+An FIR filter adapted using pre-whitened NLMS performs well, provided certain
+conditions are met:
+
+    - The transmitted signal has poor self-correlation.
+    - There is no signal being generated within the environment being
+      cancelled.
+
+The difficulty is that neither of these can be guaranteed.
+
+If the adaption is performed while transmitting noise (or something fairly
+noise like, such as voice) the adaption works very well. If the adaption is
+performed while transmitting something highly correlative (typically narrow
+band energy such as signalling tones or DTMF), the adaption can go seriously
+The reason is that there is only one solution for the adaption on a near
+random signal - the impulse response of the line. For a repetitive signal,
+there are any number of solutions which converge the adaption, and nothing
+guides the adaption to choose the generalised one. Allowing an untrained
+canceller to converge on this kind of narrowband energy is probably a good
+thing, since at least it cancels the tones. Allowing a well converged
+canceller to continue converging on such energy is just a way to ruin its
+generalised adaption. A narrowband detector is needed, so adaption can be
+suspended at appropriate times.
+
+The adaption process is based on trying to eliminate the received signal. When
+there is any signal from within the environment being cancelled it may upset
+the adaption process. Similarly, if the signal we are transmitting is small,
+noise may dominate and disturb the adaption process. If we can ensure that the
+adaption is only performed when we are transmitting a significant signal level,
+and the environment is not, things will be OK. Clearly, it is easy to tell when
+we are sending a significant signal. Telling if the environment is generating
+a significant signal, and doing so with sufficient speed that the adaption will
+not have diverged too much before we stop it, is a little harder.
+
+The key problem in detecting when the environment is sourcing significant
+energy is that we must do this very quickly. Given a reasonably long sample of
+the received signal, there are a number of strategies which may be used to
+assess whether that signal contains a strong far end component. However, by the
+time that assessment is complete the far end signal will have already caused
+major mis-convergence in the adaption process. An assessment algorithm is
+needed which produces a fairly accurate result from a very short burst of far
+end energy.
+
+How do I use it?
+
+The echo canceller processes both the transmit and receive streams sample by
+sample; see the call sequence sketched at the end of oslec.h. The processing
+function is not declared inline, but since cancellation requires many
+operations per sample, the call overhead is only a minor burden.
+*/
+
+#include "fir.h"
+#include "oslec.h"
+
+/*
+   G.168 echo canceller descriptor. This defines the working state for a line
+   echo canceller.
+*/
+struct oslec_state {
+	int16_t tx;
+	int16_t rx;
+	int16_t clean;
+	int16_t clean_nlp;
+
+	int nonupdate_dwell;
+	int curr_pos;
+	int taps;
+	int log2taps;
+	int adaption_mode;
+
+	int cond_met;
+	int32_t pstates;
+	int16_t adapt;
+	int32_t factor;
+	int16_t shift;
+
+	/* Average levels and averaging filter states */
+	int ltxacc;
+	int lrxacc;
+	int lcleanacc;
+	int lclean_bgacc;
+	int ltx;
+	int lrx;
+	int lclean;
+	int lclean_bg;
+	int lbgn;
+	int lbgn_acc;
+	int lbgn_upper;
+	int lbgn_upper_acc;
+
+	/* foreground and background filter states */
+	struct fir16_state_t fir_state;
+	struct fir16_state_t fir_state_bg;
+	int16_t *fir_taps16[2];
+
+	/* DC blocking filter states */
+	int tx_1;
+	int tx_2;
+	int rx_1;
+	int rx_2;
+
+	/* optional High Pass Filter states */
+	int32_t xvtx[5];
+	int32_t yvtx[5];
+	int32_t xvrx[5];
+	int32_t yvrx[5];
+
+	/* Parameters for the optional Hoth noise generator */
+	int cng_level;
+	int cng_rndnum;
+	int cng_filter;
+
+	/* snapshot sample of coeffs used for development */
+	int16_t *snapshot;
+};
+
+#endif /* __ECHO_H */
diff --git a/drivers/misc/echo/fir.h b/drivers/misc/echo/fir.h
new file mode 100644
index 00000000000..7b9fabf1fea
--- /dev/null
+++ b/drivers/misc/echo/fir.h
@@ -0,0 +1,216 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * fir.h - General telephony FIR routines
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2002 Steve Underwood
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#if !defined(_FIR_H_)
+#define _FIR_H_
+
+/*
+   Blackfin NOTES & IDEAS:
+
+   A simple dot product function is used to implement the filter. This performs
+   just one MAC/cycle which is inefficient but was easy to implement as a first
+   pass. The current Blackfin code also uses an unrolled form of the filter
+   history to avoid 0 length hardware loop issues. This is wasteful of
+   memory.
+
+   Ideas for improvement:
+
+   1/ Rewrite filter for dual MAC inner loop. The issue here is handling
+   history sample offsets that are 16 bit aligned - the dual MAC needs
+   32 bit alignment. There are some good examples in libbfdsp.
+
+   2/ Use the hardware circular buffer facility to halve memory usage.
+
+   3/ Consider using internal memory.
+
+   Using less memory might also improve speed as cache misses will be
+   reduced. A drop in MIPs and memory approaching 50% should be
+   possible.
+
+   The foreground and background filters currently use a total of
+   about 10 MIPs/ch as measured with speedtest.c on a 256 tap echo
+   canceller.
+*/
+
+/*
+ * 16 bit integer FIR descriptor. This defines the working state for a single
+ * instance of an FIR filter using 16 bit integer coefficients.
+ */
+struct fir16_state_t {
+	int taps;
+	int curr_pos;
+	const int16_t *coeffs;
+	int16_t *history;
+};
+
+/*
+ * 32 bit integer FIR descriptor.
This defines the working state for a single + * instance of an FIR filter using 32 bit integer coefficients, and filtering + * 16 bit integer data. + */ +struct fir32_state_t { + int taps; + int curr_pos; + const int32_t *coeffs; + int16_t *history; +}; + +/* + * Floating point FIR descriptor. This defines the working state for a single + * instance of an FIR filter using floating point coefficients and data. + */ +struct fir_float_state_t { + int taps; + int curr_pos; + const float *coeffs; + float *history; +}; + +static inline const int16_t *fir16_create(struct fir16_state_t *fir, + const int16_t *coeffs, int taps) +{ + fir->taps = taps; + fir->curr_pos = taps - 1; + fir->coeffs = coeffs; +#if defined(__bfin__) + fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL); +#else + fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); +#endif + return fir->history; +} + +static inline void fir16_flush(struct fir16_state_t *fir) +{ +#if defined(__bfin__) + memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t)); +#else + memset(fir->history, 0, fir->taps * sizeof(int16_t)); +#endif +} + +static inline void fir16_free(struct fir16_state_t *fir) +{ + kfree(fir->history); +} + +#ifdef __bfin__ +static inline int32_t dot_asm(short *x, short *y, int len) +{ + int dot; + + len--; + + __asm__("I0 = %1;\n\t" + "I1 = %2;\n\t" + "A0 = 0;\n\t" + "R0.L = W[I0++] || R1.L = W[I1++];\n\t" + "LOOP dot%= LC0 = %3;\n\t" + "LOOP_BEGIN dot%=;\n\t" + "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t" + "LOOP_END dot%=;\n\t" + "A0 += R0.L*R1.L (IS);\n\t" + "R0 = A0;\n\t" + "%0 = R0;\n\t" + : "=&d"(dot) + : "a"(x), "a"(y), "a"(len) + : "I0", "I1", "A1", "A0", "R0", "R1" + ); + + return dot; +} +#endif + +static inline int16_t fir16(struct fir16_state_t *fir, int16_t sample) +{ + int32_t y; +#if defined(__bfin__) + fir->history[fir->curr_pos] = sample; + fir->history[fir->curr_pos + fir->taps] = sample; + y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos], + fir->taps); +#else + int i; + int offset1; + int offset2; + + fir->history[fir->curr_pos] = sample; + + offset2 = fir->curr_pos; + offset1 = fir->taps - offset2; + y = 0; + for (i = fir->taps - 1; i >= offset1; i--) + y += fir->coeffs[i] * fir->history[i - offset1]; + for (; i >= 0; i--) + y += fir->coeffs[i] * fir->history[i + offset2]; +#endif + if (fir->curr_pos <= 0) + fir->curr_pos = fir->taps; + fir->curr_pos--; + return (int16_t) (y >> 15); +} + +static inline const int16_t *fir32_create(struct fir32_state_t *fir, + const int32_t *coeffs, int taps) +{ + fir->taps = taps; + fir->curr_pos = taps - 1; + fir->coeffs = coeffs; + fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); + return fir->history; +} + +static inline void fir32_flush(struct fir32_state_t *fir) +{ + memset(fir->history, 0, fir->taps * sizeof(int16_t)); +} + +static inline void fir32_free(struct fir32_state_t *fir) +{ + kfree(fir->history); +} + +static inline int16_t fir32(struct fir32_state_t *fir, int16_t sample) +{ + int i; + int32_t y; + int offset1; + int offset2; + + fir->history[fir->curr_pos] = sample; + offset2 = fir->curr_pos; + offset1 = fir->taps - offset2; + y = 0; + for (i = fir->taps - 1; i >= offset1; i--) + y += fir->coeffs[i] * fir->history[i - offset1]; + for (; i >= 0; i--) + y += fir->coeffs[i] * fir->history[i + offset2]; + if (fir->curr_pos <= 0) + fir->curr_pos = fir->taps; + fir->curr_pos--; + return (int16_t) (y >> 15); +} + +#endif diff --git a/drivers/misc/echo/oslec.h b/drivers/misc/echo/oslec.h new file mode 
100644
index 00000000000..f4175360ce2
--- /dev/null
+++ b/drivers/misc/echo/oslec.h
@@ -0,0 +1,94 @@
+/*
+ *  OSLEC - A line echo canceller. This code is being developed
+ *  against and partially complies with G.168, using code from SpanDSP.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ * and David Rowe <david_at_rowetel_dot_com>
+ *
+ * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __OSLEC_H
+#define __OSLEC_H
+
+/* Mask bits for the adaption mode */
+#define ECHO_CAN_USE_ADAPTION	0x01
+#define ECHO_CAN_USE_NLP	0x02
+#define ECHO_CAN_USE_CNG	0x04
+#define ECHO_CAN_USE_CLIP	0x08
+#define ECHO_CAN_USE_TX_HPF	0x10
+#define ECHO_CAN_USE_RX_HPF	0x20
+#define ECHO_CAN_DISABLE	0x40
+
+/**
+ * oslec_state - G.168 echo canceller descriptor.
+ *
+ * This defines the working state for a line echo canceller.
+ */
+struct oslec_state;
+
+/**
+ * oslec_create - Create a voice echo canceller context.
+ * @len: The length of the canceller, in samples.
+ * @adaption_mode: The adaption mode mask (ECHO_CAN_* bits above).
+ * @return: The new canceller context, or NULL if the canceller could not be
+ * created.
+ */
+struct oslec_state *oslec_create(int len, int adaption_mode);
+
+/**
+ * oslec_free - Free a voice echo canceller context.
+ * @ec: The echo canceller context.
+ */
+void oslec_free(struct oslec_state *ec);
+
+/**
+ * oslec_flush - Flush (reinitialise) a voice echo canceller context.
+ * @ec: The echo canceller context.
+ */
+void oslec_flush(struct oslec_state *ec);
+
+/**
+ * oslec_adaption_mode - Set the adaption mode of a voice echo canceller context.
+ * @ec: The echo canceller context.
+ * @adaption_mode: The mode.
+ */
+void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
+
+/* Snapshot the current filter coefficients (development aid). */
+void oslec_snapshot(struct oslec_state *ec);
+
+/**
+ * oslec_update - Process a sample through a voice echo canceller.
+ * @ec: The echo canceller context.
+ * @tx: The transmitted audio sample.
+ * @rx: The received audio sample.
+ *
+ * The return value is the clean (echo cancelled) received sample.
+ */
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
+
+/**
+ * oslec_hpf_tx - High pass filter the tx signal.
+ * @ec: The echo canceller context.
+ * @tx: The transmitted audio sample.
+ *
+ * The return value is the HP filtered transmit sample; send this to your D/A.
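+ *
+ * One plausible per-sample call sequence is sketched below. This is
+ * illustrative only: more_samples(), next_tx_sample(), next_rx_sample()
+ * and put_clean_sample() are hypothetical helpers, and the tail length
+ * and mode bits are example values, not recommendations.
+ *
+ *	struct oslec_state *ec;
+ *	int16_t tx, rx, clean;
+ *
+ *	ec = oslec_create(256, ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP);
+ *	if (ec) {
+ *		while (more_samples()) {
+ *			tx = next_tx_sample();
+ *			rx = next_rx_sample();
+ *			tx = oslec_hpf_tx(ec, tx);	(send this tx to the D/A)
+ *			clean = oslec_update(ec, tx, rx);
+ *			put_clean_sample(clean);
+ *		}
+ *		oslec_free(ec);
+ *	}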
+ */
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
+
+#endif /* __OSLEC_H */
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 9118613af32..9536852fd4c 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -1,13 +1,14 @@
 menu "EEPROM support"
 
 config EEPROM_AT24
-	tristate "I2C EEPROMs from most vendors"
+	tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
 	depends on I2C && SYSFS
 	help
-	  Enable this driver to get read/write support to most I2C EEPROMs,
-	  after you configure the driver to know about each EEPROM on
-	  your target board.  Use these generic chip names, instead of
-	  vendor-specific ones like at24c64 or 24lc02:
+	  Enable this driver to get read/write support to most I2C EEPROMs
+	  and compatible devices like FRAMs, SRAMs, ROMs etc., after you
+	  configure the driver to know about each chip on your target
+	  board.  Use these generic chip names instead of vendor-specific
+	  ones like at24c64, 24lc02 or fm24c04:
 
 	  24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
 	  24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
@@ -50,7 +51,7 @@ config EEPROM_LEGACY
 
 config EEPROM_MAX6875
 	tristate "Maxim MAX6874/5 power supply supervisor"
-	depends on I2C && EXPERIMENTAL
+	depends on I2C
 	help
 	  If you say yes here you get read-only support for the user EEPROM of
 	  the Maxim MAX6874/5 EEPROM-programmable, quad power-supply
@@ -70,4 +71,42 @@ config EEPROM_93CX6
 
 	  If unsure, say N.
 
+config EEPROM_93XX46
+	tristate "Microwire EEPROM 93XX46 support"
+	depends on SPI && SYSFS
+	help
+	  Driver for the microwire EEPROM chipsets 93xx46x. The driver
+	  supports both read and write commands and also the command to
+	  erase the whole EEPROM.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called eeprom_93xx46.
+
+	  If unsure, say N.
+
+config EEPROM_DIGSY_MTC_CFG
+	bool "DigsyMTC display configuration EEPROMs device"
+	depends on GPIO_MPC5200 && SPI_GPIO
+	help
+	  This option enables access to the display configuration EEPROMs
+	  on the digsy_mtc board. You have to additionally select the
+	  Microwire EEPROM 93XX46 driver. sysfs entries will be created for
+	  that EEPROM, allowing you to read/write the configuration data or
+	  to erase the whole EEPROM.
+
+	  If unsure, say N.
+
+config EEPROM_SUNXI_SID
+	tristate "Allwinner sunxi security ID support"
+	depends on ARCH_SUNXI && SYSFS
+	help
+	  This is a driver for the 'security ID' available on various Allwinner
+	  devices.
+
+	  Due to the potential risks involved with changing e-fuses,
+	  this driver is read-only.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called sunxi_sid.
+ endmenu diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile index df3d68ffa9d..9507aec95e9 100644 --- a/drivers/misc/eeprom/Makefile +++ b/drivers/misc/eeprom/Makefile @@ -3,3 +3,6 @@ obj-$(CONFIG_EEPROM_AT25) += at25.o obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o obj-$(CONFIG_EEPROM_MAX6875) += max6875.o obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o +obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o +obj-$(CONFIG_EEPROM_SUNXI_SID) += sunxi_sid.o +obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 2cb2736d65a..d87f77f790d 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -20,8 +20,9 @@ #include <linux/log2.h> #include <linux/bitops.h> #include <linux/jiffies.h> +#include <linux/of.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> /* * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable. @@ -54,7 +55,7 @@ struct at24_data { struct at24_platform_data chip; struct memory_accessor macc; - bool use_smbus; + int use_smbus; /* * Lock protects against activities from other Linux tasks, @@ -184,11 +185,19 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf, if (count > io_limit) count = io_limit; - if (at24->use_smbus) { + switch (at24->use_smbus) { + case I2C_SMBUS_I2C_BLOCK_DATA: /* Smaller eeproms can work given some SMBus extension calls */ if (count > I2C_SMBUS_BLOCK_MAX) count = I2C_SMBUS_BLOCK_MAX; - } else { + break; + case I2C_SMBUS_WORD_DATA: + count = 2; + break; + case I2C_SMBUS_BYTE_DATA: + count = 1; + break; + default: /* * When we have a better choice than SMBus calls, use a * combined I2C message. Write address; then read up to @@ -219,10 +228,27 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf, timeout = jiffies + msecs_to_jiffies(write_timeout); do { read_time = jiffies; - if (at24->use_smbus) { + switch (at24->use_smbus) { + case I2C_SMBUS_I2C_BLOCK_DATA: status = i2c_smbus_read_i2c_block_data(client, offset, count, buf); - } else { + break; + case I2C_SMBUS_WORD_DATA: + status = i2c_smbus_read_word_data(client, offset); + if (status >= 0) { + buf[0] = status & 0xff; + buf[1] = status >> 8; + status = count; + } + break; + case I2C_SMBUS_BYTE_DATA: + status = i2c_smbus_read_byte_data(client, offset); + if (status >= 0) { + buf[0] = status; + status = count; + } + break; + default: status = i2c_transfer(client->adapter, msg, 2); if (status == 2) status = count; @@ -274,7 +300,8 @@ static ssize_t at24_read(struct at24_data *at24, return retval; } -static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, +static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct at24_data *at24; @@ -395,11 +422,15 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off, return retval; } -static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr, +static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct at24_data *at24; + if (unlikely(off >= attr->size)) + return -EFBIG; + at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); return at24_write(at24, buf, off, count); } @@ -430,11 +461,32 @@ static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf, 
/*-------------------------------------------------------------------------*/ +#ifdef CONFIG_OF +static void at24_get_ofdata(struct i2c_client *client, + struct at24_platform_data *chip) +{ + const __be32 *val; + struct device_node *node = client->dev.of_node; + + if (node) { + if (of_get_property(node, "read-only", NULL)) + chip->flags |= AT24_FLAG_READONLY; + val = of_get_property(node, "pagesize", NULL); + if (val) + chip->page_size = be32_to_cpup(val); + } +} +#else +static void at24_get_ofdata(struct i2c_client *client, + struct at24_platform_data *chip) +{ } +#endif /* CONFIG_OF */ + static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct at24_platform_data chip; bool writable; - bool use_smbus = false; + int use_smbus = 0; struct at24_data *at24; int err; unsigned i, num_addresses; @@ -443,10 +495,9 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) if (client->dev.platform_data) { chip = *(struct at24_platform_data *)client->dev.platform_data; } else { - if (!id->driver_data) { - err = -ENODEV; - goto err_out; - } + if (!id->driver_data) + return -ENODEV; + magic = id->driver_data; chip.byte_len = BIT(magic & AT24_BITMASK(AT24_SIZE_BYTELEN)); magic >>= AT24_SIZE_BYTELEN; @@ -458,6 +509,9 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) */ chip.page_size = 1; + /* update chipdata if OF is present */ + at24_get_ofdata(client, &chip); + chip.setup = NULL; chip.context = NULL; } @@ -465,22 +519,31 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) if (!is_power_of_2(chip.byte_len)) dev_warn(&client->dev, "byte_len looks suspicious (no power of 2)!\n"); + if (!chip.page_size) { + dev_err(&client->dev, "page_size must not be 0!\n"); + return -EINVAL; + } if (!is_power_of_2(chip.page_size)) dev_warn(&client->dev, "page_size looks suspicious (no power of 2)!\n"); /* Use I2C operations unless we're stuck with SMBus extensions. */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { - if (chip.flags & AT24_FLAG_ADDR16) { - err = -EPFNOSUPPORT; - goto err_out; - } - if (!i2c_check_functionality(client->adapter, + if (chip.flags & AT24_FLAG_ADDR16) + return -EPFNOSUPPORT; + + if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { - err = -EPFNOSUPPORT; - goto err_out; + use_smbus = I2C_SMBUS_I2C_BLOCK_DATA; + } else if (i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_WORD_DATA)) { + use_smbus = I2C_SMBUS_WORD_DATA; + } else if (i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA)) { + use_smbus = I2C_SMBUS_BYTE_DATA; + } else { + return -EPFNOSUPPORT; } - use_smbus = true; } if (chip.flags & AT24_FLAG_TAKE8ADDR) @@ -489,12 +552,10 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) num_addresses = DIV_ROUND_UP(chip.byte_len, (chip.flags & AT24_FLAG_ADDR16) ? 65536 : 256); - at24 = kzalloc(sizeof(struct at24_data) + + at24 = devm_kzalloc(&client->dev, sizeof(struct at24_data) + num_addresses * sizeof(struct i2c_client *), GFP_KERNEL); - if (!at24) { - err = -ENOMEM; - goto err_out; - } + if (!at24) + return -ENOMEM; mutex_init(&at24->lock); at24->use_smbus = use_smbus; @@ -505,6 +566,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) * Export the EEPROM bytes through sysfs, since that's convenient. 
* By default, only root should see the data (maybe passwords etc) */ + sysfs_bin_attr_init(&at24->bin); at24->bin.attr.name = "eeprom"; at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR; at24->bin.read = at24_bin_read; @@ -531,11 +593,10 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) at24->write_max = write_max; /* buffer (data + address at the beginning) */ - at24->writebuf = kmalloc(write_max + 2, GFP_KERNEL); - if (!at24->writebuf) { - err = -ENOMEM; - goto err_struct; - } + at24->writebuf = devm_kzalloc(&client->dev, + write_max + 2, GFP_KERNEL); + if (!at24->writebuf) + return -ENOMEM; } else { dev_warn(&client->dev, "cannot write due to controller restrictions."); @@ -562,14 +623,15 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) i2c_set_clientdata(client, at24); - dev_info(&client->dev, "%zu byte %s EEPROM %s\n", + dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n", at24->bin.size, client->name, - writable ? "(writable)" : "(read-only)"); - dev_dbg(&client->dev, - "page_size %d, num_addresses %d, write_max %d%s\n", - chip.page_size, num_addresses, - at24->write_max, - use_smbus ? ", use_smbus" : ""); + writable ? "writable" : "read-only", at24->write_max); + if (use_smbus == I2C_SMBUS_WORD_DATA || + use_smbus == I2C_SMBUS_BYTE_DATA) { + dev_notice(&client->dev, "Falling back to %s reads, " + "performance will suffer\n", use_smbus == + I2C_SMBUS_WORD_DATA ? "word" : "byte"); + } /* export data to kernel code */ if (chip.setup) @@ -582,15 +644,10 @@ err_clients: if (at24->client[i]) i2c_unregister_device(at24->client[i]); - kfree(at24->writebuf); -err_struct: - kfree(at24); -err_out: - dev_dbg(&client->dev, "probe error %d\n", err); return err; } -static int __devexit at24_remove(struct i2c_client *client) +static int at24_remove(struct i2c_client *client) { struct at24_data *at24; int i; @@ -601,9 +658,6 @@ static int __devexit at24_remove(struct i2c_client *client) for (i = 1; i < at24->num_addresses; i++) i2c_unregister_device(at24->client[i]); - kfree(at24->writebuf); - kfree(at24); - i2c_set_clientdata(client, NULL); return 0; } @@ -615,12 +669,17 @@ static struct i2c_driver at24_driver = { .owner = THIS_MODULE, }, .probe = at24_probe, - .remove = __devexit_p(at24_remove), + .remove = at24_remove, .id_table = at24_ids, }; static int __init at24_init(void) { + if (!io_limit) { + pr_err("at24: io_limit must not be 0!\n"); + return -EINVAL; + } + io_limit = rounddown_pow_of_two(io_limit); return i2c_add_driver(&at24_driver); } diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index d902d81dde3..634f72929e1 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -10,7 +10,6 @@ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> @@ -19,7 +18,7 @@ #include <linux/spi/spi.h> #include <linux/spi/eeprom.h> - +#include <linux/of.h> /* * NOTE: this is an *EEPROM* driver. 
The vagaries of product naming @@ -50,6 +49,7 @@ struct at25_data { #define AT25_SR_BP1 0x08 #define AT25_SR_WPEN 0x80 /* writeprotect enable */ +#define AT25_INSTR_BIT3 0x08 /* Additional address bit in instr */ #define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ @@ -75,6 +75,7 @@ at25_ee_read( ssize_t status; struct spi_transfer t[2]; struct spi_message m; + u8 instr; if (unlikely(offset >= at25->bin.size)) return 0; @@ -84,7 +85,12 @@ at25_ee_read( return count; cp = command; - *cp++ = AT25_READ; + + instr = AT25_READ; + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) + if (offset >= (1U << (at25->addrlen * 8))) + instr |= AT25_INSTR_BIT3; + *cp++ = instr; /* 8/16/24-bit address is written MSB first */ switch (at25->addrlen) { @@ -126,7 +132,8 @@ at25_ee_read( } static ssize_t -at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr, +at25_bin_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev; @@ -166,14 +173,14 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, /* For write, rollover is within the page ... so we write at * most one page, then manually roll over to the next page. */ - bounce[0] = AT25_WRITE; mutex_lock(&at25->lock); do { unsigned long timeout, retries; unsigned segment; unsigned offset = (unsigned) off; - u8 *cp = bounce + 1; + u8 *cp = bounce; int sr; + u8 instr; *cp = AT25_WREN; status = spi_write(at25->spi, cp, 1); @@ -183,6 +190,12 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, break; } + instr = AT25_WRITE; + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) + if (offset >= (1U << (at25->addrlen * 8))) + instr |= AT25_INSTR_BIT3; + *cp++ = instr; + /* 8/16/24-bit address is written MSB first */ switch (at25->addrlen) { default: /* case 3 */ @@ -253,7 +266,8 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, } static ssize_t -at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr, +at25_bin_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev; @@ -287,33 +301,93 @@ static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf, /*-------------------------------------------------------------------------*/ +static int at25_np_to_chip(struct device *dev, + struct device_node *np, + struct spi_eeprom *chip) +{ + u32 val; + + memset(chip, 0, sizeof(*chip)); + strncpy(chip->name, np->name, sizeof(chip->name)); + + if (of_property_read_u32(np, "size", &val) == 0 || + of_property_read_u32(np, "at25,byte-len", &val) == 0) { + chip->byte_len = val; + } else { + dev_err(dev, "Error: missing \"size\" property\n"); + return -ENODEV; + } + + if (of_property_read_u32(np, "pagesize", &val) == 0 || + of_property_read_u32(np, "at25,page-size", &val) == 0) { + chip->page_size = (u16)val; + } else { + dev_err(dev, "Error: missing \"pagesize\" property\n"); + return -ENODEV; + } + + if (of_property_read_u32(np, "at25,addr-mode", &val) == 0) { + chip->flags = (u16)val; + } else { + if (of_property_read_u32(np, "address-width", &val)) { + dev_err(dev, + "Error: missing \"address-width\" property\n"); + return -ENODEV; + } + switch (val) { + case 8: + chip->flags |= EE_ADDR1; + break; + case 16: + chip->flags |= EE_ADDR2; + break; + case 24: + chip->flags |= EE_ADDR3; + break; + default: + dev_err(dev, + "Error: bad \"address-width\" property: %u\n", + val); + return -ENODEV; + } + if (of_find_property(np, "read-only", 
NULL)) + chip->flags |= EE_READONLY; + } + return 0; +} + static int at25_probe(struct spi_device *spi) { struct at25_data *at25 = NULL; - const struct spi_eeprom *chip; + struct spi_eeprom chip; + struct device_node *np = spi->dev.of_node; int err; int sr; int addrlen; /* Chip description */ - chip = spi->dev.platform_data; - if (!chip) { - dev_dbg(&spi->dev, "no chip description\n"); - err = -ENODEV; - goto fail; - } + if (!spi->dev.platform_data) { + if (np) { + err = at25_np_to_chip(&spi->dev, np, &chip); + if (err) + return err; + } else { + dev_err(&spi->dev, "Error: no chip description\n"); + return -ENODEV; + } + } else + chip = *(struct spi_eeprom *)spi->dev.platform_data; /* For now we only support 8/16/24 bit addressing */ - if (chip->flags & EE_ADDR1) + if (chip.flags & EE_ADDR1) addrlen = 1; - else if (chip->flags & EE_ADDR2) + else if (chip.flags & EE_ADDR2) addrlen = 2; - else if (chip->flags & EE_ADDR3) + else if (chip.flags & EE_ADDR3) addrlen = 3; else { dev_dbg(&spi->dev, "unsupported address type\n"); - err = -EINVAL; - goto fail; + return -EINVAL; } /* Ping the chip ... the status register is pretty portable, @@ -323,19 +397,17 @@ static int at25_probe(struct spi_device *spi) sr = spi_w8r8(spi, AT25_RDSR); if (sr < 0 || sr & AT25_SR_nRDY) { dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr); - err = -ENXIO; - goto fail; + return -ENXIO; } - if (!(at25 = kzalloc(sizeof *at25, GFP_KERNEL))) { - err = -ENOMEM; - goto fail; - } + at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL); + if (!at25) + return -ENOMEM; mutex_init(&at25->lock); - at25->chip = *chip; + at25->chip = chip; at25->spi = spi_dev_get(spi); - dev_set_drvdata(&spi->dev, at25); + spi_set_drvdata(spi, at25); at25->addrlen = addrlen; /* Export the EEPROM bytes through sysfs, since that's convenient. @@ -347,13 +419,14 @@ static int at25_probe(struct spi_device *spi) * that's sensitive for read and/or write, like ethernet addresses, * security codes, board-specific manufacturing calibrations, etc. */ + sysfs_bin_attr_init(&at25->bin); at25->bin.attr.name = "eeprom"; at25->bin.attr.mode = S_IRUSR; at25->bin.read = at25_bin_read; at25->mem.read = at25_mem_read; at25->bin.size = at25->chip.byte_len; - if (!(chip->flags & EE_READONLY)) { + if (!(chip.flags & EE_READONLY)) { at25->bin.write = at25_bin_write; at25->bin.attr.mode |= S_IWUSR; at25->mem.write = at25_mem_write; @@ -361,10 +434,10 @@ static int at25_probe(struct spi_device *spi) err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin); if (err) - goto fail; + return err; - if (chip->setup) - chip->setup(&at25->mem, chip->context); + if (chip.setup) + chip.setup(&at25->mem, chip.context); dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n", (at25->bin.size < 1024) @@ -372,47 +445,39 @@ static int at25_probe(struct spi_device *spi) : (at25->bin.size / 1024), (at25->bin.size < 1024) ? "Byte" : "KByte", at25->chip.name, - (chip->flags & EE_READONLY) ? " (readonly)" : "", + (chip.flags & EE_READONLY) ? 
" (readonly)" : "", at25->chip.page_size); return 0; -fail: - dev_dbg(&spi->dev, "probe err %d\n", err); - kfree(at25); - return err; } -static int __devexit at25_remove(struct spi_device *spi) +static int at25_remove(struct spi_device *spi) { struct at25_data *at25; - at25 = dev_get_drvdata(&spi->dev); + at25 = spi_get_drvdata(spi); sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin); - kfree(at25); return 0; } /*-------------------------------------------------------------------------*/ +static const struct of_device_id at25_of_match[] = { + { .compatible = "atmel,at25", }, + { } +}; +MODULE_DEVICE_TABLE(of, at25_of_match); + static struct spi_driver at25_driver = { .driver = { .name = "at25", .owner = THIS_MODULE, + .of_match_table = at25_of_match, }, .probe = at25_probe, - .remove = __devexit_p(at25_remove), + .remove = at25_remove, }; -static int __init at25_init(void) -{ - return spi_register_driver(&at25_driver); -} -module_init(at25_init); - -static void __exit at25_exit(void) -{ - spi_unregister_driver(&at25_driver); -} -module_exit(at25_exit); +module_spi_driver(at25_driver); MODULE_DESCRIPTION("Driver for most SPI EEPROMs"); MODULE_AUTHOR("David Brownell"); diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c new file mode 100644 index 00000000000..66d9e1baeae --- /dev/null +++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c @@ -0,0 +1,85 @@ +/* + * EEPROMs access control driver for display configuration EEPROMs + * on DigsyMTC board. + * + * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/gpio.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_gpio.h> +#include <linux/eeprom_93xx46.h> + +#define GPIO_EEPROM_CLK 216 +#define GPIO_EEPROM_CS 210 +#define GPIO_EEPROM_DI 217 +#define GPIO_EEPROM_DO 249 +#define GPIO_EEPROM_OE 255 +#define EE_SPI_BUS_NUM 1 + +static void digsy_mtc_op_prepare(void *p) +{ + /* enable */ + gpio_set_value(GPIO_EEPROM_OE, 0); +} + +static void digsy_mtc_op_finish(void *p) +{ + /* disable */ + gpio_set_value(GPIO_EEPROM_OE, 1); +} + +struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = { + .flags = EE_ADDR8, + .prepare = digsy_mtc_op_prepare, + .finish = digsy_mtc_op_finish, +}; + +static struct spi_gpio_platform_data eeprom_spi_gpio_data = { + .sck = GPIO_EEPROM_CLK, + .mosi = GPIO_EEPROM_DI, + .miso = GPIO_EEPROM_DO, + .num_chipselect = 1, +}; + +static struct platform_device digsy_mtc_eeprom = { + .name = "spi_gpio", + .id = EE_SPI_BUS_NUM, + .dev = { + .platform_data = &eeprom_spi_gpio_data, + }, +}; + +static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = { + { + .modalias = "93xx46", + .max_speed_hz = 1000000, + .bus_num = EE_SPI_BUS_NUM, + .chip_select = 0, + .mode = SPI_MODE_0, + .controller_data = (void *)GPIO_EEPROM_CS, + .platform_data = &digsy_mtc_eeprom_data, + }, +}; + +static int __init digsy_mtc_eeprom_devices_init(void) +{ + int ret; + + ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH, + "93xx46 EEPROMs OE"); + if (ret) { + pr_err("can't request gpio %d\n", GPIO_EEPROM_OE); + return ret; + } + spi_register_board_info(digsy_mtc_eeprom_info, + ARRAY_SIZE(digsy_mtc_eeprom_info)); + return platform_device_register(&digsy_mtc_eeprom); +} 
+device_initcall(digsy_mtc_eeprom_devices_init); diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index f939ebc2507..33f8673d23a 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -1,27 +1,22 @@ /* - Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and - Philip Edelbrock <phil@netroedge.com> - Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> - Copyright (C) 2003 IBM Corp. - Copyright (C) 2004 Jean Delvare <khali@linux-fr.org> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ + * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and + * Philip Edelbrock <phil@netroedge.com> + * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (C) 2003 IBM Corp. + * Copyright (C) 2004 Jean Delvare <jdelvare@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/jiffies.h> @@ -85,7 +80,8 @@ exit: mutex_unlock(&data->update_lock); } -static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr, +static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); @@ -232,22 +228,10 @@ static struct i2c_driver eeprom_driver = { .address_list = normal_i2c, }; -static int __init eeprom_init(void) -{ - return i2c_add_driver(&eeprom_driver); -} - -static void __exit eeprom_exit(void) -{ - i2c_del_driver(&eeprom_driver); -} - +module_i2c_driver(eeprom_driver); MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " "Philip Edelbrock <phil@netroedge.com> and " "Greg Kroah-Hartman <greg@kroah.com>"); MODULE_DESCRIPTION("I2C EEPROM driver"); MODULE_LICENSE("GPL"); - -module_init(eeprom_init); -module_exit(eeprom_exit); diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c index 15b1780025c..0ff4b02177b 100644 --- a/drivers/misc/eeprom/eeprom_93cx6.c +++ b/drivers/misc/eeprom/eeprom_93cx6.c @@ -1,27 +1,20 @@ /* - Copyright (C) 2004 - 2006 rt2x00 SourceForge Project - <http://rt2x00.serialmonkey.com> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the - Free Software Foundation, Inc., - 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -/* - Module: eeprom_93cx6 - Abstract: EEPROM reader routines for 93cx6 chipsets. - Supported chipsets: 93c46 & 93c66. + * Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + * <http://rt2x00.serialmonkey.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Module: eeprom_93cx6 + * Abstract: EEPROM reader routines for 93cx6 chipsets. + * Supported chipsets: 93c46 & 93c66. */ #include <linux/kernel.h> @@ -70,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom) eeprom->reg_data_out = 0; eeprom->reg_data_clock = 0; eeprom->reg_chip_select = 1; + eeprom->drive_data = 1; eeprom->register_write(eeprom); /* @@ -108,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, */ eeprom->reg_data_in = 0; eeprom->reg_data_out = 0; + eeprom->drive_data = 1; /* * Start writing all bits. 
@@ -147,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
 	 */
 	eeprom->reg_data_in = 0;
 	eeprom->reg_data_out = 0;
+	eeprom->drive_data = 0;
 
 	/*
 	 * Start reading all bits.
@@ -238,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
 }
 EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
 
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+	u16 command;
+
+	/* start the command */
+	eeprom_93cx6_startup(eeprom);
+
+	/* create command to enable/disable */
+
+	command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+	command <<= (eeprom->width - 2);
+
+	eeprom_93cx6_write_bits(eeprom, command,
+				PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+	eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write the @data to the specified @addr in the EEPROM and
+ * wait for the device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we delay in between parts of the operation to avoid using excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+	int timeout = 100;
+	u16 command;
+
+	/* start the command */
+	eeprom_93cx6_startup(eeprom);
+
+	command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+	command |= addr;
+
+	/* send write command */
+	eeprom_93cx6_write_bits(eeprom, command,
+				PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+	/* send data */
+	eeprom_93cx6_write_bits(eeprom, data, 16);
+
+	/* get ready to check for busy */
+	eeprom->drive_data = 0;
+	eeprom->reg_chip_select = 1;
+	eeprom->register_write(eeprom);
+
+	/* wait at least 250ns to get DO to be the busy signal */
+	usleep_range(1000, 2000);
+
+	/* wait for DO to go high to signify finish */
+
+	while (true) {
+		eeprom->register_read(eeprom);
+
+		if (eeprom->reg_data_out)
+			break;
+
+		usleep_range(1000, 2000);
+
+		if (--timeout <= 0) {
+			printk(KERN_ERR "%s: timeout\n", __func__);
+			break;
+		}
+	}
+
+	eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
new file mode 100644
index 00000000000..9ebeacdb8ec
--- /dev/null
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -0,0 +1,398 @@
+/*
+ * Driver for 93xx46 EEPROMs
+ *
+ * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/sysfs.h> +#include <linux/eeprom_93xx46.h> + +#define OP_START 0x4 +#define OP_WRITE (OP_START | 0x1) +#define OP_READ (OP_START | 0x2) +#define ADDR_EWDS 0x00 +#define ADDR_ERAL 0x20 +#define ADDR_EWEN 0x30 + +struct eeprom_93xx46_dev { + struct spi_device *spi; + struct eeprom_93xx46_platform_data *pdata; + struct bin_attribute bin; + struct mutex lock; + int addrlen; +}; + +static ssize_t +eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct eeprom_93xx46_dev *edev; + struct device *dev; + struct spi_message m; + struct spi_transfer t[2]; + int bits, ret; + u16 cmd_addr; + + dev = container_of(kobj, struct device, kobj); + edev = dev_get_drvdata(dev); + + if (unlikely(off >= edev->bin.size)) + return 0; + if ((off + count) > edev->bin.size) + count = edev->bin.size - off; + if (unlikely(!count)) + return count; + + cmd_addr = OP_READ << edev->addrlen; + + if (edev->addrlen == 7) { + cmd_addr |= off & 0x7f; + bits = 10; + } else { + cmd_addr |= off & 0x3f; + bits = 9; + } + + dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", + cmd_addr, edev->spi->max_speed_hz); + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = (char *)&cmd_addr; + t[0].len = 2; + t[0].bits_per_word = bits; + spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = count; + t[1].bits_per_word = 8; + spi_message_add_tail(&t[1], &m); + + mutex_lock(&edev->lock); + + if (edev->pdata->prepare) + edev->pdata->prepare(edev); + + ret = spi_sync(edev->spi, &m); + /* have to wait at least Tcsl ns */ + ndelay(250); + if (ret) { + dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", + count, (int)off, ret); + } + + if (edev->pdata->finish) + edev->pdata->finish(edev); + + mutex_unlock(&edev->lock); + return ret ? : count; +} + +static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) +{ + struct spi_message m; + struct spi_transfer t; + int bits, ret; + u16 cmd_addr; + + cmd_addr = OP_START << edev->addrlen; + if (edev->addrlen == 7) { + cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS) << 1; + bits = 10; + } else { + cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS); + bits = 9; + } + + dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr); + + spi_message_init(&m); + memset(&t, 0, sizeof(t)); + + t.tx_buf = &cmd_addr; + t.len = 2; + t.bits_per_word = bits; + spi_message_add_tail(&t, &m); + + mutex_lock(&edev->lock); + + if (edev->pdata->prepare) + edev->pdata->prepare(edev); + + ret = spi_sync(edev->spi, &m); + /* have to wait at least Tcsl ns */ + ndelay(250); + if (ret) + dev_err(&edev->spi->dev, "erase/write %sable error %d\n", + is_on ? 
"en" : "dis", ret); + + if (edev->pdata->finish) + edev->pdata->finish(edev); + + mutex_unlock(&edev->lock); + return ret; +} + +static ssize_t +eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, + const char *buf, unsigned off) +{ + struct spi_message m; + struct spi_transfer t[2]; + int bits, data_len, ret; + u16 cmd_addr; + + cmd_addr = OP_WRITE << edev->addrlen; + + if (edev->addrlen == 7) { + cmd_addr |= off & 0x7f; + bits = 10; + data_len = 1; + } else { + cmd_addr |= off & 0x3f; + bits = 9; + data_len = 2; + } + + dev_dbg(&edev->spi->dev, "write cmd 0x%x\n", cmd_addr); + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = (char *)&cmd_addr; + t[0].len = 2; + t[0].bits_per_word = bits; + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = data_len; + t[1].bits_per_word = 8; + spi_message_add_tail(&t[1], &m); + + ret = spi_sync(edev->spi, &m); + /* have to wait program cycle time Twc ms */ + mdelay(6); + return ret; +} + +static ssize_t +eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct eeprom_93xx46_dev *edev; + struct device *dev; + int i, ret, step = 1; + + dev = container_of(kobj, struct device, kobj); + edev = dev_get_drvdata(dev); + + if (unlikely(off >= edev->bin.size)) + return -EFBIG; + if ((off + count) > edev->bin.size) + count = edev->bin.size - off; + if (unlikely(!count)) + return count; + + /* only write even number of bytes on 16-bit devices */ + if (edev->addrlen == 6) { + step = 2; + count &= ~1; + } + + /* erase/write enable */ + ret = eeprom_93xx46_ew(edev, 1); + if (ret) + return ret; + + mutex_lock(&edev->lock); + + if (edev->pdata->prepare) + edev->pdata->prepare(edev); + + for (i = 0; i < count; i += step) { + ret = eeprom_93xx46_write_word(edev, &buf[i], off + i); + if (ret) { + dev_err(&edev->spi->dev, "write failed at %d: %d\n", + (int)off + i, ret); + break; + } + } + + if (edev->pdata->finish) + edev->pdata->finish(edev); + + mutex_unlock(&edev->lock); + + /* erase/write disable */ + eeprom_93xx46_ew(edev, 0); + return ret ? 
: count; +} + +static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) +{ + struct eeprom_93xx46_platform_data *pd = edev->pdata; + struct spi_message m; + struct spi_transfer t; + int bits, ret; + u16 cmd_addr; + + cmd_addr = OP_START << edev->addrlen; + if (edev->addrlen == 7) { + cmd_addr |= ADDR_ERAL << 1; + bits = 10; + } else { + cmd_addr |= ADDR_ERAL; + bits = 9; + } + + spi_message_init(&m); + memset(&t, 0, sizeof(t)); + + t.tx_buf = &cmd_addr; + t.len = 2; + t.bits_per_word = bits; + spi_message_add_tail(&t, &m); + + mutex_lock(&edev->lock); + + if (edev->pdata->prepare) + edev->pdata->prepare(edev); + + ret = spi_sync(edev->spi, &m); + if (ret) + dev_err(&edev->spi->dev, "erase error %d\n", ret); + /* have to wait erase cycle time Tec ms */ + mdelay(6); + + if (pd->finish) + pd->finish(edev); + + mutex_unlock(&edev->lock); + return ret; +} + +static ssize_t eeprom_93xx46_store_erase(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct eeprom_93xx46_dev *edev = dev_get_drvdata(dev); + int erase = 0, ret; + + sscanf(buf, "%d", &erase); + if (erase) { + ret = eeprom_93xx46_ew(edev, 1); + if (ret) + return ret; + ret = eeprom_93xx46_eral(edev); + if (ret) + return ret; + ret = eeprom_93xx46_ew(edev, 0); + if (ret) + return ret; + } + return count; +} +static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase); + +static int eeprom_93xx46_probe(struct spi_device *spi) +{ + struct eeprom_93xx46_platform_data *pd; + struct eeprom_93xx46_dev *edev; + int err; + + pd = spi->dev.platform_data; + if (!pd) { + dev_err(&spi->dev, "missing platform data\n"); + return -ENODEV; + } + + edev = kzalloc(sizeof(*edev), GFP_KERNEL); + if (!edev) + return -ENOMEM; + + if (pd->flags & EE_ADDR8) + edev->addrlen = 7; + else if (pd->flags & EE_ADDR16) + edev->addrlen = 6; + else { + dev_err(&spi->dev, "unspecified address type\n"); + err = -EINVAL; + goto fail; + } + + mutex_init(&edev->lock); + + edev->spi = spi_dev_get(spi); + edev->pdata = pd; + + sysfs_bin_attr_init(&edev->bin); + edev->bin.attr.name = "eeprom"; + edev->bin.attr.mode = S_IRUSR; + edev->bin.read = eeprom_93xx46_bin_read; + edev->bin.size = 128; + if (!(pd->flags & EE_READONLY)) { + edev->bin.write = eeprom_93xx46_bin_write; + edev->bin.attr.mode |= S_IWUSR; + } + + err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin); + if (err) + goto fail; + + dev_info(&spi->dev, "%d-bit eeprom %s\n", + (pd->flags & EE_ADDR8) ? 8 : 16, + (pd->flags & EE_READONLY) ? 
"(readonly)" : ""); + + if (!(pd->flags & EE_READONLY)) { + if (device_create_file(&spi->dev, &dev_attr_erase)) + dev_err(&spi->dev, "can't create erase interface\n"); + } + + spi_set_drvdata(spi, edev); + return 0; +fail: + kfree(edev); + return err; +} + +static int eeprom_93xx46_remove(struct spi_device *spi) +{ + struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi); + + if (!(edev->pdata->flags & EE_READONLY)) + device_remove_file(&spi->dev, &dev_attr_erase); + + sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin); + kfree(edev); + return 0; +} + +static struct spi_driver eeprom_93xx46_driver = { + .driver = { + .name = "93xx46", + .owner = THIS_MODULE, + }, + .probe = eeprom_93xx46_probe, + .remove = eeprom_93xx46_remove, +}; + +module_spi_driver(eeprom_93xx46_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); +MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>"); +MODULE_ALIAS("spi:93xx46"); diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index 5a6b2bce8ad..580ff9df552 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -1,33 +1,32 @@ /* - max6875.c - driver for MAX6874/MAX6875 - - Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com> - - Based on eeprom.c - - The MAX6875 has a bank of registers and two banks of EEPROM. - Address ranges are defined as follows: - * 0x0000 - 0x0046 = configuration registers - * 0x8000 - 0x8046 = configuration EEPROM - * 0x8100 - 0x82FF = user EEPROM - - This driver makes the user EEPROM available for read. - - The registers & config EEPROM should be accessed via i2c-dev. - - The MAX6875 ignores the lowest address bit, so each chip responds to - two addresses - 0x50/0x51 and 0x52/0x53. - - Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read - address, so this driver is destructive if loaded for the wrong EEPROM chip. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. -*/ + * max6875.c - driver for MAX6874/MAX6875 + * + * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com> + * + * Based on eeprom.c + * + * The MAX6875 has a bank of registers and two banks of EEPROM. + * Address ranges are defined as follows: + * * 0x0000 - 0x0046 = configuration registers + * * 0x8000 - 0x8046 = configuration EEPROM + * * 0x8100 - 0x82FF = user EEPROM + * + * This driver makes the user EEPROM available for read. + * + * The registers & config EEPROM should be accessed via i2c-dev. + * + * The MAX6875 ignores the lowest address bit, so each chip responds to + * two addresses - 0x50/0x51 and 0x52/0x53. + * + * Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read + * address, so this driver is destructive if loaded for the wrong EEPROM chip. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -107,7 +106,7 @@ exit_up:
 	mutex_unlock(&data->update_lock);
 }
 
-static ssize_t max6875_read(struct kobject *kobj,
+static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
 			    struct bin_attribute *bin_attr,
 			    char *buf, loff_t off, size_t count)
 {
@@ -208,20 +207,8 @@ static struct i2c_driver max6875_driver = {
 	.id_table = max6875_id,
 };
 
-static int __init max6875_init(void)
-{
-	return i2c_add_driver(&max6875_driver);
-}
-
-static void __exit max6875_exit(void)
-{
-	i2c_del_driver(&max6875_driver);
-}
-
+module_i2c_driver(max6875_driver);
 
 MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
 MODULE_DESCRIPTION("MAX6875 driver");
 MODULE_LICENSE("GPL");
-
-module_init(max6875_init);
-module_exit(max6875_exit);
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
new file mode 100644
index 00000000000..3f2b625b203
--- /dev/null
+++ b/drivers/misc/eeprom/sunxi_sid.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
+ * http://www.linux-sunxi.org
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver exposes the Allwinner security ID, e-fuses exported in
+ * byte-sized chunks.
+ */
+
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define DRV_NAME "sunxi-sid"
+
+struct sunxi_sid_data {
+	void __iomem *reg_base;
+	unsigned int keysize;
+};
+
+/* We read the entire key, due to a 32 bit read alignment requirement. Since we
+ * want to return the requested byte, this results in somewhat slower code and
+ * uses 4 times more reads than needed but keeps code simpler. Since the SID is
+ * only very rarely probed, this is not really an issue.
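+ *
+ * As a worked example of the arithmetic below (offset values assumed for
+ * illustration): a request for offset 6 reads the aligned 32 bit word at
+ * round_down(6, 4) = 4 and shifts it right by (6 % 4) * 8 = 16 bits, so
+ * that the requested byte ends up in the low 8 bits kept by the u8 return
+ * type.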
+ */ +static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data, + const unsigned int offset) +{ + u32 sid_key; + + if (offset >= sid_data->keysize) + return 0; + + sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4)); + sid_key >>= (offset % 4) * 8; + + return sid_key; /* Only return the last byte */ +} + +static ssize_t sid_read(struct file *fd, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t size) +{ + struct platform_device *pdev; + struct sunxi_sid_data *sid_data; + int i; + + pdev = to_platform_device(kobj_to_dev(kobj)); + sid_data = platform_get_drvdata(pdev); + + if (pos < 0 || pos >= sid_data->keysize) + return 0; + if (size > sid_data->keysize - pos) + size = sid_data->keysize - pos; + + for (i = 0; i < size; i++) + buf[i] = sunxi_sid_read_byte(sid_data, pos + i); + + return i; +} + +static struct bin_attribute sid_bin_attr = { + .attr = { .name = "eeprom", .mode = S_IRUGO, }, + .read = sid_read, +}; + +static int sunxi_sid_remove(struct platform_device *pdev) +{ + device_remove_bin_file(&pdev->dev, &sid_bin_attr); + dev_dbg(&pdev->dev, "driver unloaded\n"); + + return 0; +} + +static const struct of_device_id sunxi_sid_of_match[] = { + { .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16}, + { .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); + +static int sunxi_sid_probe(struct platform_device *pdev) +{ + struct sunxi_sid_data *sid_data; + struct resource *res; + const struct of_device_id *of_dev_id; + u8 *entropy; + unsigned int i; + + sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data), + GFP_KERNEL); + if (!sid_data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sid_data->reg_base)) + return PTR_ERR(sid_data->reg_base); + + of_dev_id = of_match_device(sunxi_sid_of_match, &pdev->dev); + if (!of_dev_id) + return -ENODEV; + sid_data->keysize = (int)of_dev_id->data; + + platform_set_drvdata(pdev, sid_data); + + sid_bin_attr.size = sid_data->keysize; + if (device_create_bin_file(&pdev->dev, &sid_bin_attr)) + return -ENODEV; + + entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL); + for (i = 0; i < sid_data->keysize; i++) + entropy[i] = sunxi_sid_read_byte(sid_data, i); + add_device_randomness(entropy, sid_data->keysize); + kfree(entropy); + + dev_dbg(&pdev->dev, "loaded\n"); + + return 0; +} + +static struct platform_driver sunxi_sid_driver = { + .probe = sunxi_sid_probe, + .remove = sunxi_sid_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = sunxi_sid_of_match, + }, +}; +module_platform_driver(sunxi_sid_driver); + +MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); +MODULE_DESCRIPTION("Allwinner sunxi security id driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 1eac626e710..2cf2bbc0b92 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/slab.h> static LIST_HEAD(container_list); static DEFINE_MUTEX(container_list_lock); @@ -197,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev) { char name[ENCLOSURE_NAME_SIZE]; + /* + * In odd circumstances, like multipath devices, something else may + * already have removed the links, so check for this condition first. 
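+	 * (cdev->dev->kobj.sd is the device's sysfs directory entry; it
+	 * becomes NULL once sysfs removal has run, which is what the check
+	 * below detects.)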
+ */ + if (!cdev->dev->kobj.sd) + return; + enclosure_link_name(cdev, name); sysfs_remove_link(&cdev->dev->kobj, name); sysfs_remove_link(&cdev->cdev.kobj, "device"); @@ -238,7 +246,7 @@ static void enclosure_component_release(struct device *dev) put_device(dev->parent); } -static const struct attribute_group *enclosure_groups[]; +static const struct attribute_group *enclosure_component_groups[]; /** * enclosure_component_register - add a particular component to an enclosure @@ -281,11 +289,14 @@ enclosure_component_register(struct enclosure_device *edev, dev_set_name(cdev, "%u", number); cdev->release = enclosure_component_release; - cdev->groups = enclosure_groups; + cdev->groups = enclosure_component_groups; err = device_register(cdev); - if (err) - ERR_PTR(err); + if (err) { + ecomp->number = -1; + put_device(cdev); + return ERR_PTR(err); + } return ecomp; } @@ -361,25 +372,26 @@ EXPORT_SYMBOL_GPL(enclosure_remove_device); * sysfs pieces below */ -static ssize_t enclosure_show_components(struct device *cdev, - struct device_attribute *attr, - char *buf) +static ssize_t components_show(struct device *cdev, + struct device_attribute *attr, char *buf) { struct enclosure_device *edev = to_enclosure_device(cdev); return snprintf(buf, 40, "%d\n", edev->components); } +static DEVICE_ATTR_RO(components); -static struct device_attribute enclosure_attrs[] = { - __ATTR(components, S_IRUGO, enclosure_show_components, NULL), - __ATTR_NULL +static struct attribute *enclosure_class_attrs[] = { + &dev_attr_components.attr, + NULL, }; +ATTRIBUTE_GROUPS(enclosure_class); static struct class enclosure_class = { .name = "enclosure", .owner = THIS_MODULE, .dev_release = enclosure_release, - .dev_attrs = enclosure_attrs, + .dev_groups = enclosure_class_groups, }; static const char *const enclosure_status [] = { @@ -532,15 +544,7 @@ static struct attribute *enclosure_component_attrs[] = { &dev_attr_type.attr, NULL }; - -static struct attribute_group enclosure_group = { - .attrs = enclosure_component_attrs, -}; - -static const struct attribute_group *enclosure_groups[] = { - &enclosure_group, - NULL -}; +ATTRIBUTE_GROUPS(enclosure_component); static int __init enclosure_init(void) { diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c deleted file mode 100644 index ba4694169d7..00000000000 --- a/drivers/misc/ep93xx_pwm.c +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Simple PWM driver for EP93XX - * - * (c) Copyright 2009 Matthieu Crapet <mcrapet@gmail.com> - * (c) Copyright 2009 H Hartley Sweeten <hsweeten@visionengravers.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - * EP9307 has only one channel: - * - PWMOUT - * - * EP9301/02/12/15 have two channels: - * - PWMOUT - * - PWMOUT1 (alternate function for EGPIO14) - */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/io.h> - -#include <mach/platform.h> - -#define EP93XX_PWMx_TERM_COUNT 0x00 -#define EP93XX_PWMx_DUTY_CYCLE 0x04 -#define EP93XX_PWMx_ENABLE 0x08 -#define EP93XX_PWMx_INVERT 0x0C - -#define EP93XX_PWM_MAX_COUNT 0xFFFF - -struct ep93xx_pwm { - void __iomem *mmio_base; - struct clk *clk; - u32 duty_percent; -}; - -static inline void ep93xx_pwm_writel(struct ep93xx_pwm *pwm, - unsigned int val, unsigned int off) -{ - __raw_writel(val, pwm->mmio_base + off); -} - -static inline unsigned int ep93xx_pwm_readl(struct ep93xx_pwm *pwm, - unsigned int off) -{ - return __raw_readl(pwm->mmio_base + off); -} - -static inline void ep93xx_pwm_write_tc(struct ep93xx_pwm *pwm, u16 value) -{ - ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_TERM_COUNT); -} - -static inline u16 ep93xx_pwm_read_tc(struct ep93xx_pwm *pwm) -{ - return ep93xx_pwm_readl(pwm, EP93XX_PWMx_TERM_COUNT); -} - -static inline void ep93xx_pwm_write_dc(struct ep93xx_pwm *pwm, u16 value) -{ - ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_DUTY_CYCLE); -} - -static inline void ep93xx_pwm_enable(struct ep93xx_pwm *pwm) -{ - ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_ENABLE); -} - -static inline void ep93xx_pwm_disable(struct ep93xx_pwm *pwm) -{ - ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_ENABLE); -} - -static inline int ep93xx_pwm_is_enabled(struct ep93xx_pwm *pwm) -{ - return ep93xx_pwm_readl(pwm, EP93XX_PWMx_ENABLE) & 0x1; -} - -static inline void ep93xx_pwm_invert(struct ep93xx_pwm *pwm) -{ - ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_INVERT); -} - -static inline void ep93xx_pwm_normal(struct ep93xx_pwm *pwm) -{ - ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_INVERT); -} - -static inline int ep93xx_pwm_is_inverted(struct ep93xx_pwm *pwm) -{ - return ep93xx_pwm_readl(pwm, EP93XX_PWMx_INVERT) & 0x1; -} - -/* - * /sys/devices/platform/ep93xx-pwm.N - * /min_freq read-only minimum pwm output frequency - * /max_req read-only maximum pwm output frequency - * /freq read-write pwm output frequency (0 = disable output) - * /duty_percent read-write pwm duty cycle percent (1..99) - * /invert read-write invert pwm output - */ - -static ssize_t ep93xx_pwm_get_min_freq(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - unsigned long rate = clk_get_rate(pwm->clk); - - return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1)); -} - -static ssize_t ep93xx_pwm_get_max_freq(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - unsigned long rate = clk_get_rate(pwm->clk); - - return sprintf(buf, "%ld\n", rate / 2); -} - -static ssize_t ep93xx_pwm_get_freq(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - - if (ep93xx_pwm_is_enabled(pwm)) { - unsigned long rate = clk_get_rate(pwm->clk); - u16 term = ep93xx_pwm_read_tc(pwm); - - return sprintf(buf, "%ld\n", rate / (term + 1)); - } else { - return sprintf(buf, "disabled\n"); - } -} - -static ssize_t ep93xx_pwm_set_freq(struct device *dev, - struct device_attribute 
*attr, const char *buf, size_t count) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - long val; - int err; - - err = strict_strtol(buf, 10, &val); - if (err) - return -EINVAL; - - if (val == 0) { - ep93xx_pwm_disable(pwm); - } else if (val <= (clk_get_rate(pwm->clk) / 2)) { - u32 term, duty; - - val = (clk_get_rate(pwm->clk) / val) - 1; - if (val > EP93XX_PWM_MAX_COUNT) - val = EP93XX_PWM_MAX_COUNT; - if (val < 1) - val = 1; - - term = ep93xx_pwm_read_tc(pwm); - duty = ((val + 1) * pwm->duty_percent / 100) - 1; - - /* If pwm is running, order is important */ - if (val > term) { - ep93xx_pwm_write_tc(pwm, val); - ep93xx_pwm_write_dc(pwm, duty); - } else { - ep93xx_pwm_write_dc(pwm, duty); - ep93xx_pwm_write_tc(pwm, val); - } - - if (!ep93xx_pwm_is_enabled(pwm)) - ep93xx_pwm_enable(pwm); - } else { - return -EINVAL; - } - - return count; -} - -static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - - return sprintf(buf, "%d\n", pwm->duty_percent); -} - -static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - long val; - int err; - - err = strict_strtol(buf, 10, &val); - if (err) - return -EINVAL; - - if (val > 0 && val < 100) { - u32 term = ep93xx_pwm_read_tc(pwm); - ep93xx_pwm_write_dc(pwm, ((term + 1) * val / 100) - 1); - pwm->duty_percent = val; - return count; - } - - return -EINVAL; -} - -static ssize_t ep93xx_pwm_get_invert(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - - return sprintf(buf, "%d\n", ep93xx_pwm_is_inverted(pwm)); -} - -static ssize_t ep93xx_pwm_set_invert(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct platform_device *pdev = to_platform_device(dev); - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - long val; - int err; - - err = strict_strtol(buf, 10, &val); - if (err) - return -EINVAL; - - if (val == 0) - ep93xx_pwm_normal(pwm); - else if (val == 1) - ep93xx_pwm_invert(pwm); - else - return -EINVAL; - - return count; -} - -static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); -static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); -static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO, - ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); -static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO, - ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); -static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO, - ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); - -static struct attribute *ep93xx_pwm_attrs[] = { - &dev_attr_min_freq.attr, - &dev_attr_max_freq.attr, - &dev_attr_freq.attr, - &dev_attr_duty_percent.attr, - &dev_attr_invert.attr, - NULL -}; - -static const struct attribute_group ep93xx_pwm_sysfs_files = { - .attrs = ep93xx_pwm_attrs, -}; - -static int __init ep93xx_pwm_probe(struct platform_device *pdev) -{ - struct ep93xx_pwm *pwm; - struct resource *res; - int err; - - err = ep93xx_pwm_acquire_gpio(pdev); - if (err) - return err; - - pwm = kzalloc(sizeof(struct ep93xx_pwm), GFP_KERNEL); - if (!pwm) { - err = -ENOMEM; - goto fail_no_mem; - } - - res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - err = -ENXIO; - goto fail_no_mem_resource; - } - - res = request_mem_region(res->start, resource_size(res), pdev->name); - if (res == NULL) { - err = -EBUSY; - goto fail_no_mem_resource; - } - - pwm->mmio_base = ioremap(res->start, resource_size(res)); - if (pwm->mmio_base == NULL) { - err = -ENXIO; - goto fail_no_ioremap; - } - - err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); - if (err) - goto fail_no_sysfs; - - pwm->clk = clk_get(&pdev->dev, "pwm_clk"); - if (IS_ERR(pwm->clk)) { - err = PTR_ERR(pwm->clk); - goto fail_no_clk; - } - - pwm->duty_percent = 50; - - platform_set_drvdata(pdev, pwm); - - /* disable pwm at startup. Avoids zero value. */ - ep93xx_pwm_disable(pwm); - ep93xx_pwm_write_tc(pwm, EP93XX_PWM_MAX_COUNT); - ep93xx_pwm_write_dc(pwm, EP93XX_PWM_MAX_COUNT / 2); - - clk_enable(pwm->clk); - - return 0; - -fail_no_clk: - sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); -fail_no_sysfs: - iounmap(pwm->mmio_base); -fail_no_ioremap: - release_mem_region(res->start, resource_size(res)); -fail_no_mem_resource: - kfree(pwm); -fail_no_mem: - ep93xx_pwm_release_gpio(pdev); - return err; -} - -static int __exit ep93xx_pwm_remove(struct platform_device *pdev) -{ - struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - ep93xx_pwm_disable(pwm); - clk_disable(pwm->clk); - clk_put(pwm->clk); - platform_set_drvdata(pdev, NULL); - sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); - iounmap(pwm->mmio_base); - release_mem_region(res->start, resource_size(res)); - kfree(pwm); - ep93xx_pwm_release_gpio(pdev); - - return 0; -} - -static struct platform_driver ep93xx_pwm_driver = { - .driver = { - .name = "ep93xx-pwm", - .owner = THIS_MODULE, - }, - .remove = __exit_p(ep93xx_pwm_remove), -}; - -static int __init ep93xx_pwm_init(void) -{ - return platform_driver_probe(&ep93xx_pwm_driver, ep93xx_pwm_probe); -} - -static void __exit ep93xx_pwm_exit(void) -{ - platform_driver_unregister(&ep93xx_pwm_driver); -} - -module_init(ep93xx_pwm_init); -module_exit(ep93xx_pwm_exit); - -MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, " - "H Hartley Sweeten <hsweeten@visionengravers.com>"); -MODULE_DESCRIPTION("EP93xx PWM driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:ep93xx-pwm"); diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c new file mode 100644 index 00000000000..71d2793b372 --- /dev/null +++ b/drivers/misc/fsa9480.c @@ -0,0 +1,549 @@ +/* + * fsa9480.c - FSA9480 micro USB switch device driver + * + * Copyright (C) 2010 Samsung Electronics + * Minkyu Kang <mk7.kang@samsung.com> + * Wonguk Jeong <wonguk.jeong@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/platform_data/fsa9480.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> + +/* FSA9480 I2C registers */ +#define FSA9480_REG_DEVID 0x01 +#define FSA9480_REG_CTRL 0x02 +#define FSA9480_REG_INT1 0x03 +#define FSA9480_REG_INT2 0x04 +#define FSA9480_REG_INT1_MASK 0x05 +#define FSA9480_REG_INT2_MASK 0x06 +#define FSA9480_REG_ADC 0x07 +#define FSA9480_REG_TIMING1 0x08 +#define FSA9480_REG_TIMING2 0x09 +#define FSA9480_REG_DEV_T1 0x0a +#define FSA9480_REG_DEV_T2 0x0b +#define FSA9480_REG_BTN1 0x0c +#define FSA9480_REG_BTN2 0x0d +#define FSA9480_REG_CK 0x0e +#define FSA9480_REG_CK_INT1 0x0f +#define FSA9480_REG_CK_INT2 0x10 +#define FSA9480_REG_CK_INTMASK1 0x11 +#define FSA9480_REG_CK_INTMASK2 0x12 +#define FSA9480_REG_MANSW1 0x13 +#define FSA9480_REG_MANSW2 0x14 + +/* Control */ +#define CON_SWITCH_OPEN (1 << 4) +#define CON_RAW_DATA (1 << 3) +#define CON_MANUAL_SW (1 << 2) +#define CON_WAIT (1 << 1) +#define CON_INT_MASK (1 << 0) +#define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \ + CON_MANUAL_SW | CON_WAIT) + +/* Device Type 1 */ +#define DEV_USB_OTG (1 << 7) +#define DEV_DEDICATED_CHG (1 << 6) +#define DEV_USB_CHG (1 << 5) +#define DEV_CAR_KIT (1 << 4) +#define DEV_UART (1 << 3) +#define DEV_USB (1 << 2) +#define DEV_AUDIO_2 (1 << 1) +#define DEV_AUDIO_1 (1 << 0) + +#define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB) +#define DEV_T1_UART_MASK (DEV_UART) +#define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG) + +/* Device Type 2 */ +#define DEV_AV (1 << 6) +#define DEV_TTY (1 << 5) +#define DEV_PPD (1 << 4) +#define DEV_JIG_UART_OFF (1 << 3) +#define DEV_JIG_UART_ON (1 << 2) +#define DEV_JIG_USB_OFF (1 << 1) +#define DEV_JIG_USB_ON (1 << 0) + +#define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON) +#define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON) +#define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \ + DEV_JIG_UART_OFF | DEV_JIG_UART_ON) + +/* + * Manual Switch + * D- [7:5] / D+ [4:2] + * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO + */ +#define SW_VAUDIO ((4 << 5) | (4 << 2)) +#define SW_UART ((3 << 5) | (3 << 2)) +#define SW_AUDIO ((2 << 5) | (2 << 2)) +#define SW_DHOST ((1 << 5) | (1 << 2)) +#define SW_AUTO ((0 << 5) | (0 << 2)) + +/* Interrupt 1 */ +#define INT_DETACH (1 << 1) +#define INT_ATTACH (1 << 0) + +struct fsa9480_usbsw { + struct i2c_client *client; + struct fsa9480_platform_data *pdata; + int dev1; + int dev2; + int mansw; +}; + +static struct fsa9480_usbsw *chip; + +static int fsa9480_write_reg(struct i2c_client *client, + int reg, int value) +{ + int ret; + + ret = i2c_smbus_write_byte_data(client, reg, value); + + if (ret < 0) + dev_err(&client->dev, "%s: err %d\n", __func__, ret); + + return ret; +} + +static int fsa9480_read_reg(struct i2c_client *client, int reg) +{ + int ret; + + ret = i2c_smbus_read_byte_data(client, reg); + + if (ret < 0) + dev_err(&client->dev, "%s: err %d\n", __func__, ret); + + return ret; +} + +static int fsa9480_read_irq(struct i2c_client *client, int *value) +{ + int ret; + + ret = i2c_smbus_read_i2c_block_data(client, + FSA9480_REG_INT1, 2, (u8 *)value); + *value &= 0xffff; + + if (ret < 0) + dev_err(&client->dev, "%s: err %d\n", __func__, ret); + + return ret; +} + +static void fsa9480_set_switch(const char *buf) +{ + struct fsa9480_usbsw *usbsw = 
chip; + struct i2c_client *client = usbsw->client; + unsigned int value; + unsigned int path = 0; + + value = fsa9480_read_reg(client, FSA9480_REG_CTRL); + + if (!strncmp(buf, "VAUDIO", 6)) { + path = SW_VAUDIO; + value &= ~CON_MANUAL_SW; + } else if (!strncmp(buf, "UART", 4)) { + path = SW_UART; + value &= ~CON_MANUAL_SW; + } else if (!strncmp(buf, "AUDIO", 5)) { + path = SW_AUDIO; + value &= ~CON_MANUAL_SW; + } else if (!strncmp(buf, "DHOST", 5)) { + path = SW_DHOST; + value &= ~CON_MANUAL_SW; + } else if (!strncmp(buf, "AUTO", 4)) { + path = SW_AUTO; + value |= CON_MANUAL_SW; + } else { + printk(KERN_ERR "Wrong command\n"); + return; + } + + usbsw->mansw = path; + fsa9480_write_reg(client, FSA9480_REG_MANSW1, path); + fsa9480_write_reg(client, FSA9480_REG_CTRL, value); +} + +static ssize_t fsa9480_get_switch(char *buf) +{ + struct fsa9480_usbsw *usbsw = chip; + struct i2c_client *client = usbsw->client; + unsigned int value; + + value = fsa9480_read_reg(client, FSA9480_REG_MANSW1); + + if (value == SW_VAUDIO) + return sprintf(buf, "VAUDIO\n"); + else if (value == SW_UART) + return sprintf(buf, "UART\n"); + else if (value == SW_AUDIO) + return sprintf(buf, "AUDIO\n"); + else if (value == SW_DHOST) + return sprintf(buf, "DHOST\n"); + else if (value == SW_AUTO) + return sprintf(buf, "AUTO\n"); + else + return sprintf(buf, "%x", value); +} + +static ssize_t fsa9480_show_device(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fsa9480_usbsw *usbsw = dev_get_drvdata(dev); + struct i2c_client *client = usbsw->client; + int dev1, dev2; + + dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); + dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); + + if (!dev1 && !dev2) + return sprintf(buf, "NONE\n"); + + /* USB */ + if (dev1 & DEV_T1_USB_MASK || dev2 & DEV_T2_USB_MASK) + return sprintf(buf, "USB\n"); + + /* UART */ + if (dev1 & DEV_T1_UART_MASK || dev2 & DEV_T2_UART_MASK) + return sprintf(buf, "UART\n"); + + /* CHARGER */ + if (dev1 & DEV_T1_CHARGER_MASK) + return sprintf(buf, "CHARGER\n"); + + /* JIG */ + if (dev2 & DEV_T2_JIG_MASK) + return sprintf(buf, "JIG\n"); + + return sprintf(buf, "UNKNOWN\n"); +} + +static ssize_t fsa9480_show_manualsw(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return fsa9480_get_switch(buf); + +} + +static ssize_t fsa9480_set_manualsw(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + fsa9480_set_switch(buf); + + return count; +} + +static DEVICE_ATTR(device, S_IRUGO, fsa9480_show_device, NULL); +static DEVICE_ATTR(switch, S_IRUGO | S_IWUSR, + fsa9480_show_manualsw, fsa9480_set_manualsw); + +static struct attribute *fsa9480_attributes[] = { + &dev_attr_device.attr, + &dev_attr_switch.attr, + NULL +}; + +static const struct attribute_group fsa9480_group = { + .attrs = fsa9480_attributes, +}; + +static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw, int intr) +{ + int val1, val2, ctrl; + struct fsa9480_platform_data *pdata = usbsw->pdata; + struct i2c_client *client = usbsw->client; + + val1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); + val2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); + ctrl = fsa9480_read_reg(client, FSA9480_REG_CTRL); + + dev_info(&client->dev, "intr: 0x%x, dev1: 0x%x, dev2: 0x%x\n", + intr, val1, val2); + + if (!intr) + goto out; + + if (intr & INT_ATTACH) { /* Attached */ + /* USB */ + if (val1 & DEV_T1_USB_MASK || val2 & DEV_T2_USB_MASK) { + if (pdata->usb_cb) + pdata->usb_cb(FSA9480_ATTACHED); + + if (usbsw->mansw) { + 
fsa9480_write_reg(client, + FSA9480_REG_MANSW1, usbsw->mansw); + } + } + + /* UART */ + if (val1 & DEV_T1_UART_MASK || val2 & DEV_T2_UART_MASK) { + if (pdata->uart_cb) + pdata->uart_cb(FSA9480_ATTACHED); + + if (!(ctrl & CON_MANUAL_SW)) { + fsa9480_write_reg(client, + FSA9480_REG_MANSW1, SW_UART); + } + } + + /* CHARGER */ + if (val1 & DEV_T1_CHARGER_MASK) { + if (pdata->charger_cb) + pdata->charger_cb(FSA9480_ATTACHED); + } + + /* JIG */ + if (val2 & DEV_T2_JIG_MASK) { + if (pdata->jig_cb) + pdata->jig_cb(FSA9480_ATTACHED); + } + } else if (intr & INT_DETACH) { /* Detached */ + /* USB */ + if (usbsw->dev1 & DEV_T1_USB_MASK || + usbsw->dev2 & DEV_T2_USB_MASK) { + if (pdata->usb_cb) + pdata->usb_cb(FSA9480_DETACHED); + } + + /* UART */ + if (usbsw->dev1 & DEV_T1_UART_MASK || + usbsw->dev2 & DEV_T2_UART_MASK) { + if (pdata->uart_cb) + pdata->uart_cb(FSA9480_DETACHED); + } + + /* CHARGER */ + if (usbsw->dev1 & DEV_T1_CHARGER_MASK) { + if (pdata->charger_cb) + pdata->charger_cb(FSA9480_DETACHED); + } + + /* JIG */ + if (usbsw->dev2 & DEV_T2_JIG_MASK) { + if (pdata->jig_cb) + pdata->jig_cb(FSA9480_DETACHED); + } + } + + usbsw->dev1 = val1; + usbsw->dev2 = val2; + +out: + ctrl &= ~CON_INT_MASK; + fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); +} + +static irqreturn_t fsa9480_irq_handler(int irq, void *data) +{ + struct fsa9480_usbsw *usbsw = data; + struct i2c_client *client = usbsw->client; + int intr; + + /* clear interrupt */ + fsa9480_read_irq(client, &intr); + + /* device detection */ + fsa9480_detect_dev(usbsw, intr); + + return IRQ_HANDLED; +} + +static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw) +{ + struct fsa9480_platform_data *pdata = usbsw->pdata; + struct i2c_client *client = usbsw->client; + int ret; + int intr; + unsigned int ctrl = CON_MASK; + + /* clear interrupt */ + fsa9480_read_irq(client, &intr); + + /* unmask interrupt (attach/detach only) */ + fsa9480_write_reg(client, FSA9480_REG_INT1_MASK, 0xfc); + fsa9480_write_reg(client, FSA9480_REG_INT2_MASK, 0x1f); + + usbsw->mansw = fsa9480_read_reg(client, FSA9480_REG_MANSW1); + + if (usbsw->mansw) + ctrl &= ~CON_MANUAL_SW; /* Manual Switching Mode */ + + fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); + + if (pdata && pdata->cfg_gpio) + pdata->cfg_gpio(); + + if (client->irq) { + ret = request_threaded_irq(client->irq, NULL, + fsa9480_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "fsa9480 micro USB", usbsw); + if (ret) { + dev_err(&client->dev, "failed to request IRQ\n"); + return ret; + } + + if (pdata) + device_init_wakeup(&client->dev, pdata->wakeup); + } + + return 0; +} + +static int fsa9480_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct fsa9480_usbsw *usbsw; + int ret = 0; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + usbsw = kzalloc(sizeof(struct fsa9480_usbsw), GFP_KERNEL); + if (!usbsw) { + dev_err(&client->dev, "failed to allocate driver data\n"); + return -ENOMEM; + } + + usbsw->client = client; + usbsw->pdata = client->dev.platform_data; + + chip = usbsw; + + i2c_set_clientdata(client, usbsw); + + ret = fsa9480_irq_init(usbsw); + if (ret) + goto fail1; + + ret = sysfs_create_group(&client->dev.kobj, &fsa9480_group); + if (ret) { + dev_err(&client->dev, + "failed to create fsa9480 attribute group\n"); + goto fail2; + } + + /* ADC Detect Time: 500ms */ + fsa9480_write_reg(client, FSA9480_REG_TIMING1, 0x6); + + if (chip->pdata->reset_cb) + 
chip->pdata->reset_cb(); + + /* device detection */ + fsa9480_detect_dev(usbsw, INT_ATTACH); + + pm_runtime_set_active(&client->dev); + + return 0; + +fail2: + if (client->irq) + free_irq(client->irq, usbsw); +fail1: + kfree(usbsw); + return ret; +} + +static int fsa9480_remove(struct i2c_client *client) +{ + struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); + if (client->irq) + free_irq(client->irq, usbsw); + + sysfs_remove_group(&client->dev.kobj, &fsa9480_group); + device_init_wakeup(&client->dev, 0); + kfree(usbsw); + return 0; +} + +#ifdef CONFIG_PM_SLEEP + +static int fsa9480_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); + struct fsa9480_platform_data *pdata = usbsw->pdata; + + if (device_may_wakeup(&client->dev) && client->irq) + enable_irq_wake(client->irq); + + if (pdata->usb_power) + pdata->usb_power(0); + + return 0; +} + +static int fsa9480_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); + int dev1, dev2; + + if (device_may_wakeup(&client->dev) && client->irq) + disable_irq_wake(client->irq); + + /* + * Clear Pending interrupt. Note that detect_dev does what + * the interrupt handler does. So, we don't miss pending and + * we reenable interrupt if there is one. + */ + fsa9480_read_reg(client, FSA9480_REG_INT1); + fsa9480_read_reg(client, FSA9480_REG_INT2); + + dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); + dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); + + /* device detection */ + fsa9480_detect_dev(usbsw, (dev1 || dev2) ? INT_ATTACH : INT_DETACH); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(fsa9480_pm_ops, fsa9480_suspend, fsa9480_resume); +#define FSA9480_PM_OPS (&fsa9480_pm_ops) + +#else + +#define FSA9480_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +static const struct i2c_device_id fsa9480_id[] = { + {"fsa9480", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, fsa9480_id); + +static struct i2c_driver fsa9480_i2c_driver = { + .driver = { + .name = "fsa9480", + .pm = FSA9480_PM_OPS, + }, + .probe = fsa9480_probe, + .remove = fsa9480_remove, + .id_table = fsa9480_id, +}; + +module_i2c_driver(fsa9480_i2c_driver); + +MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>"); +MODULE_DESCRIPTION("FSA9480 USB Switch driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig new file mode 100644 index 00000000000..6069d8cd79d --- /dev/null +++ b/drivers/misc/genwqe/Kconfig @@ -0,0 +1,13 @@ +# +# IBM Accelerator Family 'GenWQE' +# + +menuconfig GENWQE + tristate "GenWQE PCIe Accelerator" + depends on PCI && 64BIT + select CRC_ITU_T + default n + help + Enables PCIe card driver for IBM GenWQE accelerators. + The user-space interface is described in + include/linux/genwqe/genwqe_card.h. diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile new file mode 100644 index 00000000000..98a2b4f0b18 --- /dev/null +++ b/drivers/misc/genwqe/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for GenWQE driver +# + +obj-$(CONFIG_GENWQE) := genwqe_card.o +genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \ + card_debugfs.o card_utils.o diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c new file mode 100644 index 00000000000..74d51c9bb85 --- /dev/null +++ b/drivers/misc/genwqe/card_base.c @@ -0,0 +1,1205 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 
2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Module initialization and PCIe setup. Card health monitoring and + * recovery functionality. Character device creation and deletion are + * controlled from here. + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/err.h> +#include <linux/aer.h> +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/device.h> +#include <linux/log2.h> +#include <linux/genwqe/genwqe_card.h> + +#include "card_base.h" +#include "card_ddcb.h" + +MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>"); +MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>"); +MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>"); +MODULE_AUTHOR("Michal Jung <mijung@de.ibm.com>"); + +MODULE_DESCRIPTION("GenWQE Card"); +MODULE_VERSION(DRV_VERS_STRING); +MODULE_LICENSE("GPL"); + +static char genwqe_driver_name[] = GENWQE_DEVNAME; +static struct class *class_genwqe; +static struct dentry *debugfs_genwqe; +static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX]; + +/* PCI structure for identifying device by PCI vendor and device ID */ +static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = { + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5 << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Initial SR-IOV bring-up image */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */ + .device = 0x0000, /* VF Device ID */ + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Fixed up image */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */ + .device = 0x0000, /* VF Device ID */ + .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5, + .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8), + .class_mask = ~0, + .driver_data = 0 }, + + /* Even one more ... */ + { .vendor = PCI_VENDOR_ID_IBM, + .device = PCI_DEVICE_GENWQE, + .subvendor = PCI_SUBVENDOR_ID_IBM, + .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW, + .class = (PCI_CLASSCODE_GENWQE5 << 8), + .class_mask = ~0, + .driver_data = 0 }, + + { 0, } /* 0 terminated list. 
 */
+};
+
+MODULE_DEVICE_TABLE(pci, genwqe_device_table);
+
+/**
+ * genwqe_dev_alloc() - Create and prepare a new card descriptor
+ *
+ * Return: Pointer to card descriptor, or ERR_PTR(err) on error
+ */
+static struct genwqe_dev *genwqe_dev_alloc(void)
+{
+	unsigned int i = 0, j;
+	struct genwqe_dev *cd;
+
+	for (i = 0; i < GENWQE_CARD_NO_MAX; i++) {
+		if (genwqe_devices[i] == NULL)
+			break;
+	}
+	if (i >= GENWQE_CARD_NO_MAX)
+		return ERR_PTR(-ENODEV);
+
+	cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
+	if (!cd)
+		return ERR_PTR(-ENOMEM);
+
+	cd->card_idx = i;
+	cd->class_genwqe = class_genwqe;
+	cd->debugfs_genwqe = debugfs_genwqe;
+
+	init_waitqueue_head(&cd->queue_waitq);
+
+	spin_lock_init(&cd->file_lock);
+	INIT_LIST_HEAD(&cd->file_list);
+
+	cd->card_state = GENWQE_CARD_UNUSED;
+	spin_lock_init(&cd->print_lock);
+
+	cd->ddcb_software_timeout = genwqe_ddcb_software_timeout;
+	cd->kill_timeout = genwqe_kill_timeout;
+
+	for (j = 0; j < GENWQE_MAX_VFS; j++)
+		cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec;
+
+	genwqe_devices[i] = cd;
+	return cd;
+}
+
+static void genwqe_dev_free(struct genwqe_dev *cd)
+{
+	if (!cd)
+		return;
+
+	genwqe_devices[cd->card_idx] = NULL;
+	kfree(cd);
+}
+
+/**
+ * genwqe_bus_reset() - Card recovery
+ *
+ * pci_reset_function() will recover the device and ensure that the
+ * registers are accessible again when it completes with success. If
+ * not, the card will stay dead and its registers will remain
+ * inaccessible.
+ */
+static int genwqe_bus_reset(struct genwqe_dev *cd)
+{
+	int bars, rc = 0;
+	struct pci_dev *pci_dev = cd->pci_dev;
+	void __iomem *mmio;
+
+	if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
+		return -EIO;
+
+	mmio = cd->mmio;
+	cd->mmio = NULL;
+	pci_iounmap(pci_dev, mmio);
+
+	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+	pci_release_selected_regions(pci_dev, bars);
+
+	/*
+	 * Firmware/BIOS might change the memory mapping during a bus
+	 * reset. Settings like bus-mastering are backed up and
+	 * restored by pci_reset_function().
+	 */
+	dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
+	rc = pci_reset_function(pci_dev);
+	if (rc) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: failed reset func (rc %d)\n", __func__, rc);
+		return rc;
+	}
+	dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);
+
+	/*
+	 * Here is the right spot to clear the register read
+	 * failure. pci_bus_reset() does this job in real systems.
+	 */
+	cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
+			    GENWQE_INJECT_GFIR_FATAL |
+			    GENWQE_INJECT_GFIR_INFO);
+
+	rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+	if (rc) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: request bars failed (%d)\n", __func__, rc);
+		return -EIO;
+	}
+
+	cd->mmio = pci_iomap(pci_dev, 0, 0);
+	if (cd->mmio == NULL) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: mapping BAR0 failed\n", __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+ * Hardware circumvention section. Certain bitstreams in our test-lab
+ * had different kinds of problems. Here is where we adjust those
+ * bitstreams to function well with this version of our device driver.
+ *
+ * These circumventions are applied to the physical function only.
+ * The magic numbers below identify development/manufacturing
+ * versions of the bitstream used on the card.
+ *
+ * Turn off error reporting for old/manufacturing images.
+ */
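To make those magic numbers concrete: the checks that follow mask bits [19:4] out of IO_SLU_UNITCFG, which carry the bitstream build level, and compare that level against fixed thresholds such as 0x32170. A stand-alone sketch of the same comparison (the unitcfg value is made up for illustration; the mask and threshold are the ones used in the code below):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical register content; bits [19:4] = build level */
            unsigned long long slu_unitcfg = 0x32150ULL;
            unsigned long long level = slu_unitcfg & 0xFFFF0ULL;

            /* images below build level 0x32170 need FIR masking */
            printf("level 0x%llx -> %s\n", level,
                   level < 0x32170ULL ? "mask errors (old image)"
                                      : "regular error reporting");
            return 0;
    }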
+
+bool genwqe_need_err_masking(struct genwqe_dev *cd)
+{
+	return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+static void genwqe_tweak_hardware(struct genwqe_dev *cd)
+{
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	/* Mask FIRs for development images */
+	if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
+	    ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
+		dev_warn(&pci_dev->dev,
+			 "FIRs masked due to bitstream %016llx.%016llx\n",
+			 cd->slu_unitcfg, cd->app_unitcfg);
+
+		__genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
+				0xFFFFFFFFFFFFFFFFull);
+
+		__genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
+				0x0000000000000000ull);
+	}
+}
+
+/**
+ * genwqe_recovery_on_fatal_gfir_required() - Version-dependent actions
+ *
+ * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
+ * be ignored. This is e.g. true for the bitstream we gave to the card
+ * manufacturer, but also for some old bitstreams we released to our
+ * test-lab.
+ */
+int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
+{
+	return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
+}
+
+int genwqe_flash_readback_fails(struct genwqe_dev *cd)
+{
+	return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
+}
+
+/**
+ * genwqe_T_psec() - Calculate PF/VF timeout register content
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. It is better to measure out the value, or to put the
+ * speed/frequency directly into a register which is always a valid
+ * value for old as well as for new software.
+ */
+/* T = 1/f */
+static int genwqe_T_psec(struct genwqe_dev *cd)
+{
+	u16 speed;	/* 1/f -> 250, 200, 166, 175 */
+	static const int T[] = { 4000, 5000, 6000, 5714 };
+
+	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+	if (speed >= ARRAY_SIZE(T))
+		return -1;	/* illegal value */
+
+	return T[speed];
+}
+
+/**
+ * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ *
+ * Do this _after_ card_reset() is called. Otherwise the values will
+ * vanish. The settings need to be done when the queues are inactive.
+ *
+ * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
+ * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
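+ *
+ * A worked example of the formula (a back-of-the-envelope sketch,
+ * assuming the default genwqe_pf_jobtimeout_msec of 8000 and a
+ * 166 MHz bitstream, i.e. T = 6000 ps):
+ *
+ *   2^(10+x) = 8000 * 16e9 / (15 * 6000) ~= 1.42e9
+ *   10 + x   = ilog2(1.42e9) = 30         (ilog2() rounds down)
+ *   x        = 20
+ *
+ * The programmed window is then 2^30 * 6000 ps * 15/16 ~= 6.0 s,
+ * slightly below the requested 8 seconds because of the truncation.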
+ */ +static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd) +{ + u32 T = genwqe_T_psec(cd); + u64 x; + + if (genwqe_pf_jobtimeout_msec == 0) + return false; + + /* PF: large value needed, flash update 2sec per block */ + x = ilog2(genwqe_pf_jobtimeout_msec * + 16000000000uL/(T * 15)) - 10; + + genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + 0xff00 | (x & 0xff), 0); + return true; +} + +/** + * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution + */ +static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + unsigned int vf; + u32 T = genwqe_T_psec(cd); + u64 x; + + for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) { + + if (cd->vf_jobtimeout_msec[vf] == 0) + continue; + + x = ilog2(cd->vf_jobtimeout_msec[vf] * + 16000000000uL/(T * 15)) - 10; + + genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + 0xff00 | (x & 0xff), vf + 1); + } + return true; +} + +static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd) +{ + unsigned int type, e = 0; + + for (type = 0; type < GENWQE_DBG_UNITS; type++) { + switch (type) { + case GENWQE_DBG_UNIT0: + e = genwqe_ffdc_buff_size(cd, 0); + break; + case GENWQE_DBG_UNIT1: + e = genwqe_ffdc_buff_size(cd, 1); + break; + case GENWQE_DBG_UNIT2: + e = genwqe_ffdc_buff_size(cd, 2); + break; + case GENWQE_DBG_REGS: + e = GENWQE_FFDC_REGS; + break; + } + + /* currently support only the debug units mentioned here */ + cd->ffdc[type].entries = e; + cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg), + GFP_KERNEL); + /* + * regs == NULL is ok, the using code treats this as no regs, + * Printing warning is ok in this case. + */ + } + return 0; +} + +static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd) +{ + unsigned int type; + + for (type = 0; type < GENWQE_DBG_UNITS; type++) { + kfree(cd->ffdc[type].regs); + cd->ffdc[type].regs = NULL; + } +} + +static int genwqe_read_ids(struct genwqe_dev *cd) +{ + int err = 0; + int slu_id; + struct pci_dev *pci_dev = cd->pci_dev; + + cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); + if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "err: SLUID=%016llx\n", cd->slu_unitcfg); + err = -EIO; + goto out_err; + } + + slu_id = genwqe_get_slu_id(cd); + if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) { + dev_err(&pci_dev->dev, + "err: incompatible SLU Architecture %u\n", slu_id); + err = -ENOENT; + goto out_err; + } + + cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); + if (cd->app_unitcfg == IO_ILLEGAL_VALUE) { + dev_err(&pci_dev->dev, + "err: APPID=%016llx\n", cd->app_unitcfg); + err = -EIO; + goto out_err; + } + genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name)); + + /* + * Is access to all registers possible? If we are a VF the + * answer is obvious. If we run fully virtualized, we need to + * check if we can access all registers. If we do not have + * full access we will cause an UR and some informational FIRs + * in the PF, but that should not harm. + */ + if (pci_dev->is_virtfn) + cd->is_privileged = 0; + else + cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) + != IO_ILLEGAL_VALUE); + + out_err: + return err; +} + +static int genwqe_start(struct genwqe_dev *cd) +{ + int err; + struct pci_dev *pci_dev = cd->pci_dev; + + err = genwqe_read_ids(cd); + if (err) + return err; + + if (genwqe_is_privileged(cd)) { + /* do this after the tweaks. alloc fail is acceptable */ + genwqe_ffdc_buffs_alloc(cd); + genwqe_stop_traps(cd); + + /* Collect registers e.g. FIRs, UNITIDs, traces ... 
 */
+		genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
+				      cd->ffdc[GENWQE_DBG_REGS].entries, 0);
+
+		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
+				      cd->ffdc[GENWQE_DBG_UNIT0].regs,
+				      cd->ffdc[GENWQE_DBG_UNIT0].entries);
+
+		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
+				      cd->ffdc[GENWQE_DBG_UNIT1].regs,
+				      cd->ffdc[GENWQE_DBG_UNIT1].entries);
+
+		genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
+				      cd->ffdc[GENWQE_DBG_UNIT2].regs,
+				      cd->ffdc[GENWQE_DBG_UNIT2].entries);
+
+		genwqe_start_traps(cd);
+
+		if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
+			dev_warn(&pci_dev->dev,
+				 "[%s] chip reload/recovery!\n", __func__);
+
+			/*
+			 * Stealth Mode: Reload chip on either hot
+			 * reset or PERST.
+			 */
+			cd->softreset = 0x7Cull;
+			__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
+					cd->softreset);
+
+			err = genwqe_bus_reset(cd);
+			if (err != 0) {
+				dev_err(&pci_dev->dev,
+					"[%s] err: bus reset failed!\n",
+					__func__);
+				goto out;
+			}
+
+			/*
+			 * Re-read the IDs because
+			 * it could happen that the bitstream load
+			 * failed!
+			 */
+			err = genwqe_read_ids(cd);
+			if (err)
+				goto out;
+		}
+	}
+
+	err = genwqe_setup_service_layer(cd);  /* does a reset to the card */
+	if (err != 0) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: could not setup service layer!\n", __func__);
+		err = -ENODEV;
+		goto out;
+	}
+
+	if (genwqe_is_privileged(cd)) {	/* code is running _after_ reset */
+		genwqe_tweak_hardware(cd);
+
+		genwqe_setup_pf_jtimer(cd);
+		genwqe_setup_vf_jtimer(cd);
+	}
+
+	err = genwqe_device_create(cd);
+	if (err < 0) {
+		dev_err(&pci_dev->dev,
+			"err: chdev init failed! (err=%d)\n", err);
+		goto out_release_service_layer;
+	}
+	return 0;
+
+ out_release_service_layer:
+	genwqe_release_service_layer(cd);
+ out:
+	if (genwqe_is_privileged(cd))
+		genwqe_ffdc_buffs_free(cd);
+	return -EIO;
+}
+
+/**
+ * genwqe_stop() - Stop card operation
+ *
+ * Recovery notes:
+ *   As long as genwqe_thread runs we might access registers during
+ *   error data capture. The same holds for the genwqe_health_thread.
+ *   When genwqe_bus_reset() fails this function might be called two
+ *   times: first by the genwqe_health_thread() and later by
+ *   genwqe_remove() to unbind the device. We must be able to survive
+ *   that.
+ *
+ * This function must be robust enough to be called twice.
+ */
+static int genwqe_stop(struct genwqe_dev *cd)
+{
+	genwqe_finish_queue(cd);   /* no register access */
+	genwqe_device_remove(cd);  /* device removed, procs killed */
+	genwqe_release_service_layer(cd);  /* here genwqe_thread is stopped */
+
+	if (genwqe_is_privileged(cd)) {
+		pci_disable_sriov(cd->pci_dev);	/* access pci config space */
+		genwqe_ffdc_buffs_free(cd);
+	}
+
+	return 0;
+}
+
+/**
+ * genwqe_recover_card() - Try to recover the card if it is possible
+ *
+ * If fatal_err is set no register access is possible anymore. It is
+ * likely that genwqe_start fails in that situation. Proper error
+ * handling is required in this case.
+ *
+ * genwqe_bus_reset() will cause the pci code to call genwqe_remove()
+ * and later genwqe_probe() for all virtual functions.
+ */
+static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
+{
+	int rc;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	genwqe_stop(cd);
+
+	/*
+	 * Make sure chip is not reloaded to maintain FFDC. Write SLU
+	 * Reset Register, CPLDReset field to 0.
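+	 * (The counterpart value 0x7C, written in genwqe_start() above,
+	 * additionally sets the reload bits, so that the chip image is
+	 * reloaded on the next hot reset or PERST; 0x70 leaves them
+	 * clear and thereby preserves the FFDC data.)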
+	 */
+	if (!fatal_err) {
+		cd->softreset = 0x70ull;
+		__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
+	}
+
+	rc = genwqe_bus_reset(cd);
+	if (rc != 0) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: card recovery impossible!\n", __func__);
+		return rc;
+	}
+
+	rc = genwqe_start(cd);
+	if (rc < 0) {
+		dev_err(&pci_dev->dev,
+			"[%s] err: failed to launch device!\n", __func__);
+		return rc;
+	}
+	return 0;
+}
+
+static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
+{
+	*gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+	return (*gfir & GFIR_ERR_TRIGGER) &&
+		genwqe_recovery_on_fatal_gfir_required(cd);
+}
+
+/**
+ * genwqe_fir_checking() - Check the fault isolation registers of the card
+ *
+ * If this code works ok, it can be tried out with help of the
+ * genwqe_poke tool:
+ *   sudo ./tools/genwqe_poke 0x8 0xfefefefefef
+ *
+ * Now the relevant FIRs/sFIRs should be printed out and the driver should
+ * invoke recovery (devices are removed and re-added).
+ */
+static u64 genwqe_fir_checking(struct genwqe_dev *cd)
+{
+	int j, iterations = 0;
+	u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
+	u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+ healthMonitor:
+	iterations++;
+	if (iterations > 16) {
+		dev_err(&pci_dev->dev, "* exit looping after %d times\n",
+			iterations);
+		goto fatal_error;
+	}
+
+	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+	if (gfir != 0x0)
+		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
+			IO_SLC_CFGREG_GFIR, gfir);
+	if (gfir == IO_ILLEGAL_VALUE)
+		goto fatal_error;
+
+	/*
+	 * Bail out early when no GFIR bit is set. This prevents a
+	 * continuous printout, e.g. for the following bug:
+	 *   FIR set without a 2ndary FIR/FIR cannot be cleared
+	 * Comment out the following 'if' to get the prints:
+	 */
+	if (gfir == 0)
+		return 0;
+
+	gfir_masked = gfir & GFIR_ERR_TRIGGER;  /* fatal errors */
+
+	for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */
+
+		/* read the primary FIR (pfir) */
+		fir_addr = (uid << 24) + 0x08;
+		fir = __genwqe_readq(cd, fir_addr);
+		if (fir == 0x0)
+			continue;  /* no error in this unit */
+
+		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
+		if (fir == IO_ILLEGAL_VALUE)
+			goto fatal_error;
+
+		/* read primary FEC */
+		fec_addr = (uid << 24) + 0x18;
+		fec = __genwqe_readq(cd, fec_addr);
+
+		dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
+		if (fec == IO_ILLEGAL_VALUE)
+			goto fatal_error;
+
+		for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {
+
+			/* secondary fir empty, skip it */
+			if ((fir & mask) == 0x0)
+				continue;
+
+			sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
+			sfir = __genwqe_readq(cd, sfir_addr);
+
+			if (sfir == IO_ILLEGAL_VALUE)
+				goto fatal_error;
+			dev_err(&pci_dev->dev,
+				"* 0x%08x 0x%016llx\n", sfir_addr, sfir);
+
+			sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
+			sfec = __genwqe_readq(cd, sfec_addr);
+
+			if (sfec == IO_ILLEGAL_VALUE)
+				goto fatal_error;
+			dev_err(&pci_dev->dev,
+				"* 0x%08x 0x%016llx\n", sfec_addr, sfec);
+
+			gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+			if (gfir == IO_ILLEGAL_VALUE)
+				goto fatal_error;
+
+			/* gfir turned on during the routine! get out and
+			   start over.
 */
+			if ((gfir_masked == 0x0) &&
+			    (gfir & GFIR_ERR_TRIGGER)) {
+				goto healthMonitor;
+			}
+
+			/* do not clear if we entered with a fatal gfir */
+			if (gfir_masked == 0x0) {
+
+				/* NEW clear by mask the logged bits */
+				sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
+				__genwqe_writeq(cd, sfir_addr, sfir);
+
+				dev_dbg(&pci_dev->dev,
+					"[HM] Clearing 2ndary FIR 0x%08x "
+					"with 0x%016llx\n", sfir_addr, sfir);
+
+				/*
+				 * note, these cannot be error-FIRs
+				 * since gfir_masked is 0 after sfir
+				 * was read. Also, it is safe to do
+				 * this write if sfir=0. Still need to
+				 * clear the primary. This just means
+				 * there is no secondary FIR.
+				 */
+
+				/* clear by mask the logged bit. */
+				fir_clr_addr = (uid << 24) + 0x10;
+				__genwqe_writeq(cd, fir_clr_addr, mask);
+
+				dev_dbg(&pci_dev->dev,
+					"[HM] Clearing primary FIR 0x%08x "
+					"with 0x%016llx\n", fir_clr_addr,
+					mask);
+			}
+		}
+	}
+	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+	if (gfir == IO_ILLEGAL_VALUE)
+		goto fatal_error;
+
+	if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
+		/*
+		 * Check once more that it didn't go on after all the
+		 * FIRs were cleared.
+		 */
+		dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
+			iterations);
+		goto healthMonitor;
+	}
+	return gfir_masked;
+
+ fatal_error:
+	return IO_ILLEGAL_VALUE;
+}
+
+/**
+ * genwqe_health_thread() - Health checking thread
+ *
+ * This thread is only started for the PF of the card.
+ *
+ * This thread monitors the health of the card. A critical situation
+ * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In
+ * this case we need to be recovered from outside. Writing to
+ * registers will very likely not work either.
+ *
+ * This thread must only exit if kthread_should_stop() becomes true.
+ *
+ * Conditions for the health-thread to trigger:
+ *   a) when a kthread_stop() request comes in or
+ *   b) a critical GFIR occurred
+ *
+ * Informational GFIRs are checked and potentially printed every
+ * health_check_interval seconds.
+ */
+static int genwqe_health_thread(void *data)
+{
+	int rc, should_stop = 0;
+	struct genwqe_dev *cd = data;
+	struct pci_dev *pci_dev = cd->pci_dev;
+	u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;
+
+	while (!kthread_should_stop()) {
+		rc = wait_event_interruptible_timeout(cd->health_waitq,
+			 (genwqe_health_check_cond(cd, &gfir) ||
+			  (should_stop = kthread_should_stop())),
+				genwqe_health_check_interval * HZ);
+
+		if (should_stop)
+			break;
+
+		if (gfir == IO_ILLEGAL_VALUE) {
+			dev_err(&pci_dev->dev,
+				"[%s] GFIR=%016llx\n", __func__, gfir);
+			goto fatal_error;
+		}
+
+		slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
+		if (slu_unitcfg == IO_ILLEGAL_VALUE) {
+			dev_err(&pci_dev->dev,
+				"[%s] SLU_UNITCFG=%016llx\n",
+				__func__, slu_unitcfg);
+			goto fatal_error;
+		}
+
+		app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
+		if (app_unitcfg == IO_ILLEGAL_VALUE) {
+			dev_err(&pci_dev->dev,
+				"[%s] APP_UNITCFG=%016llx\n",
+				__func__, app_unitcfg);
+			goto fatal_error;
+		}
+
+		gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
+		if (gfir == IO_ILLEGAL_VALUE) {
+			dev_err(&pci_dev->dev,
+				"[%s] %s: GFIR=%016llx\n", __func__,
+				(gfir & GFIR_ERR_TRIGGER) ? "err" : "info",
+				gfir);
+			goto fatal_error;
+		}
+
+		gfir_masked = genwqe_fir_checking(cd);
+		if (gfir_masked == IO_ILLEGAL_VALUE)
+			goto fatal_error;
+
+		/*
+		 * GFIR ErrorTrigger bits set => reset the card!
+		 * Never do this for old/manufacturing images!
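+		 * (Old images are the ones with the fatal-GFIR bug
+		 * described at genwqe_recovery_on_fatal_gfir_required()
+		 * above; hence the extra check in the condition below.)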
+ */ + if ((gfir_masked) && !cd->skip_recovery && + genwqe_recovery_on_fatal_gfir_required(cd)) { + + cd->card_state = GENWQE_CARD_FATAL_ERROR; + + rc = genwqe_recover_card(cd, 0); + if (rc < 0) { + /* FIXME Card is unusable and needs unbind! */ + goto fatal_error; + } + } + + cd->last_gfir = gfir; + cond_resched(); + } + + return 0; + + fatal_error: + dev_err(&pci_dev->dev, + "[%s] card unusable. Please trigger unbind!\n", __func__); + + /* Bring down logical devices to inform user space via udev remove. */ + cd->card_state = GENWQE_CARD_FATAL_ERROR; + genwqe_stop(cd); + + /* genwqe_bus_reset failed(). Now wait for genwqe_remove(). */ + while (!kthread_should_stop()) + cond_resched(); + + return -EIO; +} + +static int genwqe_health_check_start(struct genwqe_dev *cd) +{ + int rc; + + if (genwqe_health_check_interval <= 0) + return 0; /* valid for disabling the service */ + + /* moved before request_irq() */ + /* init_waitqueue_head(&cd->health_waitq); */ + + cd->health_thread = kthread_run(genwqe_health_thread, cd, + GENWQE_DEVNAME "%d_health", + cd->card_idx); + if (IS_ERR(cd->health_thread)) { + rc = PTR_ERR(cd->health_thread); + cd->health_thread = NULL; + return rc; + } + return 0; +} + +static int genwqe_health_thread_running(struct genwqe_dev *cd) +{ + return cd->health_thread != NULL; +} + +static int genwqe_health_check_stop(struct genwqe_dev *cd) +{ + int rc; + + if (!genwqe_health_thread_running(cd)) + return -EIO; + + rc = kthread_stop(cd->health_thread); + cd->health_thread = NULL; + return 0; +} + +/** + * genwqe_pci_setup() - Allocate PCIe related resources for our card + */ +static int genwqe_pci_setup(struct genwqe_dev *cd) +{ + int err, bars; + struct pci_dev *pci_dev = cd->pci_dev; + + bars = pci_select_bars(pci_dev, IORESOURCE_MEM); + err = pci_enable_device_mem(pci_dev); + if (err) { + dev_err(&pci_dev->dev, + "err: failed to enable pci memory (err=%d)\n", err); + goto err_out; + } + + /* Reserve PCI I/O and memory resources */ + err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); + if (err) { + dev_err(&pci_dev->dev, + "[%s] err: request bars failed (%d)\n", __func__, err); + err = -EIO; + goto err_disable_device; + } + + /* check for 64-bit DMA address supported (DAC) */ + if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { + err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pci_dev->dev, + "err: DMA64 consistent mask error\n"); + err = -EIO; + goto out_release_resources; + } + /* check for 32-bit DMA address supported (SAC) */ + } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pci_dev->dev, + "err: DMA32 consistent mask error\n"); + err = -EIO; + goto out_release_resources; + } + } else { + dev_err(&pci_dev->dev, + "err: neither DMA32 nor DMA64 supported\n"); + err = -EIO; + goto out_release_resources; + } + + pci_set_master(pci_dev); + pci_enable_pcie_error_reporting(pci_dev); + + /* request complete BAR-0 space (length = 0) */ + cd->mmio_len = pci_resource_len(pci_dev, 0); + cd->mmio = pci_iomap(pci_dev, 0, 0); + if (cd->mmio == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: mapping BAR0 failed\n", __func__); + err = -ENOMEM; + goto out_release_resources; + } + + cd->num_vfs = pci_sriov_get_totalvfs(pci_dev); + + err = genwqe_read_ids(cd); + if (err) + goto out_iounmap; + + return 0; + + out_iounmap: + pci_iounmap(pci_dev, cd->mmio); + out_release_resources: + pci_release_selected_regions(pci_dev, bars); + 
err_disable_device:
+	pci_disable_device(pci_dev);
+ err_out:
+	return err;
+}
+
+/**
+ * genwqe_pci_remove() - Free PCIe related resources for our card
+ */
+static void genwqe_pci_remove(struct genwqe_dev *cd)
+{
+	int bars;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	if (cd->mmio)
+		pci_iounmap(pci_dev, cd->mmio);
+
+	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
+	pci_release_selected_regions(pci_dev, bars);
+	pci_disable_device(pci_dev);
+}
+
+/**
+ * genwqe_probe() - Device initialization
+ * @pci_dev: PCI device information struct
+ *
+ * Callable for multiple cards. This function is called on bind.
+ *
+ * Return: 0 if succeeded, < 0 when failed
+ */
+static int genwqe_probe(struct pci_dev *pci_dev,
+			const struct pci_device_id *id)
+{
+	int err;
+	struct genwqe_dev *cd;
+
+	genwqe_init_crc32();
+
+	cd = genwqe_dev_alloc();
+	if (IS_ERR(cd)) {
+		dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n",
+			(int)PTR_ERR(cd));
+		return PTR_ERR(cd);
+	}
+
+	dev_set_drvdata(&pci_dev->dev, cd);
+	cd->pci_dev = pci_dev;
+
+	err = genwqe_pci_setup(cd);
+	if (err < 0) {
+		dev_err(&pci_dev->dev,
+			"err: problems with PCI setup (err=%d)\n", err);
+		goto out_free_dev;
+	}
+
+	err = genwqe_start(cd);
+	if (err < 0) {
+		dev_err(&pci_dev->dev,
+			"err: cannot start card services! (err=%d)\n", err);
+		goto out_pci_remove;
+	}
+
+	if (genwqe_is_privileged(cd)) {
+		err = genwqe_health_check_start(cd);
+		if (err < 0) {
+			dev_err(&pci_dev->dev,
+				"err: cannot start health checking! "
+				"(err=%d)\n", err);
+			goto out_stop_services;
+		}
+	}
+	return 0;
+
+ out_stop_services:
+	genwqe_stop(cd);
+ out_pci_remove:
+	genwqe_pci_remove(cd);
+ out_free_dev:
+	genwqe_dev_free(cd);
+	return err;
+}
+
+/**
+ * genwqe_remove() - Called when device is removed (hot-pluggable)
+ *
+ * Also called when the driver is unloaded or when an unbind is done.
+ */
+static void genwqe_remove(struct pci_dev *pci_dev)
+{
+	struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
+
+	genwqe_health_check_stop(cd);
+
+	/*
+	 * genwqe_stop() must survive if it is called twice
+	 * sequentially. This happens when the health thread calls it
+	 * and fails on genwqe_bus_reset().
+	 */
+	genwqe_stop(cd);
+	genwqe_pci_remove(cd);
+	genwqe_dev_free(cd);
+}
+
+/*
+ * genwqe_err_error_detected() - Error detection callback
+ *
+ * This callback is called by the PCI subsystem whenever a PCI bus
+ * error is detected.
+ */ +static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev, + enum pci_channel_state state) +{ + struct genwqe_dev *cd; + + dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state); + + if (pci_dev == NULL) + return PCI_ERS_RESULT_NEED_RESET; + + cd = dev_get_drvdata(&pci_dev->dev); + if (cd == NULL) + return PCI_ERS_RESULT_NEED_RESET; + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev) +{ + return PCI_ERS_RESULT_NONE; +} + +static void genwqe_err_resume(struct pci_dev *dev) +{ +} + +static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) +{ + struct genwqe_dev *cd = dev_get_drvdata(&dev->dev); + + if (numvfs > 0) { + genwqe_setup_vf_jtimer(cd); + pci_enable_sriov(dev, numvfs); + return numvfs; + } + if (numvfs == 0) { + pci_disable_sriov(dev); + return 0; + } + return 0; +} + +static struct pci_error_handlers genwqe_err_handler = { + .error_detected = genwqe_err_error_detected, + .mmio_enabled = genwqe_err_result_none, + .link_reset = genwqe_err_result_none, + .slot_reset = genwqe_err_result_none, + .resume = genwqe_err_resume, +}; + +static struct pci_driver genwqe_driver = { + .name = genwqe_driver_name, + .id_table = genwqe_device_table, + .probe = genwqe_probe, + .remove = genwqe_remove, + .sriov_configure = genwqe_sriov_configure, + .err_handler = &genwqe_err_handler, +}; + +/** + * genwqe_init_module() - Driver registration and initialization + */ +static int __init genwqe_init_module(void) +{ + int rc; + + class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME); + if (IS_ERR(class_genwqe)) { + pr_err("[%s] create class failed\n", __func__); + return -ENOMEM; + } + + debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL); + if (!debugfs_genwqe) { + rc = -ENOMEM; + goto err_out; + } + + rc = pci_register_driver(&genwqe_driver); + if (rc != 0) { + pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc); + goto err_out0; + } + + return rc; + + err_out0: + debugfs_remove(debugfs_genwqe); + err_out: + class_destroy(class_genwqe); + return rc; +} + +/** + * genwqe_exit_module() - Driver exit + */ +static void __exit genwqe_exit_module(void) +{ + pci_unregister_driver(&genwqe_driver); + debugfs_remove(debugfs_genwqe); + class_destroy(class_genwqe); +} + +module_init(genwqe_init_module); +module_exit(genwqe_exit_module); diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h new file mode 100644 index 00000000000..0e608a28860 --- /dev/null +++ b/drivers/misc/genwqe/card_base.h @@ -0,0 +1,577 @@ +#ifndef __CARD_BASE_H__ +#define __CARD_BASE_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +/* + * Interfaces within the GenWQE module. Defines genwqe_card and + * ddcb_queue as well as ddcb_requ. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/cdev.h> +#include <linux/stringify.h> +#include <linux/pci.h> +#include <linux/semaphore.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/version.h> +#include <linux/debugfs.h> +#include <linux/slab.h> + +#include <linux/genwqe/genwqe_card.h> +#include "genwqe_driver.h" + +#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */ +#define GENWQE_FLAG_MSI_ENABLED (1 << 0) + +#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */ +#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */ +#define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS) + +/* Compile parameters, some of them appear in debugfs for later adjustment */ +#define genwqe_ddcb_max 32 /* DDCBs on the work-queue */ +#define genwqe_polling_enabled 0 /* in case of irqs not working */ +#define genwqe_ddcb_software_timeout 10 /* timeout per DDCB in seconds */ +#define genwqe_kill_timeout 8 /* time until process gets killed */ +#define genwqe_vf_jobtimeout_msec 250 /* 250 msec */ +#define genwqe_pf_jobtimeout_msec 8000 /* 8 sec should be ok */ +#define genwqe_health_check_interval 4 /* <= 0: disabled */ + +/* Sysfs attribute groups used when we create the genwqe device */ +extern const struct attribute_group *genwqe_attribute_groups[]; + +/* + * Config space for Genwqe5 A7: + * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00 + * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00 + * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04] + * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00 + */ +#define PCI_DEVICE_GENWQE 0x044b /* Genwqe DeviceID */ + +#define PCI_SUBSYSTEM_ID_GENWQE5 0x035f /* Genwqe A5 Subsystem-ID */ +#define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b /* Genwqe A5 Subsystem-ID */ +#define PCI_CLASSCODE_GENWQE5 0x1200 /* UNKNOWN */ + +#define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000 +#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */ +#define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200 /* UNKNOWN */ + +#define GENWQE_SLU_ARCH_REQ 2 /* Required SLU architecture level */ + +/** + * struct genwqe_reg - Genwqe data dump functionality + */ +struct genwqe_reg { + u32 addr; + u32 idx; + u64 val; +}; + +/* + * enum genwqe_dbg_type - Specify chip unit to dump/debug + */ +enum genwqe_dbg_type { + GENWQE_DBG_UNIT0 = 0, /* captured before prev errs cleared */ + GENWQE_DBG_UNIT1 = 1, + GENWQE_DBG_UNIT2 = 2, + GENWQE_DBG_UNIT3 = 3, + GENWQE_DBG_UNIT4 = 4, + GENWQE_DBG_UNIT5 = 5, + GENWQE_DBG_UNIT6 = 6, + GENWQE_DBG_UNIT7 = 7, + GENWQE_DBG_REGS = 8, + GENWQE_DBG_DMA = 9, + GENWQE_DBG_UNITS = 10, /* max number of possible debug units */ +}; + +/* Software error injection to simulate card failures */ +#define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001 /* injects -1 reg reads */ +#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */ +#define GENWQE_INJECT_GFIR_FATAL 0x00000004 /* GFIR = 0x0000ffff */ +#define GENWQE_INJECT_GFIR_INFO 0x00000008 /* GFIR = 0xffff0000 */ + +/* + * Genwqe card description and management data. + * + * Error-handling in case of card malfunction + * ------------------------------------------ + * + * If the card is detected to be defective the outside environment + * will cause the PCI layer to call deinit (the cleanup function for + * probe). This is the same effect like doing a unbind/bind operation + * on the card. 
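 + * + * A manual unbind/bind can be triggered via sysfs (illustrative + * commands; they assume that genwqe_driver_name resolves to "genwqe" + * and that the card sits in PCI slot 0000:01:00.0): + * sudo sh -c 'echo 0000:01:00.0 > /sys/bus/pci/drivers/genwqe/unbind' + * sudo sh -c 'echo 0000:01:00.0 > /sys/bus/pci/drivers/genwqe/bind'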
 + * + * The genwqe card driver implements a health checking thread which + * verifies the card function. If it detects a problem, the card's + * device is shut down and restarted again, along with a reset of + * the card and queue. + * + * All functions accessing the card device return either -EIO or -ENODEV + * to indicate the malfunction to the user. The user has to close + * the file descriptor and open a new one, once the card becomes + * available again. + * + * If the open file descriptor is set up to receive SIGIO, the signal is + * generated for the application which has to provide a handler to + * react to it. If the application does not close the open + * file descriptor a SIGKILL is sent to enforce freeing the card's + * resources. + * + * I did not find a different way to prevent kernel problems due to + * reference counters for the card's character devices getting out of + * sync. The character device deallocation does not block, even if + * there is still an open file descriptor pending. If this pending + * descriptor is closed, the data structures used by the character + * device are reinstantiated, which will lead to the reference counter + * dropping below the allowed values. + * + * Card recovery + * ------------- + * + * To test the internal driver recovery the following command can be used: + * sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject' + */ + + +/** + * enum dma_mapping_type - Mapping type definition + * + * To avoid copying data around we use user memory directly. To do + * this we need to pin/swap-in the memory and request a DMA address + * for it. + */ +enum dma_mapping_type { + GENWQE_MAPPING_RAW = 0, /* contiguous memory buffer */ + GENWQE_MAPPING_SGL_TEMP, /* sglist dynamically used */ + GENWQE_MAPPING_SGL_PINNED, /* sglist used with pinning */ +}; + +/** + * struct dma_mapping - Information about memory mappings done by the driver + */ +struct dma_mapping { + enum dma_mapping_type type; + + void *u_vaddr; /* user-space vaddr/non-aligned */ + void *k_vaddr; /* kernel-space vaddr/non-aligned */ + dma_addr_t dma_addr; /* physical DMA address */ + + struct page **page_list; /* list of pages used by user buff */ + dma_addr_t *dma_list; /* list of dma addresses per page */ + unsigned int nr_pages; /* number of pages */ + unsigned int size; /* size in bytes */ + + struct list_head card_list; /* list of usr_maps for card */ + struct list_head pin_list; /* list of pinned memory for dev */ +}; + +static inline void genwqe_mapping_init(struct dma_mapping *m, + enum dma_mapping_type type) +{ + memset(m, 0, sizeof(*m)); + m->type = type; +} + +/** + * struct ddcb_queue - DDCB queue data + * @ddcb_max: Number of DDCBs on the queue + * @ddcb_next: Next free DDCB + * @ddcb_act: Next DDCB supposed to finish + * @ddcb_seq: Sequence number of last DDCB + * @ddcbs_in_flight: Currently enqueued DDCBs + * @ddcbs_completed: Number of already completed DDCBs + * @ddcbs_max_in_flight: Maximum number of DDCBs in flight at any one time + * @busy: Number of -EBUSY returns + * @ddcb_daddr: DMA address of first DDCB in the queue + * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue + * @ddcb_req: Associated requests (one per DDCB) + * @ddcb_waitqs: Associated wait queues (one per DDCB) + * @ddcb_lock: Lock to protect queuing operations + * @ddcb_waitq: Wait on next DDCB finishing + */ +struct ddcb_queue { + int ddcb_max; /* amount of DDCBs */ + int ddcb_next; /* next available DDCB num */ + int ddcb_act; /* DDCB to be processed */ + u16 ddcb_seq; /* slc seq num */ + unsigned int ddcbs_in_flight; /* number of
ddcbs in processing */ + unsigned int ddcbs_completed; + unsigned int ddcbs_max_in_flight; + unsigned int busy; /* how many times -EBUSY? */ + + dma_addr_t ddcb_daddr; /* DMA address */ + struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */ + struct ddcb_requ **ddcb_req; /* ddcb processing parameter */ + wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */ + + spinlock_t ddcb_lock; /* exclusive access to queue */ + wait_queue_head_t ddcb_waitq; /* wait for ddcb processing */ + + /* registers or the respective queue to be used */ + u32 IO_QUEUE_CONFIG; + u32 IO_QUEUE_STATUS; + u32 IO_QUEUE_SEGMENT; + u32 IO_QUEUE_INITSQN; + u32 IO_QUEUE_WRAP; + u32 IO_QUEUE_OFFSET; + u32 IO_QUEUE_WTIME; + u32 IO_QUEUE_ERRCNTS; + u32 IO_QUEUE_LRW; +}; + +/* + * GFIR, SLU_UNITCFG, APP_UNITCFG + * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC. + */ +#define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64))) + +struct genwqe_ffdc { + unsigned int entries; + struct genwqe_reg *regs; +}; + +/** + * struct genwqe_dev - GenWQE device information + * @card_state: Card operation state, see above + * @ffdc: First Failure Data Capture buffers for each unit + * @card_thread: Working thread to operate the DDCB queue + * @card_waitq: Wait queue used in card_thread + * @queue: DDCB queue + * @health_thread: Card monitoring thread (only for PFs) + * @health_waitq: Wait queue used in health_thread + * @pci_dev: Associated PCI device (function) + * @mmio: Base address of 64-bit register space + * @mmio_len: Length of register area + * @file_lock: Lock to protect access to file_list + * @file_list: List of all processes with open GenWQE file descriptors + * + * This struct contains all information needed to communicate with a + * GenWQE card. It is initialized when a GenWQE device is found and + * destroyed when it goes away. It holds data to maintain the queue as + * well as data needed to feed the user interfaces. 
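+ * + * The structure is allocated via genwqe_dev_alloc() at probe time and + * released again with genwqe_dev_free(); see genwqe_probe() and + * genwqe_remove() in card_base.c.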
+ */ +struct genwqe_dev { + enum genwqe_card_state card_state; + spinlock_t print_lock; + + int card_idx; /* card index 0..CARD_NO_MAX-1 */ + u64 flags; /* general flags */ + + /* FFDC data gathering */ + struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS]; + + /* DDCB workqueue */ + struct task_struct *card_thread; + wait_queue_head_t queue_waitq; + struct ddcb_queue queue; /* genwqe DDCB queue */ + unsigned int irqs_processed; + + /* Card health checking thread */ + struct task_struct *health_thread; + wait_queue_head_t health_waitq; + + /* char device */ + dev_t devnum_genwqe; /* major/minor num card */ + struct class *class_genwqe; /* reference to class object */ + struct device *dev; /* for device creation */ + struct cdev cdev_genwqe; /* char device for card */ + + struct dentry *debugfs_root; /* debugfs card root directory */ + struct dentry *debugfs_genwqe; /* debugfs driver root directory */ + + /* pci resources */ + struct pci_dev *pci_dev; /* PCI device */ + void __iomem *mmio; /* BAR-0 MMIO start */ + unsigned long mmio_len; + u16 num_vfs; + u32 vf_jobtimeout_msec[GENWQE_MAX_VFS]; + int is_privileged; /* access to all regs possible */ + + /* config regs which we need often */ + u64 slu_unitcfg; + u64 app_unitcfg; + u64 softreset; + u64 err_inject; + u64 last_gfir; + char app_name[5]; + + spinlock_t file_lock; /* lock for open files */ + struct list_head file_list; /* list of open files */ + + /* debugfs parameters */ + int ddcb_software_timeout; /* wait until DDCB times out */ + int skip_recovery; /* circumvention if recovery fails */ + int kill_timeout; /* wait after sending SIGKILL */ +}; + +/** + * enum genwqe_requ_state - State of a DDCB execution request + */ +enum genwqe_requ_state { + GENWQE_REQU_NEW = 0, + GENWQE_REQU_ENQUEUED = 1, + GENWQE_REQU_TAPPED = 2, + GENWQE_REQU_FINISHED = 3, + GENWQE_REQU_STATE_MAX, +}; + +/** + * struct genwqe_sgl - Scatter gather list describing user-space memory + * @sgl: scatter gather list needs to be 128 byte aligned + * @sgl_dma_addr: dma address of sgl + * @sgl_size: size of area used for sgl + * @user_addr: user-space address of memory area + * @user_size: size of user-space memory area + * @page: buffer for partial pages if needed + * @page_dma_addr: dma address partial pages + */ +struct genwqe_sgl { + dma_addr_t sgl_dma_addr; + struct sg_entry *sgl; + size_t sgl_size; /* size of sgl */ + + void __user *user_addr; /* user-space base-address */ + size_t user_size; /* size of memory area */ + + unsigned long nr_pages; + unsigned long fpage_offs; + size_t fpage_size; + size_t lpage_size; + + void *fpage; + dma_addr_t fpage_dma_addr; + + void *lpage; + dma_addr_t lpage_dma_addr; +}; + +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + void __user *user_addr, size_t user_size); + +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + dma_addr_t *dma_list); + +int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl); + +/** + * struct ddcb_requ - Kernel internal representation of the DDCB request + * @cmd: User space representation of the DDCB execution request + */ +struct ddcb_requ { + /* kernel specific content */ + enum genwqe_requ_state req_state; /* request status */ + int num; /* ddcb_no for this request */ + struct ddcb_queue *queue; /* associated queue */ + + struct dma_mapping dma_mappings[DDCB_FIXUPS]; + struct genwqe_sgl sgls[DDCB_FIXUPS]; + + /* kernel/user shared content */ + struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */ + struct genwqe_debug_data debug_data; +}; + 
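+/* + * Only the embedded genwqe_ddcb_cmd is handed across the layers; the + * enclosing ddcb_requ is recovered via container_of(). A minimal + * sketch of the pattern (cf. ddcb_requ_alloc()/ddcb_requ_free() in + * card_ddcb.c): + * + * struct genwqe_ddcb_cmd *cmd = ddcb_requ_alloc(); + * struct ddcb_requ *req; + * + * if (cmd != NULL) { + * req = container_of(cmd, struct ddcb_requ, cmd); + * ... + * ddcb_requ_free(cmd); + * } + */ +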
+/** + * struct genwqe_file - Information for open GenWQE devices + */ +struct genwqe_file { + struct genwqe_dev *cd; + struct genwqe_driver *client; + struct file *filp; + + struct fasync_struct *async_queue; + struct task_struct *owner; + struct list_head list; /* entry in list of open files */ + + spinlock_t map_lock; /* lock for dma_mappings */ + struct list_head map_list; /* list of dma_mappings */ + + spinlock_t pin_lock; /* lock for pinned memory */ + struct list_head pin_list; /* list of pinned memory */ +}; + +int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */ +int genwqe_finish_queue(struct genwqe_dev *cd); +int genwqe_release_service_layer(struct genwqe_dev *cd); + +/** + * genwqe_get_slu_id() - Read Service Layer Unit Id + * Return: 0x00: Development code + * 0x01: SLC1 (old) + * 0x02: SLC2 (sept2012) + * 0x03: SLC2 (feb2013, generic driver) + */ +static inline int genwqe_get_slu_id(struct genwqe_dev *cd) +{ + return (int)((cd->slu_unitcfg >> 32) & 0xff); +} + +int genwqe_ddcbs_in_flight(struct genwqe_dev *cd); + +u8 genwqe_card_type(struct genwqe_dev *cd); +int genwqe_card_reset(struct genwqe_dev *cd); +int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count); +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd); + +int genwqe_device_create(struct genwqe_dev *cd); +int genwqe_device_remove(struct genwqe_dev *cd); + +/* debugfs */ +int genwqe_init_debugfs(struct genwqe_dev *cd); +void genqwe_exit_debugfs(struct genwqe_dev *cd); + +int genwqe_read_softreset(struct genwqe_dev *cd); + +/* Hardware Circumventions */ +int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd); +int genwqe_flash_readback_fails(struct genwqe_dev *cd); + +/** + * genwqe_write_vreg() - Write register in VF window + * @cd: genwqe device + * @reg: register address + * @val: value to write + * @func: 0: PF, 1: VF0, ..., 15: VF14 + */ +int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func); + +/** + * genwqe_read_vreg() - Read register in VF window + * @cd: genwqe device + * @reg: register address + * @func: 0: PF, 1: VF0, ..., 15: VF14 + * + * Return: content of the register + */ +u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func); + +/* FFDC Buffer Management */ +int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id); +int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id, + struct genwqe_reg *regs, unsigned int max_regs); +int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, + unsigned int max_regs, int all); +int genwqe_ffdc_dump_dma(struct genwqe_dev *cd, + struct genwqe_reg *regs, unsigned int max_regs); + +int genwqe_init_debug_data(struct genwqe_dev *cd, + struct genwqe_debug_data *d); + +void genwqe_init_crc32(void); +int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len); + +/* Memory allocation/deallocation; dma address handling */ +int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, + void *uaddr, unsigned long size, + struct ddcb_requ *req); + +int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, + struct ddcb_requ *req); + +static inline bool dma_mapping_used(struct dma_mapping *m) +{ + if (!m) + return 0; + return m->size != 0; +} + +/** + * __genwqe_execute_ddcb() - Execute DDCB request with addr translation + * + * This function will do the address translation changes to the DDCBs + * according to the definitions required by the ATS field. 
It looks up + * the memory allocation buffer or does vmap/vunmap for the respective + * user-space buffers, including page pinning and scatter gather list + * buildup and teardown. + */ +int __genwqe_execute_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd); + +/** + * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation + * + * This version will not do address translation or any modification of + * the DDCB data. It is used e.g. for the MoveFlash DDCB which is + * entirely prepared by the driver itself. That means the appropriate + * DMA addresses are already in the DDCB and do not need any + * modification. + */ +int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd); + +int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); +int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); +int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req); + +/* register access */ +int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val); +u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs); +int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val); +u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs); + +void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, + dma_addr_t *dma_handle); +void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, + void *vaddr, dma_addr_t dma_handle); + +/* Base clock frequency in MHz */ +int genwqe_base_clock_frequency(struct genwqe_dev *cd); + +/* Before FFDC is captured the traps should be stopped. */ +void genwqe_stop_traps(struct genwqe_dev *cd); +void genwqe_start_traps(struct genwqe_dev *cd); + +/* Hardware circumvention */ +bool genwqe_need_err_masking(struct genwqe_dev *cd); + +/** + * genwqe_is_privileged() - Determine operation mode for PCI function + * + * On Intel with SRIOV support we see: + * PF: is_physfn = 1 is_virtfn = 0 + * VF: is_physfn = 0 is_virtfn = 1 + * + * On systems with no SRIOV support _and_ virtualized systems we get: + * is_physfn = 0 is_virtfn = 0 + * + * Other vendors have individual pci device ids to distinguish between + * virtual function drivers and physical function drivers. GenWQE + * unfortunately has just one pci device id for both, VFs and PF. + * + * The following code is used to distinguish if the card is running in + * privileged mode, either as true PF or in a virtualized system with + * full register access e.g. currently on PowerPC. + * + * if (pci_dev->is_virtfn) + * cd->is_privileged = 0; + * else + * cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM) + * != IO_ILLEGAL_VALUE); + */ +static inline int genwqe_is_privileged(struct genwqe_dev *cd) +{ + return cd->is_privileged; +} + +#endif /* __CARD_BASE_H__ */ diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c new file mode 100644 index 00000000000..c8046db2d5a --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.c @@ -0,0 +1,1380 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Device Driver Control Block (DDCB) queue support. Definition of + * interrupt handlers for queue support as well as triggering the + * health monitor code in case of problems. The current hardware uses + * an MSI interrupt which is shared between error handling and + * functional code. + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/crc-itu-t.h> + +#include "card_base.h" +#include "card_ddcb.h" + +/* + * N: next DDCB, this is where the next DDCB will be put. + * A: active DDCB, this is where the code will look for the next completion. + * x: DDCB is enqueued, we are waiting for its completion. + * + * Situation (1): Empty queue + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | | | | | | | + * +---+---+---+---+---+---+---+---+ + * A/N + * enqueued_ddcbs = A - N = 2 - 2 = 0 + * + * Situation (2): No wrap, N > A + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | x | x | | | | | + * +---+---+---+---+---+---+---+---+ + * A N + * enqueued_ddcbs = N - A = 4 - 2 = 2 + * + * Situation (3): Queue wrapped, A > N + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | | | x | x | x | x | + * +---+---+---+---+---+---+---+---+ + * N A + * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6 + * + * Situation (4a): Queue full, N > A + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | x | x | x | x | x | | + * +---+---+---+---+---+---+---+---+ + * A N + * + * enqueued_ddcbs = N - A = 7 - 0 = 7 + * + * Situation (4b): Queue full, A > N + * +---+---+---+---+---+---+---+---+ + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | x | x | x | | x | x | x | x | + * +---+---+---+---+---+---+---+---+ + * N A + * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7 + */ + +static int queue_empty(struct ddcb_queue *queue) +{ + return queue->ddcb_next == queue->ddcb_act; +} + +static int queue_enqueued_ddcbs(struct ddcb_queue *queue) +{ + if (queue->ddcb_next >= queue->ddcb_act) + return queue->ddcb_next - queue->ddcb_act; + + return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); +} + +static int queue_free_ddcbs(struct ddcb_queue *queue) +{ + int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; + + if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen!
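It would mean the ddcb_act/ddcb_next accounting got corrupted.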
*/ + return 0; + } + return free_ddcbs; +} + +/* + * Use of the PRIV field in the DDCB for queue debugging: + * + * (1) Trying to get rid of a DDCB which saw a timeout: + * pddcb->priv[6] = 0xcc; # cleared + * + * (2) Append a DDCB via NEXT bit: + * pddcb->priv[7] = 0xaa; # appended + * + * (3) DDCB needed tapping: + * pddcb->priv[7] = 0xbb; # tapped + * + * (4) DDCB marked as correctly finished: + * pddcb->priv[6] = 0xff; # finished + */ + +static inline void ddcb_mark_tapped(struct ddcb *pddcb) +{ + pddcb->priv[7] = 0xbb; /* tapped */ +} + +static inline void ddcb_mark_appended(struct ddcb *pddcb) +{ + pddcb->priv[7] = 0xaa; /* appended */ +} + +static inline void ddcb_mark_cleared(struct ddcb *pddcb) +{ + pddcb->priv[6] = 0xcc; /* cleared */ +} + +static inline void ddcb_mark_finished(struct ddcb *pddcb) +{ + pddcb->priv[6] = 0xff; /* finished */ +} + +static inline void ddcb_mark_unused(struct ddcb *pddcb) +{ + pddcb->priv_64 = cpu_to_be64(0); /* not tapped */ +} + +/** + * genwqe_crc16() - Generate 16-bit crc as required for DDCBs + * @buff: pointer to data buffer + * @len: length of data for calculation + * @init: initial crc (0xffff at start) + * + * Polynomial = x^16 + x^12 + x^5 + 1 (0x1021) + * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff + * should result in a crc16 of 0x89c3 + * + * Return: crc16 checksum in big endian format ! + */ +static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init) +{ + return crc_itu_t(init, buff, len); +} + +static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + int i; + struct ddcb *pddcb; + unsigned long flags; + struct pci_dev *pci_dev = cd->pci_dev; + + spin_lock_irqsave(&cd->print_lock, flags); + + dev_info(&pci_dev->dev, + "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n", + cd->card_idx, queue->ddcb_act, queue->ddcb_next); + + pddcb = queue->ddcb_vaddr; + for (i = 0; i < queue->ddcb_max; i++) { + dev_err(&pci_dev->dev, + " %c %-3d: RETC=%03x SEQ=%04x " + "HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n", + i == queue->ddcb_act ? '>' : ' ', + i, + be16_to_cpu(pddcb->retc_16), + be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, + pddcb->shi, + be64_to_cpu(pddcb->priv_64), + pddcb->cmd); + pddcb++; + } + spin_unlock_irqrestore(&cd->print_lock, flags); +} + +struct genwqe_ddcb_cmd *ddcb_requ_alloc(void) +{ + struct ddcb_requ *req; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return NULL; + + return &req->cmd; +} + +void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd) +{ + struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + kfree(req); +} + +static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req) +{ + return req->req_state; +} + +static inline void ddcb_requ_set_state(struct ddcb_requ *req, + enum genwqe_requ_state new_state) +{ + req->req_state = new_state; +} + +static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req) +{ + return req->cmd.ddata_addr != 0x0; +} + +/** + * ddcb_requ_finished() - Returns the hardware state of the associated DDCB + * @cd: pointer to genwqe device descriptor + * @req: DDCB work request + * + * Status of ddcb_requ mirrors this hardware state, but is copied in + * the ddcb_requ on interrupt/polling function. The lowlevel code + * should check the hardware state directly, the higher level code + * should check the copy. + * + * This function will also return true if the state of the queue is + * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the + * shutdown case. 
+ */ +static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) || + (cd->card_state != GENWQE_CARD_USED); +} + +/** + * enqueue_ddcb() - Enqueue a DDCB + * @cd: pointer to genwqe device descriptor + * @queue: queue this operation should be done on + * @ddcb_no: pointer to ddcb number being tapped + * + * Start execution of DDCB by tapping or append to queue via NEXT + * bit. This is done by an atomic 'compare and swap' instruction and + * checking SHI and HSI of the previous DDCB. + * + * This function must only be called with ddcb_lock held. + * + * Return: 1 if new DDCB is appended to previous + * 2 if DDCB queue is tapped via register/simulation + */ +#define RET_DDCB_APPENDED 1 +#define RET_DDCB_TAPPED 2 + +static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, + struct ddcb *pddcb, int ddcb_no) +{ + unsigned int try; + int prev_no; + struct ddcb *prev_ddcb; + __be32 old, new, icrc_hsi_shi; + u64 num; + + /* + * For performance checks a Dispatch Timestamp can be put into + * DDCB It is supposed to use the SLU's free running counter, + * but this requires PCIe cycles. + */ + ddcb_mark_unused(pddcb); + + /* check previous DDCB if already fetched */ + prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1; + prev_ddcb = &queue->ddcb_vaddr[prev_no]; + + /* + * It might have happened that the HSI.FETCHED bit is + * set. Retry in this case. Therefore I expect maximum 2 times + * trying. + */ + ddcb_mark_appended(pddcb); + for (try = 0; try < 2; try++) { + old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */ + + /* try to append via NEXT bit if prev DDCB is not completed */ + if ((old & DDCB_COMPLETED_BE32) != 0x00000000) + break; + + new = (old | DDCB_NEXT_BE32); + + wmb(); + icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); + + if (icrc_hsi_shi == old) + return RET_DDCB_APPENDED; /* appended to queue */ + } + + /* Queue must be re-started by updating QUEUE_OFFSET */ + ddcb_mark_tapped(pddcb); + num = (u64)ddcb_no << 8; + + wmb(); + __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ + + return RET_DDCB_TAPPED; +} + +/** + * copy_ddcb_results() - Copy output state from real DDCB to request + * + * Copy DDCB ASV to request struct. There is no endian + * conversion made, since data structure in ASV is still + * unknown here. + * + * This is needed by: + * - genwqe_purge_ddcb() + * - genwqe_check_ddcb_queue() + */ +static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no) +{ + struct ddcb_queue *queue = req->queue; + struct ddcb *pddcb = &queue->ddcb_vaddr[req->num]; + + memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH); + + /* copy status flags of the variant part */ + req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16); + req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64); + req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64); + + req->cmd.attn = be16_to_cpu(pddcb->attn_16); + req->cmd.progress = be32_to_cpu(pddcb->progress_32); + req->cmd.retc = be16_to_cpu(pddcb->retc_16); + + if (ddcb_requ_collect_debug_data(req)) { + int prev_no = (ddcb_no == 0) ? + queue->ddcb_max - 1 : ddcb_no - 1; + struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no]; + + memcpy(&req->debug_data.ddcb_finished, pddcb, + sizeof(req->debug_data.ddcb_finished)); + memcpy(&req->debug_data.ddcb_prev, prev_pddcb, + sizeof(req->debug_data.ddcb_prev)); + } +} + +/** + * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work equests. 
+ * @cd: pointer to genwqe device descriptor + * + * Return: Number of DDCBs which were finished + */ +static int genwqe_check_ddcb_queue(struct genwqe_dev *cd, + struct ddcb_queue *queue) +{ + unsigned long flags; + int ddcbs_finished = 0; + struct pci_dev *pci_dev = cd->pci_dev; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + /* FIXME avoid soft locking CPU */ + while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) { + + struct ddcb *pddcb; + struct ddcb_requ *req; + u16 vcrc, vcrc_16, retc_16; + + pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; + + if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == + 0x00000000) + goto go_home; /* not completed, continue waiting */ + + /* Note: DDCB could be purged */ + + req = queue->ddcb_req[queue->ddcb_act]; + if (req == NULL) { + /* this occurs if DDCB is purged, not an error */ + /* Move active DDCB further; Nothing to do anymore. */ + goto pick_next_one; + } + + /* + * HSI=0x44 (fetched and completed), but RETC is + * 0x101, or even worse 0x000. + * + * In case of seeing the queue in inconsistent state + * we read the errcnts and the queue status to provide + * a trigger for our PCIe analyzer stop capturing. + */ + retc_16 = be16_to_cpu(pddcb->retc_16); + if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) { + u64 errcnts, status; + u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr; + + errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS); + status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); + + dev_err(&pci_dev->dev, + "[%s] SEQN=%04x HSI=%02x RETC=%03x " + " Q_ERRCNTS=%016llx Q_STATUS=%016llx\n" + " DDCB_DMA_ADDR=%016llx\n", + __func__, be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, retc_16, errcnts, status, + queue->ddcb_daddr + ddcb_offs); + } + + copy_ddcb_results(req, queue->ddcb_act); + queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */ + + dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + ddcb_mark_finished(pddcb); + + /* calculate CRC_16 to see if VCRC is correct */ + vcrc = genwqe_crc16(pddcb->asv, + VCRC_LENGTH(req->cmd.asv_length), + 0xffff); + vcrc_16 = be16_to_cpu(pddcb->vcrc_16); + if (vcrc != vcrc_16) { + printk_ratelimited(KERN_ERR + "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d " + "bytes vcrc_data=%04x is not vcrc_card=%04x\n", + GENWQE_DEVNAME, dev_name(&pci_dev->dev), + pddcb->pre, VCRC_LENGTH(req->cmd.asv_length), + vcrc, vcrc_16); + } + + ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); + queue->ddcbs_completed++; + queue->ddcbs_in_flight--; + + /* wake up process waiting for this DDCB */ + wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); + +pick_next_one: + queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max; + ddcbs_finished++; + } + + go_home: + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return ddcbs_finished; +} + +/** + * __genwqe_wait_ddcb(): Waits until DDCB is completed + * @cd: pointer to genwqe device descriptor + * @req: pointer to requsted DDCB parameters + * + * The Service Layer will update the RETC in DDCB when processing is + * pending or done. + * + * Return: > 0 remaining jiffies, DDCB completed + * -ETIMEDOUT when timeout + * -ERESTARTSYS when ^C + * -EINVAL when unknown error condition + * + * When an error is returned the called needs to ensure that + * purge_ddcb() is being called to get the &req removed from the + * queue. 
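 + * + * A typical call sequence, as used by __genwqe_execute_raw_ddcb() + * later in this file (sketch): + * + * rc = __genwqe_enqueue_ddcb(cd, req); + * if (rc != 0) + * return rc; + * + * rc = __genwqe_wait_ddcb(cd, req); + * if (rc < 0) + * __genwqe_purge_ddcb(cd, req);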
+ */ +int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + int rc; + unsigned int ddcb_no; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + + if (req == NULL) + return -EINVAL; + + queue = req->queue; + if (queue == NULL) + return -EINVAL; + + ddcb_no = req->num; + if (ddcb_no >= queue->ddcb_max) + return -EINVAL; + + rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no], + ddcb_requ_finished(cd, req), + genwqe_ddcb_software_timeout * HZ); + + /* + * We need to distinguish 3 cases here: + * 1. rc == 0 timeout occured + * 2. rc == -ERESTARTSYS signal received + * 3. rc > 0 remaining jiffies condition is true + */ + if (rc == 0) { + struct ddcb_queue *queue = req->queue; + struct ddcb *pddcb; + + /* + * Timeout may be caused by long task switching time. + * When timeout happens, check if the request has + * meanwhile completed. + */ + genwqe_check_ddcb_queue(cd, req->queue); + if (ddcb_requ_finished(cd, req)) + return rc; + + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n", + __func__, req->num, rc, ddcb_requ_get_state(req), + req); + dev_err(&pci_dev->dev, + "[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__, + __genwqe_readq(cd, queue->IO_QUEUE_STATUS)); + + pddcb = &queue->ddcb_vaddr[req->num]; + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + print_ddcb_info(cd, req->queue); + return -ETIMEDOUT; + + } else if (rc == -ERESTARTSYS) { + return rc; + /* + * EINTR: Stops the application + * ERESTARTSYS: Restartable systemcall; called again + */ + + } else if (rc < 0) { + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d unknown result (rc=%d) %d!\n", + __func__, req->num, rc, ddcb_requ_get_state(req)); + return -EINVAL; + } + + /* Severe error occured. Driver is forced to stop operation */ + if (cd->card_state != GENWQE_CARD_USED) { + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d forced to stop (rc=%d)\n", + __func__, req->num, rc); + return -EIO; + } + return rc; +} + +/** + * get_next_ddcb() - Get next available DDCB + * @cd: pointer to genwqe device descriptor + * + * DDCB's content is completely cleared but presets for PRE and + * SEQNUM. This function must only be called when ddcb_lock is held. + * + * Return: NULL if no empty DDCB available otherwise ptr to next DDCB. + */ +static struct ddcb *get_next_ddcb(struct genwqe_dev *cd, + struct ddcb_queue *queue, + int *num) +{ + u64 *pu64; + struct ddcb *pddcb; + + if (queue_free_ddcbs(queue) == 0) /* queue is full */ + return NULL; + + /* find new ddcb */ + pddcb = &queue->ddcb_vaddr[queue->ddcb_next]; + + /* if it is not completed, we are not allowed to use it */ + /* barrier(); */ + if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000) + return NULL; + + *num = queue->ddcb_next; /* internal DDCB number */ + queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max; + + /* clear important DDCB fields */ + pu64 = (u64 *)pddcb; + pu64[0] = 0ULL; /* offs 0x00 (ICRC,HSI,SHI,...) */ + pu64[1] = 0ULL; /* offs 0x01 (ACFUNC,CMD...) */ + + /* destroy previous results in ASV */ + pu64[0x80/8] = 0ULL; /* offs 0x80 (ASV + 0) */ + pu64[0x88/8] = 0ULL; /* offs 0x88 (ASV + 0x08) */ + pu64[0x90/8] = 0ULL; /* offs 0x90 (ASV + 0x10) */ + pu64[0x98/8] = 0ULL; /* offs 0x98 (ASV + 0x18) */ + pu64[0xd0/8] = 0ULL; /* offs 0xd0 (RETC,ATTN...) 
*/ + + pddcb->pre = DDCB_PRESET_PRE; /* 128 */ + pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++); + return pddcb; +} + +/** + * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue + * @cd: genwqe device descriptor + * @req: DDCB request + * + * This will fail when the request was already FETCHED. In this case + * we need to wait until it is finished. Else the DDCB can be + * reused. This function also ensures that the request data structure + * is removed from ddcb_req[]. + * + * Do not forget to call this function when genwqe_wait_ddcb() fails, + * such that the request gets really removed from ddcb_req[]. + * + * Return: 0 success + */ +int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + struct ddcb *pddcb = NULL; + unsigned int t; + unsigned long flags; + struct ddcb_queue *queue = req->queue; + struct pci_dev *pci_dev = cd->pci_dev; + u64 queue_status; + __be32 icrc_hsi_shi = 0x0000; + __be32 old, new; + + /* unsigned long flags; */ + if (genwqe_ddcb_software_timeout <= 0) { + dev_err(&pci_dev->dev, + "[%s] err: software timeout is not set!\n", __func__); + return -EFAULT; + } + + pddcb = &queue->ddcb_vaddr[req->num]; + + for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) { + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + /* Check if req was meanwhile finished */ + if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) + goto go_home; + + /* try to set PURGE bit if FETCHED/COMPLETED are not set */ + old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */ + if ((old & DDCB_FETCHED_BE32) == 0x00000000) { + + new = (old | DDCB_PURGE_BE32); + icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32, + old, new); + if (icrc_hsi_shi == old) + goto finish_ddcb; + } + + /* normal finish with HSI bit */ + barrier(); + icrc_hsi_shi = pddcb->icrc_hsi_shi_32; + if (icrc_hsi_shi & DDCB_COMPLETED_BE32) + goto finish_ddcb; + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + /* + * Here the check_ddcb() function will most likely + * discover this DDCB to be finished some point in + * time. It will mark the req finished and free it up + * in the list. + */ + + copy_ddcb_results(req, req->num); /* for the failing case */ + msleep(100); /* sleep for 1/10 second and try again */ + continue; + +finish_ddcb: + copy_ddcb_results(req, req->num); + ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); + queue->ddcbs_in_flight--; + queue->ddcb_req[req->num] = NULL; /* delete from array */ + ddcb_mark_cleared(pddcb); + + /* Move active DDCB further; Nothing to do here anymore. */ + + /* + * We need to ensure that there is at least one free + * DDCB in the queue. To do that, we must update + * ddcb_act only if the COMPLETED bit is set for the + * DDCB we are working on else we treat that DDCB even + * if we PURGED it as occupied (hardware is supposed + * to set the COMPLETED bit yet!). + */ + icrc_hsi_shi = pddcb->icrc_hsi_shi_32; + if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) && + (queue->ddcb_act == req->num)) { + queue->ddcb_act = ((queue->ddcb_act + 1) % + queue->ddcb_max); + } +go_home: + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 0; + } + + /* + * If the card is dead and the queue is forced to stop, we + * might see this in the queue status register. 
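+ * The register content is therefore added to the error message below + * to ease failure analysis.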
+ */ + queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); + + dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + dev_err(&pci_dev->dev, + "[%s] err: DDCB#%d not purged and not completed " + "after %d seconds QSTAT=%016llx!!\n", + __func__, req->num, genwqe_ddcb_software_timeout, + queue_status); + + print_ddcb_info(cd, req->queue); + + return -EFAULT; +} + +int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d) +{ + int len; + struct pci_dev *pci_dev = cd->pci_dev; + + if (d == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: invalid memory for debug data!\n", + __func__); + return -EFAULT; + } + + len = sizeof(d->driver_version); + snprintf(d->driver_version, len, "%s", DRV_VERS_STRING); + d->slu_unitcfg = cd->slu_unitcfg; + d->app_unitcfg = cd->app_unitcfg; + return 0; +} + +/** + * __genwqe_enqueue_ddcb() - Enqueue a DDCB + * @cd: pointer to genwqe device descriptor + * @req: pointer to DDCB execution request + * + * Return: 0 if enqueuing succeeded + * -EIO if card is unusable/PCIe problems + * -EBUSY if enqueuing failed + */ +int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) +{ + struct ddcb *pddcb; + unsigned long flags; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + u16 icrc; + + if (cd->card_state != GENWQE_CARD_USED) { + printk_ratelimited(KERN_ERR + "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n", + GENWQE_DEVNAME, dev_name(&pci_dev->dev), + __func__, req->num); + return -EIO; + } + + queue = req->queue = &cd->queue; + + /* FIXME circumvention to improve performance when no irq is + * there. + */ + if (genwqe_polling_enabled) + genwqe_check_ddcb_queue(cd, queue); + + /* + * It must be ensured to process all DDCBs in successive + * order. Use a lock here in order to prevent nested DDCB + * enqueuing. + */ + spin_lock_irqsave(&queue->ddcb_lock, flags); + + pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */ + if (pddcb == NULL) { + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + queue->busy++; + return -EBUSY; + } + + if (queue->ddcb_req[req->num] != NULL) { + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + dev_err(&pci_dev->dev, + "[%s] picked DDCB %d with req=%p still in use!!\n", + __func__, req->num, req); + return -EFAULT; + } + ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED); + queue->ddcb_req[req->num] = req; + + pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts); + pddcb->cmd = req->cmd.cmd; + pddcb->acfunc = req->cmd.acfunc; /* functional unit */ + + /* + * We know that we can get retc 0x104 with CRC error, do not + * stop the queue in those cases for this command. XDIR = 1 + * does not work for old SLU versions. + * + * Last bitstream with the old XDIR behavior had SLU_ID + * 0x34199. + */ + if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull) + pddcb->xdir = 0x1; + else + pddcb->xdir = 0x0; + + + pddcb->psp = (((req->cmd.asiv_length / 8) << 4) | + ((req->cmd.asv_length / 8))); + pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts); + + /* + * If copying the whole DDCB_ASIV_LENGTH is impacting + * performance we need to change it to + * req->cmd.asiv_length. But simulation benefits from some + * non-architectured bits behind the architectured content. + * + * How much data is copied depends on the availability of the + * ATS field, which was introduced late. If the ATS field is + * supported ASIV is 8 bytes shorter than it used to be. 
Since + * the ATS field is copied too, the code should do exactly + * what it did before, but I wanted to make copying of the ATS + * field very explicit. + */ + if (genwqe_get_slu_id(cd) <= 0x2) { + memcpy(&pddcb->__asiv[0], /* destination */ + &req->cmd.__asiv[0], /* source */ + DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */ + } else { + pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats); + memcpy(&pddcb->n.asiv[0], /* destination */ + &req->cmd.asiv[0], /* source */ + DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */ + } + + pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */ + + /* + * Calculate CRC_16 for corresponding range PSP(7:4). Include + * empty 4 bytes prior to the data. + */ + icrc = genwqe_crc16((const u8 *)pddcb, + ICRC_LENGTH(req->cmd.asiv_length), 0xffff); + pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16); + + /* enable DDCB completion irq */ + if (!genwqe_polling_enabled) + pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32; + + dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num); + genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb)); + + if (ddcb_requ_collect_debug_data(req)) { + /* use the kernel copy of debug data. copying back to + user buffer happens later */ + + genwqe_init_debug_data(cd, &req->debug_data); + memcpy(&req->debug_data.ddcb_before, pddcb, + sizeof(req->debug_data.ddcb_before)); + } + + enqueue_ddcb(cd, queue, pddcb, req->num); + queue->ddcbs_in_flight++; + + if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight) + queue->ddcbs_max_in_flight = queue->ddcbs_in_flight; + + ddcb_requ_set_state(req, GENWQE_REQU_TAPPED); + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + wake_up_interruptible(&cd->queue_waitq); + + return 0; +} + +/** + * __genwqe_execute_raw_ddcb() - Setup and execute DDCB + * @cd: pointer to genwqe device descriptor + * @cmd: user provided DDCB command + */ +int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd, + struct genwqe_ddcb_cmd *cmd) +{ + int rc = 0; + struct pci_dev *pci_dev = cd->pci_dev; + struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + + if (cmd->asiv_length > DDCB_ASIV_LENGTH) { + dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n", + __func__, cmd->asiv_length); + return -EINVAL; + } + if (cmd->asv_length > DDCB_ASV_LENGTH) { + dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n", + __func__, cmd->asv_length); + return -EINVAL; + } + rc = __genwqe_enqueue_ddcb(cd, req); + if (rc != 0) + return rc; + + rc = __genwqe_wait_ddcb(cd, req); + if (rc < 0) /* error or signal interrupt */ + goto err_exit; + + if (ddcb_requ_collect_debug_data(req)) { + if (copy_to_user((struct genwqe_debug_data __user *) + (unsigned long)cmd->ddata_addr, + &req->debug_data, + sizeof(struct genwqe_debug_data))) + return -EFAULT; + } + + /* + * Higher values than 0x102 indicate completion with faults, + * lower values than 0x102 indicate processing faults. Note + * that DDCB might have been purged. E.g. Ctrl+C. + */ + if (cmd->retc != DDCB_RETC_COMPLETE) { + /* This can happen e.g. for a flash read, and needs to be + handled by the upper layer code. */ + rc = -EBADMSG; /* not processed/error retc */ + } + + return rc; + + err_exit: + __genwqe_purge_ddcb(cd, req); + + if (ddcb_requ_collect_debug_data(req)) { + if (copy_to_user((struct genwqe_debug_data __user *) + (unsigned long)cmd->ddata_addr, + &req->debug_data, + sizeof(struct genwqe_debug_data))) + return -EFAULT; + } + return rc; +} + +/** + * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished + * + * We use this as condition for our wait-queue code. + */ +static int genwqe_next_ddcb_ready(struct genwqe_dev *cd) +{ + unsigned long flags; + struct ddcb *pddcb; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + if (queue_empty(queue)) { /* empty queue */ + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 0; + } + + pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; + if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */ + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 1; + } + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + return 0; +} + +/** + * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight + * + * Keep track of the number of DDCBs which are currently in the + * queue. This is needed for statistics as well as for the condition + * deciding whether to wait or rather poll in case no interrupts are + * available. + */ +int genwqe_ddcbs_in_flight(struct genwqe_dev *cd) +{ + unsigned long flags; + int ddcbs_in_flight = 0; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + ddcbs_in_flight += queue->ddcbs_in_flight; + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + return ddcbs_in_flight; +} + +static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + int rc, i; + struct ddcb *pddcb; + u64 val64; + unsigned int queue_size; + struct pci_dev *pci_dev = cd->pci_dev; + + if (genwqe_ddcb_max < 2) + return -EINVAL; + + queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE); + + queue->ddcbs_in_flight = 0; /* statistics */ + queue->ddcbs_max_in_flight = 0; + queue->ddcbs_completed = 0; + queue->busy = 0; + + queue->ddcb_seq = 0x100; /* start sequence number */ + queue->ddcb_max = genwqe_ddcb_max; /* module parameter */ + queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size, + &queue->ddcb_daddr); + if (queue->ddcb_vaddr == NULL) { + dev_err(&pci_dev->dev, + "[%s] **err: could not allocate DDCB **\n", __func__); + return -ENOMEM; + } + memset(queue->ddcb_vaddr, 0, queue_size); + + queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) * + queue->ddcb_max, GFP_KERNEL); + if (!queue->ddcb_req) { + rc = -ENOMEM; + goto free_ddcbs; + } + + queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) * + queue->ddcb_max, GFP_KERNEL); + if (!queue->ddcb_waitqs) { + rc = -ENOMEM; + goto free_requs; + } + + for (i = 0; i < queue->ddcb_max; i++) { + pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */ + pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32; + pddcb->retc_16 = cpu_to_be16(0xfff); + + queue->ddcb_req[i] = NULL; /* requests */ + init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */ + } + + queue->ddcb_act = 0; + queue->ddcb_next = 0; /* queue is empty */ + + spin_lock_init(&queue->ddcb_lock); + init_waitqueue_head(&queue->ddcb_waitq); + + val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */ + __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */ + __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr); + __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq); +
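/* lastptr (computed above): last valid DDCB number, i.e. the wrap point */ +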
__genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64); + return 0; + + free_requs: + kfree(queue->ddcb_req); + queue->ddcb_req = NULL; + free_ddcbs: + __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, + queue->ddcb_daddr); + queue->ddcb_vaddr = NULL; + queue->ddcb_daddr = 0ull; + return -ENODEV; + +} + +static int ddcb_queue_initialized(struct ddcb_queue *queue) +{ + return queue->ddcb_vaddr != NULL; +} + +static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) +{ + unsigned int queue_size; + + queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE); + + kfree(queue->ddcb_req); + queue->ddcb_req = NULL; + + if (queue->ddcb_vaddr) { + __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, + queue->ddcb_daddr); + queue->ddcb_vaddr = NULL; + queue->ddcb_daddr = 0ull; + } +} + +static irqreturn_t genwqe_pf_isr(int irq, void *dev_id) +{ + u64 gfir; + struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; + struct pci_dev *pci_dev = cd->pci_dev; + + /* + * In case of fatal FIR error the queue is stopped, such that + * we can safely check it without risking anything. + */ + cd->irqs_processed++; + wake_up_interruptible(&cd->queue_waitq); + + /* + * Checking for errors before kicking the queue might be + * safer, but slower for the good-case ... See above. + */ + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + if ((gfir & GFIR_ERR_TRIGGER) != 0x0) { + + wake_up_interruptible(&cd->health_waitq); + + /* + * By default GFIRs causes recovery actions. This + * count is just for debug when recovery is masked. + */ + printk_ratelimited(KERN_ERR + "%s %s: [%s] GFIR=%016llx\n", + GENWQE_DEVNAME, dev_name(&pci_dev->dev), + __func__, gfir); + } + + return IRQ_HANDLED; +} + +static irqreturn_t genwqe_vf_isr(int irq, void *dev_id) +{ + struct genwqe_dev *cd = (struct genwqe_dev *)dev_id; + + cd->irqs_processed++; + wake_up_interruptible(&cd->queue_waitq); + + return IRQ_HANDLED; +} + +/** + * genwqe_card_thread() - Work thread for the DDCB queue + * + * The idea is to check if there are DDCBs in processing. If there are + * some finished DDCBs, we process them and wakeup the + * requestors. Otherwise we give other processes time using + * cond_resched(). + */ +static int genwqe_card_thread(void *data) +{ + int should_stop = 0, rc = 0; + struct genwqe_dev *cd = (struct genwqe_dev *)data; + + while (!kthread_should_stop()) { + + genwqe_check_ddcb_queue(cd, &cd->queue); + + if (genwqe_polling_enabled) { + rc = wait_event_interruptible_timeout( + cd->queue_waitq, + genwqe_ddcbs_in_flight(cd) || + (should_stop = kthread_should_stop()), 1); + } else { + rc = wait_event_interruptible_timeout( + cd->queue_waitq, + genwqe_next_ddcb_ready(cd) || + (should_stop = kthread_should_stop()), HZ); + } + if (should_stop) + break; + + /* + * Avoid soft lockups on heavy loads; we do not want + * to disable our interrupts. + */ + cond_resched(); + } + return 0; +} + +/** + * genwqe_setup_service_layer() - Setup DDCB queue + * @cd: pointer to genwqe device descriptor + * + * Allocate DDCBs. Configure Service Layer Controller (SLC). 
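 + * + * On the PF the card is reset first. Afterwards the DDCB queue is + * allocated, the worker thread is started and interrupt capability + * is enabled; PF and VF install different IRQ handlers + * (genwqe_pf_isr() vs. genwqe_vf_isr()).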
+ * + * Return: 0 success + */ +int genwqe_setup_service_layer(struct genwqe_dev *cd) +{ + int rc; + struct ddcb_queue *queue; + struct pci_dev *pci_dev = cd->pci_dev; + + if (genwqe_is_privileged(cd)) { + rc = genwqe_card_reset(cd); + if (rc < 0) { + dev_err(&pci_dev->dev, + "[%s] err: reset failed.\n", __func__); + return rc; + } + genwqe_read_softreset(cd); + } + + queue = &cd->queue; + queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG; + queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS; + queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT; + queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN; + queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET; + queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP; + queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME; + queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS; + queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW; + + rc = setup_ddcb_queue(cd, queue); + if (rc != 0) { + rc = -ENODEV; + goto err_out; + } + + init_waitqueue_head(&cd->queue_waitq); + cd->card_thread = kthread_run(genwqe_card_thread, cd, + GENWQE_DEVNAME "%d_thread", + cd->card_idx); + if (IS_ERR(cd->card_thread)) { + rc = PTR_ERR(cd->card_thread); + cd->card_thread = NULL; + goto stop_free_queue; + } + + rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS); + if (rc > 0) + rc = genwqe_set_interrupt_capability(cd, rc); + if (rc != 0) { + rc = -ENODEV; + goto stop_kthread; + } + + /* + * We must have all wait-queues initialized when we enable the + * interrupts. Otherwise we might crash if we get an early + * irq. + */ + init_waitqueue_head(&cd->health_waitq); + + if (genwqe_is_privileged(cd)) { + rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED, + GENWQE_DEVNAME, cd); + } else { + rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED, + GENWQE_DEVNAME, cd); + } + if (rc < 0) { + dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq); + goto stop_irq_cap; + } + + cd->card_state = GENWQE_CARD_USED; + return 0; + + stop_irq_cap: + genwqe_reset_interrupt_capability(cd); + stop_kthread: + kthread_stop(cd->card_thread); + cd->card_thread = NULL; + stop_free_queue: + free_ddcb_queue(cd, queue); + err_out: + return rc; +} + +/** + * queue_wake_up_all() - Handles fatal error case + * + * The PCI device got unusable and we have to stop all pending + * requests as fast as we can. The code after this must purge the + * DDCBs in question and ensure that all mappings are freed. + */ +static int queue_wake_up_all(struct genwqe_dev *cd) +{ + unsigned int i; + unsigned long flags; + struct ddcb_queue *queue = &cd->queue; + + spin_lock_irqsave(&queue->ddcb_lock, flags); + + for (i = 0; i < queue->ddcb_max; i++) + wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); + + spin_unlock_irqrestore(&queue->ddcb_lock, flags); + + return 0; +} + +/** + * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces + * + * Relies on the pre-condition that there are no users of the card + * device anymore e.g. with open file-descriptors. + * + * This function must be robust enough to be called twice. + */ +int genwqe_finish_queue(struct genwqe_dev *cd) +{ + int i, rc = 0, in_flight; + int waitmax = genwqe_ddcb_software_timeout; + struct pci_dev *pci_dev = cd->pci_dev; + struct ddcb_queue *queue = &cd->queue; + + if (!ddcb_queue_initialized(queue)) + return 0; + + /* Do not wipe out the error state. */ + if (cd->card_state == GENWQE_CARD_USED) + cd->card_state = GENWQE_CARD_UNUSED; + + /* Wake up all requests in the DDCB queue such that they + should be removed nicely. 
*/ + queue_wake_up_all(cd); + + /* We must wait to get rid of the DDCBs in flight */ + for (i = 0; i < waitmax; i++) { + in_flight = genwqe_ddcbs_in_flight(cd); + + if (in_flight == 0) + break; + + dev_dbg(&pci_dev->dev, + " DEBUG [%d/%d] waiting for queue to get empty: " + "%d requests!\n", i, waitmax, in_flight); + + /* + * Severe severe error situation: The card itself has + * 16 DDCB queues, each queue has e.g. 32 entries, + * each DDBC has a hardware timeout of currently 250 + * msec but the PFs have a hardware timeout of 8 sec + * ... so I take something large. + */ + msleep(1000); + } + if (i == waitmax) { + dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n", + __func__); + rc = -EIO; + } + return rc; +} + +/** + * genwqe_release_service_layer() - Shutdown DDCB queue + * @cd: genwqe device descriptor + * + * This function must be robust enough to be called twice. + */ +int genwqe_release_service_layer(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (!ddcb_queue_initialized(&cd->queue)) + return 1; + + free_irq(pci_dev->irq, cd); + genwqe_reset_interrupt_capability(cd); + + if (cd->card_thread != NULL) { + kthread_stop(cd->card_thread); + cd->card_thread = NULL; + } + + free_ddcb_queue(cd, &cd->queue); + return 0; +} diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h new file mode 100644 index 00000000000..c4f26720753 --- /dev/null +++ b/drivers/misc/genwqe/card_ddcb.h @@ -0,0 +1,188 @@ +#ifndef __CARD_DDCB_H__ +#define __CARD_DDCB_H__ + +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> + +#include "genwqe_driver.h" +#include "card_base.h" + +/** + * struct ddcb - Device Driver Control Block DDCB + * @hsi: Hardware software interlock + * @shi: Software hardware interlock. Hsi and shi are used to interlock + * software and hardware activities. We are using a compare and + * swap operation to ensure that there are no races when + * activating new DDCBs on the queue, or when we need to + * purge a DDCB from a running queue. + * @acfunc: Accelerator function addresses a unit within the chip + * @cmd: Command to work on + * @cmdopts_16: Options for the command + * @asiv: Input data + * @asv: Output data + * + * The DDCB data format is big endian. Multiple consequtive DDBCs form + * a DDCB queue. + */ +#define ASIV_LENGTH 104 /* Old specification without ATS field */ +#define ASIV_LENGTH_ATS 96 /* New specification with ATS field */ +#define ASV_LENGTH 64 + +struct ddcb { + union { + __be32 icrc_hsi_shi_32; /* iCRC, Hardware/SW interlock */ + struct { + __be16 icrc_16; + u8 hsi; + u8 shi; + }; + }; + u8 pre; /* Preamble */ + u8 xdir; /* Execution Directives */ + __be16 seqnum_16; /* Sequence Number */ + + u8 acfunc; /* Accelerator Function.. 
*/ + u8 cmd; /* Command. */ + __be16 cmdopts_16; /* Command Options */ + u8 sur; /* Status Update Rate */ + u8 psp; /* Protection Section Pointer */ + __be16 rsvd_0e_16; /* Reserved invariant */ + + __be64 fwiv_64; /* Firmware Invariant. */ + + union { + struct { + __be64 ats_64; /* Address Translation Spec */ + u8 asiv[ASIV_LENGTH_ATS]; /* New ASIV */ + } n; + u8 __asiv[ASIV_LENGTH]; /* obsolete */ + }; + u8 asv[ASV_LENGTH]; /* Appl Spec Variant */ + + __be16 rsvd_c0_16; /* Reserved Variant */ + __be16 vcrc_16; /* Variant CRC */ + __be32 rsvd_32; /* Reserved unprotected */ + + __be64 deque_ts_64; /* Deque Time Stamp. */ + + __be16 retc_16; /* Return Code */ + __be16 attn_16; /* Attention/Extended Error Codes */ + __be32 progress_32; /* Progress indicator. */ + + __be64 cmplt_ts_64; /* Completion Time Stamp. */ + + /* The following layout matches the new service layer format */ + __be32 ibdc_32; /* Inbound Data Count (* 256) */ + __be32 obdc_32; /* Outbound Data Count (* 256) */ + + __be64 rsvd_SLH_64; /* Reserved for hardware */ + union { /* private data for driver */ + u8 priv[8]; + __be64 priv_64; + }; + __be64 disp_ts_64; /* Dispatch TimeStamp */ +} __attribute__((__packed__)); + +/* CRC polynomials for DDCB */ +#define CRC16_POLYNOMIAL 0x1021 + +/* + * SHI: Software to Hardware Interlock + * This 1 byte field is written by software to interlock the + * movement of one queue entry to another with the hardware in the + * chip. + */ +#define DDCB_SHI_INTR 0x04 /* Bit 2 */ +#define DDCB_SHI_PURGE 0x02 /* Bit 1 */ +#define DDCB_SHI_NEXT 0x01 /* Bit 0 */ + +/* + * HSI: Hardware to Software interlock + * This 1 byte field is written by hardware to interlock the movement + * of one queue entry to another with the software in the chip. + */ +#define DDCB_HSI_COMPLETED 0x40 /* Bit 6 */ +#define DDCB_HSI_FETCHED 0x04 /* Bit 2 */ + +/* + * Accessing HSI/SHI is done 32-bit wide + * Normally 16-bit access would work too, but on some platforms the + * 16 compare and swap operation is not supported. Therefore + * switching to 32-bit such that those platforms will work too. + * + * iCRC HSI/SHI + */ +#define DDCB_INTR_BE32 cpu_to_be32(0x00000004) +#define DDCB_PURGE_BE32 cpu_to_be32(0x00000002) +#define DDCB_NEXT_BE32 cpu_to_be32(0x00000001) +#define DDCB_COMPLETED_BE32 cpu_to_be32(0x00004000) +#define DDCB_FETCHED_BE32 cpu_to_be32(0x00000400) + +/* Definitions of DDCB presets */ +#define DDCB_PRESET_PRE 0x80 +#define ICRC_LENGTH(n) ((n) + 8 + 8 + 8) /* used ASIV + hdr fields */ +#define VCRC_LENGTH(n) ((n)) /* used ASV */ + +/* + * Genwqe Scatter Gather list + * Each element has up to 8 entries. + * The chaining element is element 0 cause of prefetching needs. + */ + +/* + * 0b0110 Chained descriptor. The descriptor is describing the next + * descriptor list. + */ +#define SG_CHAINED (0x6) + +/* + * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty + * condition. + */ +#define SG_DATA (0x2) + +/* + * 0b0000 Early terminator. This is the last entry on the list + * irregardless of the length indicated. + */ +#define SG_END_LIST (0x0) + +/** + * struct sglist - Scatter gather list + * @target_addr: Either a dma addr of memory to work on or a + * dma addr or a subsequent sglist block. + * @len: Length of the data block. + * @flags: See above. + * + * Depending on the command the GenWQE card can use a scatter gather + * list to describe the memory it works on. Always 8 sg_entry's form + * a block. 
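+ *
+ * Illustration only (field values assumed from the flag
+ * definitions above, not taken from this patch): in a multi-block
+ * list, entry 0 of a block points at the block that follows it,
+ *
+ *   block0[0] = (struct sg_entry) {
+ *           .target_addr = cpu_to_be64(dma_addr_of_block1),
+ *           .len         = cpu_to_be32(8 * sizeof(struct sg_entry)),
+ *           .flags       = cpu_to_be32(SG_CHAINED),
+ *   };
+ *
+ * while the last used entry of the final block is flagged with
+ * SG_END_LIST.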
+ */ +struct sg_entry { + __be64 target_addr; + __be32 len; + __be32 flags; +}; + +#endif /* __CARD_DDCB_H__ */ diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c new file mode 100644 index 00000000000..0a33ade6410 --- /dev/null +++ b/drivers/misc/genwqe/card_debugfs.c @@ -0,0 +1,499 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Debugfs interfaces for the GenWQE card. Help to debug potential + * problems. Dump internal chip state for debugging and failure + * determination. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> + +#include "card_base.h" +#include "card_ddcb.h" + +#define GENWQE_DEBUGFS_RO(_name, _showfn) \ + static int genwqe_debugfs_##_name##_open(struct inode *inode, \ + struct file *file) \ + { \ + return single_open(file, _showfn, inode->i_private); \ + } \ + static const struct file_operations genwqe_##_name##_fops = { \ + .open = genwqe_debugfs_##_name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs, + int entries) +{ + unsigned int i; + u32 v_hi, v_lo; + + for (i = 0; i < entries; i++) { + v_hi = (regs[i].val >> 32) & 0xffffffff; + v_lo = (regs[i].val) & 0xffffffff; + + seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n", + regs[i].addr, regs[i].idx, v_hi, v_lo); + } +} + +static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ + struct genwqe_dev *cd = s->private; + int entries; + struct genwqe_reg *regs; + + entries = genwqe_ffdc_buff_size(cd, uid); + if (entries < 0) + return -EINVAL; + + if (entries == 0) + return 0; + + regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL); + if (regs == NULL) + return -ENOMEM; + + genwqe_stop_traps(cd); /* halt the traps while dumping data */ + genwqe_ffdc_buff_read(cd, uid, regs, entries); + genwqe_start_traps(cd); + + dbg_uidn_show(s, regs, entries); + kfree(regs); + return 0; +} + +static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 0); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show); + +static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 1); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show); + +static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused) +{ + return curr_dbg_uidn_show(s, unused, 2); +} + +GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show); + +static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid) +{ + struct genwqe_dev *cd = s->private; + + dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries); + return 0; +} + +static int genwqe_prev_dbg_uid0_show(struct 
seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 0); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show); + +static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 1); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show); + +static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused) +{ + return prev_dbg_uidn_show(s, unused, 2); +} + +GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show); + +static int genwqe_curr_regs_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct genwqe_reg *regs; + + regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL); + if (regs == NULL) + return -ENOMEM; + + genwqe_stop_traps(cd); + genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1); + genwqe_start_traps(cd); + + for (i = 0; i < GENWQE_FFDC_REGS; i++) { + if (regs[i].addr == 0xffffffff) + break; /* invalid entries */ + + if (regs[i].val == 0x0ull) + continue; /* do not print 0x0 FIRs */ + + seq_printf(s, " 0x%08x 0x%016llx\n", + regs[i].addr, regs[i].val); + } + return 0; +} + +GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show); + +static int genwqe_prev_regs_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs; + + if (regs == NULL) + return -EINVAL; + + for (i = 0; i < GENWQE_FFDC_REGS; i++) { + if (regs[i].addr == 0xffffffff) + break; /* invalid entries */ + + if (regs[i].val == 0x0ull) + continue; /* do not print 0x0 FIRs */ + + seq_printf(s, " 0x%08x 0x%016llx\n", + regs[i].addr, regs[i].val); + } + return 0; +} + +GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show); + +static int genwqe_jtimer_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int vf_num; + u64 jtimer; + + jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0); + seq_printf(s, " PF 0x%016llx %d msec\n", jtimer, + genwqe_pf_jobtimeout_msec); + + for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { + jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, + vf_num + 1); + seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer, + cd->vf_jobtimeout_msec[vf_num]); + } + return 0; +} + +GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show); + +static int genwqe_queue_working_time_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int vf_num; + u64 t; + + t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0); + seq_printf(s, " PF 0x%016llx\n", t); + + for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) { + t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1); + seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t); + } + return 0; +} + +GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show); + +static int genwqe_ddcb_info_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + unsigned int i; + struct ddcb_queue *queue; + struct ddcb *pddcb; + + queue = &cd->queue; + seq_puts(s, "DDCB QUEUE:\n"); + seq_printf(s, " ddcb_max: %d\n" + " ddcb_daddr: %016llx - %016llx\n" + " ddcb_vaddr: %016llx\n" + " ddcbs_in_flight: %u\n" + " ddcbs_max_in_flight: %u\n" + " ddcbs_completed: %u\n" + " busy: %u\n" + " irqs_processed: %u\n", + queue->ddcb_max, (long long)queue->ddcb_daddr, + (long long)queue->ddcb_daddr + + (queue->ddcb_max * DDCB_LENGTH), + (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, + queue->ddcbs_max_in_flight, queue->ddcbs_completed, + 
queue->busy, cd->irqs_processed); + + /* Hardware State */ + seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n" + " 0x%08x 0x%016llx IO_QUEUE_STATUS\n" + " 0x%08x 0x%016llx IO_QUEUE_SEGMENT\n" + " 0x%08x 0x%016llx IO_QUEUE_INITSQN\n" + " 0x%08x 0x%016llx IO_QUEUE_WRAP\n" + " 0x%08x 0x%016llx IO_QUEUE_OFFSET\n" + " 0x%08x 0x%016llx IO_QUEUE_WTIME\n" + " 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n" + " 0x%08x 0x%016llx IO_QUEUE_LRW\n", + queue->IO_QUEUE_CONFIG, + __genwqe_readq(cd, queue->IO_QUEUE_CONFIG), + queue->IO_QUEUE_STATUS, + __genwqe_readq(cd, queue->IO_QUEUE_STATUS), + queue->IO_QUEUE_SEGMENT, + __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT), + queue->IO_QUEUE_INITSQN, + __genwqe_readq(cd, queue->IO_QUEUE_INITSQN), + queue->IO_QUEUE_WRAP, + __genwqe_readq(cd, queue->IO_QUEUE_WRAP), + queue->IO_QUEUE_OFFSET, + __genwqe_readq(cd, queue->IO_QUEUE_OFFSET), + queue->IO_QUEUE_WTIME, + __genwqe_readq(cd, queue->IO_QUEUE_WTIME), + queue->IO_QUEUE_ERRCNTS, + __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS), + queue->IO_QUEUE_LRW, + __genwqe_readq(cd, queue->IO_QUEUE_LRW)); + + seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n", + queue->ddcb_act, queue->ddcb_next); + + pddcb = queue->ddcb_vaddr; + for (i = 0; i < queue->ddcb_max; i++) { + seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ", + i, be16_to_cpu(pddcb->retc_16), + be16_to_cpu(pddcb->seqnum_16), + pddcb->hsi, pddcb->shi); + seq_printf(s, "PRIV=%06llx CMD=%02x\n", + be64_to_cpu(pddcb->priv_64), pddcb->cmd); + pddcb++; + } + return 0; +} + +GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show); + +static int genwqe_info_show(struct seq_file *s, void *unused) +{ + struct genwqe_dev *cd = s->private; + u16 val16, type; + u64 app_id, slu_id, bitstream = -1; + struct pci_dev *pci_dev = cd->pci_dev; + + slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); + app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + + if (genwqe_is_privileged(cd)) + bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM); + + val16 = (u16)(slu_id & 0x0fLLU); + type = (u16)((slu_id >> 20) & 0xffLLU); + + seq_printf(s, "%s driver version: %s\n" + " Device Name/Type: %s %s CardIdx: %d\n" + " SLU/APP Config : 0x%016llx/0x%016llx\n" + " Build Date : %u/%x/%u\n" + " Base Clock : %u MHz\n" + " Arch/SVN Release: %u/%llx\n" + " Bitstream : %llx\n", + GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev), + genwqe_is_privileged(cd) ? 
+ "Physical" : "Virtual or no SR-IOV", + cd->card_idx, slu_id, app_id, + (u16)((slu_id >> 12) & 0x0fLLU), /* month */ + (u16)((slu_id >> 4) & 0xffLLU), /* day */ + (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */ + genwqe_base_clock_frequency(cd), + (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40, + bitstream); + + return 0; +} + +GENWQE_DEBUGFS_RO(info, genwqe_info_show); + +int genwqe_init_debugfs(struct genwqe_dev *cd) +{ + struct dentry *root; + struct dentry *file; + int ret; + char card_name[64]; + char name[64]; + unsigned int i; + + sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx); + + root = debugfs_create_dir(card_name, cd->debugfs_genwqe); + if (!root) { + ret = -ENOMEM; + goto err0; + } + + /* non privileged interfaces are done here */ + file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd, + &genwqe_ddcb_info_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("info", S_IRUGO, root, cd, + &genwqe_info_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_x64("err_inject", 0666, root, &cd->err_inject); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_u32("ddcb_software_timeout", 0666, root, + &cd->ddcb_software_timeout); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_u32("kill_timeout", 0666, root, + &cd->kill_timeout); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + /* privileged interfaces follow here */ + if (!genwqe_is_privileged(cd)) { + cd->debugfs_root = root; + return 0; + } + + file = debugfs_create_file("curr_regs", S_IRUGO, root, cd, + &genwqe_curr_regs_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd, + &genwqe_curr_dbg_uid0_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd, + &genwqe_curr_dbg_uid1_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd, + &genwqe_curr_dbg_uid2_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_regs", S_IRUGO, root, cd, + &genwqe_prev_regs_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd, + &genwqe_prev_dbg_uid0_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd, + &genwqe_prev_dbg_uid1_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd, + &genwqe_prev_dbg_uid2_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + for (i = 0; i < GENWQE_MAX_VFS; i++) { + sprintf(name, "vf%u_jobtimeout_msec", i); + + file = debugfs_create_u32(name, 0666, root, + &cd->vf_jobtimeout_msec[i]); + if (!file) { + ret = -ENOMEM; + goto err1; + } + } + + file = debugfs_create_file("jobtimer", S_IRUGO, root, cd, + &genwqe_jtimer_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd, + &genwqe_queue_working_time_fops); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + file = debugfs_create_u32("skip_recovery", 0666, root, + &cd->skip_recovery); + if (!file) { + ret = -ENOMEM; + goto err1; + } + + cd->debugfs_root = root; + return 0; +err1: + debugfs_remove_recursive(root); +err0: + return ret; +} + +void genqwe_exit_debugfs(struct genwqe_dev *cd) +{ + 
debugfs_remove_recursive(cd->debugfs_root); +} diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c new file mode 100644 index 00000000000..1d2f163a190 --- /dev/null +++ b/drivers/misc/genwqe/card_dev.c @@ -0,0 +1,1403 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Character device representation of the GenWQE device. This allows + * user-space applications to communicate with the card. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/atomic.h> + +#include "card_base.h" +#include "card_ddcb.h" + +static int genwqe_open_files(struct genwqe_dev *cd) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&cd->file_lock, flags); + rc = list_empty(&cd->file_list); + spin_unlock_irqrestore(&cd->file_lock, flags); + return !rc; +} + +static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ + unsigned long flags; + + cfile->owner = current; + spin_lock_irqsave(&cd->file_lock, flags); + list_add(&cfile->list, &cd->file_list); + spin_unlock_irqrestore(&cd->file_lock, flags); +} + +static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile) +{ + unsigned long flags; + + spin_lock_irqsave(&cd->file_lock, flags); + list_del(&cfile->list); + spin_unlock_irqrestore(&cd->file_lock, flags); + + return 0; +} + +static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->pin_lock, flags); + list_add(&m->pin_list, &cfile->pin_list); + spin_unlock_irqrestore(&cfile->pin_lock, flags); +} + +static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->pin_lock, flags); + list_del(&m->pin_list); + spin_unlock_irqrestore(&cfile->pin_lock, flags); + + return 0; +} + +/** + * genwqe_search_pin() - Search for the mapping for a userspace address + * @cfile: Descriptor of opened file + * @u_addr: User virtual address + * @size: Size of buffer + * @dma_addr: DMA address to be updated + * + * Return: Pointer to the corresponding mapping NULL if not found + */ +static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile, + unsigned long u_addr, + unsigned int size, + void **virt_addr) +{ + unsigned long flags; + struct dma_mapping *m; + + spin_lock_irqsave(&cfile->pin_lock, flags); + + list_for_each_entry(m, &cfile->pin_list, pin_list) { + if ((((u64)m->u_vaddr) <= (u_addr)) && + (((u64)m->u_vaddr + m->size) >= (u_addr + size))) { + + if (virt_addr) + *virt_addr = m->k_vaddr + + (u_addr - (u64)m->u_vaddr); + + spin_unlock_irqrestore(&cfile->pin_lock, flags); + return m; + } + } + 
spin_unlock_irqrestore(&cfile->pin_lock, flags); + return NULL; +} + +static void __genwqe_add_mapping(struct genwqe_file *cfile, + struct dma_mapping *dma_map) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_add(&dma_map->card_list, &cfile->map_list); + spin_unlock_irqrestore(&cfile->map_lock, flags); +} + +static void __genwqe_del_mapping(struct genwqe_file *cfile, + struct dma_mapping *dma_map) +{ + unsigned long flags; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_del(&dma_map->card_list); + spin_unlock_irqrestore(&cfile->map_lock, flags); +} + + +/** + * __genwqe_search_mapping() - Search for the mapping for a userspace address + * @cfile: descriptor of opened file + * @u_addr: user virtual address + * @size: size of buffer + * @dma_addr: DMA address to be updated + * Return: Pointer to the corresponding mapping NULL if not found + */ +static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile, + unsigned long u_addr, + unsigned int size, + dma_addr_t *dma_addr, + void **virt_addr) +{ + unsigned long flags; + struct dma_mapping *m; + struct pci_dev *pci_dev = cfile->cd->pci_dev; + + spin_lock_irqsave(&cfile->map_lock, flags); + list_for_each_entry(m, &cfile->map_list, card_list) { + + if ((((u64)m->u_vaddr) <= (u_addr)) && + (((u64)m->u_vaddr + m->size) >= (u_addr + size))) { + + /* match found: current is as expected and + addr is in range */ + if (dma_addr) + *dma_addr = m->dma_addr + + (u_addr - (u64)m->u_vaddr); + + if (virt_addr) + *virt_addr = m->k_vaddr + + (u_addr - (u64)m->u_vaddr); + + spin_unlock_irqrestore(&cfile->map_lock, flags); + return m; + } + } + spin_unlock_irqrestore(&cfile->map_lock, flags); + + dev_err(&pci_dev->dev, + "[%s] Entry not found: u_addr=%lx, size=%x\n", + __func__, u_addr, size); + + return NULL; +} + +static void genwqe_remove_mappings(struct genwqe_file *cfile) +{ + int i = 0; + struct list_head *node, *next; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cfile->cd->pci_dev; + + list_for_each_safe(node, next, &cfile->map_list) { + dma_map = list_entry(node, struct dma_mapping, card_list); + + list_del_init(&dma_map->card_list); + + /* + * This is really a bug, because those things should + * have been already tidied up. + * + * GENWQE_MAPPING_RAW should have been removed via mmunmap(). + * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code. + */ + dev_err(&pci_dev->dev, + "[%s] %d. cleanup mapping: u_vaddr=%p " + "u_kaddr=%016lx dma_addr=%lx\n", __func__, i++, + dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr, + (unsigned long)dma_map->dma_addr); + + if (dma_map->type == GENWQE_MAPPING_RAW) { + /* we allocated this dynamically */ + __genwqe_free_consistent(cd, dma_map->size, + dma_map->k_vaddr, + dma_map->dma_addr); + kfree(dma_map); + } else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) { + /* we use dma_map statically from the request */ + genwqe_user_vunmap(cd, dma_map, NULL); + } + } +} + +static void genwqe_remove_pinnings(struct genwqe_file *cfile) +{ + struct list_head *node, *next; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = cfile->cd; + + list_for_each_safe(node, next, &cfile->pin_list) { + dma_map = list_entry(node, struct dma_mapping, pin_list); + + /* + * This is not a bug, because a killed processed might + * not call the unpin ioctl, which is supposed to free + * the resources. + * + * Pinnings are dymically allocated and need to be + * deleted. 
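+	 * Unlike the temporary mappings above, the dma_mapping
+	 * structs on the pin_list were allocated by the pin ioctl
+	 * itself and are therefore kfree()ed here after unmapping.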
+ */ + list_del_init(&dma_map->pin_list); + genwqe_user_vunmap(cd, dma_map, NULL); + kfree(dma_map); + } +} + +/** + * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files + * + * E.g. genwqe_send_signal(cd, SIGIO); + */ +static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig) +{ + unsigned int files = 0; + unsigned long flags; + struct genwqe_file *cfile; + + spin_lock_irqsave(&cd->file_lock, flags); + list_for_each_entry(cfile, &cd->file_list, list) { + if (cfile->async_queue) + kill_fasync(&cfile->async_queue, sig, POLL_HUP); + files++; + } + spin_unlock_irqrestore(&cd->file_lock, flags); + return files; +} + +static int genwqe_force_sig(struct genwqe_dev *cd, int sig) +{ + unsigned int files = 0; + unsigned long flags; + struct genwqe_file *cfile; + + spin_lock_irqsave(&cd->file_lock, flags); + list_for_each_entry(cfile, &cd->file_list, list) { + force_sig(sig, cfile->owner); + files++; + } + spin_unlock_irqrestore(&cd->file_lock, flags); + return files; +} + +/** + * genwqe_open() - file open + * @inode: file system information + * @filp: file handle + * + * This function is executed whenever an application calls + * open("/dev/genwqe",..). + * + * Return: 0 if successful or <0 if errors + */ +static int genwqe_open(struct inode *inode, struct file *filp) +{ + struct genwqe_dev *cd; + struct genwqe_file *cfile; + struct pci_dev *pci_dev; + + cfile = kzalloc(sizeof(*cfile), GFP_KERNEL); + if (cfile == NULL) + return -ENOMEM; + + cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe); + pci_dev = cd->pci_dev; + cfile->cd = cd; + cfile->filp = filp; + cfile->client = NULL; + + spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */ + INIT_LIST_HEAD(&cfile->map_list); + + spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */ + INIT_LIST_HEAD(&cfile->pin_list); + + filp->private_data = cfile; + + genwqe_add_file(cd, cfile); + return 0; +} + +/** + * genwqe_fasync() - Setup process to receive SIGIO. + * @fd: file descriptor + * @filp: file handle + * @mode: file mode + * + * Sending a signal is working as following: + * + * if (cdev->async_queue) + * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN); + * + * Some devices also implement asynchronous notification to indicate + * when the device can be written; in this case, of course, + * kill_fasync must be called with a mode of POLL_OUT. + */ +static int genwqe_fasync(int fd, struct file *filp, int mode) +{ + struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data; + return fasync_helper(fd, filp, mode, &cdev->async_queue); +} + + +/** + * genwqe_release() - file close + * @inode: file system information + * @filp: file handle + * + * This function is executed whenever an application calls 'close(fd_genwqe)' + * + * Return: always 0 + */ +static int genwqe_release(struct inode *inode, struct file *filp) +{ + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + + /* there must be no entries in these lists! */ + genwqe_remove_mappings(cfile); + genwqe_remove_pinnings(cfile); + + /* remove this filp from the asynchronously notified filp's */ + genwqe_fasync(-1, filp, 0); + + /* + * For this to work we must not release cd when this cfile is + * not yet released, otherwise the list entry is invalid, + * because the list itself gets reinstantiated! + */ + genwqe_del_file(cd, cfile); + kfree(cfile); + return 0; +} + +static void genwqe_vma_open(struct vm_area_struct *vma) +{ + /* nothing ... 
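to do here; the allocation is done in genwqe_mmap() and the
+	   cleanup in genwqe_vma_close()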
*/ +} + +/** + * genwqe_vma_close() - Called each time when vma is unmapped + * + * Free memory which got allocated by GenWQE mmap(). + */ +static void genwqe_vma_close(struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + struct inode *inode = vma->vm_file->f_dentry->d_inode; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev, + cdev_genwqe); + struct pci_dev *pci_dev = cd->pci_dev; + dma_addr_t d_addr = 0; + struct genwqe_file *cfile = vma->vm_private_data; + + dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize, + &d_addr, NULL); + if (dma_map == NULL) { + dev_err(&pci_dev->dev, + " [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n", + __func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, + vsize); + return; + } + __genwqe_del_mapping(cfile, dma_map); + __genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr, + dma_map->dma_addr); + kfree(dma_map); +} + +static struct vm_operations_struct genwqe_vma_ops = { + .open = genwqe_vma_open, + .close = genwqe_vma_close, +}; + +/** + * genwqe_mmap() - Provide contignous buffers to userspace + * + * We use mmap() to allocate contignous buffers used for DMA + * transfers. After the buffer is allocated we remap it to user-space + * and remember a reference to our dma_mapping data structure, where + * we store the associated DMA address and allocated size. + * + * When we receive a DDCB execution request with the ATS bits set to + * plain buffer, we lookup our dma_mapping list to find the + * corresponding DMA address for the associated user-space address. + */ +static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int rc; + unsigned long pfn, vsize = vma->vm_end - vma->vm_start; + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + struct dma_mapping *dma_map; + + if (vsize == 0) + return -EINVAL; + + if (get_order(vsize) > MAX_ORDER) + return -ENOMEM; + + dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); + if (dma_map == NULL) + return -ENOMEM; + + genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW); + dma_map->u_vaddr = (void *)vma->vm_start; + dma_map->size = vsize; + dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE); + dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize, + &dma_map->dma_addr); + if (dma_map->k_vaddr == NULL) { + rc = -ENOMEM; + goto free_dma_map; + } + + if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t))) + *(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr; + + pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT; + rc = remap_pfn_range(vma, + vma->vm_start, + pfn, + vsize, + vma->vm_page_prot); + if (rc != 0) { + rc = -EFAULT; + goto free_dma_mem; + } + + vma->vm_private_data = cfile; + vma->vm_ops = &genwqe_vma_ops; + __genwqe_add_mapping(cfile, dma_map); + + return 0; + + free_dma_mem: + __genwqe_free_consistent(cd, dma_map->size, + dma_map->k_vaddr, + dma_map->dma_addr); + free_dma_map: + kfree(dma_map); + return rc; +} + +/** + * do_flash_update() - Excute flash update (write image or CVPD) + * @cd: genwqe device + * @load: details about image load + * + * Return: 0 if successful + */ + +#define FLASH_BLOCK 0x40000 /* we use 256k blocks */ + +static int do_flash_update(struct genwqe_file *cfile, + struct genwqe_bitstream *load) +{ + int rc = 0; + int blocks_to_flash; + dma_addr_t dma_addr; + u64 flash = 0; + size_t tocopy = 0; + u8 __user *buf; + u8 *xbuf; + u32 crc; + u8 cmdopts; + struct genwqe_dev *cd = cfile->cd; + struct 
pci_dev *pci_dev = cd->pci_dev; + + if ((load->size & 0x3) != 0) + return -EINVAL; + + if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) + return -EINVAL; + + /* FIXME Bits have changed for new service layer! */ + switch ((char)load->partition) { + case '0': + cmdopts = 0x14; + break; /* download/erase_first/part_0 */ + case '1': + cmdopts = 0x1C; + break; /* download/erase_first/part_1 */ + case 'v': + cmdopts = 0x0C; + break; /* download/erase_first/vpd */ + default: + return -EINVAL; + } + + buf = (u8 __user *)load->data_addr; + xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); + if (xbuf == NULL) + return -ENOMEM; + + blocks_to_flash = load->size / FLASH_BLOCK; + while (load->size) { + struct genwqe_ddcb_cmd *req; + + /* + * We must be 4 byte aligned. Buffer must be 0 appened + * to have defined values when calculating CRC. + */ + tocopy = min_t(size_t, load->size, FLASH_BLOCK); + + rc = copy_from_user(xbuf, buf, tocopy); + if (rc) { + rc = -EFAULT; + goto free_buffer; + } + crc = genwqe_crc32(xbuf, tocopy, 0xffffffff); + + dev_dbg(&pci_dev->dev, + "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n", + __func__, (unsigned long)dma_addr, crc, tocopy, + blocks_to_flash); + + /* prepare DDCB for SLU process */ + req = ddcb_requ_alloc(); + if (req == NULL) { + rc = -ENOMEM; + goto free_buffer; + } + + req->cmd = SLCMD_MOVE_FLASH; + req->cmdopts = cmdopts; + + /* prepare invariant values */ + if (genwqe_get_slu_id(cd) <= 0x2) { + *(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr); + *(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy); + *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash); + *(__be32 *)&req->__asiv[24] = cpu_to_be32(0); + req->__asiv[24] = load->uid; + *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc); + + /* for simulation only */ + *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id); + *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id); + req->asiv_length = 32; /* bytes included in crc calc */ + } else { /* setup DDCB for ATS architecture */ + *(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr); + *(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy); + *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */ + *(__be64 *)&req->asiv[16] = cpu_to_be64(flash); + *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24); + *(__be32 *)&req->asiv[28] = cpu_to_be32(crc); + + /* for simulation only */ + *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id); + *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id); + + /* Rd only */ + req->ats = 0x4ULL << 44; + req->asiv_length = 40; /* bytes included in crc calc */ + } + req->asv_length = 8; + + /* For Genwqe5 we get back the calculated CRC */ + *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */ + + rc = __genwqe_execute_raw_ddcb(cd, req); + + load->retc = req->retc; + load->attn = req->attn; + load->progress = req->progress; + + if (rc < 0) { + ddcb_requ_free(req); + goto free_buffer; + } + + if (req->retc != DDCB_RETC_COMPLETE) { + rc = -EIO; + ddcb_requ_free(req); + goto free_buffer; + } + + load->size -= tocopy; + flash += tocopy; + buf += tocopy; + blocks_to_flash--; + ddcb_requ_free(req); + } + + free_buffer: + __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); + return rc; +} + +static int do_flash_read(struct genwqe_file *cfile, + struct genwqe_bitstream *load) +{ + int rc, blocks_to_flash; + dma_addr_t dma_addr; + u64 flash = 0; + size_t tocopy = 0; + u8 __user *buf; + u8 *xbuf; + u8 cmdopts; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cd->pci_dev; + struct genwqe_ddcb_cmd *cmd; + + if ((load->size & 0x3) != 0) 
+ return -EINVAL; + + if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) + return -EINVAL; + + /* FIXME Bits have changed for new service layer! */ + switch ((char)load->partition) { + case '0': + cmdopts = 0x12; + break; /* upload/part_0 */ + case '1': + cmdopts = 0x1A; + break; /* upload/part_1 */ + case 'v': + cmdopts = 0x0A; + break; /* upload/vpd */ + default: + return -EINVAL; + } + + buf = (u8 __user *)load->data_addr; + xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr); + if (xbuf == NULL) + return -ENOMEM; + + blocks_to_flash = load->size / FLASH_BLOCK; + while (load->size) { + /* + * We must be 4 byte aligned. Buffer must be 0 appened + * to have defined values when calculating CRC. + */ + tocopy = min_t(size_t, load->size, FLASH_BLOCK); + + dev_dbg(&pci_dev->dev, + "[%s] DMA: %lx SZ: %ld %d\n", + __func__, (unsigned long)dma_addr, tocopy, + blocks_to_flash); + + /* prepare DDCB for SLU process */ + cmd = ddcb_requ_alloc(); + if (cmd == NULL) { + rc = -ENOMEM; + goto free_buffer; + } + cmd->cmd = SLCMD_MOVE_FLASH; + cmd->cmdopts = cmdopts; + + /* prepare invariant values */ + if (genwqe_get_slu_id(cd) <= 0x2) { + *(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr); + *(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy); + *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash); + *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0); + cmd->__asiv[24] = load->uid; + *(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */; + cmd->asiv_length = 32; /* bytes included in crc calc */ + } else { /* setup DDCB for ATS architecture */ + *(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr); + *(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy); + *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */ + *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash); + *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24); + *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */ + + /* rd/wr */ + cmd->ats = 0x5ULL << 44; + cmd->asiv_length = 40; /* bytes included in crc calc */ + } + cmd->asv_length = 8; + + /* we only get back the calculated CRC */ + *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */ + + rc = __genwqe_execute_raw_ddcb(cd, cmd); + + load->retc = cmd->retc; + load->attn = cmd->attn; + load->progress = cmd->progress; + + if ((rc < 0) && (rc != -EBADMSG)) { + ddcb_requ_free(cmd); + goto free_buffer; + } + + rc = copy_to_user(buf, xbuf, tocopy); + if (rc) { + rc = -EFAULT; + ddcb_requ_free(cmd); + goto free_buffer; + } + + /* We know that we can get retc 0x104 with CRC err */ + if (((cmd->retc == DDCB_RETC_FAULT) && + (cmd->attn != 0x02)) || /* Normally ignore CRC error */ + ((cmd->retc == DDCB_RETC_COMPLETE) && + (cmd->attn != 0x00))) { /* Everything was fine */ + rc = -EIO; + ddcb_requ_free(cmd); + goto free_buffer; + } + + load->size -= tocopy; + flash += tocopy; + buf += tocopy; + blocks_to_flash--; + ddcb_requ_free(cmd); + } + rc = 0; + + free_buffer: + __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr); + return rc; +} + +static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ + int rc; + struct genwqe_dev *cd = cfile->cd; + struct pci_dev *pci_dev = cfile->cd->pci_dev; + struct dma_mapping *dma_map; + unsigned long map_addr; + unsigned long map_size; + + if ((m->addr == 0x0) || (m->size == 0)) + return -EINVAL; + + map_addr = (m->addr & PAGE_MASK); + map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + + dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); + if (dma_map == NULL) + return -ENOMEM; + + genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED); + 
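/*
+	 * Pin the user buffer pages and build the list of DMA
+	 * addresses for them; the pinning stays in place until
+	 * GENWQE_UNPIN_MEM is called or the file is closed.
+	 */
+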
rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL); + if (rc != 0) { + dev_err(&pci_dev->dev, + "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); + kfree(dma_map); + return rc; + } + + genwqe_add_pin(cfile, dma_map); + return 0; +} + +static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) +{ + struct genwqe_dev *cd = cfile->cd; + struct dma_mapping *dma_map; + unsigned long map_addr; + unsigned long map_size; + + if (m->addr == 0x0) + return -EINVAL; + + map_addr = (m->addr & PAGE_MASK); + map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); + + dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL); + if (dma_map == NULL) + return -ENOENT; + + genwqe_del_pin(cfile, dma_map); + genwqe_user_vunmap(cd, dma_map, NULL); + kfree(dma_map); + return 0; +} + +/** + * ddcb_cmd_cleanup() - Remove dynamically created fixup entries + * + * Only if there are any. Pinnings are not removed. + */ +static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req) +{ + unsigned int i; + struct dma_mapping *dma_map; + struct genwqe_dev *cd = cfile->cd; + + for (i = 0; i < DDCB_FIXUPS; i++) { + dma_map = &req->dma_mappings[i]; + + if (dma_mapping_used(dma_map)) { + __genwqe_del_mapping(cfile, dma_map); + genwqe_user_vunmap(cd, dma_map, req); + } + if (req->sgls[i].sgl != NULL) + genwqe_free_sync_sgl(cd, &req->sgls[i]); + } + return 0; +} + +/** + * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references + * + * Before the DDCB gets executed we need to handle the fixups. We + * replace the user-space addresses with DMA addresses or do + * additional setup work e.g. generating a scatter-gather list which + * is used to describe the memory referred to in the fixup. + */ +static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) +{ + int rc; + unsigned int asiv_offs, i; + struct genwqe_dev *cd = cfile->cd; + struct genwqe_ddcb_cmd *cmd = &req->cmd; + struct dma_mapping *m; + const char *type = "UNKNOWN"; + + for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58; + i++, asiv_offs += 0x08) { + + u64 u_addr; + dma_addr_t d_addr; + u32 u_size = 0; + u64 ats_flags; + + ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs); + + switch (ats_flags) { + + case ATS_TYPE_DATA: + break; /* nothing to do here */ + + case ATS_TYPE_FLAT_RDWR: + case ATS_TYPE_FLAT_RD: { + u_addr = be64_to_cpu(*((__be64 *)&cmd-> + asiv[asiv_offs])); + u_size = be32_to_cpu(*((__be32 *)&cmd-> + asiv[asiv_offs + 0x08])); + + /* + * No data available. Ignore u_addr in this + * case and set addr to 0. Hardware must not + * fetch the buffer. + */ + if (u_size == 0x0) { + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(0x0); + break; + } + + m = __genwqe_search_mapping(cfile, u_addr, u_size, + &d_addr, NULL); + if (m == NULL) { + rc = -EFAULT; + goto err_out; + } + + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(d_addr); + break; + } + + case ATS_TYPE_SGL_RDWR: + case ATS_TYPE_SGL_RD: { + int page_offs; + + u_addr = be64_to_cpu(*((__be64 *) + &cmd->asiv[asiv_offs])); + u_size = be32_to_cpu(*((__be32 *) + &cmd->asiv[asiv_offs + 0x08])); + + /* + * No data available. Ignore u_addr in this + * case and set addr to 0. Hardware must not + * fetch the empty sgl. 
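+			 * (Layout mirrors the flat case above: the
+			 * 8-byte user address sits at asiv_offs, its
+			 * 4-byte length at asiv_offs + 0x08.)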
+ */ + if (u_size == 0x0) { + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(0x0); + break; + } + + m = genwqe_search_pin(cfile, u_addr, u_size, NULL); + if (m != NULL) { + type = "PINNING"; + page_offs = (u_addr - + (u64)m->u_vaddr)/PAGE_SIZE; + } else { + type = "MAPPING"; + m = &req->dma_mappings[i]; + + genwqe_mapping_init(m, + GENWQE_MAPPING_SGL_TEMP); + rc = genwqe_user_vmap(cd, m, (void *)u_addr, + u_size, req); + if (rc != 0) + goto err_out; + + __genwqe_add_mapping(cfile, m); + page_offs = 0; + } + + /* create genwqe style scatter gather list */ + rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i], + (void __user *)u_addr, + u_size); + if (rc != 0) + goto err_out; + + genwqe_setup_sgl(cd, &req->sgls[i], + &m->dma_list[page_offs]); + + *((__be64 *)&cmd->asiv[asiv_offs]) = + cpu_to_be64(req->sgls[i].sgl_dma_addr); + + break; + } + default: + rc = -EINVAL; + goto err_out; + } + } + return 0; + + err_out: + ddcb_cmd_cleanup(cfile, req); + return rc; +} + +/** + * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups + * + * The code will build up the translation tables or lookup the + * contignous memory allocation table to find the right translations + * and DMA addresses. + */ +static int genwqe_execute_ddcb(struct genwqe_file *cfile, + struct genwqe_ddcb_cmd *cmd) +{ + int rc; + struct genwqe_dev *cd = cfile->cd; + struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); + + rc = ddcb_cmd_fixups(cfile, req); + if (rc != 0) + return rc; + + rc = __genwqe_execute_raw_ddcb(cd, cmd); + ddcb_cmd_cleanup(cfile, req); + return rc; +} + +static int do_execute_ddcb(struct genwqe_file *cfile, + unsigned long arg, int raw) +{ + int rc; + struct genwqe_ddcb_cmd *cmd; + struct ddcb_requ *req; + struct genwqe_dev *cd = cfile->cd; + + cmd = ddcb_requ_alloc(); + if (cmd == NULL) + return -ENOMEM; + + req = container_of(cmd, struct ddcb_requ, cmd); + + if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) { + ddcb_requ_free(cmd); + return -EFAULT; + } + + if (!raw) + rc = genwqe_execute_ddcb(cfile, cmd); + else + rc = __genwqe_execute_raw_ddcb(cd, cmd); + + /* Copy back only the modifed fields. Do not copy ASIV + back since the copy got modified by the driver. 
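+	   Hence the sizeof(*cmd) - DDCB_ASIV_LENGTH below, which stops
+	   the copy just short of the ASIV area at the end of the
+	   structure.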
*/ + if (copy_to_user((void __user *)arg, cmd, + sizeof(*cmd) - DDCB_ASIV_LENGTH)) { + ddcb_requ_free(cmd); + return -EFAULT; + } + + ddcb_requ_free(cmd); + return rc; +} + +/** + * genwqe_ioctl() - IO control + * @filp: file handle + * @cmd: command identifier (passed from user) + * @arg: argument (passed from user) + * + * Return: 0 success + */ +static long genwqe_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; + struct genwqe_dev *cd = cfile->cd; + struct genwqe_reg_io __user *io; + u64 val; + u32 reg_offs; + + if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE) + return -EINVAL; + + switch (cmd) { + + case GENWQE_GET_CARD_STATE: + put_user(cd->card_state, (enum genwqe_card_state __user *)arg); + return 0; + + /* Register access */ + case GENWQE_READ_REG64: { + io = (struct genwqe_reg_io __user *)arg; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) + return -EINVAL; + + val = __genwqe_readq(cd, reg_offs); + put_user(val, &io->val64); + return 0; + } + + case GENWQE_WRITE_REG64: { + io = (struct genwqe_reg_io __user *)arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7)) + return -EINVAL; + + if (get_user(val, &io->val64)) + return -EFAULT; + + __genwqe_writeq(cd, reg_offs, val); + return 0; + } + + case GENWQE_READ_REG32: { + io = (struct genwqe_reg_io __user *)arg; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) + return -EINVAL; + + val = __genwqe_readl(cd, reg_offs); + put_user(val, &io->val64); + return 0; + } + + case GENWQE_WRITE_REG32: { + io = (struct genwqe_reg_io __user *)arg; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (get_user(reg_offs, &io->num)) + return -EFAULT; + + if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3)) + return -EINVAL; + + if (get_user(val, &io->val64)) + return -EFAULT; + + __genwqe_writel(cd, reg_offs, val); + return 0; + } + + /* Flash update/reading */ + case GENWQE_SLU_UPDATE: { + struct genwqe_bitstream load; + + if (!genwqe_is_privileged(cd)) + return -EPERM; + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) + return -EPERM; + + if (copy_from_user(&load, (void __user *)arg, + sizeof(load))) + return -EFAULT; + + rc = do_flash_update(cfile, &load); + + if (copy_to_user((void __user *)arg, &load, sizeof(load))) + return -EFAULT; + + return rc; + } + + case GENWQE_SLU_READ: { + struct genwqe_bitstream load; + + if (!genwqe_is_privileged(cd)) + return -EPERM; + + if (genwqe_flash_readback_fails(cd)) + return -ENOSPC; /* known to fail for old versions */ + + if (copy_from_user(&load, (void __user *)arg, sizeof(load))) + return -EFAULT; + + rc = do_flash_read(cfile, &load); + + if (copy_to_user((void __user *)arg, &load, sizeof(load))) + return -EFAULT; + + return rc; + } + + /* memory pinning and unpinning */ + case GENWQE_PIN_MEM: { + struct genwqe_mem m; + + if (copy_from_user(&m, (void __user *)arg, sizeof(m))) + return -EFAULT; + + return genwqe_pin_mem(cfile, &m); + } + + case GENWQE_UNPIN_MEM: { + struct genwqe_mem m; + + if (copy_from_user(&m, (void __user *)arg, sizeof(m))) + return -EFAULT; + + return genwqe_unpin_mem(cfile, &m); + } + + /* launch an DDCB and wait for 
completion */ + case GENWQE_EXECUTE_DDCB: + return do_execute_ddcb(cfile, arg, 0); + + case GENWQE_EXECUTE_RAW_DDCB: { + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return do_execute_ddcb(cfile, arg, 1); + } + + default: + return -EINVAL; + } + + return rc; +} + +#if defined(CONFIG_COMPAT) +/** + * genwqe_compat_ioctl() - Compatibility ioctl + * + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/genwqe<n>_card. + * + * @filp: file pointer. + * @cmd: command. + * @arg: user argument. + * Return: zero on success or negative number on failure. + */ +static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return genwqe_ioctl(filp, cmd, arg); +} +#endif /* defined(CONFIG_COMPAT) */ + +static const struct file_operations genwqe_fops = { + .owner = THIS_MODULE, + .open = genwqe_open, + .fasync = genwqe_fasync, + .mmap = genwqe_mmap, + .unlocked_ioctl = genwqe_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = genwqe_compat_ioctl, +#endif + .release = genwqe_release, +}; + +static int genwqe_device_initialized(struct genwqe_dev *cd) +{ + return cd->dev != NULL; +} + +/** + * genwqe_device_create() - Create and configure genwqe char device + * @cd: genwqe device descriptor + * + * This function must be called before we create any more genwqe + * character devices, because it is allocating the major and minor + * number which are supposed to be used by the client drivers. + */ +int genwqe_device_create(struct genwqe_dev *cd) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + /* + * Here starts the individual setup per client. It must + * initialize its own cdev data structure with its own fops. + * The appropriate devnum needs to be created. The ranges must + * not overlap. + */ + rc = alloc_chrdev_region(&cd->devnum_genwqe, 0, + GENWQE_MAX_MINOR, GENWQE_DEVNAME); + if (rc < 0) { + dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n"); + goto err_dev; + } + + cdev_init(&cd->cdev_genwqe, &genwqe_fops); + cd->cdev_genwqe.owner = THIS_MODULE; + + rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1); + if (rc < 0) { + dev_err(&pci_dev->dev, "err: cdev_add failed\n"); + goto err_add; + } + + /* + * Finally the device in /dev/... must be created. The rule is + * to use card%d_clientname for each created device. + */ + cd->dev = device_create_with_groups(cd->class_genwqe, + &cd->pci_dev->dev, + cd->devnum_genwqe, cd, + genwqe_attribute_groups, + GENWQE_DEVNAME "%u_card", + cd->card_idx); + if (IS_ERR(cd->dev)) { + rc = PTR_ERR(cd->dev); + goto err_cdev; + } + + rc = genwqe_init_debugfs(cd); + if (rc != 0) + goto err_debugfs; + + return 0; + + err_debugfs: + device_destroy(cd->class_genwqe, cd->devnum_genwqe); + err_cdev: + cdev_del(&cd->cdev_genwqe); + err_add: + unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); + err_dev: + cd->dev = NULL; + return rc; +} + +static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd) +{ + int rc; + unsigned int i; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_open_files(cd)) + return 0; + + dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__); + + rc = genwqe_kill_fasync(cd, SIGIO); + if (rc > 0) { + /* give kill_timeout seconds to close file descriptors ... */ + for (i = 0; (i < genwqe_kill_timeout) && + genwqe_open_files(cd); i++) { + dev_info(&pci_dev->dev, " %d sec ...", i); + + cond_resched(); + msleep(1000); + } + + /* if no open files we can safely continue, else ... 
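escalate from SIGIO to SIGKILL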
*/ + if (!genwqe_open_files(cd)) + return 0; + + dev_warn(&pci_dev->dev, + "[%s] send SIGKILL and wait ...\n", __func__); + + rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */ + if (rc) { + /* Give kill_timout more seconds to end processes */ + for (i = 0; (i < genwqe_kill_timeout) && + genwqe_open_files(cd); i++) { + dev_warn(&pci_dev->dev, " %d sec ...", i); + + cond_resched(); + msleep(1000); + } + } + } + return 0; +} + +/** + * genwqe_device_remove() - Remove genwqe's char device + * + * This function must be called after the client devices are removed + * because it will free the major/minor number range for the genwqe + * drivers. + * + * This function must be robust enough to be called twice. + */ +int genwqe_device_remove(struct genwqe_dev *cd) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_device_initialized(cd)) + return 1; + + genwqe_inform_and_stop_processes(cd); + + /* + * We currently do wait until all filedescriptors are + * closed. This leads to a problem when we abort the + * application which will decrease this reference from + * 1/unused to 0/illegal and not from 2/used 1/empty. + */ + rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount); + if (rc != 1) { + dev_err(&pci_dev->dev, + "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc); + panic("Fatal err: cannot free resources with pending references!"); + } + + genqwe_exit_debugfs(cd); + device_destroy(cd->class_genwqe, cd->devnum_genwqe); + cdev_del(&cd->cdev_genwqe); + unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); + cd->dev = NULL; + + return 0; +} diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c new file mode 100644 index 00000000000..a72a99266c3 --- /dev/null +++ b/drivers/misc/genwqe/card_sysfs.c @@ -0,0 +1,288 @@ +/** + * IBM Accelerator Family 'GenWQE' + * + * (C) Copyright IBM Corp. 2013 + * + * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> + * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> + * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Ruettger <michael@ibmra.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Sysfs interfaces for the GenWQE card. There are attributes to query + * the version of the bitstream as well as some for the driver. For + * debugging, please also see the debugfs interfaces of this driver. 
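+ *
+ * Illustrative usage from user space (device name assumed, not
+ * part of this patch):
+ *
+ *   cat /sys/class/genwqe/genwqe0_card/version
+ *   cat /sys/class/genwqe/genwqe0_card/curr_bitstream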
+ */ + +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/fs.h> +#include <linux/sysfs.h> +#include <linux/ctype.h> +#include <linux/device.h> + +#include "card_base.h" +#include "card_ddcb.h" + +static const char * const genwqe_types[] = { + [GENWQE_TYPE_ALTERA_230] = "GenWQE4-230", + [GENWQE_TYPE_ALTERA_530] = "GenWQE4-530", + [GENWQE_TYPE_ALTERA_A4] = "GenWQE5-A4", + [GENWQE_TYPE_ALTERA_A7] = "GenWQE5-A7", +}; + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct genwqe_dev *cd = dev_get_drvdata(dev); + const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" }; + + return sprintf(buf, "%s\n", cs[cd->card_state]); +} +static DEVICE_ATTR_RO(status); + +static ssize_t appid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char app_name[5]; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + genwqe_read_app_id(cd, app_name, sizeof(app_name)); + return sprintf(buf, "%s\n", app_name); +} +static DEVICE_ATTR_RO(appid); + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u64 slu_id, app_id; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG); + app_id = __genwqe_readq(cd, IO_APP_UNITCFG); + + return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id); +} +static DEVICE_ATTR_RO(version); + +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 card_type; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + card_type = genwqe_card_type(cd); + return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ? + "invalid" : genwqe_types[card_type]); +} +static DEVICE_ATTR_RO(type); + +static ssize_t driver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", DRV_VERS_STRING); +} +static DEVICE_ATTR_RO(driver); + +static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u64 tempsens; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR); + return sprintf(buf, "%016llx\n", tempsens); +} +static DEVICE_ATTR_RO(tempsens); + +static ssize_t freerunning_timer_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 t; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER); + return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(freerunning_timer); + +static ssize_t queue_working_time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 t; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME); + return sprintf(buf, "%016llx\n", t); +} +static DEVICE_ATTR_RO(queue_working_time); + +static ssize_t base_clock_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 base_clock; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + base_clock = genwqe_base_clock_frequency(cd); + return sprintf(buf, "%lld\n", base_clock); +} +static DEVICE_ATTR_RO(base_clock); + +/** + * curr_bitstream_show() - Show the current bitstream id + * + * There is a bug in some old versions of the CPLD which selects the + * bitstream, which causes the IO_SLU_BITSTREAM register to report + * unreliable data in very rare cases. 
This makes this sysfs + * unreliable up to the point were a new CPLD version is being used. + * + * Unfortunately there is no automatic way yet to query the CPLD + * version, such that you need to manually ensure via programming + * tools that you have a recent version of the CPLD software. + * + * The proposed circumvention is to use a special recovery bitstream + * on the backup partition (0) to identify problems while loading the + * image. + */ +static ssize_t curr_bitstream_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int curr_bitstream; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; + return sprintf(buf, "%d\n", curr_bitstream); +} +static DEVICE_ATTR_RO(curr_bitstream); + +/** + * next_bitstream_show() - Show the next activated bitstream + * + * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF. + */ +static ssize_t next_bitstream_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int next_bitstream; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + switch ((cd->softreset & 0xc) >> 2) { + case 0x2: + next_bitstream = 0; + break; + case 0x3: + next_bitstream = 1; + break; + default: + next_bitstream = -1; + break; /* error */ + } + return sprintf(buf, "%d\n", next_bitstream); +} + +static ssize_t next_bitstream_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int partition; + struct genwqe_dev *cd = dev_get_drvdata(dev); + + if (kstrtoint(buf, 0, &partition) < 0) + return -EINVAL; + + switch (partition) { + case 0x0: + cd->softreset = 0x78; + break; + case 0x1: + cd->softreset = 0x7c; + break; + default: + return -EINVAL; + } + + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset); + return count; +} +static DEVICE_ATTR_RW(next_bitstream); + +/* + * Create device_attribute structures / params: name, mode, show, store + * additional flag if valid in VF + */ +static struct attribute *genwqe_attributes[] = { + &dev_attr_tempsens.attr, + &dev_attr_next_bitstream.attr, + &dev_attr_curr_bitstream.attr, + &dev_attr_base_clock.attr, + &dev_attr_driver.attr, + &dev_attr_type.attr, + &dev_attr_version.attr, + &dev_attr_appid.attr, + &dev_attr_status.attr, + &dev_attr_freerunning_timer.attr, + &dev_attr_queue_working_time.attr, + NULL, +}; + +static struct attribute *genwqe_normal_attributes[] = { + &dev_attr_driver.attr, + &dev_attr_type.attr, + &dev_attr_version.attr, + &dev_attr_appid.attr, + &dev_attr_status.attr, + &dev_attr_freerunning_timer.attr, + &dev_attr_queue_working_time.attr, + NULL, +}; + +/** + * genwqe_is_visible() - Determine if sysfs attribute should be visible or not + * + * VFs have restricted mmio capabilities, so not all sysfs entries + * are allowed in VFs. 
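+ *
+ * Any attribute not listed in genwqe_normal_attributes is
+ * therefore hidden (mode 0) when the driver does not run as the
+ * privileged PF.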
+ */
+static umode_t genwqe_is_visible(struct kobject *kobj,
+				 struct attribute *attr, int n)
+{
+	unsigned int j;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct genwqe_dev *cd = dev_get_drvdata(dev);
+	umode_t mode = attr->mode;
+
+	if (genwqe_is_privileged(cd))
+		return mode;
+
+	for (j = 0; genwqe_normal_attributes[j] != NULL; j++)
+		if (genwqe_normal_attributes[j] == attr)
+			return mode;
+
+	return 0;
+}
+
+static struct attribute_group genwqe_attribute_group = {
+	.is_visible = genwqe_is_visible,
+	.attrs = genwqe_attributes,
+};
+
+const struct attribute_group *genwqe_attribute_groups[] = {
+	&genwqe_attribute_group,
+	NULL,
+};
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
new file mode 100644
index 00000000000..62cc6bb3f62
--- /dev/null
+++ b/drivers/misc/genwqe/card_utils.c
@@ -0,0 +1,1034 @@
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Miscellaneous functionality used in the other GenWQE driver parts.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/page-flags.h>
+#include <linux/scatterlist.h>
+#include <linux/hugetlb.h>
+#include <linux/iommu.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <asm/pgtable.h>
+
+#include "genwqe_driver.h"
+#include "card_base.h"
+#include "card_ddcb.h"
+
+/**
+ * __genwqe_writeq() - Write 64-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: byte offset within BAR
+ * @val: 64-bit value
+ *
+ * Return: 0 if success; < 0 if error
+ */
+int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
+{
+	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+		return -EIO;
+
+	if (cd->mmio == NULL)
+		return -EIO;
+
+	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
+	return 0;
+}
+
+/**
+ * __genwqe_readq() - Read 64-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: offset within BAR
+ *
+ * Return: value from register
+ */
+u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
+{
+	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+		return 0xffffffffffffffffull;
+
+	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
+	    (byte_offs == IO_SLC_CFGREG_GFIR))
+		return 0x000000000000ffffull;
+
+	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
+	    (byte_offs == IO_SLC_CFGREG_GFIR))
+		return 0x00000000ffff0000ull;
+
+	if (cd->mmio == NULL)
+		return 0xffffffffffffffffull;
+
+	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
+}
+
+/**
+ * __genwqe_writel() - Write 32-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: byte offset within BAR
+ * @val: 32-bit value
+ *
+ * Return: 0 if success; < 0 if error
+ */
+int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
+{
+	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+		return -EIO;
+
+	if (cd->mmio == NULL)
+		return -EIO;
+
+	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
+	return 0;
+}
+
+/**
+ * __genwqe_readl() - Read 32-bit register
+ * @cd: genwqe device descriptor
+ * @byte_offs: offset within BAR
+ *
+ * Return: Value from register
+ */
+u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
+{
+	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
+		return 0xffffffff;
+
+	if (cd->mmio == NULL)
+		return 0xffffffff;
+
+	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
+}
+
+/**
+ * genwqe_read_app_id() - Extract app_id
+ *
+ * app_unitcfg needs to be filled with valid data first
+ */
+int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
+{
+	int i, j;
+	u32 app_id = (u32)cd->app_unitcfg;
+
+	memset(app_name, 0, len);
+	for (i = 0, j = 0; j < min(len, 4); j++) {
+		char ch = (char)((app_id >> (24 - j*8)) & 0xff);
+		if (ch == ' ')
+			continue;
+		app_name[i++] = isprint(ch) ? ch : 'X';
+	}
+	return i;
+}
+
+/**
+ * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
+ *
+ * Existing kernel functions seem to use a different polynomial,
+ * therefore we could not use them here.
+ *
+ * Genwqe's Polynomial = 0x20044009
+ */
+#define CRC32_POLYNOMIAL	0x20044009
+static u32 crc32_tab[256];	/* crc32 lookup table */
+
+void genwqe_init_crc32(void)
+{
+	int i, j;
+	u32 crc;
+
+	for (i = 0; i < 256; i++) {
+		crc = i << 24;
+		for (j = 0; j < 8; j++) {
+			if (crc & 0x80000000)
+				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
+			else
+				crc = (crc << 1);
+		}
+		crc32_tab[i] = crc;
+	}
+}
+
+/**
+ * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
+ * @buff: pointer to data buffer
+ * @len: length of data for calculation
+ * @init: initial crc (0xffffffff at start)
+ *
+ * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
+ *
+ * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
+ * result in a crc32 of 0xf33cb7d3.
+ *
+ * The existing kernel crc functions did not cover this polynomial yet.
+ *
+ * Return: crc32 checksum.
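 *
 * [Editor's illustration, not part of this patch] Assuming the lookup
 * table has been prepared by genwqe_init_crc32() above, the documented
 * example can be reproduced with a few lines of C:
 *
 *	u8 data[4] = { 0x01, 0x02, 0x03, 0x04 };
 *	u32 crc;
 *
 *	genwqe_init_crc32();
 *	crc = genwqe_crc32(data, sizeof(data), 0xffffffff);
 *	// crc is now expected to be 0xf33cb7d3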
+ */
+u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
+{
+	int i;
+	u32 crc;
+
+	crc = init;
+	while (len--) {
+		i = ((crc >> 24) ^ *buff++) & 0xFF;
+		crc = (crc << 8) ^ crc32_tab[i];
+	}
+	return crc;
+}
+
+void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
+				dma_addr_t *dma_handle)
+{
+	if (get_order(size) > MAX_ORDER)
+		return NULL;
+
+	return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
+}
+
+void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
+			      void *vaddr, dma_addr_t dma_handle)
+{
+	if (vaddr == NULL)
+		return;
+
+	pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle);
+}
+
+static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
+			       int num_pages)
+{
+	int i;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
+		pci_unmap_page(pci_dev, dma_list[i],
+			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		dma_list[i] = 0x0;
+	}
+}
+
+static int genwqe_map_pages(struct genwqe_dev *cd,
+			    struct page **page_list, int num_pages,
+			    dma_addr_t *dma_list)
+{
+	int i;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	/* establish DMA mapping for requested pages */
+	for (i = 0; i < num_pages; i++) {
+		dma_addr_t daddr;
+
+		dma_list[i] = 0x0;
+		daddr = pci_map_page(pci_dev, page_list[i],
+				     0,	 /* map_offs */
+				     PAGE_SIZE,
+				     PCI_DMA_BIDIRECTIONAL);  /* FIXME rd/rw */
+
+		if (pci_dma_mapping_error(pci_dev, daddr)) {
+			dev_err(&pci_dev->dev,
+				"[%s] err: no dma addr daddr=%016llx!\n",
+				__func__, (long long)daddr);
+			goto err;
+		}
+
+		dma_list[i] = daddr;
+	}
+	return 0;
+
+ err:
+	genwqe_unmap_pages(cd, dma_list, num_pages);
+	return -EIO;
+}
+
+static int genwqe_sgl_size(int num_pages)
+{
+	int len, num_tlb = num_pages / 7;
+
+	len = sizeof(struct sg_entry) * (num_pages+num_tlb + 1);
+	return roundup(len, PAGE_SIZE);
+}
+
+/**
+ * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
+ *
+ * Allocates memory for sgl and overlapping pages. Pages which might
+ * overlap other user-space memory blocks are being cached for DMAs,
+ * such that we do not run into synchronization issues. Data is copied
+ * from user-space into the cached pages.
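 *
 * [Editor's worked example, not part of this patch] For a hypothetical
 * request with user_addr = (void *)0x1234 and user_size = 0x3000,
 * assuming 4 KiB pages and sizeof(struct sg_entry) == 16:
 *
 *	fpage_offs = 0x234
 *	fpage_size = min(0x1000 - 0x234, 0x3000) = 0xdcc
 *	nr_pages   = DIV_ROUND_UP(0x234 + 0x3000, 0x1000) = 4
 *	lpage_size = (0x3000 - 0xdcc) % 0x1000 = 0x234
 *	sgl_size   = roundup(16 * (4 + 4/7 + 1), 0x1000) = 0x1000
 *
 * so both the first and the last page are partial and take the
 * copy_from_user() bounce-buffer path below.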
+ */ +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + void __user *user_addr, size_t user_size) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + sgl->fpage_offs = offset_in_page((unsigned long)user_addr); + sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size); + sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); + sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE; + + dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld " + "fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n", + __func__, user_addr, user_size, sgl->nr_pages, + sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size); + + sgl->user_addr = user_addr; + sgl->user_size = user_size; + sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); + + if (get_order(sgl->sgl_size) > MAX_ORDER) { + dev_err(&pci_dev->dev, + "[%s] err: too much memory requested!\n", __func__); + return -ENOMEM; + } + + sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size, + &sgl->sgl_dma_addr); + if (sgl->sgl == NULL) { + dev_err(&pci_dev->dev, + "[%s] err: no memory available!\n", __func__); + return -ENOMEM; + } + + /* Only use buffering on incomplete pages */ + if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) { + sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, + &sgl->fpage_dma_addr); + if (sgl->fpage == NULL) + goto err_out; + + /* Sync with user memory */ + if (copy_from_user(sgl->fpage + sgl->fpage_offs, + user_addr, sgl->fpage_size)) { + rc = -EFAULT; + goto err_out; + } + } + if (sgl->lpage_size != 0) { + sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, + &sgl->lpage_dma_addr); + if (sgl->lpage == NULL) + goto err_out1; + + /* Sync with user memory */ + if (copy_from_user(sgl->lpage, user_addr + user_size - + sgl->lpage_size, sgl->lpage_size)) { + rc = -EFAULT; + goto err_out1; + } + } + return 0; + + err_out1: + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, + sgl->fpage_dma_addr); + err_out: + __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, + sgl->sgl_dma_addr); + return -ENOMEM; +} + +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + dma_addr_t *dma_list) +{ + int i = 0, j = 0, p; + unsigned long dma_offs, map_offs; + dma_addr_t prev_daddr = 0; + struct sg_entry *s, *last_s = NULL; + size_t size = sgl->user_size; + + dma_offs = 128; /* next block if needed/dma_offset */ + map_offs = sgl->fpage_offs; /* offset in first page */ + + s = &sgl->sgl[0]; /* first set of 8 entries */ + p = 0; /* page */ + while (p < sgl->nr_pages) { + dma_addr_t daddr; + unsigned int size_to_map; + + /* always write the chaining entry, cleanup is done later */ + j = 0; + s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs); + s[j].len = cpu_to_be32(128); + s[j].flags = cpu_to_be32(SG_CHAINED); + j++; + + while (j < 8) { + /* DMA mapping for requested page, offs, size */ + size_to_map = min(size, PAGE_SIZE - map_offs); + + if ((p == 0) && (sgl->fpage != NULL)) { + daddr = sgl->fpage_dma_addr + map_offs; + + } else if ((p == sgl->nr_pages - 1) && + (sgl->lpage != NULL)) { + daddr = sgl->lpage_dma_addr; + } else { + daddr = dma_list[p] + map_offs; + } + + size -= size_to_map; + map_offs = 0; + + if (prev_daddr == daddr) { + u32 prev_len = be32_to_cpu(last_s->len); + + /* pr_info("daddr combining: " + "%016llx/%08x -> %016llx\n", + prev_daddr, prev_len, daddr); */ + + last_s->len = cpu_to_be32(prev_len + + size_to_map); + + p++; /* process next page */ + if (p == sgl->nr_pages) + goto fixup; /* nothing to do */ + + prev_daddr = daddr + 
size_to_map;
+				continue;
+			}
+
+			/* start new entry */
+			s[j].target_addr = cpu_to_be64(daddr);
+			s[j].len = cpu_to_be32(size_to_map);
+			s[j].flags = cpu_to_be32(SG_DATA);
+			prev_daddr = daddr + size_to_map;
+			last_s = &s[j];
+			j++;
+
+			p++; /* process next page */
+			if (p == sgl->nr_pages)
+				goto fixup;  /* nothing to do */
+		}
+		dma_offs += 128;
+		s += 8;	 /* continue 8 elements further */
+	}
+ fixup:
+	if (j == 1) {		/* combining happened on last entry! */
+		s -= 8;		/* full shift needed on previous sgl block */
+		j = 7;		/* shift all elements */
+	}
+
+	for (i = 0; i < j; i++)	/* move elements 1 up */
+		s[i] = s[i + 1];
+
+	s[i].target_addr = cpu_to_be64(0);
+	s[i].len = cpu_to_be32(0);
+	s[i].flags = cpu_to_be32(SG_END_LIST);
+	return 0;
+}
+
+/**
+ * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
+ *
+ * After the DMA transfer has been completed we free the memory for
+ * the sgl and the cached pages. Data is being transferred from cached
+ * pages into user-space buffers.
+ */
+int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
+{
+	int rc = 0;
+	struct pci_dev *pci_dev = cd->pci_dev;
+
+	if (sgl->fpage) {
+		if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
+				 sgl->fpage_size)) {
+			dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
+					 sgl->fpage_dma_addr);
+		sgl->fpage = NULL;
+		sgl->fpage_dma_addr = 0;
+	}
+	if (sgl->lpage) {
+		if (copy_to_user(sgl->user_addr + sgl->user_size -
+				 sgl->lpage_size, sgl->lpage,
+				 sgl->lpage_size)) {
+			dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
+				__func__);
+			rc = -EFAULT;
+		}
+		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+					 sgl->lpage_dma_addr);
+		sgl->lpage = NULL;
+		sgl->lpage_dma_addr = 0;
+	}
+	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
+				 sgl->sgl_dma_addr);
+
+	sgl->sgl = NULL;
+	sgl->sgl_dma_addr = 0x0;
+	sgl->sgl_size = 0;
+	return rc;
+}
+
+/**
+ * free_user_pages() - Give pinned pages back
+ *
+ * Documentation of get_user_pages is in mm/memory.c:
+ *
+ * If the page is written to, set_page_dirty (or set_page_dirty_lock,
+ * as appropriate) must be called after the page is finished with, and
+ * before put_page is called.
+ *
+ * FIXME Could be of use to others and might belong in the generic
+ * code, if others agree. E.g.
+ *    ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c
+ *    ceph_put_page_vector in net/ceph/pagevec.c
+ *    maybe more?
+ */
+static int free_user_pages(struct page **page_list, unsigned int nr_pages,
+			   int dirty)
+{
+	unsigned int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (page_list[i] != NULL) {
+			if (dirty)
+				set_page_dirty_lock(page_list[i]);
+			put_page(page_list[i]);
+		}
+	}
+	return 0;
+}
+
+/**
+ * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
+ * @cd: pointer to genwqe device
+ * @m: mapping params
+ * @uaddr: user virtual address
+ * @size: size of memory to be mapped
+ *
+ * We need to think about how we could speed this up. Of course it is
+ * not a good idea to do this over and over again, like we are
+ * currently doing it. Nevertheless, I am curious where on the path
+ * the performance is spent. Most probably within the memory
+ * allocation functions, but maybe also in the DMA mapping code.
+ *
+ * Restrictions: The maximum size of the possible mapping currently depends
+ *               on the amount of memory we can get using kzalloc() for the
+ *               page_list and pci_alloc_consistent for the sg_list.
+ * The sg_list is currently itself not scattered, which could + * be fixed with some effort. The page_list must be split into + * PAGE_SIZE chunks too. All that will make the complicated + * code more complicated. + * + * Return: 0 if success + */ +int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, + unsigned long size, struct ddcb_requ *req) +{ + int rc = -EINVAL; + unsigned long data, offs; + struct pci_dev *pci_dev = cd->pci_dev; + + if ((uaddr == NULL) || (size == 0)) { + m->size = 0; /* mark unused and not added */ + return -EINVAL; + } + m->u_vaddr = uaddr; + m->size = size; + + /* determine space needed for page_list. */ + data = (unsigned long)uaddr; + offs = offset_in_page(data); + m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); + + m->page_list = kcalloc(m->nr_pages, + sizeof(struct page *) + sizeof(dma_addr_t), + GFP_KERNEL); + if (!m->page_list) { + dev_err(&pci_dev->dev, "err: alloc page_list failed\n"); + m->nr_pages = 0; + m->u_vaddr = NULL; + m->size = 0; /* mark unused and not added */ + return -ENOMEM; + } + m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages); + + /* pin user pages in memory */ + rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */ + m->nr_pages, + 1, /* write by caller */ + m->page_list); /* ptrs to pages */ + + /* assumption: get_user_pages can be killed by signals. */ + if (rc < m->nr_pages) { + free_user_pages(m->page_list, rc, 0); + rc = -EFAULT; + goto fail_get_user_pages; + } + + rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list); + if (rc != 0) + goto fail_free_user_pages; + + return 0; + + fail_free_user_pages: + free_user_pages(m->page_list, m->nr_pages, 0); + + fail_get_user_pages: + kfree(m->page_list); + m->page_list = NULL; + m->dma_list = NULL; + m->nr_pages = 0; + m->u_vaddr = NULL; + m->size = 0; /* mark unused and not added */ + return rc; +} + +/** + * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel + * memory + * @cd: pointer to genwqe device + * @m: mapping params + */ +int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, + struct ddcb_requ *req) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (!dma_mapping_used(m)) { + dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n", + __func__, m); + return -EINVAL; + } + + if (m->dma_list) + genwqe_unmap_pages(cd, m->dma_list, m->nr_pages); + + if (m->page_list) { + free_user_pages(m->page_list, m->nr_pages, 1); + + kfree(m->page_list); + m->page_list = NULL; + m->dma_list = NULL; + m->nr_pages = 0; + } + + m->u_vaddr = NULL; + m->size = 0; /* mark as unused and not added */ + return 0; +} + +/** + * genwqe_card_type() - Get chip type SLU Configuration Register + * @cd: pointer to the genwqe device descriptor + * Return: 0: Altera Stratix-IV 230 + * 1: Altera Stratix-IV 530 + * 2: Altera Stratix-V A4 + * 3: Altera Stratix-V A7 + */ +u8 genwqe_card_type(struct genwqe_dev *cd) +{ + u64 card_type = cd->slu_unitcfg; + return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20); +} + +/** + * genwqe_card_reset() - Reset the card + * @cd: pointer to the genwqe device descriptor + */ +int genwqe_card_reset(struct genwqe_dev *cd) +{ + u64 softrst; + struct pci_dev *pci_dev = cd->pci_dev; + + if (!genwqe_is_privileged(cd)) + return -ENODEV; + + /* new SL */ + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull); + msleep(1000); + __genwqe_readq(cd, IO_HSU_FIR_CLR); + __genwqe_readq(cd, IO_APP_FIR_CLR); + __genwqe_readq(cd, IO_SLU_FIR_CLR); + + /* + * Read-modify-write to preserve the 
stealth bits + * + * For SL >= 039, Stealth WE bit allows removing + * the read-modify-wrote. + * r-m-w may require a mask 0x3C to avoid hitting hard + * reset again for error reset (should be 0, chicken). + */ + softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull; + __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull); + + /* give ERRORRESET some time to finish */ + msleep(50); + + if (genwqe_need_err_masking(cd)) { + dev_info(&pci_dev->dev, + "[%s] masking errors for old bitstreams\n", __func__); + __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull); + } + return 0; +} + +int genwqe_read_softreset(struct genwqe_dev *cd) +{ + u64 bitstream; + + if (!genwqe_is_privileged(cd)) + return -ENODEV; + + bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1; + cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull; + return 0; +} + +/** + * genwqe_set_interrupt_capability() - Configure MSI capability structure + * @cd: pointer to the device + * Return: 0 if no error + */ +int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) +{ + int rc; + struct pci_dev *pci_dev = cd->pci_dev; + + rc = pci_enable_msi_exact(pci_dev, count); + if (rc == 0) + cd->flags |= GENWQE_FLAG_MSI_ENABLED; + return rc; +} + +/** + * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability() + * @cd: pointer to the device + */ +void genwqe_reset_interrupt_capability(struct genwqe_dev *cd) +{ + struct pci_dev *pci_dev = cd->pci_dev; + + if (cd->flags & GENWQE_FLAG_MSI_ENABLED) { + pci_disable_msi(pci_dev); + cd->flags &= ~GENWQE_FLAG_MSI_ENABLED; + } +} + +/** + * set_reg_idx() - Fill array with data. Ignore illegal offsets. + * @cd: card device + * @r: debug register array + * @i: index to desired entry + * @m: maximum possible entries + * @addr: addr which is read + * @index: index in debug array + * @val: read value + */ +static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r, + unsigned int *i, unsigned int m, u32 addr, u32 idx, + u64 val) +{ + if (WARN_ON_ONCE(*i >= m)) + return -EFAULT; + + r[*i].addr = addr; + r[*i].idx = idx; + r[*i].val = val; + ++*i; + return 0; +} + +static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r, + unsigned int *i, unsigned int m, u32 addr, u64 val) +{ + return set_reg_idx(cd, r, i, m, addr, 0, val); +} + +int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs, + unsigned int max_regs, int all) +{ + unsigned int i, j, idx = 0; + u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr; + u64 gfir, sluid, appid, ufir, ufec, sfir, sfec; + + /* Global FIR */ + gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); + set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir); + + /* UnitCfg for SLU */ + sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */ + set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid); + + /* UnitCfg for APP */ + appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */ + set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid); + + /* Check all chip Units */ + for (i = 0; i < GENWQE_MAX_UNITS; i++) { + + /* Unit FIR */ + ufir_addr = (i << 24) | 0x008; + ufir = __genwqe_readq(cd, ufir_addr); + set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir); + + /* Unit FEC */ + ufec_addr = (i << 24) | 0x018; + ufec = __genwqe_readq(cd, ufec_addr); + set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec); + + for (j = 0; j < 64; j++) { + /* wherever there is a primary 1, read the 2ndary */ + if (!all && (!(ufir & (1ull << j)))) + continue; + + sfir_addr = (i << 24) | (0x100 + 8 * j); + sfir = 
__genwqe_readq(cd, sfir_addr);
+			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);
+
+			sfec_addr = (i << 24) | (0x300 + 8 * j);
+			sfec = __genwqe_readq(cd, sfec_addr);
+			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
+		}
+	}
+
+	/* fill with invalid data until end */
+	for (i = idx; i < max_regs; i++) {
+		regs[i].addr = 0xffffffff;
+		regs[i].val = 0xffffffffffffffffull;
+	}
+	return idx;
+}
+
+/**
+ * genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ */
+int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
+{
+	int entries = 0, ring, traps, traces, trace_entries;
+	u32 eevptr_addr, l_addr, d_len, d_type;
+	u64 eevptr, val, addr;
+
+	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+	eevptr = __genwqe_readq(cd, eevptr_addr);
+
+	if ((eevptr != 0x0) && (eevptr != -1ull)) {
+		l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+
+		while (1) {
+			val = __genwqe_readq(cd, l_addr);
+
+			if ((val == 0x0) || (val == -1ull))
+				break;
+
+			/* 38:24 */
+			d_len = (val & 0x0000007fff000000ull) >> 24;
+
+			/* 39 */
+			d_type = (val & 0x0000008000000000ull) >> 36;
+
+			if (d_type) {	/* repeat */
+				entries += d_len;
+			} else {	/* size in bytes! */
+				entries += d_len >> 3;
+			}
+
+			l_addr += 8;
+		}
+	}
+
+	for (ring = 0; ring < 8; ring++) {
+		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+		val = __genwqe_readq(cd, addr);
+
+		if ((val == 0x0ull) || (val == -1ull))
+			continue;
+
+		traps = (val >> 24) & 0xff;
+		traces = (val >> 16) & 0xff;
+		trace_entries = val & 0xffff;
+
+		entries += traps + (traces * trace_entries);
+	}
+	return entries;
+}
+
+/**
+ * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ */
+int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
+			  struct genwqe_reg *regs, unsigned int max_regs)
+{
+	int i, traps, traces, trace, trace_entries, trace_entry, ring;
+	unsigned int idx = 0;
+	u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
+	u64 eevptr, e, val, addr;
+
+	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
+	eevptr = __genwqe_readq(cd, eevptr_addr);
+
+	if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
+		l_addr = GENWQE_UID_OFFS(uid) | eevptr;
+		while (1) {
+			e = __genwqe_readq(cd, l_addr);
+			if ((e == 0x0) || (e == 0xffffffffffffffffull))
+				break;
+
+			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
+			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
+			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
+			d_addr |= GENWQE_UID_OFFS(uid);
+
+			if (d_type) {
+				for (i = 0; i < (int)d_len; i++) {
+					val = __genwqe_readq(cd, d_addr);
+					set_reg_idx(cd, regs, &idx, max_regs,
+						    d_addr, i, val);
+				}
+			} else {
+				d_len >>= 3; /* Size in bytes! */
+				for (i = 0; i < (int)d_len; i++, d_addr += 8) {
+					val = __genwqe_readq(cd, d_addr);
+					set_reg_idx(cd, regs, &idx, max_regs,
+						    d_addr, 0, val);
+				}
+			}
+			l_addr += 8;
+		}
+	}
+
+	/*
+	 * To save time, there are only 6 traces populated on Uid=2,
+	 * Ring=1, each with iters=512.
+	 */
+	for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
+					      2...7 are ASI rings */
+		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
+		val = __genwqe_readq(cd, addr);
+
+		if ((val == 0x0ull) || (val == -1ull))
+			continue;
+
+		traps = (val >> 24) & 0xff;	/* Number of Traps */
+		traces = (val >> 16) & 0xff;	/* Number of Traces */
+		trace_entries = val & 0xffff;	/* Entries per trace */
+
+		/* Note: This is a combined loop that dumps both the traps
+		 * (for the trace == 0 case) as well as the traces 1 to
+		 * 'traces'. */
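		/*
		 * [Editor's worked example, not part of this patch] If a
		 * diag map reported traps = 8, traces = 6 and
		 * trace_entries = 512, the loop below would read
		 * 8 + 6 * 512 mailbox values, matching the
		 * "traps + (traces * trace_entries)" estimate in
		 * genwqe_ffdc_buff_size() above.
		 */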
+		for (trace = 0; trace <= traces; trace++) {
+			u32 diag_sel =
+				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
+
+			addr = (GENWQE_UID_OFFS(uid) |
+				IO_EXTENDED_DIAG_SELECTOR);
+			__genwqe_writeq(cd, addr, diag_sel);
+
+			for (trace_entry = 0;
+			     trace_entry < (trace ? trace_entries : traps);
+			     trace_entry++) {
+				addr = (GENWQE_UID_OFFS(uid) |
+					IO_EXTENDED_DIAG_READ_MBX);
+				val = __genwqe_readq(cd, addr);
+				set_reg_idx(cd, regs, &idx, max_regs, addr,
+					    (diag_sel<<16) | trace_entry, val);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * genwqe_write_vreg() - Write register in virtual window
+ *
+ * Note, these registers are only accessible to the PF through the
+ * VF-window. It is not intended for the VF to access.
+ */
+int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
+{
+	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+	__genwqe_writeq(cd, reg, val);
+	return 0;
+}
+
+/**
+ * genwqe_read_vreg() - Read register in virtual window
+ *
+ * Note, these registers are only accessible to the PF through the
+ * VF-window. It is not intended for the VF to access.
+ */
+u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
+{
+	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
+	return __genwqe_readq(cd, reg);
+}
+
+/**
+ * genwqe_base_clock_frequency() - Determine base clock frequency of the card
+ *
+ * Note: From a design perspective it turned out to be a bad idea to
+ * use codes here to specify the frequency/speed values. An old
+ * driver cannot understand new codes and is therefore always a
+ * problem. It is better to measure out the value or to put the
+ * speed/frequency directly into a register, which is always a valid
+ * value for old as well as for new software.
+ *
+ * Return: Card clock in MHz
+ */
+int genwqe_base_clock_frequency(struct genwqe_dev *cd)
+{
+	u16 speed;		/*         MHz  MHz  MHz  MHz */
+	static const int speed_grade[] = { 250, 200, 166, 175 };
+
+	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
+	if (speed >= ARRAY_SIZE(speed_grade))
+		return 0;	/* illegal value */
+
+	return speed_grade[speed];
+}
+
+/**
+ * genwqe_stop_traps() - Stop traps
+ *
+ * Before reading out the analysis data, we need to stop the traps.
+ */
+void genwqe_stop_traps(struct genwqe_dev *cd)
+{
+	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
+}
+
+/**
+ * genwqe_start_traps() - Start traps
+ *
+ * After having read the data, we can/must enable the traps again.
+ */
+void genwqe_start_traps(struct genwqe_dev *cd)
+{
+	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
+
+	if (genwqe_need_err_masking(cd))
+		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
+}
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
new file mode 100644
index 00000000000..cd5263163a6
--- /dev/null
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -0,0 +1,77 @@
+#ifndef __GENWQE_DRIVER_H__
+#define __GENWQE_DRIVER_H__
+
+/**
+ * IBM Accelerator Family 'GenWQE'
+ *
+ * (C) Copyright IBM Corp. 2013
+ *
+ * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
+ * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
+ * Author: Michael Jung <mijung@de.ibm.com>
+ * Author: Michael Ruettger <michael@ibmra.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+#include <linux/iommu.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#include <asm/byteorder.h>
+#include <linux/genwqe/genwqe_card.h>
+
+#define DRV_VERS_STRING	"2.0.15"
+
+/*
+ * Static minor number assignment, until we decide/implement
+ * something dynamic.
+ */
+#define GENWQE_MAX_MINOR 128 /* up to 128 possible genwqe devices */
+
+/**
+ * ddcb_requ_alloc() - Allocate a new DDCB execution request
+ *
+ * This data structure contains the user visible fields of the DDCB
+ * to be executed.
+ *
+ * Return: ptr to genwqe_ddcb_cmd data structure
+ */
+struct genwqe_ddcb_cmd *ddcb_requ_alloc(void);
+
+/**
+ * ddcb_requ_free() - Free DDCB execution request.
+ * @req: ptr to genwqe_ddcb_cmd data structure.
+ */
+void ddcb_requ_free(struct genwqe_ddcb_cmd *req);
+
+u32 genwqe_crc32(u8 *buff, size_t len, u32 init);
+
+static inline void genwqe_hexdump(struct pci_dev *pci_dev,
+				  const void *buff, unsigned int size)
+{
+	char prefix[32];
+
+	scnprintf(prefix, sizeof(prefix), "%s %s: ",
+		  GENWQE_DEVNAME, pci_name(pci_dev));
+
+	print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff,
+			     size, true);
+}
+
+#endif	/* __GENWQE_DRIVER_H__ */
diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile
deleted file mode 100644
index ac74ae67923..00000000000
--- a/drivers/misc/hdpuftrs/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
deleted file mode 100644
index 176fe4e09d3..00000000000
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- *	Sky CPU State Driver
- *
- *	Copyright (C) 2002 Brian Waite
- *
- *	This driver allows use of the CPU state bits
- *	It exports the /dev/sky_cpustate and also
- *	/proc/sky_cpustate pseudo-file for status information.
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/spinlock.h> -#include <linux/smp_lock.h> -#include <linux/miscdevice.h> -#include <linux/proc_fs.h> -#include <linux/hdpu_features.h> -#include <linux/platform_device.h> -#include <asm/uaccess.h> -#include <linux/seq_file.h> -#include <asm/io.h> - -#define SKY_CPUSTATE_VERSION "1.1" - -static int hdpu_cpustate_probe(struct platform_device *pdev); -static int hdpu_cpustate_remove(struct platform_device *pdev); - -static unsigned char cpustate_get_state(void); -static int cpustate_proc_open(struct inode *inode, struct file *file); -static int cpustate_proc_read(struct seq_file *seq, void *offset); - -static struct cpustate_t cpustate; - -static const struct file_operations proc_cpustate = { - .open = cpustate_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static int cpustate_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, cpustate_proc_read, NULL); -} - -static int cpustate_proc_read(struct seq_file *seq, void *offset) -{ - seq_printf(seq, "CPU State: %04x\n", cpustate_get_state()); - return 0; -} - -static int cpustate_get_ref(int excl) -{ - - int retval = -EBUSY; - - spin_lock(&cpustate.lock); - - if (cpustate.excl) - goto out_busy; - - if (excl) { - if (cpustate.open_count) - goto out_busy; - cpustate.excl = 1; - } - - cpustate.open_count++; - retval = 0; - - out_busy: - spin_unlock(&cpustate.lock); - return retval; -} - -static int cpustate_free_ref(void) -{ - - spin_lock(&cpustate.lock); - - cpustate.excl = 0; - cpustate.open_count--; - - spin_unlock(&cpustate.lock); - return 0; -} - -static unsigned char cpustate_get_state(void) -{ - - return cpustate.cached_val; -} - -static void cpustate_set_state(unsigned char new_state) -{ - unsigned int state = (new_state << 21); - -#ifdef DEBUG_CPUSTATE - printk("CPUSTATE -> 0x%x\n", new_state); -#endif - spin_lock(&cpustate.lock); - cpustate.cached_val = new_state; - writel((0xff << 21), cpustate.clr_addr); - writel(state, cpustate.set_addr); - spin_unlock(&cpustate.lock); -} - -/* - * Now all the various file operations that we export. - */ - -static ssize_t cpustate_read(struct file *file, char *buf, - size_t count, loff_t * ppos) -{ - unsigned char data; - - if (count < 0) - return -EFAULT; - if (count == 0) - return 0; - - data = cpustate_get_state(); - if (copy_to_user(buf, &data, sizeof(unsigned char))) - return -EFAULT; - return sizeof(unsigned char); -} - -static ssize_t cpustate_write(struct file *file, const char *buf, - size_t count, loff_t * ppos) -{ - unsigned char data; - - if (count < 0) - return -EFAULT; - - if (count == 0) - return 0; - - if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char))) - return -EFAULT; - - cpustate_set_state(data); - return sizeof(unsigned char); -} - -static int cpustate_open(struct inode *inode, struct file *file) -{ - int ret; - - lock_kernel(); - ret = cpustate_get_ref((file->f_flags & O_EXCL)); - unlock_kernel(); - - return ret; -} - -static int cpustate_release(struct inode *inode, struct file *file) -{ - return cpustate_free_ref(); -} - -static struct platform_driver hdpu_cpustate_driver = { - .probe = hdpu_cpustate_probe, - .remove = hdpu_cpustate_remove, - .driver = { - .name = HDPU_CPUSTATE_NAME, - .owner = THIS_MODULE, - }, -}; - -/* - * The various file operations we support. 
- */ -static const struct file_operations cpustate_fops = { - .owner = THIS_MODULE, - .open = cpustate_open, - .release = cpustate_release, - .read = cpustate_read, - .write = cpustate_write, - .llseek = no_llseek, -}; - -static struct miscdevice cpustate_dev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "sky_cpustate", - .fops = &cpustate_fops, -}; - -static int hdpu_cpustate_probe(struct platform_device *pdev) -{ - struct resource *res; - struct proc_dir_entry *proc_de; - int ret; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - printk(KERN_ERR "sky_cpustate: " - "Invalid memory resource.\n"); - return -EINVAL; - } - cpustate.set_addr = (unsigned long *)res->start; - cpustate.clr_addr = (unsigned long *)res->end - 1; - - ret = misc_register(&cpustate_dev); - if (ret) { - printk(KERN_WARNING "sky_cpustate: " - "Unable to register misc device.\n"); - cpustate.set_addr = NULL; - cpustate.clr_addr = NULL; - return ret; - } - - proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate); - if (!proc_de) { - printk(KERN_WARNING "sky_cpustate: " - "Unable to create proc entry\n"); - } - - printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); - return 0; -} - -static int hdpu_cpustate_remove(struct platform_device *pdev) -{ - cpustate.set_addr = NULL; - cpustate.clr_addr = NULL; - - remove_proc_entry("sky_cpustate", NULL); - misc_deregister(&cpustate_dev); - - return 0; -} - -static int __init cpustate_init(void) -{ - return platform_driver_register(&hdpu_cpustate_driver); -} - -static void __exit cpustate_exit(void) -{ - platform_driver_unregister(&hdpu_cpustate_driver); -} - -module_init(cpustate_init); -module_exit(cpustate_exit); - -MODULE_AUTHOR("Brian Waite"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME); diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c deleted file mode 100644 index ce39fa54949..00000000000 --- a/drivers/misc/hdpuftrs/hdpu_nexus.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Sky Nexus Register Driver - * - * Copyright (C) 2002 Brian Waite - * - * This driver allows reading the Nexus register - * It exports the /proc/sky_chassis_id and also - * /proc/sky_slot_id pseudo-file for status information. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/proc_fs.h> -#include <linux/hdpu_features.h> -#include <linux/platform_device.h> -#include <linux/seq_file.h> -#include <asm/io.h> - -static int hdpu_nexus_probe(struct platform_device *pdev); -static int hdpu_nexus_remove(struct platform_device *pdev); -static int hdpu_slot_id_open(struct inode *inode, struct file *file); -static int hdpu_slot_id_read(struct seq_file *seq, void *offset); -static int hdpu_chassis_id_open(struct inode *inode, struct file *file); -static int hdpu_chassis_id_read(struct seq_file *seq, void *offset); - -static struct proc_dir_entry *hdpu_slot_id; -static struct proc_dir_entry *hdpu_chassis_id; -static int slot_id = -1; -static int chassis_id = -1; - -static const struct file_operations proc_slot_id = { - .open = hdpu_slot_id_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static const struct file_operations proc_chassis_id = { - .open = hdpu_chassis_id_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static struct platform_driver hdpu_nexus_driver = { - .probe = hdpu_nexus_probe, - .remove = hdpu_nexus_remove, - .driver = { - .name = HDPU_NEXUS_NAME, - .owner = THIS_MODULE, - }, -}; - -static int hdpu_slot_id_open(struct inode *inode, struct file *file) -{ - return single_open(file, hdpu_slot_id_read, NULL); -} - -static int hdpu_slot_id_read(struct seq_file *seq, void *offset) -{ - seq_printf(seq, "%d\n", slot_id); - return 0; -} - -static int hdpu_chassis_id_open(struct inode *inode, struct file *file) -{ - return single_open(file, hdpu_chassis_id_read, NULL); -} - -static int hdpu_chassis_id_read(struct seq_file *seq, void *offset) -{ - seq_printf(seq, "%d\n", chassis_id); - return 0; -} - -static int hdpu_nexus_probe(struct platform_device *pdev) -{ - struct resource *res; - int *nexus_id_addr; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - printk(KERN_ERR "sky_nexus: " - "Invalid memory resource.\n"); - return -EINVAL; - } - nexus_id_addr = ioremap(res->start, - (unsigned long)(res->end - res->start)); - if (nexus_id_addr) { - slot_id = (*nexus_id_addr >> 8) & 0x1f; - chassis_id = *nexus_id_addr & 0xff; - iounmap(nexus_id_addr); - } else { - printk(KERN_ERR "sky_nexus: Could not map slot id\n"); - } - - hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id); - if (!hdpu_slot_id) { - printk(KERN_WARNING "sky_nexus: " - "Unable to create proc dir entry: sky_slot_id\n"); - } - - hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL, - &proc_chassis_id); - if (!hdpu_chassis_id) - printk(KERN_WARNING "sky_nexus: " - "Unable to create proc dir entry: sky_chassis_id\n"); - - return 0; -} - -static int hdpu_nexus_remove(struct platform_device *pdev) -{ - slot_id = -1; - chassis_id = -1; - - remove_proc_entry("sky_slot_id", NULL); - remove_proc_entry("sky_chassis_id", NULL); - - hdpu_slot_id = 0; - hdpu_chassis_id = 0; - - return 0; -} - -static int __init nexus_init(void) -{ - return platform_driver_register(&hdpu_nexus_driver); -} - -static void __exit nexus_exit(void) -{ - platform_driver_unregister(&hdpu_nexus_driver); -} - -module_init(nexus_init); -module_exit(nexus_exit); - -MODULE_AUTHOR("Brian Waite"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" HDPU_NEXUS_NAME); diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c new file mode 100644 index 00000000000..90520d76633 --- /dev/null +++ 
b/drivers/misc/hmc6352.c @@ -0,0 +1,155 @@ +/* + * hmc6352.c - Honeywell Compass Driver + * + * Copyright (C) 2009 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/sysfs.h> + +static DEFINE_MUTEX(compass_mutex); + +static int compass_command(struct i2c_client *c, u8 cmd) +{ + int ret = i2c_master_send(c, &cmd, 1); + if (ret < 0) + dev_warn(&c->dev, "command '%c' failed.\n", cmd); + return ret; +} + +static int compass_store(struct device *dev, const char *buf, size_t count, + const char *map) +{ + struct i2c_client *c = to_i2c_client(dev); + int ret; + unsigned long val; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + if (val >= strlen(map)) + return -EINVAL; + mutex_lock(&compass_mutex); + ret = compass_command(c, map[val]); + mutex_unlock(&compass_mutex); + if (ret < 0) + return ret; + return count; +} + +static ssize_t compass_calibration_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return compass_store(dev, buf, count, "EC"); +} + +static ssize_t compass_power_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return compass_store(dev, buf, count, "SW"); +} + +static ssize_t compass_heading_data_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char i2c_data[2]; + int ret; + + mutex_lock(&compass_mutex); + ret = compass_command(client, 'A'); + if (ret != 1) { + mutex_unlock(&compass_mutex); + return ret; + } + msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */ + ret = i2c_master_recv(client, i2c_data, 2); + mutex_unlock(&compass_mutex); + if (ret < 0) { + dev_warn(dev, "i2c read data cmd failed\n"); + return ret; + } + ret = (i2c_data[0] << 8) | i2c_data[1]; + return sprintf(buf, "%d.%d\n", ret/10, ret%10); +} + + +static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL); +static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store); +static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store); + +static struct attribute *mid_att_compass[] = { + &dev_attr_heading0_input.attr, + &dev_attr_calibration.attr, + &dev_attr_power_state.attr, + NULL +}; + +static const struct attribute_group m_compass_gr = { + .name = "hmc6352", + .attrs = mid_att_compass +}; + +static int hmc6352_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + + res = sysfs_create_group(&client->dev.kobj, &m_compass_gr); + if (res) { + dev_err(&client->dev, "device_create_file failed\n"); + return 
res; + } + dev_info(&client->dev, "%s HMC6352 compass chip found\n", + client->name); + return 0; +} + +static int hmc6352_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &m_compass_gr); + return 0; +} + +static struct i2c_device_id hmc6352_id[] = { + { "hmc6352", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, hmc6352_id); + +static struct i2c_driver hmc6352_driver = { + .driver = { + .name = "hmc6352", + }, + .probe = hmc6352_probe, + .remove = hmc6352_remove, + .id_table = hmc6352_id, +}; + +module_i2c_driver(hmc6352_driver); + +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); +MODULE_DESCRIPTION("hmc6352 Compass Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index a92a3a742b4..b83e3ca12a4 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -1,5 +1,5 @@ /* - * Driver for HP iLO/iLO2 management processor. + * Driver for the HP iLO management processor. * * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. * David Altobelli <david.altobelli@hp.com> @@ -25,10 +25,12 @@ #include <linux/io.h> #include <linux/wait.h> #include <linux/poll.h> +#include <linux/slab.h> #include "hpilo.h" static struct class *ilo_class; static unsigned int ilo_major; +static unsigned int max_ccb = 16; static char ilo_hwdev[MAX_ILO_DEV]; static inline int get_entry_id(int entry) @@ -255,7 +257,8 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data) static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) { - char *dma_va, *dma_pa; + char *dma_va; + dma_addr_t dma_pa; struct ccb *driver_ccb, *ilo_ccb; driver_ccb = &data->driver_ccb; @@ -271,12 +274,12 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) return -ENOMEM; dma_va = (char *)data->dma_va; - dma_pa = (char *)data->dma_pa; + dma_pa = data->dma_pa; memset(dma_va, 0, data->dma_size); dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN); - dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_START_ALIGN); + dma_pa = roundup(dma_pa, ILO_START_ALIGN); /* * Create two ccb's, one with virt addrs, one with phys addrs. @@ -287,26 +290,26 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot) fifo_setup(dma_va, NR_QENTRY); driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE; - ilo_ccb->ccb_u1.send_fifobar = dma_pa + FIFOHANDLESIZE; + ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE; dma_va += fifo_sz(NR_QENTRY); dma_pa += fifo_sz(NR_QENTRY); dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ); - dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_CACHE_SZ); + dma_pa = roundup(dma_pa, ILO_CACHE_SZ); fifo_setup(dma_va, NR_QENTRY); driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE; - ilo_ccb->ccb_u3.recv_fifobar = dma_pa + FIFOHANDLESIZE; + ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE; dma_va += fifo_sz(NR_QENTRY); dma_pa += fifo_sz(NR_QENTRY); driver_ccb->ccb_u2.send_desc = dma_va; - ilo_ccb->ccb_u2.send_desc = dma_pa; + ilo_ccb->ccb_u2.send_desc_pa = dma_pa; dma_pa += desc_mem_sz(NR_QENTRY); dma_va += desc_mem_sz(NR_QENTRY); driver_ccb->ccb_u4.recv_desc = dma_va; - ilo_ccb->ccb_u4.recv_desc = dma_pa; + ilo_ccb->ccb_u4.recv_desc_pa = dma_pa; driver_ccb->channel = slot; ilo_ccb->channel = slot; @@ -422,7 +425,7 @@ static void ilo_set_reset(struct ilo_hwinfo *hw) * Mapped memory is zeroed on ilo reset, so set a per ccb flag * to indicate that this ccb needs to be closed and reopened. 
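 *
 * [Editor's note, hedged illustration; not part of this patch] With
 * this change the walk below is bounded by the new max_ccb module
 * parameter rather than the old MAX_CCB constant; ilo_probe() clamps
 * it to the MIN_CCB..MAX_CCB range from hpilo.h (8..24), so e.g.
 * "modprobe hpilo max_ccb=16" limits the scan to 16 channel control
 * blocks.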
*/ - for (slot = 0; slot < MAX_CCB; slot++) { + for (slot = 0; slot < max_ccb; slot++) { if (!hw->ccb_alloc[slot]) continue; set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb); @@ -533,7 +536,7 @@ static int ilo_close(struct inode *ip, struct file *fp) struct ilo_hwinfo *hw; unsigned long flags; - slot = iminor(ip) % MAX_CCB; + slot = iminor(ip) % max_ccb; hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); spin_lock(&hw->open_lock); @@ -564,7 +567,7 @@ static int ilo_open(struct inode *ip, struct file *fp) struct ilo_hwinfo *hw; unsigned long flags; - slot = iminor(ip) % MAX_CCB; + slot = iminor(ip) % max_ccb; hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); /* new ccb allocation */ @@ -638,6 +641,7 @@ static const struct file_operations ilo_fops = { .poll = ilo_poll, .open = ilo_open, .release = ilo_close, + .llseek = noop_llseek, }; static irqreturn_t ilo_isr(int irq, void *data) @@ -660,7 +664,7 @@ static irqreturn_t ilo_isr(int irq, void *data) ilo_set_reset(hw); } - for (i = 0; i < MAX_CCB; i++) { + for (i = 0; i < max_ccb; i++) { if (!hw->ccb_alloc[i]) continue; if (pending & (1 << i)) @@ -682,7 +686,7 @@ static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) pci_iounmap(pdev, hw->mmio_vaddr); } -static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) +static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) { int error = -ENOMEM; @@ -694,14 +698,14 @@ static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) } /* map the adapter shared memory region */ - hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ); + hw->ram_vaddr = pci_iomap(pdev, 2, max_ccb * ILOHW_CCB_SZ); if (hw->ram_vaddr == NULL) { dev_err(&pdev->dev, "Error mapping shared mem\n"); goto mmio_free; } /* map the doorbell aperture */ - hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE); + hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE); if (hw->db_vaddr == NULL) { dev_err(&pdev->dev, "Error mapping doorbell\n"); goto ram_free; @@ -721,10 +725,13 @@ static void ilo_remove(struct pci_dev *pdev) int i, minor; struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev); + if (!ilo_hw) + return; + clear_device(ilo_hw); minor = MINOR(ilo_hw->cdev.dev); - for (i = minor; i < minor + MAX_CCB; i++) + for (i = minor; i < minor + max_ccb; i++) device_destroy(ilo_class, MKDEV(ilo_major, i)); cdev_del(&ilo_hw->cdev); @@ -732,17 +739,33 @@ static void ilo_remove(struct pci_dev *pdev) free_irq(pdev->irq, ilo_hw); ilo_unmap_device(pdev, ilo_hw); pci_release_regions(pdev); - pci_disable_device(pdev); + /* + * pci_disable_device(pdev) used to be here. But this PCI device has + * two functions with interrupt lines connected to a single pin. The + * other one is a USB host controller. So when we disable the PIN here + * e.g. by rmmod hpilo, the controller stops working. It is because + * the interrupt link is disabled in ACPI since it is not refcounted + * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable. 
+ */ kfree(ilo_hw); - ilo_hwdev[(minor / MAX_CCB)] = 0; + ilo_hwdev[(minor / max_ccb)] = 0; } -static int __devinit ilo_probe(struct pci_dev *pdev, +static int ilo_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - int devnum, minor, start, error; + int devnum, minor, start, error = 0; struct ilo_hwinfo *ilo_hw; + /* Ignore subsystem_device = 0x1979 (set by BIOS) */ + if (pdev->subsystem_device == 0x1979) + return 0; + + if (max_ccb > MAX_CCB) + max_ccb = MAX_CCB; + else if (max_ccb < MIN_CCB) + max_ccb = MIN_CCB; + /* find a free range for device files */ for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) { if (ilo_hwdev[devnum] == 0) { @@ -792,14 +815,14 @@ static int __devinit ilo_probe(struct pci_dev *pdev, cdev_init(&ilo_hw->cdev, &ilo_fops); ilo_hw->cdev.owner = THIS_MODULE; - start = devnum * MAX_CCB; - error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB); + start = devnum * max_ccb; + error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb); if (error) { dev_err(&pdev->dev, "Could not add cdev\n"); goto remove_isr; } - for (minor = 0 ; minor < MAX_CCB; minor++) { + for (minor = 0 ; minor < max_ccb; minor++) { struct device *dev; dev = device_create(ilo_class, &pdev->dev, MKDEV(ilo_major, minor), NULL, @@ -817,7 +840,7 @@ unmap: free_regions: pci_release_regions(pdev); disable: - pci_disable_device(pdev); +/* pci_disable_device(pdev); see comment in ilo_remove */ free: kfree(ilo_hw); out: @@ -836,7 +859,7 @@ static struct pci_driver ilo_driver = { .name = ILO_NAME, .id_table = ilo_devices, .probe = ilo_probe, - .remove = __devexit_p(ilo_remove), + .remove = ilo_remove, }; static int __init ilo_init(void) @@ -876,11 +899,14 @@ static void __exit ilo_exit(void) class_destroy(ilo_class); } -MODULE_VERSION("1.2"); +MODULE_VERSION("1.4.1"); MODULE_ALIAS(ILO_NAME); MODULE_DESCRIPTION(ILO_NAME); MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); MODULE_LICENSE("GPL v2"); +module_param(max_ccb, uint, 0444); +MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (16)"); + module_init(ilo_init); module_exit(ilo_exit); diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h index 247eb386a97..b97672e0cf9 100644 --- a/drivers/misc/hpilo.h +++ b/drivers/misc/hpilo.h @@ -14,7 +14,9 @@ #define ILO_NAME "hpilo" /* max number of open channel control blocks per device, hw limited to 32 */ -#define MAX_CCB 8 +#define MAX_CCB 24 +/* min number of open channel control blocks per device, hw limited to 32 */ +#define MIN_CCB 8 /* max number of supported devices */ #define MAX_ILO_DEV 1 /* max number of files */ @@ -79,21 +81,21 @@ struct ilo_hwinfo { struct ccb { union { char *send_fifobar; - u64 padding1; + u64 send_fifobar_pa; } ccb_u1; union { char *send_desc; - u64 padding2; + u64 send_desc_pa; } ccb_u2; u64 send_ctrl; union { char *recv_fifobar; - u64 padding3; + u64 recv_fifobar_pa; } ccb_u3; union { char *recv_desc; - u64 padding4; + u64 recv_desc_pa; } ccb_u4; u64 recv_ctrl; diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c index e2031739aa2..7d56f45dee1 100644 --- a/drivers/misc/ibmasm/command.c +++ b/drivers/misc/ibmasm/command.c @@ -18,11 +18,12 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ #include <linux/sched.h> +#include <linux/slab.h> #include "ibmasm.h" #include "lowlevel.h" diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c index 3dd2dfb8da1..d7b2ca358b2 100644 --- 
a/drivers/misc/ibmasm/dot_command.c +++ b/drivers/misc/ibmasm/dot_command.c @@ -17,7 +17,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h index 6cbba1afef3..fc9fc9d4e08 100644 --- a/drivers/misc/ibmasm/dot_command.h +++ b/drivers/misc/ibmasm/dot_command.h @@ -17,7 +17,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c index 572d41ffc18..8e540f4e9d5 100644 --- a/drivers/misc/ibmasm/event.c +++ b/drivers/misc/ibmasm/event.c @@ -18,11 +18,12 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ #include <linux/sched.h> +#include <linux/slab.h> #include "ibmasm.h" #include "lowlevel.h" diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c index 1bc4306572a..90746378f9b 100644 --- a/drivers/misc/ibmasm/heartbeat.c +++ b/drivers/misc/ibmasm/heartbeat.c @@ -18,7 +18,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h index bf2c738d2b7..2e9566dab2b 100644 --- a/drivers/misc/ibmasm/i2o.h +++ b/drivers/misc/ibmasm/i2o.h @@ -17,7 +17,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h index 4d8a4e248b3..9b083448814 100644 --- a/drivers/misc/ibmasm/ibmasm.h +++ b/drivers/misc/ibmasm/ibmasm.h @@ -18,7 +18,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index aecf40ecb3a..e8b933111e0 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -17,7 +17,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ @@ -29,7 +29,7 @@ /* * The IBMASM file virtual filesystem. 
It creates the following hierarchy - * dymamically when mounted from user space: + * dynamically when mounted from user space: * * /ibmasm * |-- 0 @@ -75,6 +75,7 @@ #include <linux/fs.h> #include <linux/pagemap.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #include "ibmasm.h" @@ -86,15 +87,14 @@ static LIST_HEAD(service_processors); static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode); -static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root); +static void ibmasmfs_create_files (struct super_block *sb); static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent); -static int ibmasmfs_get_super(struct file_system_type *fst, - int flags, const char *name, void *data, - struct vfsmount *mnt) +static struct dentry *ibmasmfs_mount(struct file_system_type *fst, + int flags, const char *name, void *data) { - return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt); + return mount_single(fst, flags, data, ibmasmfs_fill_super); } static const struct super_operations ibmasmfs_s_ops = { @@ -107,14 +107,14 @@ static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations; static struct file_system_type ibmasmfs_type = { .owner = THIS_MODULE, .name = "ibmasmfs", - .get_sb = ibmasmfs_get_super, + .mount = ibmasmfs_mount, .kill_sb = kill_litter_super, }; +MODULE_ALIAS_FS("ibmasmfs"); static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent) { struct inode *root; - struct dentry *root_dentry; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; @@ -129,14 +129,11 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent) root->i_op = &simple_dir_inode_operations; root->i_fop = ibmasmfs_dir_ops; - root_dentry = d_alloc_root(root); - if (!root_dentry) { - iput(root); + sb->s_root = d_make_root(root); + if (!sb->s_root) return -ENOMEM; - } - sb->s_root = root_dentry; - ibmasmfs_create_files(sb, root_dentry); + ibmasmfs_create_files(sb); return 0; } @@ -145,14 +142,14 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode) struct inode *ret = new_inode(sb); if (ret) { + ret->i_ino = get_next_ino(); ret->i_mode = mode; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; } return ret; } -static struct dentry *ibmasmfs_create_file (struct super_block *sb, - struct dentry *parent, +static struct dentry *ibmasmfs_create_file(struct dentry *parent, const char *name, const struct file_operations *fops, void *data, @@ -165,7 +162,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb, if (!dentry) return NULL; - inode = ibmasmfs_make_inode(sb, S_IFREG | mode); + inode = ibmasmfs_make_inode(parent->d_sb, S_IFREG | mode); if (!inode) { dput(dentry); return NULL; @@ -178,8 +175,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb, return dentry; } -static struct dentry *ibmasmfs_create_dir (struct super_block *sb, - struct dentry *parent, +static struct dentry *ibmasmfs_create_dir(struct dentry *parent, const char *name) { struct dentry *dentry; @@ -189,7 +185,7 @@ static struct dentry *ibmasmfs_create_dir (struct super_block *sb, if (!dentry) return NULL; - inode = ibmasmfs_make_inode(sb, S_IFDIR | 0500); + inode = ibmasmfs_make_inode(parent->d_sb, S_IFDIR | 0500); if (!inode) { dput(dentry); return NULL; @@ -503,12 +499,6 @@ static ssize_t r_heartbeat_file_write(struct file *file, const char __user *buf, return 1; } -static int remote_settings_file_open(struct inode 
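The ibmasmfs changes above track several VFS API transitions: .get_sb/get_sb_single() become .mount/mount_single(), d_alloc_root() becomes d_make_root() (which consumes the inode even on failure, so no separate iput() is needed), inodes get explicit numbers from get_next_ino(), and MODULE_ALIAS_FS() lets the module autoload on mount. A minimal pseudo-filesystem skeleton in the same style (hypothetical "demofs", not part of this patch):

#include <linux/fs.h>
#include <linux/module.h>

static int demofs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root;

	sb->s_magic = 0x64656d6f;	/* arbitrary value for the sketch */
	root = new_inode(sb);
	if (!root)
		return -ENOMEM;
	root->i_ino = get_next_ino();	/* inode numbers are no longer implicit */
	root->i_mode = S_IFDIR | 0755;
	root->i_op = &simple_dir_inode_operations;
	root->i_fop = &simple_dir_operations;

	/* d_make_root() drops the inode itself on failure */
	sb->s_root = d_make_root(root);
	return sb->s_root ? 0 : -ENOMEM;
}

static struct dentry *demofs_mount(struct file_system_type *fst, int flags,
				   const char *name, void *data)
{
	return mount_single(fst, flags, data, demofs_fill_super);
}

static struct file_system_type demofs_type = {
	.owner	 = THIS_MODULE,
	.name	 = "demofs",
	.mount	 = demofs_mount,
	.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("demofs");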
*inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - static int remote_settings_file_close(struct inode *inode, struct file *file) { return 0; @@ -583,6 +573,7 @@ static const struct file_operations command_fops = { .release = command_file_close, .read = command_file_read, .write = command_file_write, + .llseek = generic_file_llseek, }; static const struct file_operations event_fops = { @@ -590,6 +581,7 @@ static const struct file_operations event_fops = { .release = event_file_close, .read = event_file_read, .write = event_file_write, + .llseek = generic_file_llseek, }; static const struct file_operations r_heartbeat_fops = { @@ -597,17 +589,19 @@ static const struct file_operations r_heartbeat_fops = { .release = r_heartbeat_file_close, .read = r_heartbeat_file_read, .write = r_heartbeat_file_write, + .llseek = generic_file_llseek, }; static const struct file_operations remote_settings_fops = { - .open = remote_settings_file_open, + .open = simple_open, .release = remote_settings_file_close, .read = remote_settings_file_read, .write = remote_settings_file_write, + .llseek = generic_file_llseek, }; -static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root) +static void ibmasmfs_create_files (struct super_block *sb) { struct list_head *entry; struct service_processor *sp; @@ -616,20 +610,20 @@ static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root) struct dentry *dir; struct dentry *remote_dir; sp = list_entry(entry, struct service_processor, node); - dir = ibmasmfs_create_dir(sb, root, sp->dirname); + dir = ibmasmfs_create_dir(sb->s_root, sp->dirname); if (!dir) continue; - ibmasmfs_create_file(sb, dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR); - ibmasmfs_create_file(sb, dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR); - ibmasmfs_create_file(sb, dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR); + ibmasmfs_create_file(dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR); + ibmasmfs_create_file(dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR); + ibmasmfs_create_file(dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR); - remote_dir = ibmasmfs_create_dir(sb, dir, "remote_video"); + remote_dir = ibmasmfs_create_dir(dir, "remote_video"); if (!remote_dir) continue; - ibmasmfs_create_file(sb, remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR); - ibmasmfs_create_file(sb, remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR); - ibmasmfs_create_file(sb, remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR); + ibmasmfs_create_file(remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR); + ibmasmfs_create_file(remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR); + ibmasmfs_create_file(remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR); } } diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c index 4b2398e27fd..5319ea261c0 100644 --- a/drivers/misc/ibmasm/lowlevel.c +++ b/drivers/misc/ibmasm/lowlevel.c @@ -17,7 +17,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h index 766766523a6..e97848f51b3 100644 --- a/drivers/misc/ibmasm/lowlevel.h +++ b/drivers/misc/ibmasm/lowlevel.h @@ -17,7 +17,7 @@ * * 
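Two small cleanups recur through the fops tables above: the hand-rolled remote_settings_file_open(), which only copied inode->i_private into file->private_data, is replaced by the generic simple_open() helper, and every table gains an explicit .llseek now that seekability is no longer implied. A sketch of the resulting shape (demo_* names hypothetical):

#include <linux/fs.h>

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;	/* stub: immediate EOF; private_data was set by simple_open() */
}

static const struct file_operations demo_fops = {
	.open	= simple_open,		/* copies inode->i_private (when set) into file->private_data */
	.read	= demo_read,
	.llseek	= generic_file_llseek,	/* seekable, stated explicitly */
};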
Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c index dc14b0b9cbf..6b3bf9ab051 100644 --- a/drivers/misc/ibmasm/module.c +++ b/drivers/misc/ibmasm/module.c @@ -18,7 +18,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * * This driver is based on code originally written by Pete Reynolds * and others. @@ -52,6 +52,7 @@ #include <linux/pci.h> #include <linux/init.h> +#include <linux/slab.h> #include "ibmasm.h" #include "lowlevel.h" #include "remote.h" @@ -61,7 +62,7 @@ module_param(ibmasm_debug, int , S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ibmasm_debug, " Set debug mode on or off"); -static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int result; struct service_processor *sp; @@ -152,7 +153,6 @@ error_ioremap: error_heartbeat: ibmasm_event_buffer_exit(sp); error_eventbuffer: - pci_set_drvdata(pdev, NULL); kfree(sp); error_kmalloc: pci_release_regions(pdev); @@ -162,9 +162,9 @@ error_resources: return result; } -static void __devexit ibmasm_remove_one(struct pci_dev *pdev) +static void ibmasm_remove_one(struct pci_dev *pdev) { - struct service_processor *sp = (struct service_processor *)pci_get_drvdata(pdev); + struct service_processor *sp = pci_get_drvdata(pdev); dbg("Unregistering UART\n"); ibmasm_unregister_uart(sp); @@ -181,7 +181,6 @@ static void __devexit ibmasm_remove_one(struct pci_dev *pdev) ibmasm_free_remote_input_dev(sp); iounmap(sp->base_address); ibmasm_event_buffer_exit(sp); - pci_set_drvdata(pdev, NULL); kfree(sp); pci_release_regions(pdev); pci_disable_device(pdev); @@ -197,7 +196,7 @@ static struct pci_driver ibmasm_driver = { .name = DRIVER_NAME, .id_table = ibmasm_pci_table, .probe = ibmasm_init_one, - .remove = __devexit_p(ibmasm_remove_one), + .remove = ibmasm_remove_one, }; static void __exit ibmasm_exit (void) @@ -210,18 +209,17 @@ static void __exit ibmasm_exit (void) static int __init ibmasm_init(void) { - int result; + int result = pci_register_driver(&ibmasm_driver); + if (result) + return result; result = ibmasmfs_register(); if (result) { + pci_unregister_driver(&ibmasm_driver); err("Failed to register ibmasmfs file system"); return result; } - result = pci_register_driver(&ibmasm_driver); - if (result) { - ibmasmfs_unregister(); - return result; - } + ibmasm_register_panic_notifier(); info(DRIVER_DESC " version " DRIVER_VERSION " loaded"); return 0; diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c index 2de487ac788..232034f5da4 100644 --- a/drivers/misc/ibmasm/r_heartbeat.c +++ b/drivers/misc/ibmasm/r_heartbeat.c @@ -16,7 +16,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h index 72acf5af7a2..a7729ef76ac 100644 --- a/drivers/misc/ibmasm/remote.h +++ b/drivers/misc/ibmasm/remote.h @@ -18,9 +18,9 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * - * Orignally written by Pete Reynolds + * Originally written by Pete Reynolds */ #ifndef _IBMASM_REMOTE_H_ diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c index 93baa350d69..01e2b0d7e59 
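The ibmasm_init() rewrite near the end of module.c above is an ordering fix: the PCI driver is registered first, and if ibmasmfs_register() then fails, the already-registered driver is unregistered before returning, so no step is left half done. The general unwind shape (demo_* names hypothetical):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver demo_pci_driver = {
	.name = "demo",
};

static int demo_fs_register(void)
{
	return 0;	/* stand-in for a second registration step */
}

static int __init demo_init(void)
{
	int err;

	err = pci_register_driver(&demo_pci_driver);
	if (err)
		return err;

	err = demo_fs_register();
	if (err) {
		pci_unregister_driver(&demo_pci_driver);	/* unwind the first step */
		return err;
	}
	return 0;
}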
100644 --- a/drivers/misc/ibmasm/uart.c +++ b/drivers/misc/ibmasm/uart.c @@ -18,7 +18,7 @@ * * Copyright (C) IBM Corporation, 2004 * - * Author: Max Asböck <amax@us.ibm.com> + * Author: Max Asböck <amax@us.ibm.com> * */ @@ -33,7 +33,7 @@ void ibmasm_register_uart(struct service_processor *sp) { - struct uart_port uport; + struct uart_8250_port uart; void __iomem *iomem_base; iomem_base = sp->base_address + SCOUT_COM_B_BASE; @@ -47,14 +47,14 @@ void ibmasm_register_uart(struct service_processor *sp) return; } - memset(&uport, 0, sizeof(struct uart_port)); - uport.irq = sp->irq; - uport.uartclk = 3686400; - uport.flags = UPF_SHARE_IRQ; - uport.iotype = UPIO_MEM; - uport.membase = iomem_base; + memset(&uart, 0, sizeof(uart)); + uart.port.irq = sp->irq; + uart.port.uartclk = 3686400; + uart.port.flags = UPF_SHARE_IRQ; + uart.port.iotype = UPIO_MEM; + uart.port.membase = iomem_base; - sp->serial_line = serial8250_register_port(&uport); + sp->serial_line = serial8250_register_8250_port(&uart); if (sp->serial_line < 0) { dev_err(sp->dev, "Failed to register serial port\n"); return; diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index 395a4ea64e9..28f51e01fd2 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -2,7 +2,7 @@ * A driver for the Integrated Circuits ICS932S401 * Copyright (C) 2008 IBM * - * Author: Darrick J. Wong <djwong@us.ibm.com> + * Author: Darrick J. Wong <darrick.wong@oracle.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,7 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/log2.h> +#include <linux/slab.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END }; @@ -479,23 +480,12 @@ static int ics932s401_remove(struct i2c_client *client) return 0; } -static int __init ics932s401_init(void) -{ - return i2c_add_driver(&ics932s401_driver); -} - -static void __exit ics932s401_exit(void) -{ - i2c_del_driver(&ics932s401_driver); -} +module_i2c_driver(ics932s401_driver); -MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); +MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>"); MODULE_DESCRIPTION("ICS932S401 driver"); MODULE_LICENSE("GPL"); -module_init(ics932s401_init); -module_exit(ics932s401_exit); - /* IBM IntelliStation Z30 */ MODULE_ALIAS("dmi:bvnIBM:*:rn9228:*"); MODULE_ALIAS("dmi:bvnIBM:*:rn9232:*"); diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index 09dcb699e66..06f6ad29cef 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c @@ -30,6 +30,7 @@ #include <linux/pci.h> #include <linux/ioc4.h> #include <linux/ktime.h> +#include <linux/slab.h> #include <linux/mutex.h> #include <linux/time.h> #include <asm/io.h> @@ -138,7 +139,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is) * even though the following code utilizes external interrupt registers * to perform the speed calculation. */ -static void __devinit +static void ioc4_clock_calibrate(struct ioc4_driver_data *idd) { union ioc4_int_out int_out; @@ -230,7 +231,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd) * on the same PCI bus at slot number 3 to differentiate IO9 from IO10. * If neither is present, it's a PCI-RT. 
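ics932s401 above is one of several drivers in this series converted to module_i2c_driver(), which generates the init/exit boilerplate that the deleted ics932s401_init()/_exit() pair spelled out by hand. What the macro roughly expands to (a sketch of its effect, not the exact kernel definition):

#include <linux/i2c.h>
#include <linux/module.h>

static struct i2c_driver demo_driver = {
	.driver = { .name = "demo" },
};

/* module_i2c_driver(demo_driver); is roughly equivalent to: */
static int __init demo_driver_init(void)
{
	return i2c_add_driver(&demo_driver);
}
module_init(demo_driver_init);

static void __exit demo_driver_exit(void)
{
	i2c_del_driver(&demo_driver);
}
module_exit(demo_driver_exit);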
*/ -static unsigned int __devinit +static unsigned int ioc4_variant(struct ioc4_driver_data *idd) { struct pci_dev *pdev = NULL; @@ -269,18 +270,16 @@ ioc4_variant(struct ioc4_driver_data *idd) return IOC4_VARIANT_PCI_RT; } -static void __devinit +static void ioc4_load_modules(struct work_struct *work) { - /* arg just has to be freed */ - request_module("sgiioc4"); - - kfree(work); } +static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules); + /* Adds a new instance of an IOC4 card */ -static int __devinit +static int ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct ioc4_driver_data *idd; @@ -395,21 +394,12 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) * PCI device. */ if (idd->idd_variant != IOC4_VARIANT_PCI_RT) { - struct work_struct *work; - work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); - if (!work) { - printk(KERN_WARNING - "%s: IOC4 unable to allocate memory for " - "load of sub-modules.\n", __func__); - } else { - /* Request the module from a work procedure as the - * modprobe goes out to a userland helper and that - * will hang if done directly from ioc4_probe(). - */ - printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n"); - INIT_WORK(work, ioc4_load_modules); - schedule_work(work); - } + /* Request the module from a work procedure as the modprobe + * goes out to a userland helper and that will hang if done + * directly from ioc4_probe(). + */ + printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n"); + schedule_work(&ioc4_load_modules_work); } return 0; @@ -425,7 +415,7 @@ out: } /* Removes a particular instance of an IOC4 card. */ -static void __devexit +static void ioc4_remove(struct pci_dev *pdev) { struct ioc4_submodule *is; @@ -476,7 +466,7 @@ static struct pci_driver ioc4_driver = { .name = "IOC4", .id_table = ioc4_id_table, .probe = ioc4_probe, - .remove = __devexit_p(ioc4_remove), + .remove = ioc4_remove, }; MODULE_DEVICE_TABLE(pci, ioc4_id_table); @@ -497,7 +487,7 @@ static void __exit ioc4_exit(void) { /* Ensure ioc4_load_modules() has completed before exiting */ - flush_scheduled_work(); + flush_work(&ioc4_load_modules_work); pci_unregister_driver(&ioc4_driver); } diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index a71e245801e..12c30b486b2 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -26,7 +26,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> @@ -208,7 +207,11 @@ static ssize_t isl29003_store_range(struct device *dev, unsigned long val; int ret; - if ((strict_strtoul(buf, 10, &val) < 0) || (val > 3)) + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 3) return -EINVAL; ret = isl29003_set_range(client, val); @@ -239,7 +242,11 @@ static ssize_t isl29003_store_resolution(struct device *dev, unsigned long val; int ret; - if ((strict_strtoul(buf, 10, &val) < 0) || (val > 3)) + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 3) return -EINVAL; ret = isl29003_set_resolution(client, val); @@ -267,7 +274,11 @@ static ssize_t isl29003_store_mode(struct device *dev, unsigned long val; int ret; - if ((strict_strtoul(buf, 10, &val) < 0) || (val > 2)) + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 2) return -EINVAL; ret = isl29003_set_mode(client, val); @@ -298,7 +309,11 @@ static ssize_t isl29003_store_power_state(struct device *dev, unsigned long val; int ret; - if ((strict_strtoul(buf, 10, &val) < 0) || (val > 1)) + ret = 
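The ioc4 hunks above simplify deferred module loading: instead of kzalloc()ing a work_struct that the handler had to free, a single statically declared work item is scheduled from probe, and ioc4_exit() waits on exactly that item with flush_work() rather than the global flush_scheduled_work(). The pattern in isolation (demo_* names hypothetical):

#include <linux/workqueue.h>

static void demo_deferred(struct work_struct *work)
{
	/* runs later in process context, where request_module() may block */
}
static DECLARE_WORK(demo_work, demo_deferred);

static void demo_probe_path(void)
{
	schedule_work(&demo_work);	/* queue and return; no allocation, no free */
}

static void demo_exit_path(void)
{
	flush_work(&demo_work);		/* wait only for this work item */
}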
kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val > 1) return -EINVAL; ret = isl29003_set_power_state(client, val); @@ -365,7 +380,7 @@ static int isl29003_init_client(struct i2c_client *client) * I2C layer */ -static int __devinit isl29003_probe(struct i2c_client *client, +static int isl29003_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); @@ -401,7 +416,7 @@ exit_kfree: return err; } -static int __devexit isl29003_remove(struct i2c_client *client) +static int isl29003_remove(struct i2c_client *client) { sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group); isl29003_set_power_state(client, 0); @@ -409,18 +424,20 @@ static int __devexit isl29003_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM -static int isl29003_suspend(struct i2c_client *client, pm_message_t mesg) +#ifdef CONFIG_PM_SLEEP +static int isl29003_suspend(struct device *dev) { + struct i2c_client *client = to_i2c_client(dev); struct isl29003_data *data = i2c_get_clientdata(client); data->power_state_before_suspend = isl29003_get_power_state(client); return isl29003_set_power_state(client, 0); } -static int isl29003_resume(struct i2c_client *client) +static int isl29003_resume(struct device *dev) { int i; + struct i2c_client *client = to_i2c_client(dev); struct isl29003_data *data = i2c_get_clientdata(client); /* restore registers from cache */ @@ -432,10 +449,12 @@ static int isl29003_resume(struct i2c_client *client) data->power_state_before_suspend); } +static SIMPLE_DEV_PM_OPS(isl29003_pm_ops, isl29003_suspend, isl29003_resume); +#define ISL29003_PM_OPS (&isl29003_pm_ops) + #else -#define isl29003_suspend NULL -#define isl29003_resume NULL -#endif /* CONFIG_PM */ +#define ISL29003_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ static const struct i2c_device_id isl29003_id[] = { { "isl29003", 0 }, @@ -447,29 +466,16 @@ static struct i2c_driver isl29003_driver = { .driver = { .name = ISL29003_DRV_NAME, .owner = THIS_MODULE, + .pm = ISL29003_PM_OPS, }, - .suspend = isl29003_suspend, - .resume = isl29003_resume, .probe = isl29003_probe, - .remove = __devexit_p(isl29003_remove), + .remove = isl29003_remove, .id_table = isl29003_id, }; -static int __init isl29003_init(void) -{ - return i2c_add_driver(&isl29003_driver); -} - -static void __exit isl29003_exit(void) -{ - i2c_del_driver(&isl29003_driver); -} +module_i2c_driver(isl29003_driver); MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("ISL29003 ambient light sensor driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRIVER_VERSION); - -module_init(isl29003_init); -module_exit(isl29003_exit); - diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c new file mode 100644 index 00000000000..4a9c50a43af --- /dev/null +++ b/drivers/misc/isl29020.c @@ -0,0 +1,238 @@ +/* + * isl29020.c - Intersil ALS Driver + * + * Copyright (C) 2008 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
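The isl29003 PM rework above is the standard migration from the legacy i2c_driver .suspend/.resume hooks to dev_pm_ops: the callbacks now take a struct device, SIMPLE_DEV_PM_OPS() packages them, and the whole block compiles away unless CONFIG_PM_SLEEP is set. In outline (demo_* names and the register usage are hypothetical):

#include <linux/i2c.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/* assumed register layout: 0x00 holds a power bit */
	return i2c_smbus_write_byte_data(client, 0x00, 0x00);
}

static int demo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	return i2c_smbus_write_byte_data(client, 0x00, 0x80);
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);
#define DEMO_PM_OPS (&demo_pm_ops)
#else
#define DEMO_PM_OPS NULL
#endif

static struct i2c_driver demo_driver = {
	.driver = {
		.name	= "demo",
		.pm	= DEMO_PM_OPS,
	},
};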
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/sysfs.h> +#include <linux/pm_runtime.h> + +static DEFINE_MUTEX(mutex); + +static ssize_t als_sensing_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + int val; + + val = i2c_smbus_read_byte_data(client, 0x00); + + if (val < 0) + return val; + return sprintf(buf, "%d000\n", 1 << (2 * (val & 3))); + +} + +static ssize_t als_lux_input_data_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + int ret_val, val; + unsigned long int lux; + int temp; + + pm_runtime_get_sync(dev); + msleep(100); + + mutex_lock(&mutex); + temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */ + if (temp < 0) { + pm_runtime_put_sync(dev); + mutex_unlock(&mutex); + return temp; + } + + ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */ + mutex_unlock(&mutex); + + if (ret_val < 0) { + pm_runtime_put_sync(dev); + return ret_val; + } + + ret_val |= temp << 8; + val = i2c_smbus_read_byte_data(client, 0x00); + pm_runtime_put_sync(dev); + if (val < 0) + return val; + lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / 65536; + return sprintf(buf, "%ld\n", lux); +} + +static ssize_t als_sensing_range_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int ret_val; + unsigned long val; + + ret_val = kstrtoul(buf, 10, &val); + if (ret_val) + return ret_val; + + if (val < 1 || val > 64000) + return -EINVAL; + + /* Pick the smallest sensor range that will meet our requirements */ + if (val <= 1000) + val = 1; + else if (val <= 4000) + val = 2; + else if (val <= 16000) + val = 3; + else + val = 4; + + ret_val = i2c_smbus_read_byte_data(client, 0x00); + if (ret_val < 0) + return ret_val; + + ret_val &= 0xFC; /*reset the bit before setting them */ + ret_val |= val - 1; + ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val); + + if (ret_val < 0) + return ret_val; + return count; +} + +static void als_set_power_state(struct i2c_client *client, int enable) +{ + int ret_val; + + ret_val = i2c_smbus_read_byte_data(client, 0x00); + if (ret_val < 0) + return; + + if (enable) + ret_val |= 0x80; + else + ret_val &= 0x7F; + + i2c_smbus_write_byte_data(client, 0x00, ret_val); +} + +static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR, + als_sensing_range_show, als_sensing_range_store); +static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL); + +static struct attribute *mid_att_als[] = { + &dev_attr_lux0_sensor_range.attr, + &dev_attr_lux0_input.attr, + NULL +}; + +static struct attribute_group m_als_gr = { + .name = "isl29020", + .attrs = mid_att_als +}; + +static int als_set_default_config(struct i2c_client *client) +{ + int retval; + + retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0); + if (retval < 0) { + dev_err(&client->dev, "default write failed."); + return retval; + } + return 0; +} + +static int isl29020_probe(struct i2c_client *client, + 
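A worked example of the isl29020 lux conversion above: the low two bits of register 0x00 select a full-scale range of 1 << (2 * bits) thousand lux (1k, 4k, 16k or 64k), and the 16-bit reading assembled from registers 0x02 (MSB) and 0x01 (LSB) is scaled linearly, lux = range * adc / 65536. With range bits 2 (16,000 lux full scale) and adc = 8192, that gives 16000 * 8192 / 65536 = 2000 lux.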
const struct i2c_device_id *id) +{ + int res; + + res = als_set_default_config(client); + if (res < 0) + return res; + + res = sysfs_create_group(&client->dev.kobj, &m_als_gr); + if (res) { + dev_err(&client->dev, "isl29020: device create file failed\n"); + return res; + } + dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name); + als_set_power_state(client, 0); + pm_runtime_enable(&client->dev); + return res; +} + +static int isl29020_remove(struct i2c_client *client) +{ + sysfs_remove_group(&client->dev.kobj, &m_als_gr); + return 0; +} + +static struct i2c_device_id isl29020_id[] = { + { "isl29020", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, isl29020_id); + +#ifdef CONFIG_PM + +static int isl29020_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + als_set_power_state(client, 0); + return 0; +} + +static int isl29020_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + als_set_power_state(client, 1); + return 0; +} + +static const struct dev_pm_ops isl29020_pm_ops = { + .runtime_suspend = isl29020_runtime_suspend, + .runtime_resume = isl29020_runtime_resume, +}; + +#define ISL29020_PM_OPS (&isl29020_pm_ops) +#else /* CONFIG_PM */ +#define ISL29020_PM_OPS NULL +#endif /* CONFIG_PM */ + +static struct i2c_driver isl29020_driver = { + .driver = { + .name = "isl29020", + .pm = ISL29020_PM_OPS, + }, + .probe = isl29020_probe, + .remove = isl29020_remove, + .id_table = isl29020_id, +}; + +module_i2c_driver(isl29020_driver); + +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); +MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig deleted file mode 100644 index 9e4b88fb57f..00000000000 --- a/drivers/misc/iwmc3200top/Kconfig +++ /dev/null @@ -1,20 +0,0 @@ -config IWMC3200TOP - tristate "Intel Wireless MultiCom Top Driver" - depends on MMC && EXPERIMENTAL - select FW_LOADER - ---help--- - Intel Wireless MultiCom 3200 Top driver is responsible for - for firmware load and enabled coms enumeration - -config IWMC3200TOP_DEBUG - bool "Enable full debug output of iwmc3200top Driver" - depends on IWMC3200TOP - ---help--- - Enable full debug output of iwmc3200top Driver - -config IWMC3200TOP_DEBUGFS - bool "Enable Debugfs debugging interface for iwmc3200top" - depends on IWMC3200TOP - ---help--- - Enable creation of debugfs files for iwmc3200top - diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile deleted file mode 100644 index fbf53fb4634..00000000000 --- a/drivers/misc/iwmc3200top/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver -# drivers/misc/iwmc3200top/Makefile -# -# Copyright (C) 2009 Intel Corporation. All rights reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License version -# 2 as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA -# 02110-1301, USA. 
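isl29020 above also shows the runtime-PM idiom for a sensor that should stay powered down between readings: probe() leaves the chip off and calls pm_runtime_enable(), the sysfs read path brackets its bus accesses with pm_runtime_get_sync()/pm_runtime_put_sync(), and the runtime_suspend/runtime_resume callbacks flip the chip's power bit. The access pattern in miniature (demo_* names hypothetical):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_hw_read(struct device *dev)
{
	return 0;	/* stand-in for the actual bus access */
}

static int demo_take_reading(struct device *dev)
{
	int val;

	pm_runtime_get_sync(dev);	/* triggers runtime_resume: chip powers up */
	val = demo_hw_read(dev);
	pm_runtime_put_sync(dev);	/* chip may power down again immediately */
	return val;
}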
-# -# -# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> -# - -# -# - -obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o -iwmc3200top-objs := main.o fw-download.o -iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o -iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c deleted file mode 100644 index 0c8ea0a1c8a..00000000000 --- a/drivers/misc/iwmc3200top/debugfs.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/debufs.c - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/ctype.h> -#include <linux/mmc/sdio_func.h> -#include <linux/mmc/sdio.h> -#include <linux/debugfs.h> - -#include "iwmc3200top.h" -#include "fw-msg.h" -#include "log.h" -#include "debugfs.h" - - - -/* Constants definition */ -#define HEXADECIMAL_RADIX 16 - -/* Functions definition */ - - -#define DEBUGFS_ADD(name, parent) do { \ - dbgfs->dbgfs_##parent##_files.file_##name = \ - debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \ - &iwmct_dbgfs_##name##_ops); \ -} while (0) - -#define DEBUGFS_RM(name) do { \ - debugfs_remove(name); \ - name = NULL; \ -} while (0) - -#define DEBUGFS_READ_FUNC(name) \ -ssize_t iwmct_dbgfs_##name##_read(struct file *file, \ - char __user *user_buf, \ - size_t count, loff_t *ppos); - -#define DEBUGFS_WRITE_FUNC(name) \ -ssize_t iwmct_dbgfs_##name##_write(struct file *file, \ - const char __user *user_buf, \ - size_t count, loff_t *ppos); - -#define DEBUGFS_READ_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name) \ - static const struct file_operations iwmct_dbgfs_##name##_ops = { \ - .read = iwmct_dbgfs_##name##_read, \ - .open = iwmct_dbgfs_open_file_generic, \ - }; - -#define DEBUGFS_WRITE_FILE_OPS(name) \ - DEBUGFS_WRITE_FUNC(name) \ - static const struct file_operations iwmct_dbgfs_##name##_ops = { \ - .write = iwmct_dbgfs_##name##_write, \ - .open = iwmct_dbgfs_open_file_generic, \ - }; - -#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ - DEBUGFS_READ_FUNC(name) \ - DEBUGFS_WRITE_FUNC(name) \ - static const struct file_operations iwmct_dbgfs_##name##_ops = {\ - .write = iwmct_dbgfs_##name##_write, \ - .read = iwmct_dbgfs_##name##_read, \ - .open = iwmct_dbgfs_open_file_generic, \ - }; - - -/* Debugfs file ops definitions */ - -/* - * Create the debugfs files and directories - * - */ -void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name) -{ - struct iwmct_debugfs *dbgfs; - - dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL); - if (!dbgfs) { - LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n", - sizeof(struct iwmct_debugfs)); - return; - } - - priv->dbgfs = dbgfs; - dbgfs->name 
= name; - dbgfs->dir_drv = debugfs_create_dir(name, NULL); - if (!dbgfs->dir_drv) { - LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n"); - return; - } - - return; -} - -/** - * Remove the debugfs files and directories - * - */ -void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs) -{ - if (!dbgfs) - return; - - DEBUGFS_RM(dbgfs->dir_drv); - kfree(dbgfs); - dbgfs = NULL; -} - diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h deleted file mode 100644 index 71d45759b40..00000000000 --- a/drivers/misc/iwmc3200top/debugfs.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/debufs.h - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#ifndef __DEBUGFS_H__ -#define __DEBUGFS_H__ - - -#ifdef CONFIG_IWMC3200TOP_DEBUGFS - -struct iwmct_debugfs { - const char *name; - struct dentry *dir_drv; - struct dir_drv_files { - } dbgfs_drv_files; -}; - -void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name); -void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs); - -#else /* CONFIG_IWMC3200TOP_DEBUGFS */ - -struct iwmct_debugfs; - -static inline void -iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name) -{} - -static inline void -iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs) -{} - -#endif /* CONFIG_IWMC3200TOP_DEBUGFS */ - -#endif /* __DEBUGFS_H__ */ - diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c deleted file mode 100644 index 50d431e469f..00000000000 --- a/drivers/misc/iwmc3200top/fw-download.c +++ /dev/null @@ -1,355 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/fw-download.c - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. 
- * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#include <linux/firmware.h> -#include <linux/mmc/sdio_func.h> -#include <asm/unaligned.h> - -#include "iwmc3200top.h" -#include "log.h" -#include "fw-msg.h" - -#define CHECKSUM_BYTES_NUM sizeof(u32) - -/** - init parser struct with file - */ -static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file, - size_t file_size, size_t block_size) -{ - struct iwmct_parser *parser = &priv->parser; - struct iwmct_fw_hdr *fw_hdr = &parser->versions; - - LOG_INFOEX(priv, INIT, "-->\n"); - - LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size); - - parser->file = file; - parser->file_size = file_size; - parser->cur_pos = 0; - parser->buf = NULL; - - parser->buf = kzalloc(block_size, GFP_KERNEL); - if (!parser->buf) { - LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n"); - return -ENOMEM; - } - parser->buf_size = block_size; - - /* extract fw versions */ - memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr)); - LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n" - "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n", - fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision, - fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision, - fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision, - fw_hdr->tic_name); - - parser->cur_pos += sizeof(struct iwmct_fw_hdr); - - LOG_INFOEX(priv, INIT, "<--\n"); - return 0; -} - -static bool iwmct_checksum(struct iwmct_priv *priv) -{ - struct iwmct_parser *parser = &priv->parser; - __le32 *file = (__le32 *)parser->file; - int i, pad, steps; - u32 accum = 0; - u32 checksum; - u32 mask = 0xffffffff; - - pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4; - steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4; - - LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps); - - for (i = 0; i < steps; i++) - accum += le32_to_cpu(file[i]); - - if (pad) { - mask <<= 8 * (4 - pad); - accum += le32_to_cpu(file[steps]) & mask; - } - - checksum = get_unaligned_le32((__le32 *)(parser->file + - parser->file_size - CHECKSUM_BYTES_NUM)); - - LOG_INFO(priv, FW_DOWNLOAD, - "compare checksum accum=0x%x to checksum=0x%x\n", - accum, checksum); - - return checksum == accum; -} - -static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec, - size_t *sec_size, __le32 *sec_addr) -{ - struct iwmct_parser *parser = &priv->parser; - struct iwmct_dbg *dbg = &priv->dbg; - struct iwmct_fw_sec_hdr *sec_hdr; - - LOG_INFOEX(priv, INIT, "-->\n"); - - while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr) - <= parser->file_size) { - - sec_hdr = (struct iwmct_fw_sec_hdr *) - (parser->file + parser->cur_pos); - parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr); - - LOG_INFO(priv, FW_DOWNLOAD, - "sec hdr: type=%s addr=0x%x size=%d\n", - sec_hdr->type, sec_hdr->target_addr, - sec_hdr->data_size); - - if (strcmp(sec_hdr->type, "ENT") == 0) - parser->entry_point = le32_to_cpu(sec_hdr->target_addr); - else if (strcmp(sec_hdr->type, "LBL") == 0) - strcpy(dbg->label_fw, parser->file + parser->cur_pos); - else if (((strcmp(sec_hdr->type, "TOP") == 0) && - (priv->barker & BARKER_DNLOAD_TOP_MSK)) || - ((strcmp(sec_hdr->type, "GPS") == 0) && - (priv->barker & BARKER_DNLOAD_GPS_MSK)) || - ((strcmp(sec_hdr->type, "BTH") == 0) && - (priv->barker & BARKER_DNLOAD_BT_MSK))) { - *sec_addr = sec_hdr->target_addr; - *sec_size = le32_to_cpu(sec_hdr->data_size); - *p_sec = parser->file + parser->cur_pos; - parser->cur_pos += le32_to_cpu(sec_hdr->data_size); - return 1; - } else if 
(strcmp(sec_hdr->type, "LOG") != 0) - LOG_WARNING(priv, FW_DOWNLOAD, - "skipping section type %s\n", - sec_hdr->type); - - parser->cur_pos += le32_to_cpu(sec_hdr->data_size); - LOG_INFO(priv, FW_DOWNLOAD, - "finished with section cur_pos=%zd\n", parser->cur_pos); - } - - LOG_INFOEX(priv, INIT, "<--\n"); - return 0; -} - -static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec, - size_t sec_size, __le32 addr) -{ - struct iwmct_parser *parser = &priv->parser; - struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf; - const u8 *cur_block = p_sec; - size_t sent = 0; - int cnt = 0; - int ret = 0; - u32 cmd = 0; - - LOG_INFOEX(priv, INIT, "-->\n"); - LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n", - addr, sec_size); - - while (sent < sec_size) { - int i; - u32 chksm = 0; - u32 reset = atomic_read(&priv->reset); - /* actual FW data */ - u32 data_size = min(parser->buf_size - sizeof(*hdr), - sec_size - sent); - /* Pad to block size */ - u32 trans_size = (data_size + sizeof(*hdr) + - IWMC_SDIO_BLK_SIZE - 1) & - ~(IWMC_SDIO_BLK_SIZE - 1); - ++cnt; - - /* in case of reset, interrupt FW DOWNLAOD */ - if (reset) { - LOG_INFO(priv, FW_DOWNLOAD, - "Reset detected. Abort FW download!!!"); - ret = -ECANCELED; - goto exit; - } - - memset(parser->buf, 0, parser->buf_size); - cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS; - cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS; - cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS; - cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS; - hdr->data_size = cpu_to_le32(data_size); - hdr->target_addr = addr; - - /* checksum is allowed for sizes divisible by 4 */ - if (data_size & 0x3) - cmd &= ~CMD_HDR_USE_CHECKSUM_MSK; - - memcpy(hdr->data, cur_block, data_size); - - - if (cmd & CMD_HDR_USE_CHECKSUM_MSK) { - - chksm = data_size + le32_to_cpu(addr) + cmd; - for (i = 0; i < data_size >> 2; i++) - chksm += ((u32 *)cur_block)[i]; - - hdr->block_chksm = cpu_to_le32(chksm); - LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n", - hdr->block_chksm); - } - - LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, " - "sec_size=%zd, startAddress 0x%X\n", - cnt, trans_size, sent, sec_size, addr); - - if (priv->dbg.dump) - LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size); - - - hdr->cmd = cpu_to_le32(cmd); - /* send it down */ - /* TODO: add more proper sending and error checking */ - ret = iwmct_tx(priv, 0, parser->buf, trans_size); - if (ret != 0) { - LOG_INFO(priv, FW_DOWNLOAD, - "iwmct_tx returned %d\n", ret); - goto exit; - } - - addr = cpu_to_le32(le32_to_cpu(addr) + data_size); - sent += data_size; - cur_block = p_sec + sent; - - if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) { - LOG_INFO(priv, FW_DOWNLOAD, - "Block number limit is reached [%d]\n", - priv->dbg.blocks); - break; - } - } - - if (sent < sec_size) - ret = -EINVAL; -exit: - LOG_INFOEX(priv, INIT, "<--\n"); - return ret; -} - -static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump) -{ - struct iwmct_parser *parser = &priv->parser; - struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf; - int ret; - u32 cmd; - - LOG_INFOEX(priv, INIT, "-->\n"); - - memset(parser->buf, 0, parser->buf_size); - cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS; - if (jump) { - cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS; - hdr->target_addr = cpu_to_le32(parser->entry_point); - LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n", - parser->entry_point); - } else { - cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS; - 
LOG_INFO(priv, FW_DOWNLOAD, "last command\n"); - } - - hdr->cmd = cpu_to_le32(cmd); - - LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr)); - /* send it down */ - /* TODO: add more proper sending and error checking */ - ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE); - if (ret) - LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret); - - LOG_INFOEX(priv, INIT, "<--\n"); - return 0; -} - -int iwmct_fw_load(struct iwmct_priv *priv) -{ - const u8 *fw_name = FW_NAME(FW_API_VER); - const struct firmware *raw; - const u8 *pdata; - size_t len; - __le32 addr; - int ret; - - /* clear parser struct */ - memset(&priv->parser, 0, sizeof(struct iwmct_parser)); - - /* get the firmware */ - ret = request_firmware(&raw, fw_name, &priv->func->dev); - if (ret < 0) { - LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n", - fw_name, ret); - goto exit; - } - - if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) { - LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n", - fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size); - goto exit; - } - - LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name); - - ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len); - if (ret < 0) { - LOG_ERROR(priv, FW_DOWNLOAD, - "iwmct_parser_init failed: Reason %d\n", ret); - goto exit; - } - - /* checksum */ - if (!iwmct_checksum(priv)) { - LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n"); - ret = -EINVAL; - goto exit; - } - - /* download firmware to device */ - while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) { - if (iwmct_download_section(priv, pdata, len, addr)) { - LOG_ERROR(priv, FW_DOWNLOAD, - "%s download section failed\n", fw_name); - ret = -EIO; - goto exit; - } - } - - iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK)); - -exit: - kfree(priv->parser.buf); - - if (raw) - release_firmware(raw); - - raw = NULL; - - return ret; -} diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h deleted file mode 100644 index 9e26b75bd48..00000000000 --- a/drivers/misc/iwmc3200top/fw-msg.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/fw-msg.h - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. 
- * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#ifndef __FWMSG_H__ -#define __FWMSG_H__ - -#define COMM_TYPE_D2H 0xFF -#define COMM_TYPE_H2D 0xEE - -#define COMM_CATEGORY_OPERATIONAL 0x00 -#define COMM_CATEGORY_DEBUG 0x01 -#define COMM_CATEGORY_TESTABILITY 0x02 -#define COMM_CATEGORY_DIAGNOSTICS 0x03 - -#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A) - -#define FW_LOG_SRC_MAX 32 -#define FW_LOG_SRC_ALL 255 - -#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000) - -#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001) -#define CMD_TST_DEV_RESET cpu_to_le16(0x0060) -#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062) -#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064) -#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065) -#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080) -#define CMD_TST_WAKEUP cpu_to_le16(0x0081) -#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082) -#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083) -#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096) - -#define OP_OPR_ALIVE cpu_to_le16(0x0010) -#define OP_OPR_CMD_ACK cpu_to_le16(0x001F) -#define OP_OPR_CMD_NACK cpu_to_le16(0x0020) -#define OP_TST_MEM_DUMP cpu_to_le16(0x0043) - -#define CMD_FLAG_PADDING_256 0x80 - -#define FW_HCMD_BLOCK_SIZE 256 - -struct msg_hdr { - u8 type; - u8 category; - __le16 opcode; - u8 seqnum; - u8 flags; - __le16 length; -} __attribute__((__packed__)); - -struct log_hdr { - __le32 timestamp; - u8 severity; - u8 logsource; - __le16 reserved; -} __attribute__((__packed__)); - -struct mdump_hdr { - u8 dmpid; - u8 frag; - __le16 size; - __le32 addr; -} __attribute__((__packed__)); - -struct top_msg { - struct msg_hdr hdr; - union { - /* D2H messages */ - struct { - struct log_hdr log_hdr; - u8 data[1]; - } __attribute__((__packed__)) log; - - struct { - struct log_hdr log_hdr; - struct mdump_hdr md_hdr; - u8 data[1]; - } __attribute__((__packed__)) mdump; - - /* H2D messages */ - struct { - u8 logsource; - u8 sevmask; - } __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX]; - struct mdump_hdr mdump_req; - } u; -} __attribute__((__packed__)); - - -#endif /* __FWMSG_H__ */ diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h deleted file mode 100644 index 43bd510e187..00000000000 --- a/drivers/misc/iwmc3200top/iwmc3200top.h +++ /dev/null @@ -1,209 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/iwmc3200top.h - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#ifndef __IWMC3200TOP_H__ -#define __IWMC3200TOP_H__ - -#include <linux/workqueue.h> - -#define DRV_NAME "iwmc3200top" -#define FW_API_VER 1 -#define _FW_NAME(api) DRV_NAME "." 
#api ".fw" -#define FW_NAME(api) _FW_NAME(api) - -#define IWMC_SDIO_BLK_SIZE 256 -#define IWMC_DEFAULT_TR_BLK 64 -#define IWMC_SDIO_DATA_ADDR 0x0 -#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14 -#define IWMC_SDIO_INTR_STATUS_ADDR 0x13 -#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13 -#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C - -#define COMM_HUB_HEADER_LENGTH 16 -#define LOGGER_HEADER_LENGTH 10 - - -#define BARKER_DNLOAD_BT_POS 0 -#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS) -#define BARKER_DNLOAD_GPS_POS 1 -#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS) -#define BARKER_DNLOAD_TOP_POS 2 -#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS) -#define BARKER_DNLOAD_RESERVED1_POS 3 -#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS) -#define BARKER_DNLOAD_JUMP_POS 4 -#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS) -#define BARKER_DNLOAD_SYNC_POS 5 -#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS) -#define BARKER_DNLOAD_RESERVED2_POS 6 -#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS) -#define BARKER_DNLOAD_BARKER_POS 8 -#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS) - -#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS) -/* whole field barker */ -#define IWMC_BARKER_ACK 0xfeedbabe - -#define IWMC_CMD_SIGNATURE 0xcbbc - -#define CMD_HDR_OPCODE_POS 0 -#define CMD_HDR_OPCODE_MSK_MSK (0xf << CMD_HDR_OPCODE_MSK_POS) -#define CMD_HDR_RESPONSE_CODE_POS 4 -#define CMD_HDR_RESPONSE_CODE_MSK (0xf << CMD_HDR_RESPONSE_CODE_POS) -#define CMD_HDR_USE_CHECKSUM_POS 8 -#define CMD_HDR_USE_CHECKSUM_MSK BIT(CMD_HDR_USE_CHECKSUM_POS) -#define CMD_HDR_RESPONSE_REQUIRED_POS 9 -#define CMD_HDR_RESPONSE_REQUIRED_MSK BIT(CMD_HDR_RESPONSE_REQUIRED_POS) -#define CMD_HDR_DIRECT_ACCESS_POS 10 -#define CMD_HDR_DIRECT_ACCESS_MSK BIT(CMD_HDR_DIRECT_ACCESS_POS) -#define CMD_HDR_RESERVED_POS 11 -#define CMD_HDR_RESERVED_MSK BIT(0x1f << CMD_HDR_RESERVED_POS) -#define CMD_HDR_SIGNATURE_POS 16 -#define CMD_HDR_SIGNATURE_MSK BIT(0xffff << CMD_HDR_SIGNATURE_POS) - -enum { - IWMC_OPCODE_PING = 0, - IWMC_OPCODE_READ = 1, - IWMC_OPCODE_WRITE = 2, - IWMC_OPCODE_JUMP = 3, - IWMC_OPCODE_REBOOT = 4, - IWMC_OPCODE_PERSISTENT_WRITE = 5, - IWMC_OPCODE_PERSISTENT_READ = 6, - IWMC_OPCODE_READ_MODIFY_WRITE = 7, - IWMC_OPCODE_LAST_COMMAND = 15 -}; - -struct iwmct_fw_load_hdr { - __le32 cmd; - __le32 target_addr; - __le32 data_size; - __le32 block_chksm; - u8 data[0]; -}; - -/** - * struct iwmct_fw_hdr - * holds all sw components versions - */ -struct iwmct_fw_hdr { - u8 top_major; - u8 top_minor; - u8 top_revision; - u8 gps_major; - u8 gps_minor; - u8 gps_revision; - u8 bt_major; - u8 bt_minor; - u8 bt_revision; - u8 tic_name[31]; -}; - -/** - * struct iwmct_fw_sec_hdr - * @type: function type - * @data_size: section's data size - * @target_addr: download address - */ -struct iwmct_fw_sec_hdr { - u8 type[4]; - __le32 data_size; - __le32 target_addr; -}; - -/** - * struct iwmct_parser - * @file: fw image - * @file_size: fw size - * @cur_pos: position in file - * @buf: temp buf for download - * @buf_size: size of buf - * @entry_point: address to jump in fw kick-off - */ -struct iwmct_parser { - const u8 *file; - size_t file_size; - size_t cur_pos; - u8 *buf; - size_t buf_size; - u32 entry_point; - struct iwmct_fw_hdr versions; -}; - - -struct iwmct_work_struct { - struct list_head list; - ssize_t iosize; -}; - -struct iwmct_dbg { - int blocks; - bool dump; - bool jump; - bool direct; - bool checksum; - bool fw_download; - int block_size; - 
int download_trans_blks; - - char label_fw[256]; -}; - -struct iwmct_debugfs; - -struct iwmct_priv { - struct sdio_func *func; - struct iwmct_debugfs *dbgfs; - struct iwmct_parser parser; - atomic_t reset; - atomic_t dev_sync; - u32 trans_len; - u32 barker; - struct iwmct_dbg dbg; - - /* drivers work queue */ - struct workqueue_struct *wq; - struct workqueue_struct *bus_rescan_wq; - struct work_struct bus_rescan_worker; - struct work_struct isr_worker; - - /* drivers wait queue */ - wait_queue_head_t wait_q; - - /* rx request list */ - struct list_head read_req_list; -}; - -extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr, - void *src, int count); - -extern int iwmct_fw_load(struct iwmct_priv *priv); - -extern void iwmct_dbg_init_params(struct iwmct_priv *drv); -extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv); -extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv); -extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len); - -#endif /* __IWMC3200TOP_H__ */ diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c deleted file mode 100644 index d569279698f..00000000000 --- a/drivers/misc/iwmc3200top/log.c +++ /dev/null @@ -1,347 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/log.c - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#include <linux/kernel.h> -#include <linux/mmc/sdio_func.h> -#include <linux/ctype.h> -#include "fw-msg.h" -#include "iwmc3200top.h" -#include "log.h" - -/* Maximal hexadecimal string size of the FW memdump message */ -#define LOG_MSG_SIZE_MAX 12400 - -/* iwmct_logdefs is a global used by log macros */ -u8 iwmct_logdefs[LOG_SRC_MAX]; -static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX]; - - -static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask) -{ - int i; - - if (src < size) - logdefs[src] = logmask; - else if (src == LOG_SRC_ALL) - for (i = 0; i < size; i++) - logdefs[i] = logmask; - else - return -1; - - return 0; -} - - -int iwmct_log_set_filter(u8 src, u8 logmask) -{ - return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask); -} - - -int iwmct_log_set_fw_filter(u8 src, u8 logmask) -{ - return _log_set_log_filter(iwmct_fw_logdefs, - FW_LOG_SRC_MAX, src, logmask); -} - - -static int log_msg_format_hex(char *str, int slen, u8 *ibuf, - int ilen, char *pref) -{ - int pos = 0; - int i; - int len; - - for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++) - str[pos] = pref[i]; - - for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++) - len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]); - - if (i < ilen) - return -1; - - return 0; -} - -/* NOTE: This function is not thread safe. 
- Currently it's called only from sdio rx worker - no race there -*/ -void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len) -{ - struct top_msg *msg; - static char logbuf[LOG_MSG_SIZE_MAX]; - - msg = (struct top_msg *)buf; - - if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) { - LOG_ERROR(priv, FW_MSG, "Log message from TOP " - "is too short %d (expected %zd)\n", - len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)); - return; - } - - if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] & - BIT(msg->u.log.log_hdr.severity)) || - !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity))) - return; - - switch (msg->hdr.category) { - case COMM_CATEGORY_TESTABILITY: - if (!(iwmct_logdefs[LOG_SRC_TST] & - BIT(msg->u.log.log_hdr.severity))) - return; - if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf, - le16_to_cpu(msg->hdr.length) + - sizeof(msg->hdr), "<TST>")) - LOG_WARNING(priv, TST, - "TOP TST message is too long, truncating..."); - LOG_WARNING(priv, TST, "%s\n", logbuf); - break; - case COMM_CATEGORY_DEBUG: - if (msg->hdr.opcode == OP_DBG_ZSTR_MSG) - LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>", - ((u8 *)msg) + sizeof(msg->hdr) - + sizeof(msg->u.log.log_hdr)); - else { - if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf, - le16_to_cpu(msg->hdr.length) - + sizeof(msg->hdr), - "<DBG>")) - LOG_WARNING(priv, FW_MSG, - "TOP DBG message is too long," - "truncating..."); - LOG_WARNING(priv, FW_MSG, "%s\n", logbuf); - } - break; - default: - break; - } -} - -static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size) -{ - int i, pos, len; - for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) { - len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,", - i, logdefs[i]); - pos += len; - } - buf[pos-1] = '\n'; - buf[pos] = '\0'; - - if (i < logdefsz) - return -1; - return 0; -} - -int log_get_filter_str(char *buf, int size) -{ - return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size); -} - -int log_get_fw_filter_str(char *buf, int size) -{ - return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size); -} - -#define HEXADECIMAL_RADIX 16 -#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */ - -ssize_t show_iwmct_log_level(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwmct_priv *priv = dev_get_drvdata(d); - char *str_buf; - int buf_size; - ssize_t ret; - - buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1; - str_buf = kzalloc(buf_size, GFP_KERNEL); - if (!str_buf) { - LOG_ERROR(priv, DEBUGFS, - "failed to allocate %d bytes\n", buf_size); - ret = -ENOMEM; - goto exit; - } - - if (log_get_filter_str(str_buf, buf_size) < 0) { - ret = -EINVAL; - goto exit; - } - - ret = sprintf(buf, "%s", str_buf); - -exit: - kfree(str_buf); - return ret; -} - -ssize_t store_iwmct_log_level(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwmct_priv *priv = dev_get_drvdata(d); - char *token, *str_buf = NULL; - long val; - ssize_t ret = count; - u8 src, mask; - - if (!count) - goto exit; - - str_buf = kzalloc(count, GFP_KERNEL); - if (!str_buf) { - LOG_ERROR(priv, DEBUGFS, - "failed to allocate %zd bytes\n", count); - ret = -ENOMEM; - goto exit; - } - - memcpy(str_buf, buf, count); - - while ((token = strsep(&str_buf, ",")) != NULL) { - while (isspace(*token)) - ++token; - if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { - LOG_ERROR(priv, DEBUGFS, - "failed to convert string to long %s\n", - token); - ret = -EINVAL; - goto exit; - } - - mask = val & 
0xFF; - src = (val & 0XFF00) >> 8; - iwmct_log_set_filter(src, mask); - } - -exit: - kfree(str_buf); - return ret; -} - -ssize_t show_iwmct_log_level_fw(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwmct_priv *priv = dev_get_drvdata(d); - char *str_buf; - int buf_size; - ssize_t ret; - - buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2; - - str_buf = kzalloc(buf_size, GFP_KERNEL); - if (!str_buf) { - LOG_ERROR(priv, DEBUGFS, - "failed to allocate %d bytes\n", buf_size); - ret = -ENOMEM; - goto exit; - } - - if (log_get_fw_filter_str(str_buf, buf_size) < 0) { - ret = -EINVAL; - goto exit; - } - - ret = sprintf(buf, "%s", str_buf); - -exit: - kfree(str_buf); - return ret; -} - -ssize_t store_iwmct_log_level_fw(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct iwmct_priv *priv = dev_get_drvdata(d); - struct top_msg cmd; - char *token, *str_buf = NULL; - ssize_t ret = count; - u16 cmdlen = 0; - int i; - long val; - u8 src, mask; - - if (!count) - goto exit; - - str_buf = kzalloc(count, GFP_KERNEL); - if (!str_buf) { - LOG_ERROR(priv, DEBUGFS, - "failed to allocate %zd bytes\n", count); - ret = -ENOMEM; - goto exit; - } - - memcpy(str_buf, buf, count); - - cmd.hdr.type = COMM_TYPE_H2D; - cmd.hdr.category = COMM_CATEGORY_DEBUG; - cmd.hdr.opcode = CMD_DBG_LOG_LEVEL; - - for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) && - (i < FW_LOG_SRC_MAX); i++) { - - while (isspace(*token)) - ++token; - - if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { - LOG_ERROR(priv, DEBUGFS, - "failed to convert string to long %s\n", - token); - ret = -EINVAL; - goto exit; - } - - mask = val & 0xFF; /* LSB */ - src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */ - iwmct_log_set_fw_filter(src, mask); - - cmd.u.logdefs[i].logsource = src; - cmd.u.logdefs[i].sevmask = mask; - } - - cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0])); - cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr)); - - ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen); - if (ret) { - LOG_ERROR(priv, DEBUGFS, - "Failed to send %d bytes of fwcmd, ret=%zd\n", - cmdlen, ret); - goto exit; - } else - LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen); - - ret = count; - -exit: - kfree(str_buf); - return ret; -} - diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h deleted file mode 100644 index aba8121f978..00000000000 --- a/drivers/misc/iwmc3200top/log.h +++ /dev/null @@ -1,158 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/log.h - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. 
- * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#ifndef __LOG_H__ -#define __LOG_H__ - - -/* log severity: - * The log levels here match FW log levels - * so values need to stay as is */ -#define LOG_SEV_CRITICAL 0 -#define LOG_SEV_ERROR 1 -#define LOG_SEV_WARNING 2 -#define LOG_SEV_INFO 3 -#define LOG_SEV_INFOEX 4 - -#define LOG_SEV_FILTER_ALL \ - (BIT(LOG_SEV_CRITICAL) | \ - BIT(LOG_SEV_ERROR) | \ - BIT(LOG_SEV_WARNING) | \ - BIT(LOG_SEV_INFO) | \ - BIT(LOG_SEV_INFOEX)) - -/* log source */ -#define LOG_SRC_INIT 0 -#define LOG_SRC_DEBUGFS 1 -#define LOG_SRC_FW_DOWNLOAD 2 -#define LOG_SRC_FW_MSG 3 -#define LOG_SRC_TST 4 -#define LOG_SRC_IRQ 5 - -#define LOG_SRC_MAX 6 -#define LOG_SRC_ALL 0xFF - -/** - * Default intitialization runtime log level - */ -#ifndef LOG_SEV_FILTER_RUNTIME -#define LOG_SEV_FILTER_RUNTIME \ - (BIT(LOG_SEV_CRITICAL) | \ - BIT(LOG_SEV_ERROR) | \ - BIT(LOG_SEV_WARNING)) -#endif - -#ifndef FW_LOG_SEV_FILTER_RUNTIME -#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL -#endif - -#ifdef CONFIG_IWMC3200TOP_DEBUG -/** - * Log macros - */ - -#define priv2dev(priv) (&(priv->func)->dev) - -#define LOG_CRITICAL(priv, src, fmt, args...) \ -do { \ - if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \ - dev_crit(priv2dev(priv), "%s %d: " fmt, \ - __func__, __LINE__, ##args); \ -} while (0) - -#define LOG_ERROR(priv, src, fmt, args...) \ -do { \ - if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \ - dev_err(priv2dev(priv), "%s %d: " fmt, \ - __func__, __LINE__, ##args); \ -} while (0) - -#define LOG_WARNING(priv, src, fmt, args...) \ -do { \ - if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \ - dev_warn(priv2dev(priv), "%s %d: " fmt, \ - __func__, __LINE__, ##args); \ -} while (0) - -#define LOG_INFO(priv, src, fmt, args...) \ -do { \ - if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \ - dev_info(priv2dev(priv), "%s %d: " fmt, \ - __func__, __LINE__, ##args); \ -} while (0) - -#define LOG_INFOEX(priv, src, fmt, args...) \ -do { \ - if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \ - dev_dbg(priv2dev(priv), "%s %d: " fmt, \ - __func__, __LINE__, ##args); \ -} while (0) - -#define LOG_HEXDUMP(src, ptr, len) \ -do { \ - if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \ - print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \ - 16, 1, ptr, len, false); \ -} while (0) - -void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len); - -extern u8 iwmct_logdefs[]; - -int iwmct_log_set_filter(u8 src, u8 logmask); -int iwmct_log_set_fw_filter(u8 src, u8 logmask); - -ssize_t show_iwmct_log_level(struct device *d, - struct device_attribute *attr, char *buf); -ssize_t store_iwmct_log_level(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count); -ssize_t show_iwmct_log_level_fw(struct device *d, - struct device_attribute *attr, char *buf); -ssize_t store_iwmct_log_level_fw(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count); - -#else - -#define LOG_CRITICAL(priv, src, fmt, args...) -#define LOG_ERROR(priv, src, fmt, args...) -#define LOG_WARNING(priv, src, fmt, args...) -#define LOG_INFO(priv, src, fmt, args...) -#define LOG_INFOEX(priv, src, fmt, args...) 
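/*
 * [Editor's aside -- illustrative sketch, not part of the patch being
 * quoted.] The filter words accepted by the log_level sysfs attribute
 * above pack a log-source index into the high byte and a severity
 * bitmask into the low byte, mirroring the decoding in
 * store_iwmct_log_level() (src = (val & 0xFF00) >> 8; mask = val & 0xFF).
 * A minimal sketch of the inverse encoding:
 */
static inline u16 iwmct_encode_log_filter(u8 src, u8 sevmask)
{
	/* e.g. src = LOG_SRC_FW_MSG (3) with sevmask = BIT(LOG_SEV_CRITICAL)
	 * | BIT(LOG_SEV_ERROR) = 0x03 yields 0x0303, i.e. the token
	 * "0x0303" written to the attribute. */
	return ((u16)src << 8) | sevmask;
}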
-#define LOG_HEXDUMP(src, ptr, len) - -static inline void iwmct_log_top_message(struct iwmct_priv *priv, - u8 *buf, int len) {} -static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; } -static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; } - -#endif /* CONFIG_IWMC3200TOP_DEBUG */ - -int log_get_filter_str(char *buf, int size); -int log_get_fw_filter_str(char *buf, int size); - -#endif /* __LOG_H__ */ diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c deleted file mode 100644 index fafcaa481d7..00000000000 --- a/drivers/misc/iwmc3200top/main.c +++ /dev/null @@ -1,678 +0,0 @@ -/* - * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver - * drivers/misc/iwmc3200top/main.c - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> - * - - * - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/debugfs.h> -#include <linux/mmc/sdio_ids.h> -#include <linux/mmc/sdio_func.h> -#include <linux/mmc/sdio.h> - -#include "iwmc3200top.h" -#include "log.h" -#include "fw-msg.h" -#include "debugfs.h" - - -#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver" -#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation." - -#define DRIVER_VERSION "0.1.62" - -MODULE_DESCRIPTION(DRIVER_DESCRIPTION); -MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR(DRIVER_COPYRIGHT); -MODULE_FIRMWARE(FW_NAME(FW_API_VER)); - -/* - * This workers main task is to wait for OP_OPR_ALIVE - * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed. - * When OP_OPR_ALIVE received it will issue - * a call to "bus_rescan_devices". 
- */ -static void iwmct_rescan_worker(struct work_struct *ws) -{ - struct iwmct_priv *priv; - int ret; - - priv = container_of(ws, struct iwmct_priv, bus_rescan_worker); - - LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n"); - - ret = bus_rescan_devices(priv->func->dev.bus); - if (ret < 0) - LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n"); -} - -static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg) -{ - switch (msg->hdr.opcode) { - case OP_OPR_ALIVE: - LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n"); - queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker); - break; - default: - LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n", - msg->hdr.opcode); - break; - } -} - - -static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len) -{ - struct top_msg *msg; - - msg = (struct top_msg *)buf; - - if (msg->hdr.type != COMM_TYPE_D2H) { - LOG_ERROR(priv, FW_MSG, - "Message from TOP with invalid message type 0x%X\n", - msg->hdr.type); - return; - } - - if (len < sizeof(msg->hdr)) { - LOG_ERROR(priv, FW_MSG, - "Message from TOP is too short for message header " - "received %d bytes, expected at least %zd bytes\n", - len, sizeof(msg->hdr)); - return; - } - - if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) { - LOG_ERROR(priv, FW_MSG, - "Message length (%d bytes) is shorter than " - "in header (%d bytes)\n", - len, le16_to_cpu(msg->hdr.length)); - return; - } - - switch (msg->hdr.category) { - case COMM_CATEGORY_OPERATIONAL: - op_top_message(priv, (struct top_msg *)buf); - break; - - case COMM_CATEGORY_DEBUG: - case COMM_CATEGORY_TESTABILITY: - case COMM_CATEGORY_DIAGNOSTICS: - iwmct_log_top_message(priv, buf, len); - break; - - default: - LOG_ERROR(priv, FW_MSG, - "Message from TOP with unknown category 0x%X\n", - msg->hdr.category); - break; - } -} - -int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len) -{ - int ret; - u8 *buf; - - LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n"); - - /* add padding to 256 for IWMC */ - ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256; - - LOG_HEXDUMP(FW_MSG, cmd, len); - - if (len > FW_HCMD_BLOCK_SIZE) { - LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n", - len, FW_HCMD_BLOCK_SIZE); - return -1; - } - - buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL); - if (!buf) { - LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n", - FW_HCMD_BLOCK_SIZE); - return -1; - } - - memcpy(buf, cmd, len); - - sdio_claim_host(priv->func); - ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf, - FW_HCMD_BLOCK_SIZE); - sdio_release_host(priv->func); - - kfree(buf); - return ret; -} - -int iwmct_tx(struct iwmct_priv *priv, unsigned int addr, - void *src, int count) -{ - int ret; - - sdio_claim_host(priv->func); - ret = sdio_memcpy_toio(priv->func, addr, src, count); - sdio_release_host(priv->func); - - return ret; -} - -static void iwmct_irq_read_worker(struct work_struct *ws) -{ - struct iwmct_priv *priv; - struct iwmct_work_struct *read_req; - __le32 *buf = NULL; - int ret; - int iosize; - u32 barker; - bool is_barker; - - priv = container_of(ws, struct iwmct_priv, isr_worker); - - LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws); - - /* --------------------- Handshake with device -------------------- */ - sdio_claim_host(priv->func); - - /* all list manipulations have to be protected by - * sdio_claim_host/sdio_release_host */ - if (list_empty(&priv->read_req_list)) { - LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n"); - goto exit_release; - } - - read_req 
= list_entry(priv->read_req_list.next, - struct iwmct_work_struct, list); - - list_del(&read_req->list); - iosize = read_req->iosize; - kfree(read_req); - - buf = kzalloc(iosize, GFP_KERNEL); - if (!buf) { - LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize); - goto exit_release; - } - - LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n", - iosize, buf, priv->func->num); - - /* read from device */ - ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize); - if (ret) { - LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret); - goto exit_release; - } - - LOG_HEXDUMP(IRQ, (u8 *)buf, iosize); - - barker = le32_to_cpu(buf[0]); - - /* Verify whether it's a barker and if not - treat as regular Rx */ - if (barker == IWMC_BARKER_ACK || - (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) { - - /* Valid Barker is equal on first 4 dwords */ - is_barker = (buf[1] == buf[0]) && - (buf[2] == buf[0]) && - (buf[3] == buf[0]); - - if (!is_barker) { - LOG_WARNING(priv, IRQ, - "Potentially inconsistent barker " - "%08X_%08X_%08X_%08X\n", - le32_to_cpu(buf[0]), le32_to_cpu(buf[1]), - le32_to_cpu(buf[2]), le32_to_cpu(buf[3])); - } - } else { - is_barker = false; - } - - /* Handle Top CommHub message */ - if (!is_barker) { - sdio_release_host(priv->func); - handle_top_message(priv, (u8 *)buf, iosize); - goto exit; - } else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */ - if (atomic_read(&priv->dev_sync) == 0) { - LOG_ERROR(priv, IRQ, - "ACK barker arrived out-of-sync\n"); - goto exit_release; - } - - /* Continuing to FW download (after Sync is completed)*/ - atomic_set(&priv->dev_sync, 0); - LOG_INFO(priv, IRQ, "ACK barker arrived " - "- starting FW download\n"); - } else { /* REBOOT barker */ - LOG_INFO(priv, IRQ, "Recieved reboot barker: %x\n", barker); - priv->barker = barker; - - if (barker & BARKER_DNLOAD_SYNC_MSK) { - /* Send the same barker back */ - ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, - buf, iosize); - if (ret) { - LOG_ERROR(priv, IRQ, - "error %d echoing barker\n", ret); - goto exit_release; - } - LOG_INFO(priv, IRQ, "Echoing barker to device\n"); - atomic_set(&priv->dev_sync, 1); - goto exit_release; - } - - /* Continuing to FW download (without Sync) */ - LOG_INFO(priv, IRQ, "No sync requested " - "- starting FW download\n"); - } - - sdio_release_host(priv->func); - - - LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker); - LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n", - (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not"); - LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n", - (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not"); - LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n", - (priv->barker & BARKER_DNLOAD_BT_MSK) ? 
"was" : "not"); - - if (priv->dbg.fw_download) - iwmct_fw_load(priv); - else - LOG_ERROR(priv, IRQ, "FW download not allowed\n"); - - goto exit; - -exit_release: - sdio_release_host(priv->func); -exit: - kfree(buf); - LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n"); -} - -static void iwmct_irq(struct sdio_func *func) -{ - struct iwmct_priv *priv; - int val, ret; - int iosize; - int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR; - struct iwmct_work_struct *read_req; - - priv = sdio_get_drvdata(func); - - LOG_INFO(priv, IRQ, "enter iwmct_irq\n"); - - /* read the function's status register */ - val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret); - - LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret); - - if (!val) { - LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n"); - goto exit_clear_intr; - } - - - /* - * read 2 bytes of the transaction size - * IMPORTANT: sdio transaction size has to be read before clearing - * sdio interrupt!!! - */ - val = sdio_readb(priv->func, addr++, &ret); - iosize = val; - val = sdio_readb(priv->func, addr++, &ret); - iosize += val << 8; - - LOG_INFO(priv, IRQ, "READ size %d\n", iosize); - - if (iosize == 0) { - LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize); - goto exit_clear_intr; - } - - /* allocate a work structure to pass iosize to the worker */ - read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL); - if (!read_req) { - LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n"); - goto exit_clear_intr; - } - - INIT_LIST_HEAD(&read_req->list); - read_req->iosize = iosize; - - list_add_tail(&priv->read_req_list, &read_req->list); - - /* clear the function's interrupt request bit (write 1 to clear) */ - sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); - - queue_work(priv->wq, &priv->isr_worker); - - LOG_INFO(priv, IRQ, "exit iwmct_irq\n"); - - return; - -exit_clear_intr: - /* clear the function's interrupt request bit (write 1 to clear) */ - sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret); -} - - -static int blocks; -module_param(blocks, int, 0604); -MODULE_PARM_DESC(blocks, "max_blocks_to_send"); - -static int dump; -module_param(dump, bool, 0604); -MODULE_PARM_DESC(dump, "dump_hex_content"); - -static int jump = 1; -module_param(jump, bool, 0604); - -static int direct = 1; -module_param(direct, bool, 0604); - -static int checksum = 1; -module_param(checksum, bool, 0604); - -static int fw_download = 1; -module_param(fw_download, bool, 0604); - -static int block_size = IWMC_SDIO_BLK_SIZE; -module_param(block_size, int, 0404); - -static int download_trans_blks = IWMC_DEFAULT_TR_BLK; -module_param(download_trans_blks, int, 0604); - -static int rubbish_barker; -module_param(rubbish_barker, bool, 0604); - -#ifdef CONFIG_IWMC3200TOP_DEBUG -static int log_level[LOG_SRC_MAX]; -static unsigned int log_level_argc; -module_param_array(log_level, int, &log_level_argc, 0604); -MODULE_PARM_DESC(log_level, "log_level"); - -static int log_level_fw[FW_LOG_SRC_MAX]; -static unsigned int log_level_fw_argc; -module_param_array(log_level_fw, int, &log_level_fw_argc, 0604); -MODULE_PARM_DESC(log_level_fw, "log_level_fw"); -#endif - -void iwmct_dbg_init_params(struct iwmct_priv *priv) -{ -#ifdef CONFIG_IWMC3200TOP_DEBUG - int i; - - for (i = 0; i < log_level_argc; i++) { - dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n", - i, log_level[i]); - iwmct_log_set_filter((log_level[i] >> 8) & 0xFF, - log_level[i] & 0xFF); - } - for (i = 0; i < log_level_fw_argc; i++) { - dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n", - i, 
log_level_fw[i]); - iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF, - log_level_fw[i] & 0xFF); - } -#endif - - priv->dbg.blocks = blocks; - LOG_INFO(priv, INIT, "blocks=%d\n", blocks); - priv->dbg.dump = (bool)dump; - LOG_INFO(priv, INIT, "dump=%d\n", dump); - priv->dbg.jump = (bool)jump; - LOG_INFO(priv, INIT, "jump=%d\n", jump); - priv->dbg.direct = (bool)direct; - LOG_INFO(priv, INIT, "direct=%d\n", direct); - priv->dbg.checksum = (bool)checksum; - LOG_INFO(priv, INIT, "checksum=%d\n", checksum); - priv->dbg.fw_download = (bool)fw_download; - LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download); - priv->dbg.block_size = block_size; - LOG_INFO(priv, INIT, "block_size=%d\n", block_size); - priv->dbg.download_trans_blks = download_trans_blks; - LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks); -} - -/***************************************************************************** - * - * sysfs attributes - * - *****************************************************************************/ -static ssize_t show_iwmct_fw_version(struct device *d, - struct device_attribute *attr, char *buf) -{ - struct iwmct_priv *priv = dev_get_drvdata(d); - return sprintf(buf, "%s\n", priv->dbg.label_fw); -} -static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL); - -#ifdef CONFIG_IWMC3200TOP_DEBUG -static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO, - show_iwmct_log_level, store_iwmct_log_level); -static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO, - show_iwmct_log_level_fw, store_iwmct_log_level_fw); -#endif - -static struct attribute *iwmct_sysfs_entries[] = { - &dev_attr_cc_label_fw.attr, -#ifdef CONFIG_IWMC3200TOP_DEBUG - &dev_attr_log_level.attr, - &dev_attr_log_level_fw.attr, -#endif - NULL -}; - -static struct attribute_group iwmct_attribute_group = { - .name = NULL, /* put in device directory */ - .attrs = iwmct_sysfs_entries, -}; - - -static int iwmct_probe(struct sdio_func *func, - const struct sdio_device_id *id) -{ - struct iwmct_priv *priv; - int ret; - int val = 1; - int addr = IWMC_SDIO_INTR_ENABLE_ADDR; - - dev_dbg(&func->dev, "enter iwmct_probe\n"); - - dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n", - jiffies_to_msecs(2147483647), HZ); - - priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL); - if (!priv) { - dev_err(&func->dev, "kzalloc error\n"); - return -ENOMEM; - } - priv->func = func; - sdio_set_drvdata(func, priv); - - - /* create drivers work queue */ - priv->wq = create_workqueue(DRV_NAME "_wq"); - priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq"); - INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker); - INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker); - - init_waitqueue_head(&priv->wait_q); - - sdio_claim_host(func); - /* FIXME: Remove after it is fixed in the Boot ROM upgrade */ - func->enable_timeout = 10; - - /* In our HW, setting the block size also wakes up the boot rom. 
*/ - ret = sdio_set_block_size(func, priv->dbg.block_size); - if (ret) { - LOG_ERROR(priv, INIT, - "sdio_set_block_size() failure: %d\n", ret); - goto error_sdio_enable; - } - - ret = sdio_enable_func(func); - if (ret) { - LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret); - goto error_sdio_enable; - } - - /* init reset and dev_sync states */ - atomic_set(&priv->reset, 0); - atomic_set(&priv->dev_sync, 0); - - /* init read req queue */ - INIT_LIST_HEAD(&priv->read_req_list); - - /* process configurable parameters */ - iwmct_dbg_init_params(priv); - ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group); - if (ret) { - LOG_ERROR(priv, INIT, "Failed to register attributes and " - "initialize module_params\n"); - goto error_dev_attrs; - } - - iwmct_dbgfs_register(priv, DRV_NAME); - - if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) { - LOG_INFO(priv, INIT, - "Reducing transaction to 8 blocks = 2K (from %d)\n", - priv->dbg.download_trans_blks); - priv->dbg.download_trans_blks = 8; - } - priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size; - LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len); - - ret = sdio_claim_irq(func, iwmct_irq); - if (ret) { - LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret); - goto error_claim_irq; - } - - - /* Enable function's interrupt */ - sdio_writeb(priv->func, val, addr, &ret); - if (ret) { - LOG_ERROR(priv, INIT, "Failure writing to " - "Interrupt Enable Register (%d): %d\n", addr, ret); - goto error_enable_int; - } - - sdio_release_host(func); - - LOG_INFO(priv, INIT, "exit iwmct_probe\n"); - - return ret; - -error_enable_int: - sdio_release_irq(func); -error_claim_irq: - sdio_disable_func(func); -error_dev_attrs: - iwmct_dbgfs_unregister(priv->dbgfs); - sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group); -error_sdio_enable: - sdio_release_host(func); - return ret; -} - -static void iwmct_remove(struct sdio_func *func) -{ - struct iwmct_work_struct *read_req; - struct iwmct_priv *priv = sdio_get_drvdata(func); - - priv = sdio_get_drvdata(func); - - LOG_INFO(priv, INIT, "enter\n"); - - sdio_claim_host(func); - sdio_release_irq(func); - sdio_release_host(func); - - /* Safely destroy osc workqueue */ - destroy_workqueue(priv->bus_rescan_wq); - destroy_workqueue(priv->wq); - - sdio_claim_host(func); - sdio_disable_func(func); - sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group); - iwmct_dbgfs_unregister(priv->dbgfs); - sdio_release_host(func); - - /* free read requests */ - while (!list_empty(&priv->read_req_list)) { - read_req = list_entry(priv->read_req_list.next, - struct iwmct_work_struct, list); - - list_del(&read_req->list); - kfree(read_req); - } - - kfree(priv); -} - - -static const struct sdio_device_id iwmct_ids[] = { - /* Intel Wireless MultiCom 3200 Top Driver */ - { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)}, - { }, /* Terminating entry */ -}; - -MODULE_DEVICE_TABLE(sdio, iwmct_ids); - -static struct sdio_driver iwmct_driver = { - .probe = iwmct_probe, - .remove = iwmct_remove, - .name = DRV_NAME, - .id_table = iwmct_ids, -}; - -static int __init iwmct_init(void) -{ - int rc; - - /* Default log filter settings */ - iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME); - iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL); - iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME); - - rc = sdio_register_driver(&iwmct_driver); - - return rc; -} - -static void __exit iwmct_exit(void) -{ - sdio_unregister_driver(&iwmct_driver); -} - 
-module_init(iwmct_init); -module_exit(iwmct_exit); - diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index fcb6ec1af17..36f5d52775a 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -102,6 +102,8 @@ #include <linux/nmi.h> #include <linux/delay.h> #include <linux/kthread.h> +#include <linux/module.h> +#include <asm/sections.h> #define v1printk(a...) do { \ if (verbose) \ @@ -133,12 +135,17 @@ static int force_hwbrks; static int hwbreaks_ok; static int hw_break_val; static int hw_break_val2; +static int cont_instead_of_sstep; +static unsigned long cont_thread_id; +static unsigned long sstep_thread_id; #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) static int arch_needs_sstep_emulation = 1; #else static int arch_needs_sstep_emulation; #endif +static unsigned long cont_addr; static unsigned long sstep_addr; +static int restart_from_top_after_write; static int sstep_state; /* Storage for the registers, in GDB format. */ @@ -186,7 +193,8 @@ static int kgdbts_unreg_thread(void *ptr) */ while (!final_ack) msleep_interruptible(1500); - + /* Pause for any other threads to exit after final ack. */ + msleep_interruptible(1000); if (configured) kgdb_unregister_io_module(&kgdbts_io_ops); configured = 0; @@ -210,11 +218,12 @@ static unsigned long lookup_addr(char *arg) if (!strcmp(arg, "kgdbts_break_test")) addr = (unsigned long)kgdbts_break_test; else if (!strcmp(arg, "sys_open")) - addr = (unsigned long)sys_open; + addr = (unsigned long)do_sys_open; else if (!strcmp(arg, "do_fork")) addr = (unsigned long)do_fork; else if (!strcmp(arg, "hw_break_val")) addr = (unsigned long)&hw_break_val; + addr = (unsigned long) dereference_function_descriptor((void *)addr); return addr; } @@ -282,29 +291,49 @@ static void hw_break_val_write(void) hw_break_val++; } +static int get_thread_id_continue(char *put_str, char *arg) +{ + char *ptr = &put_str[11]; + + if (put_str[1] != 'T' || put_str[2] != '0') + return 1; + kgdb_hex2long(&ptr, &cont_thread_id); + return 0; +} + static int check_and_rewind_pc(char *put_str, char *arg) { unsigned long addr = lookup_addr(arg); + unsigned long ip; int offset = 0; kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, NUMREGBYTES); gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); - v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); -#ifdef CONFIG_X86 - /* On x86 a breakpoint stop requires it to be decremented */ - if (addr + 1 == kgdbts_regs.ip) - offset = -1; + ip = instruction_pointer(&kgdbts_regs); + v2printk("Stopped at IP: %lx\n", ip); +#ifdef GDB_ADJUSTS_BREAK_OFFSET + /* On some arches, a breakpoint stop requires it to be decremented */ + if (addr + BREAK_INSTR_SIZE == ip) + offset = -BREAK_INSTR_SIZE; #endif - if (strcmp(arg, "silent") && - instruction_pointer(&kgdbts_regs) + offset != addr) { + + if (arch_needs_sstep_emulation && sstep_addr && + ip + offset == sstep_addr && + ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) { + /* This is special case for emulated single step */ + v2printk("Emul: rewind hit single step bp\n"); + restart_from_top_after_write = 1; + } else if (strcmp(arg, "silent") && ip + offset != addr) { eprintk("kgdbts: BP mismatch %lx expected %lx\n", - instruction_pointer(&kgdbts_regs) + offset, addr); + ip + offset, addr); return 1; } -#ifdef CONFIG_X86 - /* On x86 adjust the instruction pointer if needed */ - kgdbts_regs.ip += offset; + /* Readjust the instruction pointer if needed */ + ip += offset; + cont_addr = ip; +#ifdef GDB_ADJUSTS_BREAK_OFFSET + 
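/*
 * [Editor's note on the hunk above -- explanation, not patch content.]
 * On architectures where the breakpoint trap reports the address
 * *after* the break instruction (x86 is the classic case, with
 * BREAK_INSTR_SIZE == 1 for int3), the reported stop address is
 * addr + BREAK_INSTR_SIZE, so the test rewinds the saved instruction
 * pointer by that amount before resuming -- the same adjustment gdb
 * itself performs. The new GDB_ADJUSTS_BREAK_OFFSET guard generalizes
 * the hard-coded CONFIG_X86 check visible in the removed lines.
 */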
instruction_pointer_set(&kgdbts_regs, ip); #endif return 0; } @@ -312,6 +341,8 @@ static int check_and_rewind_pc(char *put_str, char *arg) static int check_single_step(char *put_str, char *arg) { unsigned long addr = lookup_addr(arg); + static int matched_id; + /* * From an arch indepent point of view the instruction pointer * should be on a different instruction @@ -321,6 +352,29 @@ static int check_single_step(char *put_str, char *arg) gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); v2printk("Singlestep stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); + + if (sstep_thread_id != cont_thread_id) { + /* + * Ensure we stopped in the same thread id as before, else the + * debugger should continue until the original thread that was + * single stepped is scheduled again, emulating gdb's behavior. + */ + v2printk("ThrID does not match: %lx\n", cont_thread_id); + if (arch_needs_sstep_emulation) { + if (matched_id && + instruction_pointer(&kgdbts_regs) != addr) + goto continue_test; + matched_id++; + ts.idx -= 2; + sstep_state = 0; + return 0; + } + cont_instead_of_sstep = 1; + ts.idx -= 4; + return 0; + } +continue_test: + matched_id = 0; if (instruction_pointer(&kgdbts_regs) == addr) { eprintk("kgdbts: SingleStep failed at %lx\n", instruction_pointer(&kgdbts_regs)); @@ -362,10 +416,40 @@ static int got_break(char *put_str, char *arg) return 1; } +static void get_cont_catch(char *arg) +{ + /* Always send detach because the test is completed at this point */ + fill_get_buf("D"); +} + +static int put_cont_catch(char *put_str, char *arg) +{ + /* This is at the end of the test and we catch any and all input */ + v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id); + ts.idx--; + return 0; +} + +static int emul_reset(char *put_str, char *arg) +{ + if (strncmp(put_str, "$OK", 3)) + return 1; + if (restart_from_top_after_write) { + restart_from_top_after_write = 0; + ts.idx = -1; + } + return 0; +} + static void emul_sstep_get(char *arg) { if (!arch_needs_sstep_emulation) { - fill_get_buf(arg); + if (cont_instead_of_sstep) { + cont_instead_of_sstep = 0; + fill_get_buf("c"); + } else { + fill_get_buf(arg); + } return; } switch (sstep_state) { @@ -395,9 +479,11 @@ static void emul_sstep_get(char *arg) static int emul_sstep_put(char *put_str, char *arg) { if (!arch_needs_sstep_emulation) { - if (!strncmp(put_str+1, arg, 2)) - return 0; - return 1; + char *ptr = &put_str[11]; + if (put_str[1] != 'T' || put_str[2] != '0') + return 1; + kgdb_hex2long(&ptr, &sstep_thread_id); + return 0; } switch (sstep_state) { case 1: @@ -408,8 +494,7 @@ static int emul_sstep_put(char *put_str, char *arg) v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); /* Want to stop at IP + break instruction size by default */ - sstep_addr = instruction_pointer(&kgdbts_regs) + - BREAK_INSTR_SIZE; + sstep_addr = cont_addr + BREAK_INSTR_SIZE; break; case 2: if (strncmp(put_str, "$OK", 3)) { @@ -421,6 +506,9 @@ static int emul_sstep_put(char *put_str, char *arg) if (strncmp(put_str, "$T0", 3)) { eprintk("kgdbts: failed continue sstep\n"); return 1; + } else { + char *ptr = &put_str[11]; + kgdb_hex2long(&ptr, &sstep_thread_id); } break; case 4: @@ -499,10 +587,10 @@ static struct test_struct bad_read_test[] = { static struct test_struct singlestep_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ - { "c", "T0*", }, /* Continue */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ + { "kgdbts_break_test", "OK", 
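/*
 * [Editor's note -- explanation, not patch content.] Each test_struct
 * entry in these tables reads as { get, put, get_handler, put_handler }:
 * "get" is the gdbserial packet the test injects as if gdb had sent it,
 * "put" is the reply expected from kgdb (with '*' acting as a wildcard,
 * see validate_simple_test() further down), and the optional handlers
 * build the outgoing packet or parse the reply -- e.g.
 * get_thread_id_continue() records the thread id from a "T0..." stop
 * reply so later single-step stops can be matched to the same thread.
 */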
sw_rem_break }, /*remove breakpoint */ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, /* Write registers */ - { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "kgdbts_break_test", NULL, check_single_step }, { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ @@ -520,16 +608,16 @@ static struct test_struct singlestep_break_test[] = { static struct test_struct do_fork_test[] = { { "?", "S0*" }, /* Clear break points */ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ - { "c", "T0*", }, /* Continue */ - { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */ - { "write", "OK", write_regs }, /* Write registers */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */ + { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs, emul_reset }, /* Write registers */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "do_fork", NULL, check_single_step }, { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ - { "", "" }, + { "", "", get_cont_catch, put_cont_catch }, }; /* Test for hitting a breakpoint at sys_open for what ever the number @@ -538,16 +626,16 @@ static struct test_struct do_fork_test[] = { static struct test_struct sys_open_test[] = { { "?", "S0*" }, /* Clear break points */ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ - { "c", "T0*", }, /* Continue */ - { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ - { "write", "OK", write_regs }, /* Write registers */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */ + { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs, emul_reset }, /* Write registers */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "sys_open", NULL, check_single_step }, { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ - { "", "" }, + { "", "", get_cont_catch, put_cont_catch }, }; /* @@ -639,7 +727,7 @@ static int validate_simple_test(char *put_str) while (*chk_str != '\0' && *put_str != '\0') { /* If someone does a * to match the rest of the string, allow - * it, or stop if the recieved string is complete. + * it, or stop if the received string is complete. */ if (*put_str == '#' || *chk_str == '*') return 0; @@ -690,8 +778,8 @@ static int run_simple_test(int is_get_char, int chr) /* This callback is a put char which is when kgdb sends data to * this I/O module. 
*/ - if (ts.tst[ts.idx].get[0] == '\0' && - ts.tst[ts.idx].put[0] == '\0') { + if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' && + !ts.tst[ts.idx].get_handler) { eprintk("kgdbts: ERROR: beyond end of test on" " '%s' line %i\n", ts.name, ts.idx); return 0; @@ -904,6 +992,17 @@ static void kgdbts_run_tests(void) if (ptr) sstep_test = simple_strtol(ptr+1, NULL, 10); + /* All HW break point tests */ + if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) { + hwbreaks_ok = 1; + v1printk("kgdbts:RUN hw breakpoint test\n"); + run_breakpoint_test(1); + v1printk("kgdbts:RUN hw write breakpoint test\n"); + run_hw_break_test(1); + v1printk("kgdbts:RUN access write breakpoint test\n"); + run_hw_break_test(0); + } + /* required internal KGDB tests */ v1printk("kgdbts:RUN plant and detach test\n"); run_plant_and_detach_test(0); @@ -921,35 +1020,11 @@ static void kgdbts_run_tests(void) /* ===Optional tests=== */ - /* All HW break point tests */ - if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) { - hwbreaks_ok = 1; - v1printk("kgdbts:RUN hw breakpoint test\n"); - run_breakpoint_test(1); - v1printk("kgdbts:RUN hw write breakpoint test\n"); - run_hw_break_test(1); - v1printk("kgdbts:RUN access write breakpoint test\n"); - run_hw_break_test(0); - } - if (nmi_sleep) { v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep); run_nmi_sleep_test(nmi_sleep); } -#ifdef CONFIG_DEBUG_RODATA - /* Until there is an api to write to read-only text segments, use - * HW breakpoints for the remainder of any tests, else print a - * failure message if hw breakpoints do not work. - */ - if (!(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT && hwbreaks_ok)) { - eprintk("kgdbts: HW breakpoints do not work," - "skipping remaining tests\n"); - return; - } - force_hwbrks = 1; -#endif /* CONFIG_DEBUG_RODATA */ - /* If the do_fork test is run it will be the last test that is * executed because a kernel thread will be spawned at the very * end to unregister the debug hooks. @@ -982,7 +1057,7 @@ static void kgdbts_run_tests(void) static int kgdbts_option_setup(char *opt) { - if (strlen(opt) > MAX_CONFIG_LEN) { + if (strlen(opt) >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); return -ENOSPC; } @@ -1038,12 +1113,6 @@ static int __init init_kgdbts(void) return configure_kgdbts(); } -static void cleanup_kgdbts(void) -{ - if (configured == 1) - kgdb_unregister_io_module(&kgdbts_io_ops); -} - static int kgdbts_get_char(void) { int val = 0; @@ -1075,10 +1144,8 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) return 0; } - if (kgdb_connected) { - printk(KERN_ERR - "kgdbts: Cannot reconfigure while KGDB is connected.\n"); - + if (configured == 1) { + printk(KERN_ERR "kgdbts: ERROR: Already configured and running.\n"); return -EBUSY; } @@ -1087,9 +1154,6 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) if (config[len - 1] == '\n') config[len - 1] = '\0'; - if (configured == 1) - cleanup_kgdbts(); - /* Go and configure with the new params. 
*/ return configure_kgdbts(); } @@ -1117,7 +1181,6 @@ static struct kgdb_io kgdbts_io_ops = { }; module_init(init_kgdbts); -module_exit(cleanup_kgdbts); module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644); MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]"); MODULE_DESCRIPTION("KGDB Test Suite"); diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c new file mode 100644 index 00000000000..0a1565e63c7 --- /dev/null +++ b/drivers/misc/lattice-ecp3-config.c @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2012 Stefan Roese <sr@denx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/spi/spi.h> +#include <linux/platform_device.h> +#include <linux/delay.h> + +#define FIRMWARE_NAME "lattice-ecp3.bit" + +/* + * The JTAG ID's of the supported FPGA's. The ID is 32bit wide + * reversed as noted in the manual. + */ +#define ID_ECP3_17 0xc2088080 +#define ID_ECP3_35 0xc2048080 + +/* FPGA commands */ +#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */ +#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */ +#define FPGA_CMD_CLEAR 0x70 +#define FPGA_CMD_REFRESH 0x71 +#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */ +#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */ +#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */ + +/* + * The status register is 32bit revered, DONE is bit 17 from the TN1222.pdf + * (LatticeECP3 Slave SPI Port User's Guide) + */ +#define FPGA_STATUS_DONE 0x00004000 +#define FPGA_STATUS_CLEARED 0x00010000 + +#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */ +#define FPGA_CLEAR_MSLEEP 10 +#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP) + +struct fpga_data { + struct completion fw_loaded; +}; + +struct ecp3_dev { + u32 jedec_id; + char *name; +}; + +static const struct ecp3_dev ecp3_dev[] = { + { + .jedec_id = ID_ECP3_17, + .name = "Lattice ECP3-17", + }, + { + .jedec_id = ID_ECP3_35, + .name = "Lattice ECP3-35", + }, +}; + +static void firmware_load(const struct firmware *fw, void *context) +{ + struct spi_device *spi = (struct spi_device *)context; + struct fpga_data *data = spi_get_drvdata(spi); + u8 *buffer; + int ret; + u8 txbuf[8]; + u8 rxbuf[8]; + int rx_len = 8; + int i; + u32 jedec_id; + u32 status; + + if (fw->size == 0) { + dev_err(&spi->dev, "Error: Firmware size is 0!\n"); + return; + } + + /* Fill dummy data (24 stuffing bits for commands) */ + txbuf[1] = 0x00; + txbuf[2] = 0x00; + txbuf[3] = 0x00; + + /* Trying to speak with the FPGA via SPI... 
*/ + txbuf[0] = FPGA_CMD_READ_ID; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]); + jedec_id = *(u32 *)&rxbuf[4]; + + for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) { + if (jedec_id == ecp3_dev[i].jedec_id) + break; + } + if (i == ARRAY_SIZE(ecp3_dev)) { + dev_err(&spi->dev, + "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", + jedec_id); + return; + } + + dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); + + txbuf[0] = FPGA_CMD_READ_STATUS; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]); + + buffer = kzalloc(fw->size + 8, GFP_KERNEL); + if (!buffer) { + dev_err(&spi->dev, "Error: Can't allocate memory!\n"); + return; + } + + /* + * Insert WRITE_INC command into stream (one SPI frame) + */ + buffer[0] = FPGA_CMD_WRITE_INC; + buffer[1] = 0xff; + buffer[2] = 0xff; + buffer[3] = 0xff; + memcpy(buffer + 4, fw->data, fw->size); + + txbuf[0] = FPGA_CMD_REFRESH; + ret = spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_WRITE_EN; + ret = spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_CLEAR; + ret = spi_write(spi, txbuf, 4); + + /* + * Wait for FPGA memory to become cleared + */ + for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) { + txbuf[0] = FPGA_CMD_READ_STATUS; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + status = *(u32 *)&rxbuf[4]; + if (status == FPGA_STATUS_CLEARED) + break; + + msleep(FPGA_CLEAR_MSLEEP); + } + + if (i == FPGA_CLEAR_LOOP_COUNT) { + dev_err(&spi->dev, + "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", + status); + kfree(buffer); + return; + } + + dev_info(&spi->dev, "Configuring the FPGA...\n"); + ret = spi_write(spi, buffer, fw->size + 8); + + txbuf[0] = FPGA_CMD_WRITE_DIS; + ret = spi_write(spi, txbuf, 4); + + txbuf[0] = FPGA_CMD_READ_STATUS; + ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); + dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]); + status = *(u32 *)&rxbuf[4]; + + /* Check result */ + if (status & FPGA_STATUS_DONE) + dev_info(&spi->dev, "FPGA successfully configured!\n"); + else + dev_info(&spi->dev, "FPGA not configured (DONE not set)\n"); + + /* + * Don't forget to release the firmware again + */ + release_firmware(fw); + + kfree(buffer); + + complete(&data->fw_loaded); +} + +static int lattice_ecp3_probe(struct spi_device *spi) +{ + struct fpga_data *data; + int err; + + data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL); + if (!data) { + dev_err(&spi->dev, "Memory allocation for fpga_data failed\n"); + return -ENOMEM; + } + spi_set_drvdata(spi, data); + + init_completion(&data->fw_loaded); + err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, + FIRMWARE_NAME, &spi->dev, + GFP_KERNEL, spi, firmware_load); + if (err) { + dev_err(&spi->dev, "Firmware loading failed with %d!\n", err); + return err; + } + + dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n"); + + return 0; +} + +static int lattice_ecp3_remove(struct spi_device *spi) +{ + struct fpga_data *data = spi_get_drvdata(spi); + + wait_for_completion(&data->fw_loaded); + + return 0; +} + +static const struct spi_device_id lattice_ecp3_id[] = { + { "ecp3-17", 0 }, + { "ecp3-35", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, lattice_ecp3_id); + +static struct spi_driver lattice_ecp3_driver = { + .driver = { + .name = "lattice-ecp3", + .owner = THIS_MODULE, + }, + .probe = lattice_ecp3_probe, + .remove = lattice_ecp3_remove, + .id_table = lattice_ecp3_id, +}; + 
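/*
 * [Editor's sketch -- not part of the patch.] The driver binds by SPI
 * modalias, so a board file would typically declare the device along
 * the following lines; the bus number, chip select and clock rate are
 * made-up placeholders for illustration:
 */
static struct spi_board_info ecp3_board_info __initdata = {
	.modalias	= "ecp3-35",	/* matches lattice_ecp3_id[] */
	.max_speed_hz	= 10000000,	/* placeholder rate */
	.bus_num	= 0,		/* placeholder bus */
	.chip_select	= 0,		/* placeholder CS */
};
/*
 * Registered once during board init with
 * spi_register_board_info(&ecp3_board_info, 1). After that, probe()
 * above schedules request_firmware_nowait() for "lattice-ecp3.bit"
 * (looked up under /lib/firmware), and remove() blocks on the
 * fw_loaded completion until firmware_load() has finished.
 */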
+module_spi_driver(lattice_ecp3_driver); + +MODULE_AUTHOR("Stefan Roese <sr@denx.de>"); +MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig new file mode 100644 index 00000000000..8f474e6fc7b --- /dev/null +++ b/drivers/misc/lis3lv02d/Kconfig @@ -0,0 +1,37 @@ +# +# STMicroelectonics LIS3LV02D and similar accelerometers +# + +config SENSORS_LIS3_SPI + tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)" + depends on !ACPI && SPI_MASTER && INPUT + select SENSORS_LIS3LV02D + default n + help + This driver provides support for the LIS3LV02Dx accelerometer connected + via SPI. The accelerometer data is readable via + /sys/devices/platform/lis3lv02d. + + This driver also provides an absolute input class device, allowing + the laptop to act as a pinball machine-esque joystick. + + This driver can also be built as modules. If so, the core module + will be called lis3lv02d and a specific module for the SPI transport + is called lis3lv02d_spi. + +config SENSORS_LIS3_I2C + tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)" + depends on I2C && INPUT + select SENSORS_LIS3LV02D + default n + help + This driver provides support for the LIS3LV02Dx accelerometer connected + via I2C. The accelerometer data is readable via + /sys/devices/platform/lis3lv02d. + + This driver also provides an absolute input class device, allowing + the device to act as a pinball machine-esque joystick. + + This driver can also be built as modules. If so, the core module + will be called lis3lv02d and a specific module for the I2C transport + is called lis3lv02d_i2c. diff --git a/drivers/misc/lis3lv02d/Makefile b/drivers/misc/lis3lv02d/Makefile new file mode 100644 index 00000000000..4bf58b16fcf --- /dev/null +++ b/drivers/misc/lis3lv02d/Makefile @@ -0,0 +1,7 @@ +# +# STMicroelectonics LIS3LV02D and similar accelerometers +# + +obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o +obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d_spi.o +obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d_i2c.o diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c new file mode 100644 index 00000000000..3ef4627f9cb --- /dev/null +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -0,0 +1,1244 @@ +/* + * lis3lv02d.c - ST LIS3LV02DL accelerometer driver + * + * Copyright (C) 2007-2008 Yan Burman + * Copyright (C) 2008 Eric Piel + * Copyright (C) 2008-2009 Pavel Machek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/dmi.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/input-polldev.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/freezer.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/pm_runtime.h> +#include <linux/atomic.h> +#include <linux/of_device.h> +#include "lis3lv02d.h" + +#define DRIVER_NAME "lis3lv02d" + +/* joystick device poll interval in milliseconds */ +#define MDPS_POLL_INTERVAL 50 +#define MDPS_POLL_MIN 0 +#define MDPS_POLL_MAX 2000 + +#define LIS3_SYSFS_POWERDOWN_DELAY 5000 /* In milliseconds */ + +#define SELFTEST_OK 0 +#define SELFTEST_FAIL -1 +#define SELFTEST_IRQ -2 + +#define IRQ_LINE0 0 +#define IRQ_LINE1 1 + +/* + * The sensor can also generate interrupts (DRDY) but it's pretty pointless + * because they are generated even if the data do not change. So it's better + * to keep the interrupt for the free-fall event. The values are updated at + * 40Hz (at the lowest frequency), but as it can be pretty time consuming on + * some low processor, we poll the sensor only at 20Hz... enough for the + * joystick. + */ + +#define LIS3_PWRON_DELAY_WAI_12B (5000) +#define LIS3_PWRON_DELAY_WAI_8B (3000) + +/* + * LIS3LV02D spec says 1024 LSBs corresponds 1 G -> 1LSB is 1000/1024 mG + * LIS302D spec says: 18 mG / digit + * LIS3_ACCURACY is used to increase accuracy of the intermediate + * calculation results. + */ +#define LIS3_ACCURACY 1024 +/* Sensitivity values for -2G +2G scale */ +#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024) +#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY) + +/* + * LIS331DLH spec says 1LSBs corresponds 4G/4096 -> 1LSB is 1000/1024 mG. + * Below macros defines sensitivity values for +/-2G. Dataout bits for + * +/-2G range is 12 bits so 4 bits adjustment must be done to get 12bit + * data from 16bit value. Currently this driver supports only 2G range. 
+ */ +#define LIS3DLH_SENSITIVITY_2G ((LIS3_ACCURACY * 1000) / 1024) +#define SHIFT_ADJ_2G 4 + +#define LIS3_DEFAULT_FUZZ_12B 3 +#define LIS3_DEFAULT_FLAT_12B 3 +#define LIS3_DEFAULT_FUZZ_8B 1 +#define LIS3_DEFAULT_FLAT_8B 1 + +struct lis3lv02d lis3_dev = { + .misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait), +}; +EXPORT_SYMBOL_GPL(lis3_dev); + +/* just like param_set_int() but does sanity-check so that it won't point + * over the axis array size + */ +static int param_set_axis(const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + if (!ret) { + int val = *(int *)kp->arg; + if (val < 0) + val = -val; + if (!val || val > 3) + return -EINVAL; + } + return ret; +} + +static struct kernel_param_ops param_ops_axis = { + .set = param_set_axis, + .get = param_get_int, +}; + +#define param_check_axis(name, p) param_check_int(name, p) + +module_param_array_named(axes, lis3_dev.ac.as_array, axis, NULL, 0644); +MODULE_PARM_DESC(axes, "Axis-mapping for x,y,z directions"); + +static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg) +{ + s8 lo; + if (lis3->read(lis3, reg, &lo) < 0) + return 0; + + return lo; +} + +static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg) +{ + u8 lo, hi; + + lis3->read(lis3, reg - 1, &lo); + lis3->read(lis3, reg, &hi); + /* In "12 bit right justified" mode, bit 6, bit 7, bit 8 = bit 5 */ + return (s16)((hi << 8) | lo); +} + +/* 12bits for 2G range, 13 bits for 4G range and 14 bits for 8G range */ +static s16 lis331dlh_read_data(struct lis3lv02d *lis3, int reg) +{ + u8 lo, hi; + int v; + + lis3->read(lis3, reg - 1, &lo); + lis3->read(lis3, reg, &hi); + v = (int) ((hi << 8) | lo); + + return (s16) v >> lis3->shift_adj; +} + +/** + * lis3lv02d_get_axis - For the given axis, give the value converted + * @axis: 1,2,3 - can also be negative + * @hw_values: raw values returned by the hardware + * + * Returns the converted value. 
+ */ +static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3]) +{ + if (axis > 0) + return hw_values[axis - 1]; + else + return -hw_values[-axis - 1]; +} + +/** + * lis3lv02d_get_xyz - Get X, Y and Z axis values from the accelerometer + * @lis3: pointer to the device struct + * @x: where to store the X axis value + * @y: where to store the Y axis value + * @z: where to store the Z axis value + * + * Note that 40Hz input device can eat up about 10% CPU at 800MHZ + */ +static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z) +{ + int position[3]; + int i; + + if (lis3->blkread) { + if (lis3->whoami == WAI_12B) { + u16 data[3]; + lis3->blkread(lis3, OUTX_L, 6, (u8 *)data); + for (i = 0; i < 3; i++) + position[i] = (s16)le16_to_cpu(data[i]); + } else { + u8 data[5]; + /* Data: x, dummy, y, dummy, z */ + lis3->blkread(lis3, OUTX, 5, data); + for (i = 0; i < 3; i++) + position[i] = (s8)data[i * 2]; + } + } else { + position[0] = lis3->read_data(lis3, OUTX); + position[1] = lis3->read_data(lis3, OUTY); + position[2] = lis3->read_data(lis3, OUTZ); + } + + for (i = 0; i < 3; i++) + position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY; + + *x = lis3lv02d_get_axis(lis3->ac.x, position); + *y = lis3lv02d_get_axis(lis3->ac.y, position); + *z = lis3lv02d_get_axis(lis3->ac.z, position); +} + +/* conversion btw sampling rate and the register values */ +static int lis3_12_rates[4] = {40, 160, 640, 2560}; +static int lis3_8_rates[2] = {100, 400}; +static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000}; +static int lis3_3dlh_rates[4] = {50, 100, 400, 1000}; + +/* ODR is Output Data Rate */ +static int lis3lv02d_get_odr(struct lis3lv02d *lis3) +{ + u8 ctrl; + int shift; + + lis3->read(lis3, CTRL_REG1, &ctrl); + ctrl &= lis3->odr_mask; + shift = ffs(lis3->odr_mask) - 1; + return lis3->odrs[(ctrl >> shift)]; +} + +static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3) +{ + int div = lis3lv02d_get_odr(lis3); + + if (WARN_ONCE(div == 0, "device returned spurious data")) + return -ENXIO; + + /* LIS3 power on delay is quite long */ + msleep(lis3->pwron_delay / div); + return 0; +} + +static int lis3lv02d_set_odr(struct lis3lv02d *lis3, int rate) +{ + u8 ctrl; + int i, len, shift; + + if (!rate) + return -EINVAL; + + lis3->read(lis3, CTRL_REG1, &ctrl); + ctrl &= ~lis3->odr_mask; + len = 1 << hweight_long(lis3->odr_mask); /* # of possible values */ + shift = ffs(lis3->odr_mask) - 1; + + for (i = 0; i < len; i++) + if (lis3->odrs[i] == rate) { + lis3->write(lis3, CTRL_REG1, + ctrl | (i << shift)); + return 0; + } + return -EINVAL; +} + +static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3]) +{ + u8 ctlreg, reg; + s16 x, y, z; + u8 selftest; + int ret; + u8 ctrl_reg_data; + unsigned char irq_cfg; + + mutex_lock(&lis3->mutex); + + irq_cfg = lis3->irq_cfg; + if (lis3->whoami == WAI_8B) { + lis3->data_ready_count[IRQ_LINE0] = 0; + lis3->data_ready_count[IRQ_LINE1] = 0; + + /* Change interrupt cfg to data ready for selftest */ + atomic_inc(&lis3->wake_thread); + lis3->irq_cfg = LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY; + lis3->read(lis3, CTRL_REG3, &ctrl_reg_data); + lis3->write(lis3, CTRL_REG3, (ctrl_reg_data & + ~(LIS3_IRQ1_MASK | LIS3_IRQ2_MASK)) | + (LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY)); + } + + if ((lis3->whoami == WAI_3DC) || (lis3->whoami == WAI_3DLH)) { + ctlreg = CTRL_REG4; + selftest = CTRL4_ST0; + } else { + ctlreg = CTRL_REG1; + if (lis3->whoami == WAI_12B) + selftest = CTRL1_ST; + else + selftest = CTRL1_STP; + } + + 
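/*
 * [Editor's worked example -- explanation, not patch content.] The
 * selftest sequence below sets the chip's self-test bit, waits out the
 * power-on delay, and reports the per-axis difference between actuated
 * and normal readings. For a WAI_12B part at its slowest 40Hz ODR,
 * lis3lv02d_get_pwron_wait() sleeps LIS3_PWRON_DELAY_WAI_12B / 40 =
 * 125 ms; the resulting deltas in results[] are then checked against
 * the platform data's st_min_limits[]/st_max_limits[] window, with
 * SELFTEST_FAIL returned on any out-of-range axis.
 */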
lis3->read(lis3, ctlreg, &reg); + lis3->write(lis3, ctlreg, (reg | selftest)); + ret = lis3lv02d_get_pwron_wait(lis3); + if (ret) + goto fail; + + /* Read directly to avoid axis remap */ + x = lis3->read_data(lis3, OUTX); + y = lis3->read_data(lis3, OUTY); + z = lis3->read_data(lis3, OUTZ); + + /* back to normal settings */ + lis3->write(lis3, ctlreg, reg); + ret = lis3lv02d_get_pwron_wait(lis3); + if (ret) + goto fail; + + results[0] = x - lis3->read_data(lis3, OUTX); + results[1] = y - lis3->read_data(lis3, OUTY); + results[2] = z - lis3->read_data(lis3, OUTZ); + + ret = 0; + + if (lis3->whoami == WAI_8B) { + /* Restore original interrupt configuration */ + atomic_dec(&lis3->wake_thread); + lis3->write(lis3, CTRL_REG3, ctrl_reg_data); + lis3->irq_cfg = irq_cfg; + + if ((irq_cfg & LIS3_IRQ1_MASK) && + lis3->data_ready_count[IRQ_LINE0] < 2) { + ret = SELFTEST_IRQ; + goto fail; + } + + if ((irq_cfg & LIS3_IRQ2_MASK) && + lis3->data_ready_count[IRQ_LINE1] < 2) { + ret = SELFTEST_IRQ; + goto fail; + } + } + + if (lis3->pdata) { + int i; + for (i = 0; i < 3; i++) { + /* Check against selftest acceptance limits */ + if ((results[i] < lis3->pdata->st_min_limits[i]) || + (results[i] > lis3->pdata->st_max_limits[i])) { + ret = SELFTEST_FAIL; + goto fail; + } + } + } + + /* test passed */ +fail: + mutex_unlock(&lis3->mutex); + return ret; +} + +/* + * Order of registers in the list affects to order of the restore process. + * Perhaps it is a good idea to set interrupt enable register as a last one + * after all other configurations + */ +static u8 lis3_wai8_regs[] = { FF_WU_CFG_1, FF_WU_THS_1, FF_WU_DURATION_1, + FF_WU_CFG_2, FF_WU_THS_2, FF_WU_DURATION_2, + CLICK_CFG, CLICK_SRC, CLICK_THSY_X, CLICK_THSZ, + CLICK_TIMELIMIT, CLICK_LATENCY, CLICK_WINDOW, + CTRL_REG1, CTRL_REG2, CTRL_REG3}; + +static u8 lis3_wai12_regs[] = {FF_WU_CFG, FF_WU_THS_L, FF_WU_THS_H, + FF_WU_DURATION, DD_CFG, DD_THSI_L, DD_THSI_H, + DD_THSE_L, DD_THSE_H, + CTRL_REG1, CTRL_REG3, CTRL_REG2}; + +static inline void lis3_context_save(struct lis3lv02d *lis3) +{ + int i; + for (i = 0; i < lis3->regs_size; i++) + lis3->read(lis3, lis3->regs[i], &lis3->reg_cache[i]); + lis3->regs_stored = true; +} + +static inline void lis3_context_restore(struct lis3lv02d *lis3) +{ + int i; + if (lis3->regs_stored) + for (i = 0; i < lis3->regs_size; i++) + lis3->write(lis3, lis3->regs[i], lis3->reg_cache[i]); +} + +void lis3lv02d_poweroff(struct lis3lv02d *lis3) +{ + if (lis3->reg_ctrl) + lis3_context_save(lis3); + /* disable X,Y,Z axis and power down */ + lis3->write(lis3, CTRL_REG1, 0x00); + if (lis3->reg_ctrl) + lis3->reg_ctrl(lis3, LIS3_REG_OFF); +} +EXPORT_SYMBOL_GPL(lis3lv02d_poweroff); + +int lis3lv02d_poweron(struct lis3lv02d *lis3) +{ + int err; + u8 reg; + + lis3->init(lis3); + + /* + * Common configuration + * BDU: (12 bits sensors only) LSB and MSB values are not updated until + * both have been read. So the value read will always be correct. + * Set BOOT bit to refresh factory tuning values. 
+ */
+ if (lis3->pdata) {
+ lis3->read(lis3, CTRL_REG2, &reg);
+ if (lis3->whoami == WAI_12B)
+ reg |= CTRL2_BDU | CTRL2_BOOT;
+ else if (lis3->whoami == WAI_3DLH)
+ reg |= CTRL2_BOOT_3DLH;
+ else
+ reg |= CTRL2_BOOT_8B;
+ lis3->write(lis3, CTRL_REG2, reg);
+
+ if (lis3->whoami == WAI_3DLH) {
+ lis3->read(lis3, CTRL_REG4, &reg);
+ reg |= CTRL4_BDU;
+ lis3->write(lis3, CTRL_REG4, reg);
+ }
+ }
+
+ err = lis3lv02d_get_pwron_wait(lis3);
+ if (err)
+ return err;
+
+ if (lis3->reg_ctrl)
+ lis3_context_restore(lis3);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+
+
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+{
+ struct lis3lv02d *lis3 = pidev->private;
+ int x, y, z;
+
+ mutex_lock(&lis3->mutex);
+ lis3lv02d_get_xyz(lis3, &x, &y, &z);
+ input_report_abs(pidev->input, ABS_X, x);
+ input_report_abs(pidev->input, ABS_Y, y);
+ input_report_abs(pidev->input, ABS_Z, z);
+ input_sync(pidev->input);
+ mutex_unlock(&lis3->mutex);
+}
+
+static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
+{
+ struct lis3lv02d *lis3 = pidev->private;
+
+ if (lis3->pm_dev)
+ pm_runtime_get_sync(lis3->pm_dev);
+
+ if (lis3->pdata && lis3->whoami == WAI_8B && lis3->idev)
+ atomic_set(&lis3->wake_thread, 1);
+ /*
+ * Update coordinates for the case where the poll interval is 0 and
+ * the chip is running purely under interrupt control
+ */
+ lis3lv02d_joystick_poll(pidev);
+}
+
+static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
+{
+ struct lis3lv02d *lis3 = pidev->private;
+
+ atomic_set(&lis3->wake_thread, 0);
+ if (lis3->pm_dev)
+ pm_runtime_put(lis3->pm_dev);
+}
+
+static irqreturn_t lis302dl_interrupt(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+
+ if (!test_bit(0, &lis3->misc_opened))
+ goto out;
+
+ /*
+ * Be careful: on some HP laptops the BIOS forces DD when running on
+ * battery with the lid closed, which leads to interrupts as soon as
+ * the laptop is moved even slightly.
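+ *
+ * (Editor's note: this is the hard-IRQ half; it only bumps the event
+ * count and wakes any readers.  Register access is deferred to the
+ * threaded handlers by returning IRQ_WAKE_THREAD while wake_thread
+ * is set.)
+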
+ */ + atomic_inc(&lis3->count); + + wake_up_interruptible(&lis3->misc_wait); + kill_fasync(&lis3->async_queue, SIGIO, POLL_IN); +out: + if (atomic_read(&lis3->wake_thread)) + return IRQ_WAKE_THREAD; + return IRQ_HANDLED; +} + +static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3) +{ + struct input_dev *dev = lis3->idev->input; + u8 click_src; + + mutex_lock(&lis3->mutex); + lis3->read(lis3, CLICK_SRC, &click_src); + + if (click_src & CLICK_SINGLE_X) { + input_report_key(dev, lis3->mapped_btns[0], 1); + input_report_key(dev, lis3->mapped_btns[0], 0); + } + + if (click_src & CLICK_SINGLE_Y) { + input_report_key(dev, lis3->mapped_btns[1], 1); + input_report_key(dev, lis3->mapped_btns[1], 0); + } + + if (click_src & CLICK_SINGLE_Z) { + input_report_key(dev, lis3->mapped_btns[2], 1); + input_report_key(dev, lis3->mapped_btns[2], 0); + } + input_sync(dev); + mutex_unlock(&lis3->mutex); +} + +static inline void lis302dl_data_ready(struct lis3lv02d *lis3, int index) +{ + int dummy; + + /* Dummy read to ack interrupt */ + lis3lv02d_get_xyz(lis3, &dummy, &dummy, &dummy); + lis3->data_ready_count[index]++; +} + +static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data) +{ + struct lis3lv02d *lis3 = data; + u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ1_MASK; + + if (irq_cfg == LIS3_IRQ1_CLICK) + lis302dl_interrupt_handle_click(lis3); + else if (unlikely(irq_cfg == LIS3_IRQ1_DATA_READY)) + lis302dl_data_ready(lis3, IRQ_LINE0); + else + lis3lv02d_joystick_poll(lis3->idev); + + return IRQ_HANDLED; +} + +static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data) +{ + struct lis3lv02d *lis3 = data; + u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ2_MASK; + + if (irq_cfg == LIS3_IRQ2_CLICK) + lis302dl_interrupt_handle_click(lis3); + else if (unlikely(irq_cfg == LIS3_IRQ2_DATA_READY)) + lis302dl_data_ready(lis3, IRQ_LINE1); + else + lis3lv02d_joystick_poll(lis3->idev); + + return IRQ_HANDLED; +} + +static int lis3lv02d_misc_open(struct inode *inode, struct file *file) +{ + struct lis3lv02d *lis3 = container_of(file->private_data, + struct lis3lv02d, miscdev); + + if (test_and_set_bit(0, &lis3->misc_opened)) + return -EBUSY; /* already open */ + + if (lis3->pm_dev) + pm_runtime_get_sync(lis3->pm_dev); + + atomic_set(&lis3->count, 0); + return 0; +} + +static int lis3lv02d_misc_release(struct inode *inode, struct file *file) +{ + struct lis3lv02d *lis3 = container_of(file->private_data, + struct lis3lv02d, miscdev); + + clear_bit(0, &lis3->misc_opened); /* release the device */ + if (lis3->pm_dev) + pm_runtime_put(lis3->pm_dev); + return 0; +} + +static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct lis3lv02d *lis3 = container_of(file->private_data, + struct lis3lv02d, miscdev); + + DECLARE_WAITQUEUE(wait, current); + u32 data; + unsigned char byte_data; + ssize_t retval = 1; + + if (count < 1) + return -EINVAL; + + add_wait_queue(&lis3->misc_wait, &wait); + while (true) { + set_current_state(TASK_INTERRUPTIBLE); + data = atomic_xchg(&lis3->count, 0); + if (data) + break; + + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + + schedule(); + } + + if (data < 255) + byte_data = data; + else + byte_data = 255; + + /* make sure we are not going into copy_to_user() with + * TASK_INTERRUPTIBLE state */ + set_current_state(TASK_RUNNING); + if (copy_to_user(buf, &byte_data, sizeof(byte_data))) + retval = -EFAULT; + +out: + 
__set_current_state(TASK_RUNNING);
+ remove_wait_queue(&lis3->misc_wait, &wait);
+
+ return retval;
+}
+
+static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ poll_wait(file, &lis3->misc_wait, wait);
+ if (atomic_read(&lis3->count))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
+static int lis3lv02d_misc_fasync(int fd, struct file *file, int on)
+{
+ struct lis3lv02d *lis3 = container_of(file->private_data,
+ struct lis3lv02d, miscdev);
+
+ return fasync_helper(fd, file, on, &lis3->async_queue);
+}
+
+static const struct file_operations lis3lv02d_misc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = lis3lv02d_misc_read,
+ .open = lis3lv02d_misc_open,
+ .release = lis3lv02d_misc_release,
+ .poll = lis3lv02d_misc_poll,
+ .fasync = lis3lv02d_misc_fasync,
+};
+
+int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
+{
+ struct input_dev *input_dev;
+ int err;
+ int max_val, fuzz, flat;
+ int btns[] = {BTN_X, BTN_Y, BTN_Z};
+
+ if (lis3->idev)
+ return -EINVAL;
+
+ lis3->idev = input_allocate_polled_device();
+ if (!lis3->idev)
+ return -ENOMEM;
+
+ lis3->idev->poll = lis3lv02d_joystick_poll;
+ lis3->idev->open = lis3lv02d_joystick_open;
+ lis3->idev->close = lis3lv02d_joystick_close;
+ lis3->idev->poll_interval = MDPS_POLL_INTERVAL;
+ lis3->idev->poll_interval_min = MDPS_POLL_MIN;
+ lis3->idev->poll_interval_max = MDPS_POLL_MAX;
+ lis3->idev->private = lis3;
+ input_dev = lis3->idev->input;
+
+ input_dev->name = "ST LIS3LV02DL Accelerometer";
+ input_dev->phys = DRIVER_NAME "/input0";
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0;
+ input_dev->dev.parent = &lis3->pdev->dev;
+
+ set_bit(EV_ABS, input_dev->evbit);
+ max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
+ if (lis3->whoami == WAI_12B) {
+ fuzz = LIS3_DEFAULT_FUZZ_12B;
+ flat = LIS3_DEFAULT_FLAT_12B;
+ } else {
+ fuzz = LIS3_DEFAULT_FUZZ_8B;
+ flat = LIS3_DEFAULT_FLAT_8B;
+ }
+ fuzz = (fuzz * lis3->scale) / LIS3_ACCURACY;
+ flat = (flat * lis3->scale) / LIS3_ACCURACY;
+
+ input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+
+ lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
+ lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
+ lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
+
+ err = input_register_polled_device(lis3->idev);
+ if (err) {
+ input_free_polled_device(lis3->idev);
+ lis3->idev = NULL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
+
+void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
+{
+ if (lis3->irq)
+ free_irq(lis3->irq, lis3);
+ if (lis3->pdata && lis3->pdata->irq2)
+ free_irq(lis3->pdata->irq2, lis3);
+
+ if (!lis3->idev)
+ return;
+
+ if (lis3->irq)
+ misc_deregister(&lis3->miscdev);
+ input_unregister_polled_device(lis3->idev);
+ input_free_polled_device(lis3->idev);
+ lis3->idev = NULL;
+}
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
+
+/* Sysfs stuff */
+static void lis3lv02d_sysfs_poweron(struct lis3lv02d *lis3)
+{
+ /*
+ * Sysfs accesses are short-lived, so the put call is issued
+ * immediately after the get call. However, the chip is kept
+ * running for a while by scheduling a delayed suspend, so that
+ * periodic sysfs calls don't suffer from the relatively long
+ * power-up time.
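+ *
+ * (Editor's sketch of the idiom used below: pm_runtime_get_sync()
+ * powers the chip up synchronously, pm_runtime_put_noidle() drops
+ * the reference without idling it, and pm_schedule_suspend() arms
+ * the delayed power-down.)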
+ */ + + if (lis3->pm_dev) { + pm_runtime_get_sync(lis3->pm_dev); + pm_runtime_put_noidle(lis3->pm_dev); + pm_schedule_suspend(lis3->pm_dev, LIS3_SYSFS_POWERDOWN_DELAY); + } +} + +static ssize_t lis3lv02d_selftest_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + s16 values[3]; + + static const char ok[] = "OK"; + static const char fail[] = "FAIL"; + static const char irq[] = "FAIL_IRQ"; + const char *res; + + lis3lv02d_sysfs_poweron(lis3); + switch (lis3lv02d_selftest(lis3, values)) { + case SELFTEST_FAIL: + res = fail; + break; + case SELFTEST_IRQ: + res = irq; + break; + case SELFTEST_OK: + default: + res = ok; + break; + } + return sprintf(buf, "%s %d %d %d\n", res, + values[0], values[1], values[2]); +} + +static ssize_t lis3lv02d_position_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + int x, y, z; + + lis3lv02d_sysfs_poweron(lis3); + mutex_lock(&lis3->mutex); + lis3lv02d_get_xyz(lis3, &x, &y, &z); + mutex_unlock(&lis3->mutex); + return sprintf(buf, "(%d,%d,%d)\n", x, y, z); +} + +static ssize_t lis3lv02d_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + + lis3lv02d_sysfs_poweron(lis3); + return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3)); +} + +static ssize_t lis3lv02d_rate_set(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct lis3lv02d *lis3 = dev_get_drvdata(dev); + unsigned long rate; + int ret; + + ret = kstrtoul(buf, 0, &rate); + if (ret) + return ret; + + lis3lv02d_sysfs_poweron(lis3); + if (lis3lv02d_set_odr(lis3, rate)) + return -EINVAL; + + return count; +} + +static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL); +static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL); +static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show, + lis3lv02d_rate_set); + +static struct attribute *lis3lv02d_attributes[] = { + &dev_attr_selftest.attr, + &dev_attr_position.attr, + &dev_attr_rate.attr, + NULL +}; + +static struct attribute_group lis3lv02d_attribute_group = { + .attrs = lis3lv02d_attributes +}; + + +static int lis3lv02d_add_fs(struct lis3lv02d *lis3) +{ + lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + if (IS_ERR(lis3->pdev)) + return PTR_ERR(lis3->pdev); + + platform_set_drvdata(lis3->pdev, lis3); + return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group); +} + +int lis3lv02d_remove_fs(struct lis3lv02d *lis3) +{ + sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group); + platform_device_unregister(lis3->pdev); + if (lis3->pm_dev) { + /* Barrier after the sysfs remove */ + pm_runtime_barrier(lis3->pm_dev); + + /* SYSFS may have left chip running. 
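+ * (each sysfs access only schedules a delayed runtime suspend, so
+ * the chip may still be powered at this point).
+ *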
Turn off if necessary */ + if (!pm_runtime_suspended(lis3->pm_dev)) + lis3lv02d_poweroff(lis3); + + pm_runtime_disable(lis3->pm_dev); + pm_runtime_set_suspended(lis3->pm_dev); + } + kfree(lis3->reg_cache); + return 0; +} +EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs); + +static void lis3lv02d_8b_configure(struct lis3lv02d *lis3, + struct lis3lv02d_platform_data *p) +{ + int err; + int ctrl2 = p->hipass_ctrl; + + if (p->click_flags) { + lis3->write(lis3, CLICK_CFG, p->click_flags); + lis3->write(lis3, CLICK_TIMELIMIT, p->click_time_limit); + lis3->write(lis3, CLICK_LATENCY, p->click_latency); + lis3->write(lis3, CLICK_WINDOW, p->click_window); + lis3->write(lis3, CLICK_THSZ, p->click_thresh_z & 0xf); + lis3->write(lis3, CLICK_THSY_X, + (p->click_thresh_x & 0xf) | + (p->click_thresh_y << 4)); + + if (lis3->idev) { + struct input_dev *input_dev = lis3->idev->input; + input_set_capability(input_dev, EV_KEY, BTN_X); + input_set_capability(input_dev, EV_KEY, BTN_Y); + input_set_capability(input_dev, EV_KEY, BTN_Z); + } + } + + if (p->wakeup_flags) { + lis3->write(lis3, FF_WU_CFG_1, p->wakeup_flags); + lis3->write(lis3, FF_WU_THS_1, p->wakeup_thresh & 0x7f); + /* pdata value + 1 to keep this backward compatible*/ + lis3->write(lis3, FF_WU_DURATION_1, p->duration1 + 1); + ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata*/ + } + + if (p->wakeup_flags2) { + lis3->write(lis3, FF_WU_CFG_2, p->wakeup_flags2); + lis3->write(lis3, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f); + /* pdata value + 1 to keep this backward compatible*/ + lis3->write(lis3, FF_WU_DURATION_2, p->duration2 + 1); + ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata*/ + } + /* Configure hipass filters */ + lis3->write(lis3, CTRL_REG2, ctrl2); + + if (p->irq2) { + err = request_threaded_irq(p->irq2, + NULL, + lis302dl_interrupt_thread2_8b, + IRQF_TRIGGER_RISING | IRQF_ONESHOT | + (p->irq_flags2 & IRQF_TRIGGER_MASK), + DRIVER_NAME, lis3); + if (err < 0) + pr_err("No second IRQ. 
Limited functionality\n"); + } +} + +#ifdef CONFIG_OF +int lis3lv02d_init_dt(struct lis3lv02d *lis3) +{ + struct lis3lv02d_platform_data *pdata; + struct device_node *np = lis3->of_node; + u32 val; + + if (!lis3->of_node) + return 0; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + if (of_get_property(np, "st,click-single-x", NULL)) + pdata->click_flags |= LIS3_CLICK_SINGLE_X; + if (of_get_property(np, "st,click-double-x", NULL)) + pdata->click_flags |= LIS3_CLICK_DOUBLE_X; + + if (of_get_property(np, "st,click-single-y", NULL)) + pdata->click_flags |= LIS3_CLICK_SINGLE_Y; + if (of_get_property(np, "st,click-double-y", NULL)) + pdata->click_flags |= LIS3_CLICK_DOUBLE_Y; + + if (of_get_property(np, "st,click-single-z", NULL)) + pdata->click_flags |= LIS3_CLICK_SINGLE_Z; + if (of_get_property(np, "st,click-double-z", NULL)) + pdata->click_flags |= LIS3_CLICK_DOUBLE_Z; + + if (!of_property_read_u32(np, "st,click-threshold-x", &val)) + pdata->click_thresh_x = val; + if (!of_property_read_u32(np, "st,click-threshold-y", &val)) + pdata->click_thresh_y = val; + if (!of_property_read_u32(np, "st,click-threshold-z", &val)) + pdata->click_thresh_z = val; + + if (!of_property_read_u32(np, "st,click-time-limit", &val)) + pdata->click_time_limit = val; + if (!of_property_read_u32(np, "st,click-latency", &val)) + pdata->click_latency = val; + if (!of_property_read_u32(np, "st,click-window", &val)) + pdata->click_window = val; + + if (of_get_property(np, "st,irq1-disable", NULL)) + pdata->irq_cfg |= LIS3_IRQ1_DISABLE; + if (of_get_property(np, "st,irq1-ff-wu-1", NULL)) + pdata->irq_cfg |= LIS3_IRQ1_FF_WU_1; + if (of_get_property(np, "st,irq1-ff-wu-2", NULL)) + pdata->irq_cfg |= LIS3_IRQ1_FF_WU_2; + if (of_get_property(np, "st,irq1-data-ready", NULL)) + pdata->irq_cfg |= LIS3_IRQ1_DATA_READY; + if (of_get_property(np, "st,irq1-click", NULL)) + pdata->irq_cfg |= LIS3_IRQ1_CLICK; + + if (of_get_property(np, "st,irq2-disable", NULL)) + pdata->irq_cfg |= LIS3_IRQ2_DISABLE; + if (of_get_property(np, "st,irq2-ff-wu-1", NULL)) + pdata->irq_cfg |= LIS3_IRQ2_FF_WU_1; + if (of_get_property(np, "st,irq2-ff-wu-2", NULL)) + pdata->irq_cfg |= LIS3_IRQ2_FF_WU_2; + if (of_get_property(np, "st,irq2-data-ready", NULL)) + pdata->irq_cfg |= LIS3_IRQ2_DATA_READY; + if (of_get_property(np, "st,irq2-click", NULL)) + pdata->irq_cfg |= LIS3_IRQ2_CLICK; + + if (of_get_property(np, "st,irq-open-drain", NULL)) + pdata->irq_cfg |= LIS3_IRQ_OPEN_DRAIN; + if (of_get_property(np, "st,irq-active-low", NULL)) + pdata->irq_cfg |= LIS3_IRQ_ACTIVE_LOW; + + if (!of_property_read_u32(np, "st,wu-duration-1", &val)) + pdata->duration1 = val; + if (!of_property_read_u32(np, "st,wu-duration-2", &val)) + pdata->duration2 = val; + + if (of_get_property(np, "st,wakeup-x-lo", NULL)) + pdata->wakeup_flags |= LIS3_WAKEUP_X_LO; + if (of_get_property(np, "st,wakeup-x-hi", NULL)) + pdata->wakeup_flags |= LIS3_WAKEUP_X_HI; + if (of_get_property(np, "st,wakeup-y-lo", NULL)) + pdata->wakeup_flags |= LIS3_WAKEUP_Y_LO; + if (of_get_property(np, "st,wakeup-y-hi", NULL)) + pdata->wakeup_flags |= LIS3_WAKEUP_Y_HI; + if (of_get_property(np, "st,wakeup-z-lo", NULL)) + pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO; + if (of_get_property(np, "st,wakeup-z-hi", NULL)) + pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI; + + if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) { + switch (val) { + case 1: + pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_1HZ; + break; + case 2: + pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_2HZ; + break; + case 4: + 
pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_4HZ;
+ break;
+ case 8:
+ pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_8HZ;
+ break;
+ }
+ }
+
+ if (of_get_property(np, "st,hipass1-disable", NULL))
+ pdata->hipass_ctrl |= LIS3_HIPASS1_DISABLE;
+ if (of_get_property(np, "st,hipass2-disable", NULL))
+ pdata->hipass_ctrl |= LIS3_HIPASS2_DISABLE;
+
+ if (!of_property_read_u32(np, "st,axis-x", &val))
+ pdata->axis_x = val;
+ if (!of_property_read_u32(np, "st,axis-y", &val))
+ pdata->axis_y = val;
+ if (!of_property_read_u32(np, "st,axis-z", &val))
+ pdata->axis_z = val;
+
+ if (!of_property_read_u32(np, "st,default-rate", &val))
+ pdata->default_rate = val;
+
+ if (!of_property_read_u32(np, "st,min-limit-x", &val))
+ pdata->st_min_limits[0] = val;
+ if (!of_property_read_u32(np, "st,min-limit-y", &val))
+ pdata->st_min_limits[1] = val;
+ if (!of_property_read_u32(np, "st,min-limit-z", &val))
+ pdata->st_min_limits[2] = val;
+
+ if (!of_property_read_u32(np, "st,max-limit-x", &val))
+ pdata->st_max_limits[0] = val;
+ if (!of_property_read_u32(np, "st,max-limit-y", &val))
+ pdata->st_max_limits[1] = val;
+ if (!of_property_read_u32(np, "st,max-limit-z", &val))
+ pdata->st_max_limits[2] = val;
+
+
+ lis3->pdata = pdata;
+
+ return 0;
+}
+
+#else
+int lis3lv02d_init_dt(struct lis3lv02d *lis3)
+{
+ return 0;
+}
+#endif
+EXPORT_SYMBOL_GPL(lis3lv02d_init_dt);
+
+/*
+ * Initialise the accelerometer and the various subsystems.
+ * Should be rather independent of the bus system.
+ */
+int lis3lv02d_init_device(struct lis3lv02d *lis3)
+{
+ int err;
+ irq_handler_t thread_fn;
+ int irq_flags = 0;
+
+ lis3->whoami = lis3lv02d_read_8(lis3, WHO_AM_I);
+
+ switch (lis3->whoami) {
+ case WAI_12B:
+ pr_info("12 bits sensor found\n");
+ lis3->read_data = lis3lv02d_read_12;
+ lis3->mdps_max_val = 2048;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
+ lis3->odrs = lis3_12_rates;
+ lis3->odr_mask = CTRL1_DF0 | CTRL1_DF1;
+ lis3->scale = LIS3_SENSITIVITY_12B;
+ lis3->regs = lis3_wai12_regs;
+ lis3->regs_size = ARRAY_SIZE(lis3_wai12_regs);
+ break;
+ case WAI_8B:
+ pr_info("8 bits sensor found\n");
+ lis3->read_data = lis3lv02d_read_8;
+ lis3->mdps_max_val = 128;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ lis3->odrs = lis3_8_rates;
+ lis3->odr_mask = CTRL1_DR;
+ lis3->scale = LIS3_SENSITIVITY_8B;
+ lis3->regs = lis3_wai8_regs;
+ lis3->regs_size = ARRAY_SIZE(lis3_wai8_regs);
+ break;
+ case WAI_3DC:
+ pr_info("8 bits 3DC sensor found\n");
+ lis3->read_data = lis3lv02d_read_8;
+ lis3->mdps_max_val = 128;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ lis3->odrs = lis3_3dc_rates;
+ lis3->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3;
+ lis3->scale = LIS3_SENSITIVITY_8B;
+ break;
+ case WAI_3DLH:
+ pr_info("16 bits lis331dlh sensor found\n");
+ lis3->read_data = lis331dlh_read_data;
+ lis3->mdps_max_val = 2048; /* 12 bits for 2G */
+ lis3->shift_adj = SHIFT_ADJ_2G;
+ lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ lis3->odrs = lis3_3dlh_rates;
+ lis3->odr_mask = CTRL1_DR0 | CTRL1_DR1;
+ lis3->scale = LIS3DLH_SENSITIVITY_2G;
+ break;
+ default:
+ pr_err("unknown sensor type 0x%X\n", lis3->whoami);
+ return -EINVAL;
+ }
+
+ lis3->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
+ sizeof(lis3_wai12_regs)), GFP_KERNEL);
+
+ if (lis3->reg_cache == NULL) {
+ printk(KERN_ERR DRIVER_NAME "out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&lis3->mutex);
+ atomic_set(&lis3->wake_thread, 0);
+
+ lis3lv02d_add_fs(lis3);
+ err = lis3lv02d_poweron(lis3);
+ if (err) {
+ lis3lv02d_remove_fs(lis3);
+ return err;
+ }
+
+ if (lis3->pm_dev) {
+ pm_runtime_set_active(lis3->pm_dev);
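+ /*
+ * Editor's note: the chip was just powered on above, so mark the
+ * device active before enabling runtime PM; otherwise the PM core
+ * would treat it as suspended and resume it needlessly.
+ */
+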
pm_runtime_enable(lis3->pm_dev); + } + + if (lis3lv02d_joystick_enable(lis3)) + pr_err("joystick initialization failed\n"); + + /* passing in platform specific data is purely optional and only + * used by the SPI transport layer at the moment */ + if (lis3->pdata) { + struct lis3lv02d_platform_data *p = lis3->pdata; + + if (lis3->whoami == WAI_8B) + lis3lv02d_8b_configure(lis3, p); + + irq_flags = p->irq_flags1 & IRQF_TRIGGER_MASK; + + lis3->irq_cfg = p->irq_cfg; + if (p->irq_cfg) + lis3->write(lis3, CTRL_REG3, p->irq_cfg); + + if (p->default_rate) + lis3lv02d_set_odr(lis3, p->default_rate); + } + + /* bail if we did not get an IRQ from the bus layer */ + if (!lis3->irq) { + pr_debug("No IRQ. Disabling /dev/freefall\n"); + goto out; + } + + /* + * The sensor can generate interrupts for free-fall and direction + * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep + * the things simple and _fast_ we activate it only for free-fall, so + * no need to read register (very slow with ACPI). For the same reason, + * we forbid shared interrupts. + * + * IRQF_TRIGGER_RISING seems pointless on HP laptops because the + * io-apic is not configurable (and generates a warning) but I keep it + * in case of support for other hardware. + */ + if (lis3->pdata && lis3->whoami == WAI_8B) + thread_fn = lis302dl_interrupt_thread1_8b; + else + thread_fn = NULL; + + err = request_threaded_irq(lis3->irq, lis302dl_interrupt, + thread_fn, + IRQF_TRIGGER_RISING | IRQF_ONESHOT | + irq_flags, + DRIVER_NAME, lis3); + + if (err < 0) { + pr_err("Cannot get IRQ\n"); + goto out; + } + + lis3->miscdev.minor = MISC_DYNAMIC_MINOR; + lis3->miscdev.name = "freefall"; + lis3->miscdev.fops = &lis3lv02d_misc_fops; + + if (misc_register(&lis3->miscdev)) + pr_err("misc_register failed\n"); +out: + return 0; +} +EXPORT_SYMBOL_GPL(lis3lv02d_init_device); + +MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver"); +MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h new file mode 100644 index 00000000000..c439c827eea --- /dev/null +++ b/drivers/misc/lis3lv02d/lis3lv02d.h @@ -0,0 +1,331 @@ +/* + * lis3lv02d.h - ST LIS3LV02DL accelerometer driver + * + * Copyright (C) 2007-2008 Yan Burman + * Copyright (C) 2008-2009 Eric Piel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include <linux/platform_device.h> +#include <linux/input-polldev.h> +#include <linux/regulator/consumer.h> +#include <linux/miscdevice.h> + +/* + * This driver tries to support the "digital" accelerometer chips from + * STMicroelectronics such as LIS3LV02DL, LIS302DL, LIS3L02DQ, LIS331DL, + * LIS331DLH, LIS35DE, or LIS202DL. They are very similar in terms of + * programming, with almost the same registers. 
In addition to differing + * on physical properties, they differ on the number of axes (2/3), + * precision (8/12 bits), and special features (freefall detection, + * click...). Unfortunately, not all the differences can be probed via + * a register. They can be connected either via I²C or SPI. + */ + +#include <linux/lis3lv02d.h> + +enum lis3_reg { + WHO_AM_I = 0x0F, + OFFSET_X = 0x16, + OFFSET_Y = 0x17, + OFFSET_Z = 0x18, + GAIN_X = 0x19, + GAIN_Y = 0x1A, + GAIN_Z = 0x1B, + CTRL_REG1 = 0x20, + CTRL_REG2 = 0x21, + CTRL_REG3 = 0x22, + CTRL_REG4 = 0x23, + HP_FILTER_RESET = 0x23, + STATUS_REG = 0x27, + OUTX_L = 0x28, + OUTX_H = 0x29, + OUTX = 0x29, + OUTY_L = 0x2A, + OUTY_H = 0x2B, + OUTY = 0x2B, + OUTZ_L = 0x2C, + OUTZ_H = 0x2D, + OUTZ = 0x2D, +}; + +enum lis302d_reg { + FF_WU_CFG_1 = 0x30, + FF_WU_SRC_1 = 0x31, + FF_WU_THS_1 = 0x32, + FF_WU_DURATION_1 = 0x33, + FF_WU_CFG_2 = 0x34, + FF_WU_SRC_2 = 0x35, + FF_WU_THS_2 = 0x36, + FF_WU_DURATION_2 = 0x37, + CLICK_CFG = 0x38, + CLICK_SRC = 0x39, + CLICK_THSY_X = 0x3B, + CLICK_THSZ = 0x3C, + CLICK_TIMELIMIT = 0x3D, + CLICK_LATENCY = 0x3E, + CLICK_WINDOW = 0x3F, +}; + +enum lis3lv02d_reg { + FF_WU_CFG = 0x30, + FF_WU_SRC = 0x31, + FF_WU_ACK = 0x32, + FF_WU_THS_L = 0x34, + FF_WU_THS_H = 0x35, + FF_WU_DURATION = 0x36, + DD_CFG = 0x38, + DD_SRC = 0x39, + DD_ACK = 0x3A, + DD_THSI_L = 0x3C, + DD_THSI_H = 0x3D, + DD_THSE_L = 0x3E, + DD_THSE_H = 0x3F, +}; + +enum lis3_who_am_i { + WAI_3DLH = 0x32, /* 16 bits: LIS331DLH */ + WAI_3DC = 0x33, /* 8 bits: LIS3DC, HP3DC */ + WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */ + WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */ + WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */ +}; + +enum lis3_type { + LIS3LV02D, + LIS3DC, + HP3DC, + LIS2302D, + LIS331DLF, + LIS331DLH, +}; + +enum lis3lv02d_ctrl1_12b { + CTRL1_Xen = 0x01, + CTRL1_Yen = 0x02, + CTRL1_Zen = 0x04, + CTRL1_ST = 0x08, + CTRL1_DF0 = 0x10, + CTRL1_DF1 = 0x20, + CTRL1_PD0 = 0x40, + CTRL1_PD1 = 0x80, +}; + +/* Delta to ctrl1_12b version */ +enum lis3lv02d_ctrl1_8b { + CTRL1_STM = 0x08, + CTRL1_STP = 0x10, + CTRL1_FS = 0x20, + CTRL1_PD = 0x40, + CTRL1_DR = 0x80, +}; + +enum lis3lv02d_ctrl1_3dc { + CTRL1_ODR0 = 0x10, + CTRL1_ODR1 = 0x20, + CTRL1_ODR2 = 0x40, + CTRL1_ODR3 = 0x80, +}; + +enum lis331dlh_ctrl1 { + CTRL1_DR0 = 0x08, + CTRL1_DR1 = 0x10, + CTRL1_PM0 = 0x20, + CTRL1_PM1 = 0x40, + CTRL1_PM2 = 0x80, +}; + +enum lis331dlh_ctrl2 { + CTRL2_HPEN1 = 0x04, + CTRL2_HPEN2 = 0x08, + CTRL2_FDS_3DLH = 0x10, + CTRL2_BOOT_3DLH = 0x80, +}; + +enum lis331dlh_ctrl4 { + CTRL4_STSIGN = 0x08, + CTRL4_BLE = 0x40, + CTRL4_BDU = 0x80, +}; + +enum lis3lv02d_ctrl2 { + CTRL2_DAS = 0x01, + CTRL2_SIM = 0x02, + CTRL2_DRDY = 0x04, + CTRL2_IEN = 0x08, + CTRL2_BOOT = 0x10, + CTRL2_BLE = 0x20, + CTRL2_BDU = 0x40, /* Block Data Update */ + CTRL2_FS = 0x80, /* Full Scale selection */ +}; + +enum lis3lv02d_ctrl4_3dc { + CTRL4_SIM = 0x01, + CTRL4_ST0 = 0x02, + CTRL4_ST1 = 0x04, + CTRL4_FS0 = 0x10, + CTRL4_FS1 = 0x20, +}; + +enum lis302d_ctrl2 { + HP_FF_WU2 = 0x08, + HP_FF_WU1 = 0x04, + CTRL2_BOOT_8B = 0x40, +}; + +enum lis3lv02d_ctrl3 { + CTRL3_CFS0 = 0x01, + CTRL3_CFS1 = 0x02, + CTRL3_FDS = 0x10, + CTRL3_HPFF = 0x20, + CTRL3_HPDD = 0x40, + CTRL3_ECK = 0x80, +}; + +enum lis3lv02d_status_reg { + STATUS_XDA = 0x01, + STATUS_YDA = 0x02, + STATUS_ZDA = 0x04, + STATUS_XYZDA = 0x08, + STATUS_XOR = 0x10, + STATUS_YOR = 0x20, + STATUS_ZOR = 0x40, + STATUS_XYZOR = 0x80, +}; + +enum lis3lv02d_ff_wu_cfg { + FF_WU_CFG_XLIE = 0x01, + FF_WU_CFG_XHIE = 0x02, + FF_WU_CFG_YLIE = 0x04, + 
FF_WU_CFG_YHIE = 0x08,
+ FF_WU_CFG_ZLIE = 0x10,
+ FF_WU_CFG_ZHIE = 0x20,
+ FF_WU_CFG_LIR = 0x40,
+ FF_WU_CFG_AOI = 0x80,
+};
+
+enum lis3lv02d_ff_wu_src {
+ FF_WU_SRC_XL = 0x01,
+ FF_WU_SRC_XH = 0x02,
+ FF_WU_SRC_YL = 0x04,
+ FF_WU_SRC_YH = 0x08,
+ FF_WU_SRC_ZL = 0x10,
+ FF_WU_SRC_ZH = 0x20,
+ FF_WU_SRC_IA = 0x40,
+};
+
+enum lis3lv02d_dd_cfg {
+ DD_CFG_XLIE = 0x01,
+ DD_CFG_XHIE = 0x02,
+ DD_CFG_YLIE = 0x04,
+ DD_CFG_YHIE = 0x08,
+ DD_CFG_ZLIE = 0x10,
+ DD_CFG_ZHIE = 0x20,
+ DD_CFG_LIR = 0x40,
+ DD_CFG_IEND = 0x80,
+};
+
+enum lis3lv02d_dd_src {
+ DD_SRC_XL = 0x01,
+ DD_SRC_XH = 0x02,
+ DD_SRC_YL = 0x04,
+ DD_SRC_YH = 0x08,
+ DD_SRC_ZL = 0x10,
+ DD_SRC_ZH = 0x20,
+ DD_SRC_IA = 0x40,
+};
+
+enum lis3lv02d_click_src_8b {
+ CLICK_SINGLE_X = 0x01,
+ CLICK_DOUBLE_X = 0x02,
+ CLICK_SINGLE_Y = 0x04,
+ CLICK_DOUBLE_Y = 0x08,
+ CLICK_SINGLE_Z = 0x10,
+ CLICK_DOUBLE_Z = 0x20,
+ CLICK_IA = 0x40,
+};
+
+enum lis3lv02d_reg_state {
+ LIS3_REG_OFF = 0x00,
+ LIS3_REG_ON = 0x01,
+};
+
+union axis_conversion {
+ struct {
+ int x, y, z;
+ };
+ int as_array[3];
+
+};
+
+struct lis3lv02d {
+ void *bus_priv; /* used by the bus layer only */
+ struct device *pm_dev; /* for pm_runtime purposes */
+ int (*init) (struct lis3lv02d *lis3);
+ int (*write) (struct lis3lv02d *lis3, int reg, u8 val);
+ int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret);
+ int (*blkread) (struct lis3lv02d *lis3, int reg, int len, u8 *ret);
+ int (*reg_ctrl) (struct lis3lv02d *lis3, bool state);
+
+ int *odrs; /* Supported output data rates */
+ u8 *regs; /* Regs to store / restore */
+ int regs_size;
+ u8 *reg_cache;
+ bool regs_stored;
+ u8 odr_mask; /* ODR bit mask */
+ u8 whoami; /* indicates measurement precision */
+ s16 (*read_data) (struct lis3lv02d *lis3, int reg);
+ int mdps_max_val;
+ int pwron_delay;
+ int scale; /*
+ * relationship between 1 LSB and mG
+ * (1/1000th of earth gravity)
+ */
+
+ struct input_polled_dev *idev; /* input device */
+ struct platform_device *pdev; /* platform device */
+ struct regulator_bulk_data regulators[2];
+ atomic_t count; /* interrupt count after last read */
+ union axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
+
+ u32 irq; /* IRQ number */
+ struct fasync_struct *async_queue; /* queue for the misc device */
+ wait_queue_head_t misc_wait; /* Wait queue for the misc device */
+ unsigned long misc_opened; /* bit0: whether the device is open */
+ struct miscdevice miscdev;
+
+ int data_ready_count[2];
+ atomic_t wake_thread;
+ unsigned char irq_cfg;
+ unsigned int shift_adj;
+
+ struct lis3lv02d_platform_data *pdata; /* for passing board config */
+ struct mutex mutex; /* Serialize poll and selftest */
+
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
+};
+
+int lis3lv02d_init_device(struct lis3lv02d *lis3);
+int lis3lv02d_joystick_enable(struct lis3lv02d *lis3);
+void lis3lv02d_joystick_disable(struct lis3lv02d *lis3);
+void lis3lv02d_poweroff(struct lis3lv02d *lis3);
+int lis3lv02d_poweron(struct lis3lv02d *lis3);
+int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
+int lis3lv02d_init_dt(struct lis3lv02d *lis3);
+
+extern struct lis3lv02d lis3_dev;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
new file mode 100644
index 00000000000..d324f8a97b8
--- /dev/null
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -0,0 +1,290 @@
+/*
+ * drivers/hwmon/lis3lv02d_i2c.c
+ *
+ * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
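+ * (Editor's note: registers are accessed with single SMBus byte
+ * transfers; when the adapter reports I2C_FUNC_SMBUS_I2C_BLOCK,
+ * multi-register reads go through one block transfer with the
+ * auto-increment address bit set -- see lis3_i2c_blockread() below.)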
+ * Driver is based on corresponding SPI driver written by Daniel Mack
+ * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ).
+ *
+ * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+
+#include "lis3lv02d.h"
+
+#define DRV_NAME "lis3lv02d_i2c"
+
+static const char reg_vdd[] = "Vdd";
+static const char reg_vdd_io[] = "Vdd_IO";
+
+static int lis3_reg_ctrl(struct lis3lv02d *lis3, bool state)
+{
+ int ret;
+ if (state == LIS3_REG_OFF) {
+ ret = regulator_bulk_disable(ARRAY_SIZE(lis3->regulators),
+ lis3->regulators);
+ } else {
+ ret = regulator_bulk_enable(ARRAY_SIZE(lis3->regulators),
+ lis3->regulators);
+ /* Chip needs time to wake up. Not mentioned in datasheet */
+ usleep_range(10000, 20000);
+ }
+ return ret;
+}
+
+static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ return i2c_smbus_write_byte_data(c, reg, value);
+}
+
+static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ *v = i2c_smbus_read_byte_data(c, reg);
+ return 0;
+}
+
+static inline s32 lis3_i2c_blockread(struct lis3lv02d *lis3, int reg, int len,
+ u8 *v)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ reg |= (1 << 7); /* 7th bit enables address auto incrementation */
+ return i2c_smbus_read_i2c_block_data(c, reg, len, v);
+}
+
+static int lis3_i2c_init(struct lis3lv02d *lis3)
+{
+ u8 reg;
+ int ret;
+
+ lis3_reg_ctrl(lis3, LIS3_REG_ON);
+
+ lis3->read(lis3, WHO_AM_I, &reg);
+ if (reg != lis3->whoami)
+ printk(KERN_ERR "lis3: power on failure\n");
+
+ /* power up the device */
+ ret = lis3->read(lis3, CTRL_REG1, &reg);
+ if (ret < 0)
+ return ret;
+
+ if (lis3->whoami == WAI_3DLH)
+ reg |= CTRL1_PM0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+ else
+ reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+
+ return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+/* Default axis mapping but it can be overwritten by platform data */
+static union axis_conversion lis3lv02d_axis_map =
+ { .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
+
+#ifdef CONFIG_OF
+static struct of_device_id lis3lv02d_i2c_dt_ids[] = {
+ { .compatible = "st,lis3lv02d" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lis3lv02d_i2c_dt_ids);
+#endif
+
+static int lis3lv02d_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+#ifdef CONFIG_OF
+ if (of_match_device(lis3lv02d_i2c_dt_ids, &client->dev)) {
+ lis3_dev.of_node = client->dev.of_node;
+ ret =
lis3lv02d_init_dt(&lis3_dev); + if (ret) + return ret; + pdata = lis3_dev.pdata; + } +#endif + + if (pdata) { + if ((pdata->driver_features & LIS3_USE_BLOCK_READ) && + (i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_I2C_BLOCK))) + lis3_dev.blkread = lis3_i2c_blockread; + + if (pdata->axis_x) + lis3lv02d_axis_map.x = pdata->axis_x; + + if (pdata->axis_y) + lis3lv02d_axis_map.y = pdata->axis_y; + + if (pdata->axis_z) + lis3lv02d_axis_map.z = pdata->axis_z; + + if (pdata->setup_resources) + ret = pdata->setup_resources(); + + if (ret) + goto fail; + } + + lis3_dev.regulators[0].supply = reg_vdd; + lis3_dev.regulators[1].supply = reg_vdd_io; + ret = regulator_bulk_get(&client->dev, + ARRAY_SIZE(lis3_dev.regulators), + lis3_dev.regulators); + if (ret < 0) + goto fail; + + lis3_dev.pdata = pdata; + lis3_dev.bus_priv = client; + lis3_dev.init = lis3_i2c_init; + lis3_dev.read = lis3_i2c_read; + lis3_dev.write = lis3_i2c_write; + lis3_dev.irq = client->irq; + lis3_dev.ac = lis3lv02d_axis_map; + lis3_dev.pm_dev = &client->dev; + + i2c_set_clientdata(client, &lis3_dev); + + /* Provide power over the init call */ + lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON); + + ret = lis3lv02d_init_device(&lis3_dev); + + lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF); + + if (ret) + goto fail2; + return 0; + +fail2: + regulator_bulk_free(ARRAY_SIZE(lis3_dev.regulators), + lis3_dev.regulators); +fail: + if (pdata && pdata->release_resources) + pdata->release_resources(); + return ret; +} + +static int lis3lv02d_i2c_remove(struct i2c_client *client) +{ + struct lis3lv02d *lis3 = i2c_get_clientdata(client); + struct lis3lv02d_platform_data *pdata = client->dev.platform_data; + + if (pdata && pdata->release_resources) + pdata->release_resources(); + + lis3lv02d_joystick_disable(lis3); + lis3lv02d_remove_fs(&lis3_dev); + + regulator_bulk_free(ARRAY_SIZE(lis3->regulators), + lis3_dev.regulators); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int lis3lv02d_i2c_suspend(struct device *dev) +{ + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + struct lis3lv02d *lis3 = i2c_get_clientdata(client); + + if (!lis3->pdata || !lis3->pdata->wakeup_flags) + lis3lv02d_poweroff(lis3); + return 0; +} + +static int lis3lv02d_i2c_resume(struct device *dev) +{ + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + struct lis3lv02d *lis3 = i2c_get_clientdata(client); + + /* + * pm_runtime documentation says that devices should always + * be powered on at resume. Pm_runtime turns them off after system + * wide resume is complete. 
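+ * (Editor's note: that is why the pm_runtime_suspended() test below
+ * still powers the device on -- the runtime-PM core will suspend it
+ * again once the system-wide resume has finished.)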
+ */
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
+ pm_runtime_suspended(dev))
+ lis3lv02d_poweron(lis3);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int lis3_i2c_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ lis3lv02d_poweroff(lis3);
+ return 0;
+}
+
+static int lis3_i2c_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ lis3lv02d_poweron(lis3);
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct i2c_device_id lis3lv02d_id[] = {
+ {"lis3lv02d", LIS3LV02D},
+ {"lis331dlh", LIS331DLH},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
+
+static const struct dev_pm_ops lis3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(lis3lv02d_i2c_suspend,
+ lis3lv02d_i2c_resume)
+ SET_RUNTIME_PM_OPS(lis3_i2c_runtime_suspend,
+ lis3_i2c_runtime_resume,
+ NULL)
+};
+
+static struct i2c_driver lis3lv02d_i2c_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &lis3_pm_ops,
+ .of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
+ },
+ .probe = lis3lv02d_i2c_probe,
+ .remove = lis3lv02d_i2c_remove,
+ .id_table = lis3lv02d_id,
+};
+
+module_i2c_driver(lis3lv02d_i2c_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("lis3lv02d I2C interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
new file mode 100644
index 00000000000..bd06d0cfac4
--- /dev/null
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -0,0 +1,154 @@
+/*
+ * lis3lv02d_spi - SPI glue layer for lis3lv02d
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+
+#include "lis3lv02d.h"
+
+#define DRV_NAME "lis3lv02d_spi"
+#define LIS3_SPI_READ 0x80
+
+static int lis3_spi_read(struct lis3lv02d *lis3, int reg, u8 *v)
+{
+ struct spi_device *spi = lis3->bus_priv;
+ int ret = spi_w8r8(spi, reg | LIS3_SPI_READ);
+ if (ret < 0)
+ return -EINVAL;
+
+ *v = (u8) ret;
+ return 0;
+}
+
+static int lis3_spi_write(struct lis3lv02d *lis3, int reg, u8 val)
+{
+ u8 tmp[2] = { reg, val };
+ struct spi_device *spi = lis3->bus_priv;
+ return spi_write(spi, tmp, sizeof(tmp));
+}
+
+static int lis3_spi_init(struct lis3lv02d *lis3)
+{
+ u8 reg;
+ int ret;
+
+ /* power up the device */
+ ret = lis3->read(lis3, CTRL_REG1, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
+ return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+static union axis_conversion lis3lv02d_axis_normal =
+ { .as_array = { 1, 2, 3 } };
+
+#ifdef CONFIG_OF
+static struct of_device_id lis302dl_spi_dt_ids[] = {
+ { .compatible = "st,lis302dl-spi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lis302dl_spi_dt_ids);
+#endif
+
+static int lis302dl_spi_probe(struct spi_device *spi)
+{
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ lis3_dev.bus_priv = spi;
+ lis3_dev.init = lis3_spi_init;
+ lis3_dev.read = lis3_spi_read;
+ lis3_dev.write = lis3_spi_write;
+ lis3_dev.irq = spi->irq;
+ lis3_dev.ac = lis3lv02d_axis_normal;
+ lis3_dev.pdata = spi->dev.platform_data;
+
+#ifdef CONFIG_OF
+ if (of_match_device(lis302dl_spi_dt_ids, &spi->dev)) {
+ lis3_dev.of_node = spi->dev.of_node;
+ ret = lis3lv02d_init_dt(&lis3_dev);
+ if (ret)
+ return ret;
+ }
+#endif
+ spi_set_drvdata(spi, &lis3_dev);
+
+ return lis3lv02d_init_device(&lis3_dev);
+}
+
+static int lis302dl_spi_remove(struct spi_device *spi)
+{
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+ lis3lv02d_joystick_disable(lis3);
+ lis3lv02d_poweroff(lis3);
+
+ return lis3lv02d_remove_fs(&lis3_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_spi_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ lis3lv02d_poweroff(&lis3_dev);
+
+ return 0;
+}
+
+static int lis3lv02d_spi_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ lis3lv02d_poweron(lis3);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
+ lis3lv02d_spi_resume);
+
+static struct spi_driver lis302dl_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &lis3lv02d_spi_pm,
+ .of_match_table = of_match_ptr(lis302dl_spi_dt_ids),
+ },
+ .probe = lis302dl_spi_probe,
+ .remove = lis302dl_spi_remove,
+};
+
+module_spi_driver(lis302dl_spi_driver);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("lis3lv02d SPI glue layer");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 3648b23d5c9..d66a2f24f6b 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -26,22 +26,11 @@
 * It is adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
- * Usage : insmod lkdtm.ko [recur_count={>0}] cpoint_name=<> cpoint_type=<>
- * [cpoint_count={>0}]
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
 *
- * recur_count : Recursion level for the stack overflow test. Default is 10.
- *
- * cpoint_name : Crash point where the kernel is to be crashed. It can be
- * one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
- * FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
- * IDE_CORE_CP
- *
- * cpoint_type : Indicates the action to be taken on hitting the crash point.
- * It can be one of PANIC, BUG, EXCEPTION, LOOP, OVERFLOW
- *
- * cpoint_count : Indicates the number of times the crash point is to be hit
- * to trigger an action. The default is 10.
+ * See Documentation/fault-injection/provoke-crashes.txt for instructions
 */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
@@ -52,36 +41,69 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
+#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <asm/cacheflush.h>

#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif

-#define NUM_CPOINTS 8
-#define NUM_CPOINT_TYPES 5
+/*
+ * Make sure our attempts to overrun the kernel stack don't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
+
 #define DEFAULT_COUNT 10
-#define REC_NUM_DEFAULT 10
+#define EXEC_SIZE 64

 enum cname {
- INVALID,
- INT_HARDWARE_ENTRY,
- INT_HW_IRQ_EN,
- INT_TASKLET_ENTRY,
- FS_DEVRW,
- MEM_SWAPOUT,
- TIMERADD,
- SCSI_DISPATCH_CMD,
- IDE_CORE_CP
+ CN_INVALID,
+ CN_INT_HARDWARE_ENTRY,
+ CN_INT_HW_IRQ_EN,
+ CN_INT_TASKLET_ENTRY,
+ CN_FS_DEVRW,
+ CN_MEM_SWAPOUT,
+ CN_TIMERADD,
+ CN_SCSI_DISPATCH_CMD,
+ CN_IDE_CORE_CP,
+ CN_DIRECT,
 };

 enum ctype {
- NONE,
- PANIC,
- BUG,
- EXCEPTION,
- LOOP,
- OVERFLOW
+ CT_NONE,
+ CT_PANIC,
+ CT_BUG,
+ CT_WARNING,
+ CT_EXCEPTION,
+ CT_LOOP,
+ CT_OVERFLOW,
+ CT_CORRUPT_STACK,
+ CT_UNALIGNED_LOAD_STORE_WRITE,
+ CT_OVERWRITE_ALLOCATION,
+ CT_WRITE_AFTER_FREE,
+ CT_SOFTLOCKUP,
+ CT_HARDLOCKUP,
+ CT_SPINLOCKUP,
+ CT_HUNG_TASK,
+ CT_EXEC_DATA,
+ CT_EXEC_STACK,
+ CT_EXEC_KMALLOC,
+ CT_EXEC_VMALLOC,
+ CT_EXEC_USERSPACE,
+ CT_ACCESS_USERSPACE,
+ CT_WRITE_RO,
+ CT_WRITE_KERN,
 };

 static char* cp_name[] = {
@@ -92,15 +114,33 @@ static char* cp_name[] = {
 "MEM_SWAPOUT",
 "TIMERADD",
 "SCSI_DISPATCH_CMD",
- "IDE_CORE_CP"
+ "IDE_CORE_CP",
+ "DIRECT",
 };

 static char* cp_type[] = {
 "PANIC",
 "BUG",
+ "WARNING",
 "EXCEPTION",
 "LOOP",
- "OVERFLOW"
+ "OVERFLOW",
+ "CORRUPT_STACK",
+ "UNALIGNED_LOAD_STORE_WRITE",
+ "OVERWRITE_ALLOCATION",
+ "WRITE_AFTER_FREE",
+ "SOFTLOCKUP",
+ "HARDLOCKUP",
+ "SPINLOCKUP",
+ "HUNG_TASK",
+ "EXEC_DATA",
+ "EXEC_STACK",
+ "EXEC_KMALLOC",
+ "EXEC_VMALLOC",
+ "EXEC_USERSPACE",
+ "ACCESS_USERSPACE",
+ "WRITE_RO",
+ "WRITE_KERN",
 };

 static struct jprobe lkdtm;
@@ -113,16 +153,21 @@ static char* cpoint_type;
 static int cpoint_count = DEFAULT_COUNT;
 static int recur_count = REC_NUM_DEFAULT;

-static enum cname cpoint = INVALID;
-static enum ctype cptype = NONE;
+static enum cname cpoint = CN_INVALID;
+static enum ctype cptype = CT_NONE;
 static int count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(count_lock);
+static DEFINE_SPINLOCK(lock_me_up);
+
+static u8 data_area[EXEC_SIZE];
+
+static const unsigned long rodata = 0xAA55AA55;

 module_param(recur_count, int, 0644);
-MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
- "default is 10");
-module_param(cpoint_name, charp, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+module_param(cpoint_name, charp, 0444);
 MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
-module_param(cpoint_type, charp, 0644);
+module_param(cpoint_type, charp, 0444);
 MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
 "hitting the crash point");
 module_param(cpoint_count, int, 0644);
@@ -183,7 +228,7 @@ static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 }

 #ifdef CONFIG_IDE
-int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
 struct block_device *bdev, unsigned int cmd,
 unsigned long arg)
 {
@@ -193,150 +238,632 @@ int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
 }
 #endif

+/* Return the crash type or CT_NONE if the name is invalid */
+static enum ctype parse_cp_type(const char *what, size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
+ if (!strcmp(what, cp_type[i]))
+ return i + 1;
+ }
+
+ return CT_NONE;
+}
+
+static const char *cp_type_to_str(enum ctype type)
+{
+ if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
+ return "None";
+
+ return cp_type[type - 1];
+}
+
+static const char *cp_name_to_str(enum cname name)
+{
+ if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
+ return "INVALID";
+
+ return cp_name[name - 1];
+}
+
+
 static int lkdtm_parse_commandline(void)
 {
 int i;
+ unsigned long flags;

- if (cpoint_name == NULL || cpoint_type == NULL ||
- cpoint_count < 1 || recur_count < 1)
+ if (cpoint_count < 1 || recur_count < 1)
 return -EINVAL;

- for (i = 0; i < NUM_CPOINTS; ++i) {
+ spin_lock_irqsave(&count_lock, flags);
+ count = cpoint_count;
+ spin_unlock_irqrestore(&count_lock, flags);
+
+ /* No special parameters */
+ if (!cpoint_type && !cpoint_name)
+ return 0;
+
+ /* Neither or both of these need to be set */
+ if (!cpoint_type || !cpoint_name)
+ return -EINVAL;
+
+ cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
+ if (cptype == CT_NONE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
 if (!strcmp(cpoint_name, cp_name[i])) {
 cpoint = i + 1;
- break;
+ return 0;
 }
 }

- for (i = 0; i < NUM_CPOINT_TYPES; ++i) {
- if (!strcmp(cpoint_type, cp_type[i])) {
- cptype = i + 1;
- break;
- }
- }
+ /* Could not find a valid crash point */
+ return -EINVAL;
+}

- if (cpoint == INVALID || cptype == NONE)
- return -EINVAL;
+static int recursive_loop(int remaining)
+{
+ char buf[REC_STACK_SIZE];

- count = cpoint_count;
+ /* Make sure compiler does not optimize this away. */
+ memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+ if (!remaining)
+ return 0;
+ else
+ return recursive_loop(remaining - 1);
+}

- return 0;
+static void do_nothing(void)
+{
+ return;
 }

-static int recursive_loop(int a)
+/* Must immediately follow do_nothing for size calculations to work out.
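+ * (Editor's note: CT_WRITE_KERN later computes the length of
+ * do_nothing() as the distance between the two symbols,
+ *
+ *	size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+ *
+ * which relies on the compiler emitting both functions in source
+ * order without reordering or inlining them.)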
*/ +static void do_overwritten(void) { - char buf[1024]; + pr_info("do_overwritten wasn't overwritten!\n"); + return; +} - memset(buf,0xFF,1024); - recur_count--; - if (!recur_count) - return 0; - else - return recursive_loop(a); +static noinline void corrupt_stack(void) +{ + /* Use default char array length that triggers stack protection. */ + char data[8]; + + memset((void *)data, 0, 64); } -void lkdtm_handler(void) +static void execute_location(void *dst) { - printk(KERN_INFO "lkdtm : Crash point %s of type %s hit\n", - cpoint_name, cpoint_type); - --count; + void (*func)(void) = dst; - if (count == 0) { - switch (cptype) { - case NONE: - break; - case PANIC: - printk(KERN_INFO "lkdtm : PANIC\n"); - panic("dumptest"); - break; - case BUG: - printk(KERN_INFO "lkdtm : BUG\n"); - BUG(); - break; - case EXCEPTION: - printk(KERN_INFO "lkdtm : EXCEPTION\n"); - *((int *) 0) = 0; - break; - case LOOP: - printk(KERN_INFO "lkdtm : LOOP\n"); - for (;;); - break; - case OVERFLOW: - printk(KERN_INFO "lkdtm : OVERFLOW\n"); - (void) recursive_loop(0); - break; - default: - break; + pr_info("attempting ok execution at %p\n", do_nothing); + do_nothing(); + + memcpy(dst, do_nothing, EXEC_SIZE); + flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); + pr_info("attempting bad execution at %p\n", func); + func(); +} + +static void execute_user_location(void *dst) +{ + /* Intentionally crossing kernel/user memory boundary. */ + void (*func)(void) = dst; + + pr_info("attempting ok execution at %p\n", do_nothing); + do_nothing(); + + if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) + return; + flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); + pr_info("attempting bad execution at %p\n", func); + func(); +} + +static void lkdtm_do_action(enum ctype which) +{ + switch (which) { + case CT_PANIC: + panic("dumptest"); + break; + case CT_BUG: + BUG(); + break; + case CT_WARNING: + WARN_ON(1); + break; + case CT_EXCEPTION: + *((int *) 0) = 0; + break; + case CT_LOOP: + for (;;) + ; + break; + case CT_OVERFLOW: + (void) recursive_loop(recur_count); + break; + case CT_CORRUPT_STACK: + corrupt_stack(); + break; + case CT_UNALIGNED_LOAD_STORE_WRITE: { + static u8 data[5] __attribute__((aligned(4))) = {1, 2, + 3, 4, 5}; + u32 *p; + u32 val = 0x12345678; + + p = (u32 *)(data + 1); + if (*p == 0) + val = 0x87654321; + *p = val; + break; + } + case CT_OVERWRITE_ALLOCATION: { + size_t len = 1020; + u32 *data = kmalloc(len, GFP_KERNEL); + + data[1024 / sizeof(u32)] = 0x12345678; + kfree(data); + break; + } + case CT_WRITE_AFTER_FREE: { + size_t len = 1024; + u32 *data = kmalloc(len, GFP_KERNEL); + + kfree(data); + schedule(); + memset(data, 0x78, len); + break; + } + case CT_SOFTLOCKUP: + preempt_disable(); + for (;;) + cpu_relax(); + break; + case CT_HARDLOCKUP: + local_irq_disable(); + for (;;) + cpu_relax(); + break; + case CT_SPINLOCKUP: + /* Must be called twice to trigger. */ + spin_lock(&lock_me_up); + /* Let sparse know we intended to exit holding the lock. 
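+	 * (Editor's note: __release() only balances the acquire for
+	 * static checkers such as sparse; at run time the lock stays
+	 * held, so writing this crash point a second time spins
+	 * forever on lock_me_up.)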
*/ + __release(&lock_me_up); + break; + case CT_HUNG_TASK: + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + break; + case CT_EXEC_DATA: + execute_location(data_area); + break; + case CT_EXEC_STACK: { + u8 stack_area[EXEC_SIZE]; + execute_location(stack_area); + break; + } + case CT_EXEC_KMALLOC: { + u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL); + execute_location(kmalloc_area); + kfree(kmalloc_area); + break; + } + case CT_EXEC_VMALLOC: { + u32 *vmalloc_area = vmalloc(EXEC_SIZE); + execute_location(vmalloc_area); + vfree(vmalloc_area); + break; + } + case CT_EXEC_USERSPACE: { + unsigned long user_addr; + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + execute_user_location((void *)user_addr); + vm_munmap(user_addr, PAGE_SIZE); + break; + } + case CT_ACCESS_USERSPACE: { + unsigned long user_addr, tmp; + unsigned long *ptr; + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; } + + ptr = (unsigned long *)user_addr; + + pr_info("attempting bad read at %p\n", ptr); + tmp = *ptr; + tmp += 0xc0dec0de; + + pr_info("attempting bad write at %p\n", ptr); + *ptr = tmp; + + vm_munmap(user_addr, PAGE_SIZE); + + break; + } + case CT_WRITE_RO: { + unsigned long *ptr; + + ptr = (unsigned long *)&rodata; + + pr_info("attempting bad write at %p\n", ptr); + *ptr ^= 0xabcd1234; + + break; + } + case CT_WRITE_KERN: { + size_t size; + unsigned char *ptr; + + size = (unsigned long)do_overwritten - + (unsigned long)do_nothing; + ptr = (unsigned char *)do_overwritten; + + pr_info("attempting bad %zu byte write at %p\n", size, ptr); + memcpy(ptr, (unsigned char *)do_nothing, size); + flush_icache_range((unsigned long)ptr, + (unsigned long)(ptr + size)); + + do_overwritten(); + break; + } + case CT_NONE: + default: + break; + } + +} + +static void lkdtm_handler(void) +{ + unsigned long flags; + bool do_it = false; + + spin_lock_irqsave(&count_lock, flags); + count--; + pr_info("Crash point %s of type %s hit, trigger in %d rounds\n", + cp_name_to_str(cpoint), cp_type_to_str(cptype), count); + + if (count == 0) { + do_it = true; count = cpoint_count; } + spin_unlock_irqrestore(&count_lock, flags); + + if (do_it) + lkdtm_do_action(cptype); } -static int __init lkdtm_module_init(void) +static int lkdtm_register_cpoint(enum cname which) { int ret; - if (lkdtm_parse_commandline() == -EINVAL) { - printk(KERN_INFO "lkdtm : Invalid command\n"); - return -EINVAL; - } + cpoint = CN_INVALID; + if (lkdtm.entry != NULL) + unregister_jprobe(&lkdtm); - switch (cpoint) { - case INT_HARDWARE_ENTRY: + switch (which) { + case CN_DIRECT: + lkdtm_do_action(cptype); + return 0; + case CN_INT_HARDWARE_ENTRY: lkdtm.kp.symbol_name = "do_IRQ"; lkdtm.entry = (kprobe_opcode_t*) jp_do_irq; break; - case INT_HW_IRQ_EN: + case CN_INT_HW_IRQ_EN: lkdtm.kp.symbol_name = "handle_IRQ_event"; lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event; break; - case INT_TASKLET_ENTRY: + case CN_INT_TASKLET_ENTRY: lkdtm.kp.symbol_name = "tasklet_action"; lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action; break; - case FS_DEVRW: + case CN_FS_DEVRW: lkdtm.kp.symbol_name = "ll_rw_block"; lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block; break; - case MEM_SWAPOUT: + case CN_MEM_SWAPOUT: lkdtm.kp.symbol_name = "shrink_inactive_list"; 
lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list; break; - case TIMERADD: + case CN_TIMERADD: lkdtm.kp.symbol_name = "hrtimer_start"; lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start; break; - case SCSI_DISPATCH_CMD: + case CN_SCSI_DISPATCH_CMD: lkdtm.kp.symbol_name = "scsi_dispatch_cmd"; lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd; break; - case IDE_CORE_CP: + case CN_IDE_CORE_CP: #ifdef CONFIG_IDE lkdtm.kp.symbol_name = "generic_ide_ioctl"; lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; #else - printk(KERN_INFO "lkdtm : Crash point not available\n"); + pr_info("Crash point not available\n"); + return -EINVAL; #endif break; default: - printk(KERN_INFO "lkdtm : Invalid Crash Point\n"); - break; + pr_info("Invalid Crash Point\n"); + return -EINVAL; } + cpoint = which; if ((ret = register_jprobe(&lkdtm)) < 0) { - printk(KERN_INFO "lkdtm : Couldn't register jprobe\n"); - return ret; + pr_info("Couldn't register jprobe\n"); + cpoint = CN_INVALID; } - printk(KERN_INFO "lkdtm : Crash point %s of type %s registered\n", - cpoint_name, cpoint_type); + return ret; +} + +static ssize_t do_register_entry(enum cname which, struct file *f, + const char __user *user_buf, size_t count, loff_t *off) +{ + char *buf; + int err; + + if (count >= PAGE_SIZE) + return -EINVAL; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (copy_from_user(buf, user_buf, count)) { + free_page((unsigned long) buf); + return -EFAULT; + } + /* NULL-terminate and remove enter */ + buf[count] = '\0'; + strim(buf); + + cptype = parse_cp_type(buf, count); + free_page((unsigned long) buf); + + if (cptype == CT_NONE) + return -EINVAL; + + err = lkdtm_register_cpoint(which); + if (err < 0) + return err; + + *off += count; + + return count; +} + +/* Generic read callback that just prints out the available crash types */ +static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, + size_t count, loff_t *off) +{ + char *buf; + int i, n, out; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + n = snprintf(buf, PAGE_SIZE, "Available crash types:\n"); + for (i = 0; i < ARRAY_SIZE(cp_type); i++) + n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]); + buf[n] = '\0'; + + out = simple_read_from_buffer(user_buf, count, off, + buf, n); + free_page((unsigned long) buf); + + return out; +} + +static int lkdtm_debugfs_open(struct inode *inode, struct file *file) +{ return 0; } + +static ssize_t int_hardware_entry(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off); +} + +static ssize_t int_hw_irq_en(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off); +} + +static ssize_t int_tasklet_entry(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off); +} + +static ssize_t fs_devrw_entry(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + return do_register_entry(CN_FS_DEVRW, f, buf, count, off); +} + +static ssize_t mem_swapout_entry(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off); +} + +static ssize_t timeradd_entry(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + return do_register_entry(CN_TIMERADD, f, buf, count, off); +} + +static ssize_t 
scsi_dispatch_cmd_entry(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off); +} + +static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off); +} + +/* Special entry to just crash directly. Available without KPROBEs */ +static ssize_t direct_entry(struct file *f, const char __user *user_buf, + size_t count, loff_t *off) +{ + enum ctype type; + char *buf; + + if (count >= PAGE_SIZE) + return -EINVAL; + if (count < 1) + return -EINVAL; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (copy_from_user(buf, user_buf, count)) { + free_page((unsigned long) buf); + return -EFAULT; + } + /* NULL-terminate and remove enter */ + buf[count] = '\0'; + strim(buf); + + type = parse_cp_type(buf, count); + free_page((unsigned long) buf); + if (type == CT_NONE) + return -EINVAL; + + pr_info("Performing direct entry %s\n", cp_type_to_str(type)); + lkdtm_do_action(type); + *off += count; + + return count; +} + +struct crash_entry { + const char *name; + const struct file_operations fops; +}; + +static const struct crash_entry crash_entries[] = { + {"DIRECT", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = direct_entry} }, + {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = int_hardware_entry} }, + {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = int_hw_irq_en} }, + {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = int_tasklet_entry} }, + {"FS_DEVRW", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = fs_devrw_entry} }, + {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = mem_swapout_entry} }, + {"TIMERADD", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = timeradd_entry} }, + {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = scsi_dispatch_cmd_entry} }, + {"IDE_CORE_CP", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, + .open = lkdtm_debugfs_open, + .write = ide_core_cp_entry} }, +}; + +static struct dentry *lkdtm_debugfs_root; + +static int __init lkdtm_module_init(void) +{ + int ret = -EINVAL; + int n_debugfs_entries = 1; /* Assume only the direct entry */ + int i; + + /* Register debugfs interface */ + lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); + if (!lkdtm_debugfs_root) { + pr_err("creating root dir failed\n"); + return -ENODEV; + } + +#ifdef CONFIG_KPROBES + n_debugfs_entries = ARRAY_SIZE(crash_entries); +#endif + + for (i = 0; i < n_debugfs_entries; i++) { + const struct crash_entry *cur = &crash_entries[i]; + struct dentry *de; + + de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, + NULL, &cur->fops); + if (de == NULL) { + pr_err("could not create %s\n", cur->name); + goto out_err; + } + } + + if (lkdtm_parse_commandline() == -EINVAL) { + pr_info("Invalid command\n"); + goto out_err; + } + + if (cpoint != CN_INVALID && cptype != CT_NONE) { + ret = lkdtm_register_cpoint(cpoint); + if (ret < 0) { + pr_info("Invalid 
crash point %d\n", cpoint); + goto out_err; + } + pr_info("Crash point %s of type %s registered\n", + cpoint_name, cpoint_type); + } else { + pr_info("No crash points registered, enable through debugfs\n"); + } + + return 0; + +out_err: + debugfs_remove_recursive(lkdtm_debugfs_root); + return ret; +} + static void __exit lkdtm_module_exit(void) { - unregister_jprobe(&lkdtm); - printk(KERN_INFO "lkdtm : Crash point unregistered\n"); + debugfs_remove_recursive(lkdtm_debugfs_root); + + unregister_jprobe(&lkdtm); + pr_info("Crash point unregistered\n"); } module_init(lkdtm_module_init); diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig new file mode 100644 index 00000000000..d23384dde73 --- /dev/null +++ b/drivers/misc/mei/Kconfig @@ -0,0 +1,45 @@ +config INTEL_MEI + tristate "Intel Management Engine Interface" + depends on X86 && PCI && WATCHDOG_CORE + help + The Intel Management Engine (Intel ME) provides Manageability, + Security and Media services for systems containing Intel chipsets. + If selected, the /dev/mei misc device will be created. + + For more information see + <http://software.intel.com/en-us/manageability/> + +config INTEL_MEI_ME + tristate "ME Enabled Intel Chipsets" + select INTEL_MEI + depends on X86 && PCI && WATCHDOG_CORE + help + MEI support for ME Enabled Intel chipsets. + + Supported Chipsets are: + 7 Series Chipset Family + 6 Series Chipset Family + 5 Series Chipset Family + 4 Series Chipset Family + Mobile 4 Series Chipset Family + ICH9 + 82946GZ/GL + 82G35 Express + 82Q963/Q965 + 82P965/G965 + Mobile PM965/GM965 + Mobile GME965/GLE960 + 82Q35 Express + 82G33/G31/P35/P31 Express + 82Q33 Express + 82X38/X48 Express + +config INTEL_MEI_TXE + tristate "Intel Trusted Execution Environment with ME Interface" + select INTEL_MEI + depends on X86 && PCI && WATCHDOG_CORE + help + MEI support for the Trusted Execution Environment device on Intel SoCs + + Supported SoCs: + Intel Bay Trail diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile new file mode 100644 index 00000000000..8ebc6cda137 --- /dev/null +++ b/drivers/misc/mei/Makefile @@ -0,0 +1,23 @@ +# +# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver +# Copyright (c) 2010-2014, Intel Corporation. +# +obj-$(CONFIG_INTEL_MEI) += mei.o +mei-objs := init.o +mei-objs += hbm.o +mei-objs += interrupt.o +mei-objs += client.o +mei-objs += main.o +mei-objs += amthif.o +mei-objs += wd.o +mei-objs += bus.o +mei-objs += nfc.o +mei-$(CONFIG_DEBUG_FS) += debugfs.o + +obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o +mei-me-objs := pci-me.o +mei-me-objs += hw-me.o + +obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o +mei-txe-objs := pci-txe.o +mei-txe-objs += hw-txe.o diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c new file mode 100644 index 00000000000..0d6234db00f --- /dev/null +++ b/drivers/misc/mei/amthif.c @@ -0,0 +1,745 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ * + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/aio.h> +#include <linux/pci.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/jiffies.h> +#include <linux/uaccess.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, + 0xac, 0xa8, 0x46, 0xe0, + 0xff, 0x65, 0x81, 0x4c); + +/** + * mei_amthif_reset_params - resets mei device iamthif parameters + * + * @dev: the device structure + */ +void mei_amthif_reset_params(struct mei_device *dev) +{ + /* reset iamthif parameters. */ + dev->iamthif_current_cb = NULL; + dev->iamthif_msg_buf_size = 0; + dev->iamthif_msg_buf_index = 0; + dev->iamthif_canceled = false; + dev->iamthif_ioctl = false; + dev->iamthif_state = MEI_IAMTHIF_IDLE; + dev->iamthif_timer = 0; + dev->iamthif_stall_timer = 0; + dev->iamthif_open_count = 0; +} + +/** + * mei_amthif_host_init - mei amthif host client initialization + * + * @dev: the device structure + * + */ +int mei_amthif_host_init(struct mei_device *dev) +{ + struct mei_cl *cl = &dev->iamthif_cl; + unsigned char *msg_buf; + int ret, i; + + dev->iamthif_state = MEI_IAMTHIF_IDLE; + + mei_cl_init(cl, dev); + + i = mei_me_cl_by_uuid(dev, &mei_amthif_guid); + if (i < 0) { + dev_info(&dev->pdev->dev, + "amthif: failed to find the client %d\n", i); + return -ENOTTY; + } + + cl->me_client_id = dev->me_clients[i].client_id; + + /* Assign iamthif_mtu to the value received from ME */ + + dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length; + dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n", + dev->me_clients[i].props.max_msg_length); + + kfree(dev->iamthif_msg_buf); + dev->iamthif_msg_buf = NULL; + + /* allocate storage for ME message buffer */ + msg_buf = kcalloc(dev->iamthif_mtu, + sizeof(unsigned char), GFP_KERNEL); + if (!msg_buf) { + dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n"); + return -ENOMEM; + } + + dev->iamthif_msg_buf = msg_buf; + + ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); + + if (ret < 0) { + dev_err(&dev->pdev->dev, + "amthif: failed to link client %d\n", ret); + return ret; + } + + ret = mei_cl_connect(cl, NULL); + + dev->iamthif_state = MEI_IAMTHIF_IDLE; + + return ret; +} + +/** + * mei_amthif_find_read_list_entry - finds an amthif list entry for the current file + * + * @dev: the device structure + * @file: pointer to file object + * + * returns a list entry on success, NULL on failure. + */ +struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, + struct file *file) +{ + struct mei_cl_cb *cb; + + list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) { + if (cb->cl && cb->cl == &dev->iamthif_cl && + cb->file_object == file) + return cb; + } + return NULL; +} + + +/** + * mei_amthif_read - read data from AMTHIF client + * + * @dev: the device structure + * @file: pointer to file object + * @ubuf: pointer to user data in user space + * @length: data length to read + * @offset: data read offset + * + * Locking: called under "dev->device_lock" lock + * + * returns + * read data length on success, + * zero if no data to read, + * negative on failure.
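+ * + * Note: unless the file was opened with O_NONBLOCK, the read blocks until a completed amthif response is queued for this file object.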
+ */ +int mei_amthif_read(struct mei_device *dev, struct file *file, + char __user *ubuf, size_t length, loff_t *offset) +{ + int rets; + int wait_ret; + struct mei_cl_cb *cb = NULL; + struct mei_cl *cl = file->private_data; + unsigned long timeout; + int i; + + /* Only possible if we are in timeout */ + if (!cl || cl != &dev->iamthif_cl) { + dev_dbg(&dev->pdev->dev, "bad file ext.\n"); + return -ETIME; + } + + i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); + if (i < 0) { + dev_dbg(&dev->pdev->dev, "amthif client not found.\n"); + return -ENOTTY; + } + dev_dbg(&dev->pdev->dev, "checking amthif data\n"); + cb = mei_amthif_find_read_list_entry(dev, file); + + /* Check if we can block or not */ + if (cb == NULL && file->f_flags & O_NONBLOCK) + return -EAGAIN; + + + dev_dbg(&dev->pdev->dev, "waiting for amthif data\n"); + while (cb == NULL) { + /* unlock the mutex */ + mutex_unlock(&dev->device_lock); + + wait_ret = wait_event_interruptible(dev->iamthif_cl.wait, + (cb = mei_amthif_find_read_list_entry(dev, file))); + + /* lock the mutex again */ + mutex_lock(&dev->device_lock); + + if (wait_ret) + return -ERESTARTSYS; + + dev_dbg(&dev->pdev->dev, "woke up from sleep\n"); + } + + + dev_dbg(&dev->pdev->dev, "Got amthif data\n"); + dev->iamthif_timer = 0; + + if (cb) { + timeout = cb->read_time + + mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); + dev_dbg(&dev->pdev->dev, "amthif timeout = %lu\n", + timeout); + + if (time_after(jiffies, timeout)) { + dev_dbg(&dev->pdev->dev, "amthif Time out\n"); + /* the 15 sec timeout for the message has expired */ + list_del(&cb->list); + rets = -ETIME; + goto free; + } + } + /* if the whole message will fit remove it from the list */ + if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) + list_del(&cb->list); + else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { + /* end of the message has been reached */ + list_del(&cb->list); + rets = 0; + goto free; + } + /* else the buffer is only partially read, so do not + * remove the message from the completed list + */ + + dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n", + cb->response_buffer.size); + dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); + + /* length is being truncated to PAGE_SIZE, however, + * the buf_idx may point beyond */ + length = min_t(size_t, length, (cb->buf_idx - *offset)); + + if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { + dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + } else { + rets = length; + if ((*offset + length) < cb->buf_idx) { + *offset += length; + goto out; + } + } +free: + dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n"); + *offset = 0; + mei_io_cb_free(cb); +out: + return rets; +} + +/** + * mei_amthif_send_cmd - send amthif command to the ME + * + * @dev: the device structure + * @cb: mei callback struct + * + * returns 0 on success, <0 on failure.
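+ * + * The command payload is copied into dev->iamthif_msg_buf and is either sent right away, when flow control credits and the host buffer are available, or queued on the write list to be sent later from the interrupt thread (see mei_amthif_irq_write).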
+ * + */ +static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) +{ + struct mei_msg_hdr mei_hdr; + int ret; + + if (!dev || !cb) + return -ENODEV; + + dev_dbg(&dev->pdev->dev, "write data to amthif client.\n"); + + dev->iamthif_state = MEI_IAMTHIF_WRITING; + dev->iamthif_current_cb = cb; + dev->iamthif_file_object = cb->file_object; + dev->iamthif_canceled = false; + dev->iamthif_ioctl = true; + dev->iamthif_msg_buf_size = cb->request_buffer.size; + memcpy(dev->iamthif_msg_buf, cb->request_buffer.data, + cb->request_buffer.size); + + ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl); + if (ret < 0) + return ret; + + if (ret && mei_hbuf_acquire(dev)) { + ret = 0; + if (cb->request_buffer.size > mei_hbuf_max_len(dev)) { + mei_hdr.length = mei_hbuf_max_len(dev); + mei_hdr.msg_complete = 0; + } else { + mei_hdr.length = cb->request_buffer.size; + mei_hdr.msg_complete = 1; + } + + mei_hdr.host_addr = dev->iamthif_cl.host_client_id; + mei_hdr.me_addr = dev->iamthif_cl.me_client_id; + mei_hdr.reserved = 0; + mei_hdr.internal = 0; + dev->iamthif_msg_buf_index += mei_hdr.length; + ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); + if (ret) + return ret; + + if (mei_hdr.msg_complete) { + if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl)) + return -EIO; + dev->iamthif_flow_control_pending = true; + dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; + dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n"); + dev->iamthif_current_cb = cb; + dev->iamthif_file_object = cb->file_object; + list_add_tail(&cb->list, &dev->write_waiting_list.list); + } else { + dev_dbg(&dev->pdev->dev, "message is not complete, so add amthif cb to write list.\n"); + list_add_tail(&cb->list, &dev->write_list.list); + } + } else { + list_add_tail(&cb->list, &dev->write_list.list); + } + return 0; +} + +/** + * mei_amthif_write - write amthif data to amthif client + * + * @dev: the device structure + * @cb: mei callback struct + * + * returns 0 on success, <0 on failure. + * + */ +int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) +{ + int ret; + + if (!dev || !cb) + return -ENODEV; + + ret = mei_io_cb_alloc_resp_buf(cb, dev->iamthif_mtu); + if (ret) + return ret; + + cb->fop_type = MEI_FOP_WRITE; + + if (!list_empty(&dev->amthif_cmd_list.list) || + dev->iamthif_state != MEI_IAMTHIF_IDLE) { + dev_dbg(&dev->pdev->dev, + "amthif state = %d\n", dev->iamthif_state); + dev_dbg(&dev->pdev->dev, "AMTHIF: add cb to the wait list\n"); + list_add_tail(&cb->list, &dev->amthif_cmd_list.list); + return 0; + } + return mei_amthif_send_cmd(dev, cb); +} + +/** + * mei_amthif_run_next_cmd - send the next queued amthif command + * + * @dev: the device structure
+ */ +void mei_amthif_run_next_cmd(struct mei_device *dev) +{ + struct mei_cl_cb *pos = NULL; + struct mei_cl_cb *next = NULL; + int status; + + if (!dev) + return; + + dev->iamthif_msg_buf_size = 0; + dev->iamthif_msg_buf_index = 0; + dev->iamthif_canceled = false; + dev->iamthif_ioctl = true; + dev->iamthif_state = MEI_IAMTHIF_IDLE; + dev->iamthif_timer = 0; + dev->iamthif_file_object = NULL; + + dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n"); + + list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) { + list_del(&pos->list); + + if (pos->cl && pos->cl == &dev->iamthif_cl) { + status = mei_amthif_send_cmd(dev, pos); + if (status) { + dev_dbg(&dev->pdev->dev, + "amthif write failed status = %d\n", + status); + return; + } + break; + } + } +} + + +unsigned int mei_amthif_poll(struct mei_device *dev, + struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + + poll_wait(file, &dev->iamthif_cl.wait, wait); + + mutex_lock(&dev->device_lock); + if (!mei_cl_is_connected(&dev->iamthif_cl)) { + + mask = POLLERR; + + } else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && + dev->iamthif_file_object == file) { + + mask |= (POLLIN | POLLRDNORM); + dev_dbg(&dev->pdev->dev, "run next amthif cb\n"); + mei_amthif_run_next_cmd(dev); + } + mutex_unlock(&dev->device_lock); + + return mask; +} + + + +/** + * mei_amthif_irq_write - write iamthif command in irq thread context. + * + * @dev: the device structure. + * @cb_pos: callback block. + * @cl: private data of the file object. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + struct mei_msg_hdr mei_hdr; + size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index; + u32 msg_slots = mei_data2slots(len); + int slots; + int rets; + + rets = mei_cl_flow_ctrl_creds(cl); + if (rets < 0) + return rets; + + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + return 0; + } + + mei_hdr.host_addr = cl->host_client_id; + mei_hdr.me_addr = cl->me_client_id; + mei_hdr.reserved = 0; + mei_hdr.internal = 0; + + slots = mei_hbuf_empty_slots(dev); + + if (slots >= msg_slots) { + mei_hdr.length = len; + mei_hdr.msg_complete = 1; + /* Split the message only if we can write the whole host buffer */ + } else if (slots == dev->hbuf_depth) { + msg_slots = slots; + len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); + mei_hdr.length = len; + mei_hdr.msg_complete = 0; + } else { + /* wait for next time the host buffer is empty */ + return 0; + } + + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); + + rets = mei_write_message(dev, &mei_hdr, + dev->iamthif_msg_buf + dev->iamthif_msg_buf_index); + if (rets) { + dev->iamthif_state = MEI_IAMTHIF_IDLE; + cl->status = rets; + list_del(&cb->list); + return rets; + } + + if (mei_cl_flow_ctrl_reduce(cl)) + return -EIO; + + dev->iamthif_msg_buf_index += mei_hdr.length; + cl->status = 0; + + if (mei_hdr.msg_complete) { + dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; + dev->iamthif_flow_control_pending = true; + + /* save iamthif cb sent to amthif client */ + cb->buf_idx = dev->iamthif_msg_buf_index; + dev->iamthif_current_cb = cb; + + list_move_tail(&cb->list, &dev->write_waiting_list.list); + } + + + return 0; +} + +/** + * mei_amthif_irq_read_message - read routine after ISR to + * handle the read amthif message + * + * @dev: the device structure + * @mei_hdr: header of 
amthif message + * @complete_list: An instance of our list structure + * + * returns 0 on success, <0 on failure. + */ +int mei_amthif_irq_read_msg(struct mei_device *dev, + struct mei_msg_hdr *mei_hdr, + struct mei_cl_cb *complete_list) +{ + struct mei_cl_cb *cb; + unsigned char *buffer; + + BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id); + BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING); + + buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index; + BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length); + + mei_read_slots(dev, buffer, mei_hdr->length); + + dev->iamthif_msg_buf_index += mei_hdr->length; + + if (!mei_hdr->msg_complete) + return 0; + + dev_dbg(&dev->pdev->dev, "amthif_message_buffer_index =%d\n", + mei_hdr->length); + + dev_dbg(&dev->pdev->dev, "completed amthif read.\n "); + if (!dev->iamthif_current_cb) + return -ENODEV; + + cb = dev->iamthif_current_cb; + dev->iamthif_current_cb = NULL; + + if (!cb->cl) + return -ENODEV; + + dev->iamthif_stall_timer = 0; + cb->buf_idx = dev->iamthif_msg_buf_index; + cb->read_time = jiffies; + if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) { + /* found the iamthif cb */ + dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n "); + dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n "); + list_add_tail(&cb->list, &complete_list->list); + } + return 0; +} + +/** + * mei_amthif_irq_read - prepares to read amthif data. + * + * @dev: the device structure. + * @slots: free slots. + * + * returns 0, OK; otherwise, error. + */ +int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) +{ + u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); + + if (*slots < msg_slots) + return -EMSGSIZE; + + *slots -= msg_slots; + + if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) { + dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n"); + return -EIO; + } + + dev_dbg(&dev->pdev->dev, "iamthif flow control success\n"); + dev->iamthif_state = MEI_IAMTHIF_READING; + dev->iamthif_flow_control_pending = false; + dev->iamthif_msg_buf_index = 0; + dev->iamthif_msg_buf_size = 0; + dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER; + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + return 0; +} + +/** + * mei_amthif_complete - complete amthif callback. + * + * @dev: the device structure. + * @cb_pos: callback block. + */ +void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) +{ + if (dev->iamthif_canceled != 1) { + dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; + dev->iamthif_stall_timer = 0; + memcpy(cb->response_buffer.data, + dev->iamthif_msg_buf, + dev->iamthif_msg_buf_index); + list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); + dev_dbg(&dev->pdev->dev, "amthif read completed\n"); + dev->iamthif_timer = jiffies; + dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", + dev->iamthif_timer); + } else { + mei_amthif_run_next_cmd(dev); + } + + dev_dbg(&dev->pdev->dev, "completing amthif call back.\n"); + wake_up_interruptible(&dev->iamthif_cl.wait); +} + +/** + * mei_clear_list - removes all callbacks associated with file + * from mei_cb_list + * + * @dev: device structure. 
+ * @file: file structure + * @mei_cb_list: callbacks list + * + * mei_clear_list is called to clear resources associated with file + * when application calls close function or Ctrl-C was pressed + * + * returns true if callback removed from the list, false otherwise + */ +static bool mei_clear_list(struct mei_device *dev, + const struct file *file, struct list_head *mei_cb_list) +{ + struct mei_cl_cb *cb_pos = NULL; + struct mei_cl_cb *cb_next = NULL; + bool removed = false; + + /* list all list member */ + list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) { + /* check if list member associated with a file */ + if (file == cb_pos->file_object) { + /* remove member from the list */ + list_del(&cb_pos->list); + /* check if cb equal to current iamthif cb */ + if (dev->iamthif_current_cb == cb_pos) { + dev->iamthif_current_cb = NULL; + /* send flow control to iamthif client */ + mei_hbm_cl_flow_control_req(dev, + &dev->iamthif_cl); + } + /* free all allocated buffers */ + mei_io_cb_free(cb_pos); + cb_pos = NULL; + removed = true; + } + } + return removed; +} + +/** + * mei_clear_lists - removes all callbacks associated with file + * + * @dev: device structure + * @file: file structure + * + * mei_clear_lists is called to clear resources associated with file + * when application calls close function or Ctrl-C was pressed + * + * returns true if callback removed from the list, false otherwise + */ +static bool mei_clear_lists(struct mei_device *dev, struct file *file) +{ + bool removed = false; + + /* remove callbacks associated with a file */ + mei_clear_list(dev, file, &dev->amthif_cmd_list.list); + if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list)) + removed = true; + + mei_clear_list(dev, file, &dev->ctrl_rd_list.list); + + if (mei_clear_list(dev, file, &dev->ctrl_wr_list.list)) + removed = true; + + if (mei_clear_list(dev, file, &dev->write_waiting_list.list)) + removed = true; + + if (mei_clear_list(dev, file, &dev->write_list.list)) + removed = true; + + /* check if iamthif_current_cb not NULL */ + if (dev->iamthif_current_cb && !removed) { + /* check file and iamthif current cb association */ + if (dev->iamthif_current_cb->file_object == file) { + /* remove cb */ + mei_io_cb_free(dev->iamthif_current_cb); + dev->iamthif_current_cb = NULL; + removed = true; + } + } + return removed; +} + +/** +* mei_amthif_release - the release function +* +* @dev: device structure +* @file: pointer to file structure +* +* returns 0 on success, <0 on error +*/ +int mei_amthif_release(struct mei_device *dev, struct file *file) +{ + if (dev->iamthif_open_count > 0) + dev->iamthif_open_count--; + + if (dev->iamthif_file_object == file && + dev->iamthif_state != MEI_IAMTHIF_IDLE) { + + dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n", + dev->iamthif_state); + dev->iamthif_canceled = true; + if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) { + dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n"); + mei_amthif_run_next_cmd(dev); + } + } + + if (mei_clear_lists(dev, file)) + dev->iamthif_state = MEI_IAMTHIF_IDLE; + + return 0; +} diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c new file mode 100644 index 00000000000..0e993ef28b9 --- /dev/null +++ b/drivers/misc/mei/bus.c @@ -0,0 +1,548 @@ +/* + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2012-2013, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/mei_cl_bus.h> + +#include "mei_dev.h" +#include "client.h" + +#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) +#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) + +static int mei_cl_device_match(struct device *dev, struct device_driver *drv) +{ + struct mei_cl_device *device = to_mei_cl_device(dev); + struct mei_cl_driver *driver = to_mei_cl_driver(drv); + const struct mei_cl_device_id *id; + + if (!device) + return 0; + + if (!driver || !driver->id_table) + return 0; + + id = driver->id_table; + + while (id->name[0]) { + if (!strncmp(dev_name(dev), id->name, sizeof(id->name))) + return 1; + + id++; + } + + return 0; +} + +static int mei_cl_device_probe(struct device *dev) +{ + struct mei_cl_device *device = to_mei_cl_device(dev); + struct mei_cl_driver *driver; + struct mei_cl_device_id id; + + if (!device) + return 0; + + driver = to_mei_cl_driver(dev->driver); + if (!driver || !driver->probe) + return -ENODEV; + + dev_dbg(dev, "Device probe\n"); + + strncpy(id.name, dev_name(dev), sizeof(id.name)); + + return driver->probe(device, &id); +} + +static int mei_cl_device_remove(struct device *dev) +{ + struct mei_cl_device *device = to_mei_cl_device(dev); + struct mei_cl_driver *driver; + + if (!device || !dev->driver) + return 0; + + if (device->event_cb) { + device->event_cb = NULL; + cancel_work_sync(&device->event_work); + } + + driver = to_mei_cl_driver(dev->driver); + if (!driver->remove) { + dev->driver = NULL; + + return 0; + } + + return driver->remove(device); +} + +static ssize_t modalias_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + int len; + + len = snprintf(buf, PAGE_SIZE, "mei:%s\n", dev_name(dev)); + + return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *mei_cl_dev_attrs[] = { + &dev_attr_modalias.attr, + NULL, +}; +ATTRIBUTE_GROUPS(mei_cl_dev); + +static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + if (add_uevent_var(env, "MODALIAS=mei:%s", dev_name(dev))) + return -ENOMEM; + + return 0; +} + +static struct bus_type mei_cl_bus_type = { + .name = "mei", + .dev_groups = mei_cl_dev_groups, + .match = mei_cl_device_match, + .probe = mei_cl_device_probe, + .remove = mei_cl_device_remove, + .uevent = mei_cl_uevent, +}; + +static void mei_cl_dev_release(struct device *dev) +{ + kfree(to_mei_cl_device(dev)); +} + +static struct device_type mei_cl_device_type = { + .release = mei_cl_dev_release, +}; + +static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev, + uuid_le uuid) +{ + struct mei_cl *cl; + + list_for_each_entry(cl, &dev->device_list, device_link) { + if (!uuid_le_cmp(uuid, cl->device_uuid)) + return cl; + } + + return NULL; +} +struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, + uuid_le uuid, char *name, + struct mei_cl_ops *ops) +{ + struct mei_cl_device *device; + struct mei_cl *cl; + int status; + + cl = mei_bus_find_mei_cl_by_uuid(dev, uuid); + if (cl == NULL) + return NULL; + + device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL); + if (!device) + return NULL; + + device->cl = cl; + device->ops = ops; + + device->dev.parent = &dev->pdev->dev; + device->dev.bus = &mei_cl_bus_type; + device->dev.type = &mei_cl_device_type; + + dev_set_name(&device->dev, "%s", name); + + status = device_register(&device->dev); + if (status) { + dev_err(&dev->pdev->dev, "Failed to register MEI device\n"); + kfree(device); + return NULL; + } + + cl->device = device; + + dev_dbg(&device->dev, "client %s registered\n", name); + + return device; +} +EXPORT_SYMBOL_GPL(mei_cl_add_device); + +void mei_cl_remove_device(struct mei_cl_device *device) +{ + device_unregister(&device->dev); +} +EXPORT_SYMBOL_GPL(mei_cl_remove_device); + +int __mei_cl_driver_register(struct mei_cl_driver *driver, struct module *owner) +{ + int err; + + driver->driver.name = driver->name; + driver->driver.owner = owner; + driver->driver.bus = &mei_cl_bus_type; + + err = driver_register(&driver->driver); + if (err) + return err; + + pr_debug("mei: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__mei_cl_driver_register); + +void mei_cl_driver_unregister(struct mei_cl_driver *driver) +{ + driver_unregister(&driver->driver); + + pr_debug("mei: driver [%s] unregistered\n", driver->driver.name); +} +EXPORT_SYMBOL_GPL(mei_cl_driver_unregister); + +static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, + bool blocking) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int id; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (cl->state != MEI_FILE_CONNECTED) + return -ENODEV; + + /* Check if we have an ME client device */ + id = mei_me_cl_by_id(dev, cl->me_client_id); + if (id < 0) + return id; + + if (length > dev->me_clients[id].props.max_msg_length) + return -EFBIG; + + cb = mei_io_cb_init(cl, NULL); + if (!cb) + return -ENOMEM; + + rets = mei_io_cb_alloc_req_buf(cb, length); + if (rets < 0) { + mei_io_cb_free(cb); + return rets; + } + + memcpy(cb->request_buffer.data, buf, length); + + mutex_lock(&dev->device_lock); + + rets = mei_cl_write(cl, cb, blocking); + + mutex_unlock(&dev->device_lock); + if (rets < 0) + mei_io_cb_free(cb); + + return 
rets; +} + +int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + size_t r_length; + int err; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + if (!cl->read_cb) { + err = mei_cl_read_start(cl, length); + if (err < 0) { + mutex_unlock(&dev->device_lock); + return err; + } + } + + if (cl->reading_state != MEI_READ_COMPLETE && + !waitqueue_active(&cl->rx_wait)) { + + mutex_unlock(&dev->device_lock); + + if (wait_event_interruptible(cl->rx_wait, + cl->reading_state == MEI_READ_COMPLETE || + mei_cl_is_transitioning(cl))) { + + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } + + mutex_lock(&dev->device_lock); + } + + cb = cl->read_cb; + + if (cl->reading_state != MEI_READ_COMPLETE) { + r_length = 0; + goto out; + } + + r_length = min_t(size_t, length, cb->buf_idx); + + memcpy(buf, cb->response_buffer.data, r_length); + + mei_io_cb_free(cb); + cl->reading_state = MEI_IDLE; + cl->read_cb = NULL; + +out: + mutex_unlock(&dev->device_lock); + + return r_length; +} + +inline int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length) +{ + return ___mei_cl_send(cl, buf, length, 0); +} + +inline int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length) +{ + return ___mei_cl_send(cl, buf, length, 1); +} + +int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length) +{ + struct mei_cl *cl = device->cl; + + if (cl == NULL) + return -ENODEV; + + if (device->ops && device->ops->send) + return device->ops->send(device, buf, length); + + return __mei_cl_send(cl, buf, length); +} +EXPORT_SYMBOL_GPL(mei_cl_send); + +int mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length) +{ + struct mei_cl *cl = device->cl; + + if (cl == NULL) + return -ENODEV; + + if (device->ops && device->ops->recv) + return device->ops->recv(device, buf, length); + + return __mei_cl_recv(cl, buf, length); +} +EXPORT_SYMBOL_GPL(mei_cl_recv); + +static void mei_bus_event_work(struct work_struct *work) +{ + struct mei_cl_device *device; + + device = container_of(work, struct mei_cl_device, event_work); + + if (device->event_cb) + device->event_cb(device, device->events, device->event_context); + + device->events = 0; + + /* Prepare for the next read */ + mei_cl_read_start(device->cl, 0); +} + +int mei_cl_register_event_cb(struct mei_cl_device *device, + mei_cl_event_cb_t event_cb, void *context) +{ + if (device->event_cb) + return -EALREADY; + + device->events = 0; + device->event_cb = event_cb; + device->event_context = context; + INIT_WORK(&device->event_work, mei_bus_event_work); + + mei_cl_read_start(device->cl, 0); + + return 0; +} +EXPORT_SYMBOL_GPL(mei_cl_register_event_cb); + +void *mei_cl_get_drvdata(const struct mei_cl_device *device) +{ + return dev_get_drvdata(&device->dev); +} +EXPORT_SYMBOL_GPL(mei_cl_get_drvdata); + +void mei_cl_set_drvdata(struct mei_cl_device *device, void *data) +{ + dev_set_drvdata(&device->dev, data); +} +EXPORT_SYMBOL_GPL(mei_cl_set_drvdata); + +int mei_cl_enable_device(struct mei_cl_device *device) +{ + int err; + struct mei_device *dev; + struct mei_cl *cl = device->cl; + + if (cl == NULL) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + err = mei_cl_connect(cl, NULL); + if (err < 0) { + mutex_unlock(&dev->device_lock); + dev_err(&dev->pdev->dev, "Could not connect to the ME client"); + + return err; + } + + mutex_unlock(&dev->device_lock); + + if (device->event_cb && !cl->read_cb) + 
mei_cl_read_start(device->cl, 0); + + if (!device->ops || !device->ops->enable) + return 0; + + return device->ops->enable(device); +} +EXPORT_SYMBOL_GPL(mei_cl_enable_device); + +int mei_cl_disable_device(struct mei_cl_device *device) +{ + int err; + struct mei_device *dev; + struct mei_cl *cl = device->cl; + + if (cl == NULL) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + if (cl->state != MEI_FILE_CONNECTED) { + mutex_unlock(&dev->device_lock); + dev_err(&dev->pdev->dev, "Already disconnected"); + + return 0; + } + + cl->state = MEI_FILE_DISCONNECTING; + + err = mei_cl_disconnect(cl); + if (err < 0) { + mutex_unlock(&dev->device_lock); + dev_err(&dev->pdev->dev, + "Could not disconnect from the ME client"); + + return err; + } + + /* Flush queues and remove any pending read */ + mei_cl_flush_queues(cl); + + if (cl->read_cb) { + struct mei_cl_cb *cb = NULL; + + cb = mei_cl_find_read_cb(cl); + /* Remove entry from read list */ + if (cb) + list_del(&cb->list); + + cb = cl->read_cb; + cl->read_cb = NULL; + + if (cb) { + mei_io_cb_free(cb); + cb = NULL; + } + } + + device->event_cb = NULL; + + mutex_unlock(&dev->device_lock); + + if (!device->ops || !device->ops->disable) + return 0; + + return device->ops->disable(device); +} +EXPORT_SYMBOL_GPL(mei_cl_disable_device); + +void mei_cl_bus_rx_event(struct mei_cl *cl) +{ + struct mei_cl_device *device = cl->device; + + if (!device || !device->event_cb) + return; + + set_bit(MEI_CL_EVENT_RX, &device->events); + + schedule_work(&device->event_work); +} + +void mei_cl_bus_remove_devices(struct mei_device *dev) +{ + struct mei_cl *cl, *next; + + mutex_lock(&dev->device_lock); + list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { + if (cl->device) + mei_cl_remove_device(cl->device); + + list_del(&cl->device_link); + mei_cl_unlink(cl); + kfree(cl); + } + mutex_unlock(&dev->device_lock); +} + +int __init mei_cl_bus_init(void) +{ + return bus_register(&mei_cl_bus_type); +} + +void __exit mei_cl_bus_exit(void) +{ + bus_unregister(&mei_cl_bus_type); +} diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c new file mode 100644 index 00000000000..59d20c599b1 --- /dev/null +++ b/drivers/misc/mei/client.c @@ -0,0 +1,1067 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +/** + * mei_me_cl_by_uuid - locate index of me client + * + * @dev: mei device + * @uuid: me client uuid + * + * Locking: called under "dev->device_lock" lock + * + * returns me client index or -ENOENT if not found + */ +int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) +{ + int i; + + for (i = 0; i < dev->me_clients_num; ++i) + if (uuid_le_cmp(*uuid, + dev->me_clients[i].props.protocol_name) == 0) + return i; + + return -ENOENT; +} + + +/** + * mei_me_cl_by_id - return index in the me_clients array for client_id + * + * @dev: the device structure + * @client_id: me client id + * + * Locking: called under "dev->device_lock" lock + * + * returns index on success, -ENOENT on failure. + */ + +int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) +{ + int i; + + for (i = 0; i < dev->me_clients_num; i++) + if (dev->me_clients[i].client_id == client_id) + return i; + + return -ENOENT; +} + + +/** + * mei_cl_cmp_id - tells if the clients are the same + * + * @cl1: host client 1 + * @cl2: host client 2 + * + * returns true - if the clients have the same host and me ids + * false - otherwise + */ +static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, + const struct mei_cl *cl2) +{ + return cl1 && cl2 && + (cl1->host_client_id == cl2->host_client_id) && + (cl1->me_client_id == cl2->me_client_id); +} + +/** + * __mei_io_list_flush - removes cbs belonging to cl. + * + * @list: an instance of our list structure + * @cl: host client, can be NULL for flushing the whole list + * @free: whether to free the cbs + */ +static void __mei_io_list_flush(struct mei_cl_cb *list, + struct mei_cl *cl, bool free) +{ + struct mei_cl_cb *cb; + struct mei_cl_cb *next; + + /* enable removing everything if no cl is specified */ + list_for_each_entry_safe(cb, next, &list->list, list) { + if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) { + list_del(&cb->list); + if (free) + mei_io_cb_free(cb); + } + } +} + +/** + * mei_io_list_flush - removes list entry belonging to cl.
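+ * (a non-freeing wrapper around __mei_io_list_flush)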
+ * + * @list: An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, false); +} + + +/** + * mei_io_list_free - removes cbs belonging to cl and frees them + * + * @list: An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, true); +} + +/** + * mei_io_cb_free - free mei_cl_cb related memory + * + * @cb: mei callback struct + */ +void mei_io_cb_free(struct mei_cl_cb *cb) +{ + if (cb == NULL) + return; + + kfree(cb->request_buffer.data); + kfree(cb->response_buffer.data); + kfree(cb); +} + +/** + * mei_io_cb_init - allocate and initialize io callback + * + * @cl: mei client + * @fp: pointer to file structure + * + * returns mei_cl_cb pointer or NULL + */ +struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) +{ + struct mei_cl_cb *cb; + + cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL); + if (!cb) + return NULL; + + mei_io_list_init(cb); + + cb->file_object = fp; + cb->cl = cl; + cb->buf_idx = 0; + return cb; +} + +/** + * mei_io_cb_alloc_req_buf - allocate request buffer + * + * @cb: io callback structure + * @length: size of the buffer + * + * returns 0 on success + * -EINVAL if cb is NULL + * -ENOMEM if allocation failed + */ +int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) +{ + if (!cb) + return -EINVAL; + + if (length == 0) + return 0; + + cb->request_buffer.data = kmalloc(length, GFP_KERNEL); + if (!cb->request_buffer.data) + return -ENOMEM; + cb->request_buffer.size = length; + return 0; +} +/** + * mei_io_cb_alloc_resp_buf - allocate response buffer + * + * @cb: io callback structure + * @length: size of the buffer + * + * returns 0 on success + * -EINVAL if cb is NULL + * -ENOMEM if allocation failed + */ +int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) +{ + if (!cb) + return -EINVAL; + + if (length == 0) + return 0; + + cb->response_buffer.data = kmalloc(length, GFP_KERNEL); + if (!cb->response_buffer.data) + return -ENOMEM; + cb->response_buffer.size = length; + return 0; +} + + + +/** + * mei_cl_flush_queues - flushes queue lists belonging to cl. + * + * @cl: host client + */ +int mei_cl_flush_queues(struct mei_cl *cl) +{ + struct mei_device *dev; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + cl_dbg(dev, cl, "remove list entry belonging to cl\n"); + mei_io_list_flush(&cl->dev->read_list, cl); + mei_io_list_free(&cl->dev->write_list, cl); + mei_io_list_free(&cl->dev->write_waiting_list, cl); + mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); + mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); + mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); + mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); + return 0; +} + + +/** + * mei_cl_init - initializes cl. + * + * @cl: host client to be initialized + * @dev: mei device + */ +void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) +{ + memset(cl, 0, sizeof(struct mei_cl)); + init_waitqueue_head(&cl->wait); + init_waitqueue_head(&cl->rx_wait); + init_waitqueue_head(&cl->tx_wait); + INIT_LIST_HEAD(&cl->link); + INIT_LIST_HEAD(&cl->device_link); + cl->reading_state = MEI_IDLE; + cl->writing_state = MEI_IDLE; + cl->dev = dev; +} + +/** + * mei_cl_allocate - allocates cl structure and sets it up.
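+ * (the returned client is initialized but not yet linked; mei_cl_link reserves a host client id for it)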
+ * + * @dev: mei device + * returns the allocated client structure or NULL on failure + */ +struct mei_cl *mei_cl_allocate(struct mei_device *dev) +{ + struct mei_cl *cl; + + cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL); + if (!cl) + return NULL; + + mei_cl_init(cl, dev); + + return cl; +} + +/** + * mei_cl_find_read_cb - find this cl's callback in the read list + * + * @cl: host client + * + * returns cb on success, NULL on error + */ +struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + struct mei_cl_cb *cb; + + list_for_each_entry(cb, &dev->read_list.list, list) + if (mei_cl_cmp_id(cl, cb->cl)) + return cb; + return NULL; +} + +/** + * mei_cl_link - allocate host id in the host map + * + * @cl: host client + * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY for a generic one + * + * returns 0 on success + * -EINVAL on incorrect values + * -EMFILE if the host client map or the open handle count is exhausted + */ +int mei_cl_link(struct mei_cl *cl, int id) +{ + struct mei_device *dev; + long open_handle_count; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + /* If Id is not assigned get one */ + if (id == MEI_HOST_CLIENT_ID_ANY) + id = find_first_zero_bit(dev->host_clients_map, + MEI_CLIENTS_MAX); + + if (id >= MEI_CLIENTS_MAX) { + dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX); + return -EMFILE; + } + + open_handle_count = dev->open_handle_count + dev->iamthif_open_count; + if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { + dev_err(&dev->pdev->dev, "open_handle_count exceeded %d", + MEI_MAX_OPEN_HANDLE_COUNT); + return -EMFILE; + } + + dev->open_handle_count++; + + cl->host_client_id = id; + list_add_tail(&cl->link, &dev->file_list); + + set_bit(id, dev->host_clients_map); + + cl->state = MEI_FILE_INITIALIZING; + + cl_dbg(dev, cl, "link cl\n"); + return 0; +} + +/** + * mei_cl_unlink - remove host client from the list + * + * @cl: host client + */ +int mei_cl_unlink(struct mei_cl *cl) +{ + struct mei_device *dev; + + /* don't shout on error exit path */ + if (!cl) + return 0; + + /* wd and amthif might not be initialized */ + if (!cl->dev) + return 0; + + dev = cl->dev; + + cl_dbg(dev, cl, "unlink client"); + + if (dev->open_handle_count > 0) + dev->open_handle_count--; + + /* never clear the 0 bit */ + if (cl->host_client_id) + clear_bit(cl->host_client_id, dev->host_clients_map); + + list_del_init(&cl->link); + + cl->state = MEI_FILE_INITIALIZING; + + return 0; +} + + +void mei_host_client_init(struct work_struct *work) +{ + struct mei_device *dev = container_of(work, + struct mei_device, init_work); + struct mei_client_properties *client_props; + int i; + + mutex_lock(&dev->device_lock); + + for (i = 0; i < dev->me_clients_num; i++) { + client_props = &dev->me_clients[i].props; + + if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid)) + mei_amthif_host_init(dev); + else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) + mei_wd_host_init(dev); + else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid)) + mei_nfc_host_init(dev); + + } + + dev->dev_state = MEI_DEV_ENABLED; + dev->reset_count = 0; + + mutex_unlock(&dev->device_lock); + + pm_runtime_mark_last_busy(&dev->pdev->dev); + dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n"); + pm_runtime_autosuspend(&dev->pdev->dev); +} + +/** + * mei_hbuf_acquire - try to acquire host buffer + * + * @dev: the device structure + * + * returns true if host buffer was acquired + */ +bool mei_hbuf_acquire(struct mei_device *dev) +{ + if (mei_pg_state(dev) == MEI_PG_ON || + dev->pg_event == MEI_PG_EVENT_WAIT) { +
dev_dbg(&dev->pdev->dev, "device is in pg\n"); + return false; + } + + if (!dev->hbuf_is_ready) { + dev_dbg(&dev->pdev->dev, "hbuf is not ready\n"); + return false; + } + + dev->hbuf_is_ready = false; + + return true; +} + +/** + * mei_cl_disconnect - disconnect host client from the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. + */ +int mei_cl_disconnect(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets, err; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + cl_dbg(dev, cl, "disconnecting"); + + if (cl->state != MEI_FILE_DISCONNECTING) + return 0; + + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + cb = mei_io_cb_init(cl, NULL); + if (!cb) { + rets = -ENOMEM; + goto free; + } + + cb->fop_type = MEI_FOP_CLOSE; + if (mei_hbuf_acquire(dev)) { + if (mei_hbm_cl_disconnect_req(dev, cl)) { + rets = -ENODEV; + cl_err(dev, cl, "failed to disconnect.\n"); + goto free; + } + mdelay(10); /* Wait for hardware disconnection ready */ + list_add_tail(&cb->list, &dev->ctrl_rd_list.list); + } else { + cl_dbg(dev, cl, "add disconnect cb to control write list\n"); + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + + } + mutex_unlock(&dev->device_lock); + + err = wait_event_timeout(dev->wait_recvd_msg, + MEI_FILE_DISCONNECTED == cl->state, + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + + mutex_lock(&dev->device_lock); + if (MEI_FILE_DISCONNECTED == cl->state) { + rets = 0; + cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); + } else { + rets = -ENODEV; + if (MEI_FILE_DISCONNECTED != cl->state) + cl_err(dev, cl, "wrong status client disconnect.\n"); + + if (err) + cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err); + + cl_err(dev, cl, "failed to disconnect from FW client.\n"); + } + + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); +free: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + pm_runtime_put_autosuspend(&dev->pdev->dev); + + mei_io_cb_free(cb); + return rets; +} + + +/** + * mei_cl_is_other_connecting - checks if other + * client with the same me client id is connecting + * + * @cl: private data of the file object + * + * returns true if other client is connected, false - otherwise. + */ +bool mei_cl_is_other_connecting(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl *ocl; /* the other client */ + + if (WARN_ON(!cl || !cl->dev)) + return false; + + dev = cl->dev; + + list_for_each_entry(ocl, &dev->file_list, link) { + if (ocl->state == MEI_FILE_CONNECTING && + ocl != cl && + cl->me_client_id == ocl->me_client_id) + return true; + + } + + return false; +} + +/** + * mei_cl_connect - connect host client to the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. 
+ */ +int mei_cl_connect(struct mei_cl *cl, struct file *file) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + cb = mei_io_cb_init(cl, file); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + cb->fop_type = MEI_FOP_CONNECT; + + /* run hbuf acquire last so we don't have to undo */ + if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { + cl->state = MEI_FILE_CONNECTING; + if (mei_hbm_cl_connect_req(dev, cl)) { + rets = -ENODEV; + goto out; + } + cl->timer_count = MEI_CONNECT_TIMEOUT; + list_add_tail(&cb->list, &dev->ctrl_rd_list.list); + } else { + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_recvd_msg, + (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECTED), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (cl->state != MEI_FILE_CONNECTED) { + /* something went really wrong */ + if (!cl->status) + cl->status = -EFAULT; + + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); + } + + rets = cl->status; + +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + pm_runtime_put_autosuspend(&dev->pdev->dev); + + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. + * + * @cl: private data of the file object + * + * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. + * -ENOENT if mei_cl is not present + * -EINVAL if single_recv_buf == 0 + */ +int mei_cl_flow_ctrl_creds(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_me_client *me_cl; + int id; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + if (!dev->me_clients_num) + return 0; + + if (cl->mei_flow_ctrl_creds > 0) + return 1; + + id = mei_me_cl_by_id(dev, cl->me_client_id); + if (id < 0) { + cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); + return id; + } + + me_cl = &dev->me_clients[id]; + if (me_cl->mei_flow_ctrl_creds) { + if (WARN_ON(me_cl->props.single_recv_buf == 0)) + return -EINVAL; + return 1; + } + return 0; +} + +/** + * mei_cl_flow_ctrl_reduce - reduces flow_control. + * + * @cl: private data of the file object + * + * @returns + * 0 on success + * -ENOENT when me client is not found + * -EINVAL when ctrl credits are <= 0 + */ +int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_me_client *me_cl; + int id; + + if (WARN_ON(!cl || !cl->dev)) + return -EINVAL; + + dev = cl->dev; + + id = mei_me_cl_by_id(dev, cl->me_client_id); + if (id < 0) { + cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); + return id; + } + + me_cl = &dev->me_clients[id]; + if (me_cl->props.single_recv_buf != 0) { + if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + me_cl->mei_flow_ctrl_creds--; + } else { + if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + cl->mei_flow_ctrl_creds--; + } + return 0; +} + +/** + * mei_cl_read_start - the start read client message function. + * + * @cl: host client + * + * returns 0 on success, <0 on failure. 
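+ * + * Only one read may be pending per host client; -EBUSY is returned while a previous read callback is still queued.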
+ */ +int mei_cl_read_start(struct mei_cl *cl, size_t length) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + int i; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + if (cl->read_cb) { + cl_dbg(dev, cl, "read is pending.\n"); + return -EBUSY; + } + i = mei_me_cl_by_id(dev, cl->me_client_id); + if (i < 0) { + cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); + return -ENOTTY; + } + + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + cb = mei_io_cb_init(cl, NULL); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + /* always allocate at least client max message */ + length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); + rets = mei_io_cb_alloc_resp_buf(cb, length); + if (rets) + goto out; + + cb->fop_type = MEI_FOP_READ; + if (mei_hbuf_acquire(dev)) { + rets = mei_hbm_cl_flow_control_req(dev, cl); + if (rets < 0) + goto out; + + list_add_tail(&cb->list, &dev->read_list.list); + } else { + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + } + + cl->read_cb = cb; + +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + pm_runtime_put_autosuspend(&dev->pdev->dev); + + if (rets) + mei_io_cb_free(cb); + + return rets; +} + +/** + * mei_cl_irq_write - write a message to device + * from the interrupt thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise error. + */ +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev; + struct mei_msg_data *buf; + struct mei_msg_hdr mei_hdr; + size_t len; + u32 msg_slots; + int slots; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + buf = &cb->request_buffer; + + rets = mei_cl_flow_ctrl_creds(cl); + if (rets < 0) + return rets; + + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + return 0; + } + + slots = mei_hbuf_empty_slots(dev); + len = buf->size - cb->buf_idx; + msg_slots = mei_data2slots(len); + + mei_hdr.host_addr = cl->host_client_id; + mei_hdr.me_addr = cl->me_client_id; + mei_hdr.reserved = 0; + mei_hdr.internal = cb->internal; + + if (slots >= msg_slots) { + mei_hdr.length = len; + mei_hdr.msg_complete = 1; + /* Split the message only if we can write the whole host buffer */ + } else if (slots == dev->hbuf_depth) { + msg_slots = slots; + len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); + mei_hdr.length = len; + mei_hdr.msg_complete = 0; + } else { + /* wait for next time the host buffer is empty */ + return 0; + } + + cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", + cb->request_buffer.size, cb->buf_idx); + + rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); + if (rets) { + cl->status = rets; + list_move_tail(&cb->list, &cmpl_list->list); + return rets; + } + + cl->status = 0; + cl->writing_state = MEI_WRITING; + cb->buf_idx += mei_hdr.length; + + if (mei_hdr.msg_complete) { + if (mei_cl_flow_ctrl_reduce(cl)) + return -EIO; + list_move_tail(&cb->list, &dev->write_waiting_list.list); + } + + return 0; +} + +/** + * mei_cl_write - submit a write cb to mei device + * + * Locking: called under "dev->device_lock" lock + * + * @cl: host client + * @cb: write callback with filled data + * + * returns number of bytes sent on success, <0 on
failure. + */ +int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) +{ + struct mei_device *dev; + struct mei_msg_data *buf; + struct mei_msg_hdr mei_hdr; + int rets; + + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + if (WARN_ON(!cb)) + return -EINVAL; + + dev = cl->dev; + + + buf = &cb->request_buffer; + + cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size); + + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + cb->fop_type = MEI_FOP_WRITE; + cb->buf_idx = 0; + cl->writing_state = MEI_IDLE; + + mei_hdr.host_addr = cl->host_client_id; + mei_hdr.me_addr = cl->me_client_id; + mei_hdr.reserved = 0; + mei_hdr.msg_complete = 0; + mei_hdr.internal = cb->internal; + + rets = mei_cl_flow_ctrl_creds(cl); + if (rets < 0) + goto err; + + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + rets = buf->size; + goto out; + } + if (!mei_hbuf_acquire(dev)) { + cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); + rets = buf->size; + goto out; + } + + /* Check for a maximum length */ + if (buf->size > mei_hbuf_max_len(dev)) { + mei_hdr.length = mei_hbuf_max_len(dev); + mei_hdr.msg_complete = 0; + } else { + mei_hdr.length = buf->size; + mei_hdr.msg_complete = 1; + } + + rets = mei_write_message(dev, &mei_hdr, buf->data); + if (rets) + goto err; + + cl->writing_state = MEI_WRITING; + cb->buf_idx = mei_hdr.length; + +out: + if (mei_hdr.msg_complete) { + rets = mei_cl_flow_ctrl_reduce(cl); + if (rets < 0) + goto err; + + list_add_tail(&cb->list, &dev->write_waiting_list.list); + } else { + list_add_tail(&cb->list, &dev->write_list.list); + } + + + if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { + + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE); + mutex_lock(&dev->device_lock); + /* wait_event_interruptible returns -ERESTARTSYS */ + if (rets) { + if (signal_pending(current)) + rets = -EINTR; + goto err; + } + } + + rets = buf->size; +err: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + pm_runtime_put_autosuspend(&dev->pdev->dev); + + return rets; +} + + +/** + * mei_cl_complete - processes completed operation for a client + * + * @cl: private data of the file object. + * @cb: callback block. 
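/*
 * Hypothetical caller sketch, not part of the patch: driving a blocking
 * write with the helpers declared in client.h. mei_cl_write() expects
 * dev->device_lock to be held; on the success path the callback is later
 * consumed by mei_cl_complete(). Error handling is trimmed for brevity.
 */
static int example_send(struct mei_cl *cl, const void *data, size_t len)
{
	struct mei_cl_cb *cb;
	int rets;

	cb = mei_io_cb_init(cl, NULL);		/* no file: kernel-side user */
	if (!cb)
		return -ENOMEM;

	rets = mei_io_cb_alloc_req_buf(cb, len);
	if (rets) {
		mei_io_cb_free(cb);
		return rets;
	}
	memcpy(cb->request_buffer.data, data, len);

	/* blocking=true: sleeps until writing_state == MEI_WRITE_COMPLETE */
	rets = mei_cl_write(cl, cb, true);
	return rets < 0 ? rets : 0;
}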
+ */ +void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + if (cb->fop_type == MEI_FOP_WRITE) { + mei_io_cb_free(cb); + cb = NULL; + cl->writing_state = MEI_WRITE_COMPLETE; + if (waitqueue_active(&cl->tx_wait)) + wake_up_interruptible(&cl->tx_wait); + + } else if (cb->fop_type == MEI_FOP_READ && + MEI_READING == cl->reading_state) { + cl->reading_state = MEI_READ_COMPLETE; + if (waitqueue_active(&cl->rx_wait)) + wake_up_interruptible(&cl->rx_wait); + else + mei_cl_bus_rx_event(cl); + + } +} + + +/** + * mei_cl_all_disconnect - forcefully disconnect all connected clients + * + * @dev: mei device + */ + +void mei_cl_all_disconnect(struct mei_device *dev) +{ + struct mei_cl *cl; + + list_for_each_entry(cl, &dev->file_list, link) { + cl->state = MEI_FILE_DISCONNECTED; + cl->mei_flow_ctrl_creds = 0; + cl->timer_count = 0; + } +} + + +/** + * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted + * + * @dev: mei device + */ +void mei_cl_all_wakeup(struct mei_device *dev) +{ + struct mei_cl *cl; + list_for_each_entry(cl, &dev->file_list, link) { + if (waitqueue_active(&cl->rx_wait)) { + cl_dbg(dev, cl, "Waking up reading client!\n"); + wake_up_interruptible(&cl->rx_wait); + } + if (waitqueue_active(&cl->tx_wait)) { + cl_dbg(dev, cl, "Waking up writing client!\n"); + wake_up_interruptible(&cl->tx_wait); + } + } +} + +/** + * mei_cl_all_write_clear - clear all pending writes + * + * @dev: mei device + */ +void mei_cl_all_write_clear(struct mei_device *dev) +{ + mei_io_list_free(&dev->write_list, NULL); + mei_io_list_free(&dev->write_waiting_list, NULL); +} + + diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h new file mode 100644 index 00000000000..96d5de0389f --- /dev/null +++ b/drivers/misc/mei/client.h @@ -0,0 +1,109 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_CLIENT_H_ +#define _MEI_CLIENT_H_ + +#include <linux/types.h> +#include <linux/watchdog.h> +#include <linux/poll.h> +#include <linux/mei.h> + +#include "mei_dev.h" + +int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid); +int mei_me_cl_by_id(struct mei_device *dev, u8 client_id); + +/* + * MEI IO Functions + */ +struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp); +void mei_io_cb_free(struct mei_cl_cb *priv_cb); +int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length); +int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length); + + +/** + * mei_io_list_init - Sets up a queue list.
+ * + * @list: An instance cl callback structure + */ +static inline void mei_io_list_init(struct mei_cl_cb *list) +{ + INIT_LIST_HEAD(&list->list); +} +/* + * MEI Host Client Functions + */ + +struct mei_cl *mei_cl_allocate(struct mei_device *dev); +void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); + + +int mei_cl_link(struct mei_cl *cl, int id); +int mei_cl_unlink(struct mei_cl *cl); + +int mei_cl_flush_queues(struct mei_cl *cl); +struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl); + + +int mei_cl_flow_ctrl_creds(struct mei_cl *cl); + +int mei_cl_flow_ctrl_reduce(struct mei_cl *cl); +/* + * MEI input output function prototype + */ +static inline bool mei_cl_is_connected(struct mei_cl *cl) +{ + return cl->dev && + cl->dev->dev_state == MEI_DEV_ENABLED && + cl->state == MEI_FILE_CONNECTED; +} +static inline bool mei_cl_is_transitioning(struct mei_cl *cl) +{ + return MEI_FILE_INITIALIZING == cl->state || + MEI_FILE_DISCONNECTED == cl->state || + MEI_FILE_DISCONNECTING == cl->state; +} + +bool mei_cl_is_other_connecting(struct mei_cl *cl); +int mei_cl_disconnect(struct mei_cl *cl); +int mei_cl_connect(struct mei_cl *cl, struct file *file); +int mei_cl_read_start(struct mei_cl *cl, size_t length); +int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list); + +void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); + +void mei_host_client_init(struct work_struct *work); + + + +void mei_cl_all_disconnect(struct mei_device *dev); +void mei_cl_all_wakeup(struct mei_device *dev); +void mei_cl_all_write_clear(struct mei_device *dev); + +#define MEI_CL_FMT "cl:host=%02d me=%02d " +#define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id + +#define cl_dbg(dev, cl, format, arg...) \ + dev_dbg(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#define cl_err(dev, cl, format, arg...) \ + dev_err(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#endif /* _MEI_CLIENT_H_ */ diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c new file mode 100644 index 00000000000..ced5b777c70 --- /dev/null +++ b/drivers/misc/mei/debugfs.c @@ -0,0 +1,197 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2012-2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
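/*
 * Usage note, not part of the patch: the cl_dbg()/cl_err() macros above
 * prefix every record with the connection's host/me address pair, so a
 * per-client trace can be grepped without each call site repeating the
 * IDs. A call such as
 *
 *	cl_dbg(dev, cl, "read is pending.\n");
 *
 * expands to roughly
 *
 *	dev_dbg(&dev->pdev->dev, "cl:host=%02d me=%02d read is pending.\n",
 *		cl->host_client_id, cl->me_client_id);
 */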
+ * + */ +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/pci.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hw.h" + +static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct mei_device *dev = fp->private_data; + struct mei_me_client *cl; + const size_t bufsz = 1024; + char *buf = kzalloc(bufsz, GFP_KERNEL); + int i; + int pos = 0; + int ret; + + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, + " |id|addr| UUID |con|msg len|\n"); + + mutex_lock(&dev->device_lock); + + /* if the driver is not enabled the list won't be consistent */ + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + + for (i = 0; i < dev->me_clients_num; i++) { + cl = &dev->me_clients[i]; + + /* skip me clients that cannot be connected */ + if (cl->props.max_number_of_connections == 0) + continue; + + pos += scnprintf(buf + pos, bufsz - pos, + "%2d|%2d|%4d|%pUl|%3d|%7d|\n", + i, cl->client_id, + cl->props.fixed_address, + &cl->props.protocol_name, + cl->props.max_number_of_connections, + cl->props.max_msg_length); + } +out: + mutex_unlock(&dev->device_lock); + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); + kfree(buf); + return ret; +} + +static const struct file_operations mei_dbgfs_fops_meclients = { + .open = simple_open, + .read = mei_dbgfs_read_meclients, + .llseek = generic_file_llseek, +}; + +static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct mei_device *dev = fp->private_data; + struct mei_cl *cl; + const size_t bufsz = 1024; + char *buf; + int i = 0; + int pos = 0; + int ret; + + if (!dev) + return -ENODEV; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, + " |me|host|state|rd|wr|\n"); + + mutex_lock(&dev->device_lock); + + /* if the driver is not enabled the list won't be consistent */ + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + + list_for_each_entry(cl, &dev->file_list, link) { + + pos += scnprintf(buf + pos, bufsz - pos, + "%2d|%2d|%4d|%5d|%2d|%2d|\n", + i, cl->me_client_id, cl->host_client_id, cl->state, + cl->reading_state, cl->writing_state); + i++; + } +out: + mutex_unlock(&dev->device_lock); + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); + kfree(buf); + return ret; +} + +static const struct file_operations mei_dbgfs_fops_active = { + .open = simple_open, + .read = mei_dbgfs_read_active, + .llseek = generic_file_llseek, +}; + +static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct mei_device *dev = fp->private_data; + const size_t bufsz = 1024; + char *buf = kzalloc(bufsz, GFP_KERNEL); + int pos = 0; + int ret; + + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "%s\n", + mei_dev_state_str(dev->dev_state)); + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); + kfree(buf); + return ret; +} +static const struct file_operations mei_dbgfs_fops_devstate = { + .open = simple_open, + .read = mei_dbgfs_read_devstate, + .llseek = generic_file_llseek, +}; + +/** + * mei_dbgfs_deregister - Remove the debugfs files and directories + * @dev: pointer to mei device private data + */ +void mei_dbgfs_deregister(struct mei_device *dev) +{ + if (!dev->dbgfs_dir) + return; + debugfs_remove_recursive(dev->dbgfs_dir); + dev->dbgfs_dir = NULL; +} + +/** + * mei_dbgfs_register - add the debugfs files + * + * @dev: the device structure + * @name: the device name + */ +int
mei_dbgfs_register(struct mei_device *dev, const char *name) +{ + struct dentry *dir, *f; + dir = debugfs_create_dir(name, NULL); + if (!dir) + return -ENOMEM; + + f = debugfs_create_file("meclients", S_IRUSR, dir, + dev, &mei_dbgfs_fops_meclients); + if (!f) { + dev_err(&dev->pdev->dev, "meclients: registration failed\n"); + goto err; + } + f = debugfs_create_file("active", S_IRUSR, dir, + dev, &mei_dbgfs_fops_active); + if (!f) { + dev_err(&dev->pdev->dev, "active: registration failed\n"); + goto err; + } + f = debugfs_create_file("devstate", S_IRUSR, dir, + dev, &mei_dbgfs_fops_devstate); + if (!f) { + dev_err(&dev->pdev->dev, "devstate: registration failed\n"); + goto err; + } + dev->dbgfs_dir = dir; + return 0; +err: + mei_dbgfs_deregister(dev); + return -ENODEV; +} + diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c new file mode 100644 index 00000000000..804106209d7 --- /dev/null +++ b/drivers/misc/mei/hbm.c @@ -0,0 +1,879 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/export.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/mei.h> +#include <linux/pm_runtime.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) +{ +#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status + switch (status) { + MEI_CL_CS(SUCCESS); + MEI_CL_CS(NOT_FOUND); + MEI_CL_CS(ALREADY_STARTED); + MEI_CL_CS(OUT_OF_RESOURCES); + MEI_CL_CS(MESSAGE_SMALL); + default: return "unknown"; + } +#undef MEI_CL_CS +} + +/** + * mei_cl_conn_status_to_errno - convert client connect response + * status to error code + * + * @status: client connect response status + * + * returns corresponding error code + */ +static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) +{ + switch (status) { + case MEI_CL_CONN_SUCCESS: return 0; + case MEI_CL_CONN_NOT_FOUND: return -ENOTTY; + case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY; + case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY; + case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL; + default: return -EINVAL; + } +} + +/** + * mei_hbm_idle - set hbm to idle state + * + * @dev: the device structure + */ +void mei_hbm_idle(struct mei_device *dev) +{ + dev->init_clients_timer = 0; + dev->hbm_state = MEI_HBM_IDLE; +} + +/** + * mei_hbm_reset - reset hbm counters and bookkeeping data structures + * + * @dev: the device structure + */ +void mei_hbm_reset(struct mei_device *dev) +{ + dev->me_clients_num = 0; + dev->me_client_presentation_num = 0; + dev->me_client_index = 0; + + kfree(dev->me_clients); + dev->me_clients = NULL; + + mei_hbm_idle(dev); +} + +/** + * mei_hbm_me_cl_allocate - allocates storage for me clients + * + * @dev: the device structure + * + * returns 0 on success -ENOMEM on allocation failure + */ +static int mei_hbm_me_cl_allocate(struct mei_device *dev) +{ + struct mei_me_client *clients; + int b; + + mei_hbm_reset(dev); + + /* count how many ME
clients we have */ + for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) + dev->me_clients_num++; + + if (dev->me_clients_num == 0) + return 0; + + dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n", + dev->me_clients_num * sizeof(struct mei_me_client)); + /* allocate storage for ME clients representation */ + clients = kcalloc(dev->me_clients_num, + sizeof(struct mei_me_client), GFP_KERNEL); + if (!clients) { + dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); + return -ENOMEM; + } + dev->me_clients = clients; + return 0; +} + +/** + * mei_hbm_cl_hdr - construct client hbm header + * + * @cl: - client + * @hbm_cmd: host bus message command + * @buf: buffer for cl header + * @len: buffer length + */ +static inline +void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) +{ + struct mei_hbm_cl_cmd *cmd = buf; + + memset(cmd, 0, len); + + cmd->hbm_cmd = hbm_cmd; + cmd->host_addr = cl->host_client_id; + cmd->me_addr = cl->me_client_id; +} + +/** + * mei_hbm_cl_addr_equal - tells if they have the same address + * + * @cl: - client + * @buf: buffer with cl header + * + * returns true if addresses are the same + */ +static inline +bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) +{ + struct mei_hbm_cl_cmd *cmd = buf; + return cl->host_client_id == cmd->host_addr && + cl->me_client_id == cmd->me_addr; +} + + +int mei_hbm_start_wait(struct mei_device *dev) +{ + int ret; + if (dev->hbm_state > MEI_HBM_START) + return 0; + + mutex_unlock(&dev->device_lock); + ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, + dev->hbm_state == MEI_HBM_IDLE || + dev->hbm_state >= MEI_HBM_STARTED, + mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) { + dev->hbm_state = MEI_HBM_IDLE; + dev_err(&dev->pdev->dev, "waiting for mei start failed\n"); + return -ETIME; + } + return 0; +} + +/** + * mei_hbm_start_req - sends start request message. + * + * @dev: the device structure + * + * returns 0 on success and < 0 on failure + */ +int mei_hbm_start_req(struct mei_device *dev) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_host_version_request *start_req; + const size_t len = sizeof(struct hbm_host_version_request); + int ret; + + mei_hbm_hdr(mei_hdr, len); + + /* host start message */ + start_req = (struct hbm_host_version_request *)dev->wr_msg.data; + memset(start_req, 0, len); + start_req->hbm_cmd = HOST_START_REQ_CMD; + start_req->host_version.major_version = HBM_MAJOR_VERSION; + start_req->host_version.minor_version = HBM_MINOR_VERSION; + + dev->hbm_state = MEI_HBM_IDLE; + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n", + ret); + return ret; + } + + dev->hbm_state = MEI_HBM_START; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + return 0; +} + +/* + * mei_hbm_enum_clients_req - sends enumeration client request message. 
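/*
 * Pattern note, not part of the patch: mei_hbm_start_wait() above uses
 * the driver's standard drop-lock/wait/retake idiom (also visible in
 * mei_cl_connect() earlier). Reduced to a skeleton with hypothetical
 * names:
 */
static int toy_wait_hbm_state(struct mei_device *dev, enum mei_hbm_state want)
{
	int ret;

	mutex_unlock(&dev->device_lock);	/* let the IRQ thread progress */
	ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
			dev->hbm_state == want,
			mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
	mutex_lock(&dev->device_lock);		/* re-check state under lock */

	return (ret <= 0 && dev->hbm_state != want) ? -ETIME : 0;
}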
+ * + * @dev: the device structure + * + * returns 0 on success and < 0 on failure + */ +static int mei_hbm_enum_clients_req(struct mei_device *dev) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_host_enum_request *enum_req; + const size_t len = sizeof(struct hbm_host_enum_request); + int ret; + + /* enumerate clients */ + mei_hbm_hdr(mei_hdr, len); + + enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; + memset(enum_req, 0, len); + enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; + + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n", + ret); + return ret; + } + dev->hbm_state = MEI_HBM_ENUM_CLIENTS; + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + return 0; +} + +/** + * mei_hbm_prop_req - request property for a single client + * + * @dev: the device structure + * + * returns 0 on success and < 0 on failure + */ + +static int mei_hbm_prop_req(struct mei_device *dev) +{ + + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_props_request *prop_req; + const size_t len = sizeof(struct hbm_props_request); + unsigned long next_client_index; + unsigned long client_num; + int ret; + + client_num = dev->me_client_presentation_num; + + next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, + dev->me_client_index); + + /* We got all client properties */ + if (next_client_index == MEI_CLIENTS_MAX) { + dev->hbm_state = MEI_HBM_STARTED; + schedule_work(&dev->init_work); + + return 0; + } + + dev->me_clients[client_num].client_id = next_client_index; + dev->me_clients[client_num].mei_flow_ctrl_creds = 0; + + mei_hbm_hdr(mei_hdr, len); + prop_req = (struct hbm_props_request *)dev->wr_msg.data; + + memset(prop_req, 0, sizeof(struct hbm_props_request)); + + + prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; + prop_req->address = next_client_index; + + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n", + ret); + return ret; + } + + dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->me_client_index = next_client_index; + + return 0; +} + +/* + * mei_hbm_pg - sends pg command + * + * @dev: the device structure + * @pg_cmd: the pg command code + * + * This function returns -EIO on write failure + */ +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_power_gate *req; + const size_t len = sizeof(struct hbm_power_gate); + int ret; + + mei_hbm_hdr(mei_hdr, len); + + req = (struct hbm_power_gate *)dev->wr_msg.data; + memset(req, 0, len); + req->hbm_cmd = pg_cmd; + + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) + dev_err(&dev->pdev->dev, "power gate command write failed.\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mei_hbm_pg); + +/** + * mei_hbm_stop_req - send stop request message + * + * @dev - mei device + * @cl: client info + * + * This function returns -EIO on write failure + */ +static int mei_hbm_stop_req(struct mei_device *dev) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_host_stop_request *req = + (struct hbm_host_stop_request *)dev->wr_msg.data; + const size_t len = sizeof(struct hbm_host_stop_request); + + mei_hbm_hdr(mei_hdr, len); + + memset(req, 0, len); + req->hbm_cmd = HOST_STOP_REQ_CMD; + req->reason = DRIVER_STOP_REQUEST; + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_flow_control_req - sends flow control 
request. + * + * @dev: the device structure + * @cl: client info + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_flow_control); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len); + + cl_dbg(dev, cl, "sending flow control\n"); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_add_single_flow_creds - adds single buffer credentials. + * + * @dev: the device structure + * @flow: flow control. + * + * return 0 on success, < 0 otherwise + */ +static int mei_hbm_add_single_flow_creds(struct mei_device *dev, + struct hbm_flow_control *flow) +{ + struct mei_me_client *me_cl; + int id; + + id = mei_me_cl_by_id(dev, flow->me_addr); + if (id < 0) { + dev_err(&dev->pdev->dev, "no such me client %d\n", + flow->me_addr); + return id; + } + + me_cl = &dev->me_clients[id]; + if (me_cl->props.single_recv_buf) { + me_cl->mei_flow_ctrl_creds++; + dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", + flow->me_addr); + dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", + me_cl->mei_flow_ctrl_creds); + } else { + BUG(); /* error in flow control */ + } + + return 0; +} + +/** + * mei_hbm_cl_flow_control_res - flow control response from me + * + * @dev: the device structure + * @flow_control: flow control response bus message + */ +static void mei_hbm_cl_flow_control_res(struct mei_device *dev, + struct hbm_flow_control *flow_control) +{ + struct mei_cl *cl; + + if (!flow_control->host_addr) { + /* single receive buffer */ + mei_hbm_add_single_flow_creds(dev, flow_control); + return; + } + + /* normal connection */ + list_for_each_entry(cl, &dev->file_list, link) { + if (mei_hbm_cl_addr_equal(cl, flow_control)) { + cl->mei_flow_ctrl_creds++; + dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n", + flow_control->host_addr, flow_control->me_addr); + dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n", + cl->mei_flow_ctrl_creds); + break; + } + } +} + + +/** + * mei_hbm_cl_disconnect_req - sends disconnect message to fw. 
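/*
 * Routing rule sketch, not part of the patch: a flow-control grant that
 * carries host_addr == 0 is pooled on the shared me-client entry (the
 * single_recv_buf case handled by mei_hbm_add_single_flow_creds() above);
 * any other grant belongs to exactly one (host, me) connection.
 */
static void toy_apply_grant(struct mei_me_client *me_cl, struct mei_cl *cl,
			    u8 host_addr)
{
	if (host_addr == 0)
		me_cl->mei_flow_ctrl_creds++;	/* shared single-buffer pool */
	else
		cl->mei_flow_ctrl_creds++;	/* per-connection credit */
}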
+ * + * @dev: the device structure + * @cl: a client to disconnect from + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_client_connect_request); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_disconnect_rsp - sends disconnect response to the FW + * + * @dev: the device structure + * @cl: a client to disconnect from + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_client_connect_response); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_disconnect_res - disconnect response from ME + * + * @dev: the device structure + * @rs: disconnect response bus message + */ +static void mei_hbm_cl_disconnect_res(struct mei_device *dev, + struct hbm_client_connect_response *rs) +{ + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + + dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n", + rs->host_addr, rs->me_addr, rs->status); + + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { + cl = cb->cl; + + /* this should not happen */ + if (WARN_ON(!cl)) { + list_del(&cb->list); + return; + } + + if (mei_hbm_cl_addr_equal(cl, rs)) { + list_del(&cb->list); + if (rs->status == MEI_CL_DISCONN_SUCCESS) + cl->state = MEI_FILE_DISCONNECTED; + + cl->status = 0; + cl->timer_count = 0; + break; + } + } +} + +/** + * mei_hbm_cl_connect_req - send connection request to specific me client + * + * @dev: the device structure + * @cl: a client to connect to + * + * returns -EIO on write failure + */ +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_client_connect_request); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_connect_res - connect response from the ME + * + * @dev: the device structure + * @rs: connect response bus message + */ +static void mei_hbm_cl_connect_res(struct mei_device *dev, + struct hbm_client_connect_response *rs) +{ + + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + + dev_dbg(&dev->pdev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n", + rs->host_addr, rs->me_addr, + mei_cl_conn_status_str(rs->status)); + + cl = NULL; + + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { + + cl = cb->cl; + /* this should not happen */ + if (WARN_ON(!cl)) { + list_del_init(&cb->list); + continue; + } + + if (cb->fop_type != MEI_FOP_CONNECT) + continue; + + if (mei_hbm_cl_addr_equal(cl, rs)) { + list_del(&cb->list); + break; + } + } + + if (!cl) + return; + + cl->timer_count = 0; + if (rs->status == MEI_CL_CONN_SUCCESS) + cl->state = MEI_FILE_CONNECTED; + else + cl->state = MEI_FILE_DISCONNECTED; + cl->status = mei_cl_conn_status_to_errno(rs->status); +} + + +/** + * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware + * host sends disconnect
response + * + * @dev: the device structure. + * @disconnect_req: disconnect request bus message from the me + * + * returns -ENOMEM on allocation failure + */ +static int mei_hbm_fw_disconnect_req(struct mei_device *dev, + struct hbm_client_connect_request *disconnect_req) +{ + struct mei_cl *cl; + struct mei_cl_cb *cb; + + list_for_each_entry(cl, &dev->file_list, link) { + if (mei_hbm_cl_addr_equal(cl, disconnect_req)) { + dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n", + disconnect_req->host_addr, + disconnect_req->me_addr); + cl->state = MEI_FILE_DISCONNECTED; + cl->timer_count = 0; + + cb = mei_io_cb_init(cl, NULL); + if (!cb) + return -ENOMEM; + cb->fop_type = MEI_FOP_DISCONNECT_RSP; + cl_dbg(dev, cl, "add disconnect response as first\n"); + list_add(&cb->list, &dev->ctrl_wr_list.list); + + break; + } + } + return 0; +} + + +/** + * mei_hbm_version_is_supported - checks whether the driver can + * support the hbm version of the device + * + * @dev: the device structure + * returns true if driver can support hbm version of the device + */ +bool mei_hbm_version_is_supported(struct mei_device *dev) +{ + return (dev->version.major_version < HBM_MAJOR_VERSION) || + (dev->version.major_version == HBM_MAJOR_VERSION && + dev->version.minor_version <= HBM_MINOR_VERSION); +} + +/** + * mei_hbm_dispatch - bottom half read routine after ISR to + * handle the read bus message cmd processing. + * + * @dev: the device structure + * @mei_hdr: header of bus message + * + * returns 0 on success and < 0 on failure + */ +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) +{ + struct mei_bus_message *mei_msg; + struct mei_me_client *me_client; + struct hbm_host_version_response *version_res; + struct hbm_client_connect_response *connect_res; + struct hbm_client_connect_response *disconnect_res; + struct hbm_client_connect_request *disconnect_req; + struct hbm_flow_control *flow_control; + struct hbm_props_response *props_res; + struct hbm_host_enum_response *enum_res; + + /* read the message to our buffer */ + BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); + mei_read_slots(dev, dev->rd_msg_buf, hdr->length); + mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; + + /* ignore spurious message and prevent reset nesting + * hbm is put to idle during system reset + */ + if (dev->hbm_state == MEI_HBM_IDLE) { + dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n"); + return 0; + } + + switch (mei_msg->hbm_cmd) { + case HOST_START_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n"); + + dev->init_clients_timer = 0; + + version_res = (struct hbm_host_version_response *)mei_msg; + + dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", + HBM_MAJOR_VERSION, HBM_MINOR_VERSION, + version_res->me_max_version.major_version, + version_res->me_max_version.minor_version); + + if (version_res->host_version_supported) { + dev->version.major_version = HBM_MAJOR_VERSION; + dev->version.minor_version = HBM_MINOR_VERSION; + } else { + dev->version.major_version = + version_res->me_max_version.major_version; + dev->version.minor_version = + version_res->me_max_version.minor_version; + } + + if (!mei_hbm_version_is_supported(dev)) { + dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); + + dev->hbm_state = MEI_HBM_STOPPED; + if (mei_hbm_stop_req(dev)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); + return -EIO; + } + break; + } + + if (dev->dev_state 
!= MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_START) { + dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dev->hbm_state = MEI_HBM_STARTED; + + if (mei_hbm_enum_clients_req(dev)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n"); + return -EIO; + } + + wake_up_interruptible(&dev->wait_recvd_msg); + break; + + case CLIENT_CONNECT_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n"); + + connect_res = (struct hbm_client_connect_response *) mei_msg; + mei_hbm_cl_connect_res(dev, connect_res); + wake_up(&dev->wait_recvd_msg); + break; + + case CLIENT_DISCONNECT_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n"); + + disconnect_res = (struct hbm_client_connect_response *) mei_msg; + mei_hbm_cl_disconnect_res(dev, disconnect_res); + wake_up(&dev->wait_recvd_msg); + break; + + case MEI_FLOW_CONTROL_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n"); + + flow_control = (struct hbm_flow_control *) mei_msg; + mei_hbm_cl_flow_control_res(dev, flow_control); + break; + + case MEI_PG_ISOLATION_ENTRY_RES_CMD: + dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n"); + dev->pg_event = MEI_PG_EVENT_RECEIVED; + if (waitqueue_active(&dev->wait_pg)) + wake_up(&dev->wait_pg); + break; + + case MEI_PG_ISOLATION_EXIT_REQ_CMD: + dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n"); + dev->pg_event = MEI_PG_EVENT_RECEIVED; + if (waitqueue_active(&dev->wait_pg)) + wake_up(&dev->wait_pg); + else + /* + * If the driver is not waiting on this then + * this is HW initiated exit from PG. + * Start runtime pm resume sequence to exit from PG. 
+ */ + pm_request_resume(&dev->pdev->dev); + break; + + case HOST_CLIENT_PROPERTIES_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->me_clients == NULL) { + dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n"); + return -EPROTO; + } + + props_res = (struct hbm_props_response *)mei_msg; + me_client = &dev->me_clients[dev->me_client_presentation_num]; + + if (props_res->status) { + dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n", + props_res->status); + return -EPROTO; + } + + if (me_client->client_id != props_res->address) { + dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n", + me_client->client_id, props_res->address); + return -EPROTO; + } + + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { + dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + me_client->props = props_res->client_properties; + dev->me_client_index++; + dev->me_client_presentation_num++; + + /* request property for the next client */ + if (mei_hbm_prop_req(dev)) + return -EIO; + + break; + + case HOST_ENUM_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n"); + + dev->init_clients_timer = 0; + + enum_res = (struct hbm_host_enum_response *) mei_msg; + BUILD_BUG_ON(sizeof(dev->me_clients_map) + < sizeof(enum_res->valid_addresses)); + memcpy(dev->me_clients_map, enum_res->valid_addresses, + sizeof(enum_res->valid_addresses)); + + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { + dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + if (mei_hbm_me_cl_allocate(dev)) { + dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n"); + return -ENOMEM; + } + + dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; + + /* first property request */ + if (mei_hbm_prop_req(dev)) + return -EIO; + + break; + + case HOST_STOP_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_STOPPED) { + dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dev->dev_state = MEI_DEV_POWER_DOWN; + dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n"); + /* force the reset */ + return -EPROTO; + break; + + case CLIENT_DISCONNECT_REQ_CMD: + dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n"); + + disconnect_req = (struct hbm_client_connect_request *)mei_msg; + mei_hbm_fw_disconnect_req(dev, disconnect_req); + break; + + case ME_STOP_REQ_CMD: + dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); + dev->hbm_state = MEI_HBM_STOPPED; + if (mei_hbm_stop_req(dev)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); + return -EIO; + } + break; + default: + BUG(); + break; + + } + return 0; +} + diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h new file mode 100644 index 00000000000..683eb2835ce --- /dev/null +++ b/drivers/misc/mei/hbm.h @@ -0,0 +1,64 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_HBM_H_ +#define _MEI_HBM_H_ + +struct mei_device; +struct mei_msg_hdr; +struct mei_cl; + +/** + * enum mei_hbm_state - host bus message protocol state + * + * @MEI_HBM_IDLE : protocol not started + * @MEI_HBM_START : start request message was sent + * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent + * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties + */ +enum mei_hbm_state { + MEI_HBM_IDLE = 0, + MEI_HBM_START, + MEI_HBM_STARTED, + MEI_HBM_ENUM_CLIENTS, + MEI_HBM_CLIENT_PROPERTIES, + MEI_HBM_STOPPED, +}; + +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); + +static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) +{ + hdr->host_addr = 0; + hdr->me_addr = 0; + hdr->length = length; + hdr->msg_complete = 1; + hdr->reserved = 0; +} + +void mei_hbm_idle(struct mei_device *dev); +void mei_hbm_reset(struct mei_device *dev); +int mei_hbm_start_req(struct mei_device *dev); +int mei_hbm_start_wait(struct mei_device *dev); +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); +bool mei_hbm_version_is_supported(struct mei_device *dev); +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd); + +#endif /* _MEI_HBM_H_ */ + diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h new file mode 100644 index 00000000000..a7856c0ac57 --- /dev/null +++ b/drivers/misc/mei/hw-me-regs.h @@ -0,0 +1,185 @@ +/****************************************************************************** + * Intel Management Engine Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * All rights reserved. 
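/*
 * Usage note, not part of the patch: mei_hbm_hdr() above fills the fixed
 * control-message header; HBM traffic always travels on the (0, 0)
 * address pair and is never fragmented. For a 4-byte command:
 *
 *	struct mei_msg_hdr hdr;
 *
 *	mei_hbm_hdr(&hdr, 4);
 *	// hdr = { .host_addr = 0, .me_addr = 0, .length = 4,
 *	//         .msg_complete = 1, .reserved = 0 }
 */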
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _MEI_HW_MEI_REGS_H_ +#define _MEI_HW_MEI_REGS_H_ + +/* + * MEI device IDs + */ +#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */ +#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */ +#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */ +#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */ + +#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */ +#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */ + +#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */ +#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */ +#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */ +#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */ +#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */ + +#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */ +#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */ + +#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */ +#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */ + +#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */ +#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */ + +#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */ +#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */ + +#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */ +#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */ + +#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */ +#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ +#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ + +#define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */ +#define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ +#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ +#define 
MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ + +#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ + +/* Host Firmware Status Registers in PCI Config Space */ +#define PCI_CFG_HFS_1 0x40 +#define PCI_CFG_HFS_2 0x48 + +/* + * MEI HW Section + */ + +/* MEI registers */ +/* H_CB_WW - Host Circular Buffer (CB) Write Window register */ +#define H_CB_WW 0 +/* H_CSR - Host Control Status register */ +#define H_CSR 4 +/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */ +#define ME_CB_RW 8 +/* ME_CSR_HA - ME Control Status Host Access register (read only) */ +#define ME_CSR_HA 0xC +/* H_HPG_CSR - PGI register */ +#define H_HPG_CSR 0x10 + + +/* register bits of H_CSR (Host Control Status register) */ +/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ +#define H_CBD 0xFF000000 +/* Host Circular Buffer Write Pointer */ +#define H_CBWP 0x00FF0000 +/* Host Circular Buffer Read Pointer */ +#define H_CBRP 0x0000FF00 +/* Host Reset */ +#define H_RST 0x00000010 +/* Host Ready */ +#define H_RDY 0x00000008 +/* Host Interrupt Generate */ +#define H_IG 0x00000004 +/* Host Interrupt Status */ +#define H_IS 0x00000002 +/* Host Interrupt Enable */ +#define H_IE 0x00000001 + + +/* register bits of ME_CSR_HA (ME Control Status Host Access register) */ +/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only +access to ME_CBD */ +#define ME_CBD_HRA 0xFF000000 +/* ME CB Write Pointer HRA - host read only access to ME_CBWP */ +#define ME_CBWP_HRA 0x00FF0000 +/* ME CB Read Pointer HRA - host read only access to ME_CBRP */ +#define ME_CBRP_HRA 0x0000FF00 +/* ME Power Gate Isolation Capability HRA - host read only access */ +#define ME_PGIC_HRA 0x00000040 +/* ME Reset HRA - host read only access to ME_RST */ +#define ME_RST_HRA 0x00000010 +/* ME Ready HRA - host read only access to ME_RDY */ +#define ME_RDY_HRA 0x00000008 +/* ME Interrupt Generate HRA - host read only access to ME_IG */ +#define ME_IG_HRA 0x00000004 +/* ME Interrupt Status HRA - host read only access to ME_IS */ +#define ME_IS_HRA 0x00000002 +/* ME Interrupt Enable HRA - host read only access to ME_IE */ +#define ME_IE_HRA 0x00000001 + + +/* register bits - H_HPG_CSR */ +#define H_HPG_CSR_PGIHEXR 0x00000001 +#define H_HPG_CSR_PGI 0x00000002 + +#endif /* _MEI_HW_MEI_REGS_H_ */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c new file mode 100644 index 00000000000..6a2d272cea4 --- /dev/null +++ b/drivers/misc/mei/hw-me.c @@ -0,0 +1,885 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
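/*
 * Worked example, not part of the patch: pulling the byte-wide fields out
 * of an H_CSR snapshot with the masks defined above. Standalone userspace
 * code with a hypothetical register value; TOY_* mirrors the H_CB* masks.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_H_CBD	0xFF000000u	/* circular buffer depth */
#define TOY_H_CBWP	0x00FF0000u	/* write pointer */
#define TOY_H_CBRP	0x0000FF00u	/* read pointer */

int main(void)
{
	uint32_t hcsr = 0x20050300u;	/* hypothetical snapshot */

	printf("depth=%u wp=%u rp=%u\n",
	       (hcsr & TOY_H_CBD) >> 24,	/* depth = 32 slots */
	       (hcsr & TOY_H_CBWP) >> 16,	/* write ptr = 5 */
	       (hcsr & TOY_H_CBRP) >> 8);	/* read ptr = 3 */
	return 0;
}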
+ * + */ + +#include <linux/pci.h> + +#include <linux/kthread.h> +#include <linux/interrupt.h> + +#include "mei_dev.h" +#include "hbm.h" + +#include "hw-me.h" +#include "hw-me-regs.h" + +/** + * mei_me_reg_read - Reads 32bit data from the mei device + * + * @dev: the device structure + * @offset: offset from which to read the data + * + * returns register value (u32) + */ +static inline u32 mei_me_reg_read(const struct mei_me_hw *hw, + unsigned long offset) +{ + return ioread32(hw->mem_addr + offset); +} + + +/** + * mei_me_reg_write - Writes 32bit data to the mei device + * + * @dev: the device structure + * @offset: offset from which to write the data + * @value: register value to write (u32) + */ +static inline void mei_me_reg_write(const struct mei_me_hw *hw, + unsigned long offset, u32 value) +{ + iowrite32(value, hw->mem_addr + offset); +} + +/** + * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer + * read window register + * + * @dev: the device structure + * + * returns ME_CB_RW register value (u32) + */ +static u32 mei_me_mecbrw_read(const struct mei_device *dev) +{ + return mei_me_reg_read(to_me_hw(dev), ME_CB_RW); +} +/** + * mei_me_mecsr_read - Reads 32bit data from the ME CSR + * + * @dev: the device structure + * + * returns ME_CSR_HA register value (u32) + */ +static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw) +{ + return mei_me_reg_read(hw, ME_CSR_HA); +} + +/** + * mei_hcsr_read - Reads 32bit data from the host CSR + * + * @dev: the device structure + * + * returns H_CSR register value (u32) + */ +static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) +{ + return mei_me_reg_read(hw, H_CSR); +} + +/** + * mei_hcsr_set - writes H_CSR register to the mei device, + * and ignores the H_IS bit for it is write-one-to-zero. 
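/*
 * Design note, not part of the patch: H_IS is write-one-to-clear, which
 * is why mei_hcsr_set() masks it out before writing: echoing back a read
 * value would silently ack a pending interrupt. The reset path and the
 * quick IRQ handler write the bit back on purpose. Sketch:
 *
 *	u32 hcsr = mei_hcsr_read(hw);
 *
 *	mei_me_reg_write(hw, H_CSR, hcsr);          // acks H_IS if set
 *	mei_me_reg_write(hw, H_CSR, hcsr & ~H_IS);  // leaves H_IS pending
 */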
+ * + * @dev: the device structure + */ +static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) +{ + hcsr &= ~H_IS; + mei_me_reg_write(hw, H_CSR, hcsr); +} + + +/** + * mei_me_hw_config - configure hw dependent settings + * + * @dev: mei device + */ +static void mei_me_hw_config(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(to_me_hw(dev)); + /* Doesn't change in runtime */ + dev->hbuf_depth = (hcsr & H_CBD) >> 24; + + hw->pg_state = MEI_PG_OFF; +} + +/** + * mei_me_pg_state - translate internal pg state + * to the mei power gating state + * + * @hw - me hardware + * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + return hw->pg_state; +} + +/** + * mei_clear_interrupts - clear and stop interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_clear(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + if ((hcsr & H_IS) == H_IS) + mei_me_reg_write(hw, H_CSR, hcsr); +} +/** + * mei_me_intr_enable - enables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_enable(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + hcsr |= H_IE; + mei_hcsr_set(hw, hcsr); +} + +/** + * mei_disable_interrupts - disables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_disable(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + hcsr &= ~H_IE; + mei_hcsr_set(hw, hcsr); +} + +/** + * mei_me_hw_reset_release - release device from the reset + * + * @dev: the device structure + */ +static void mei_me_hw_reset_release(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + + hcsr |= H_IG; + hcsr &= ~H_RST; + mei_hcsr_set(hw, hcsr); + + /* complete this write before we set host ready on another CPU */ + mmiowb(); +} +/** + * mei_me_hw_reset - resets fw via mei csr register. + * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. + */ +static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr = mei_hcsr_read(hw); + + hcsr |= H_RST | H_IG | H_IS; + + if (intr_enable) + hcsr |= H_IE; + else + hcsr &= ~H_IE; + + dev->recvd_hw_ready = false; + mei_me_reg_write(hw, H_CSR, hcsr); + + /* + * Host reads the H_CSR once to ensure that the + * posted write to H_CSR completes. 
+ */ + hcsr = mei_hcsr_read(hw); + + if ((hcsr & H_RST) == 0) + dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr); + + if ((hcsr & H_RDY) == H_RDY) + dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr); + + if (intr_enable == false) + mei_me_hw_reset_release(dev); + + return 0; +} + +/** + * mei_me_host_set_ready - enable device + * + * @dev - mei device + * returns bool + */ + +static void mei_me_host_set_ready(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + hw->host_hw_state = mei_hcsr_read(hw); + hw->host_hw_state |= H_IE | H_IG | H_RDY; + mei_hcsr_set(hw, hw->host_hw_state); +} +/** + * mei_me_host_is_ready - check whether the host has turned ready + * + * @dev - mei device + * returns bool + */ +static bool mei_me_host_is_ready(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + hw->host_hw_state = mei_hcsr_read(hw); + return (hw->host_hw_state & H_RDY) == H_RDY; +} + +/** + * mei_me_hw_is_ready - check whether the me(hw) has turned ready + * + * @dev - mei device + * returns bool + */ +static bool mei_me_hw_is_ready(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + hw->me_hw_state = mei_me_mecsr_read(hw); + return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA; +} + +static int mei_me_hw_ready_wait(struct mei_device *dev) +{ + int err; + + mutex_unlock(&dev->device_lock); + err = wait_event_interruptible_timeout(dev->wait_hw_ready, + dev->recvd_hw_ready, + mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); + mutex_lock(&dev->device_lock); + if (!err && !dev->recvd_hw_ready) { + if (!err) + err = -ETIME; + dev_err(&dev->pdev->dev, + "wait hw ready failed. status = %d\n", err); + return err; + } + + dev->recvd_hw_ready = false; + return 0; +} + +static int mei_me_hw_start(struct mei_device *dev) +{ + int ret = mei_me_hw_ready_wait(dev); + if (ret) + return ret; + dev_dbg(&dev->pdev->dev, "hw is ready\n"); + + mei_me_host_set_ready(dev); + return ret; +} + + +/** + * mei_hbuf_filled_slots - gets number of device filled buffer slots + * + * @dev: the device structure + * + * returns number of filled slots + */ +static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + char read_ptr, write_ptr; + + hw->host_hw_state = mei_hcsr_read(hw); + + read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8); + write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16); + + return (unsigned char) (write_ptr - read_ptr); +} + +/** + * mei_me_hbuf_is_empty - checks if host buffer is empty. + * + * @dev: the device structure + * + * returns true if empty, false - otherwise. + */ +static bool mei_me_hbuf_is_empty(struct mei_device *dev) +{ + return mei_hbuf_filled_slots(dev) == 0; +} + +/** + * mei_me_hbuf_empty_slots - counts write empty slots. + * + * @dev: the device structure + * + * returns -EOVERFLOW if overflow, otherwise empty slots count + */ +static int mei_me_hbuf_empty_slots(struct mei_device *dev) +{ + unsigned char filled_slots, empty_slots; + + filled_slots = mei_hbuf_filled_slots(dev); + empty_slots = dev->hbuf_depth - filled_slots; + + /* check for overflow */ + if (filled_slots > dev->hbuf_depth) + return -EOVERFLOW; + + return empty_slots; +} + +static size_t mei_me_hbuf_max_len(const struct mei_device *dev) +{ + return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); +} + + +/** + * mei_me_write_message - writes a message to mei device. 
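/*
 * Worked example, not part of the patch: the slot arithmetic behind
 * mei_hbuf_filled_slots() above and the mei_data2slots() helper used
 * earlier. The hardware pointers are free-running 8-bit counters, so the
 * byte-wide cast yields the filled count modulo 256; a message occupies
 * its header plus payload rounded up to whole 4-byte slots. Standalone
 * userspace; HDR_SIZE assumes sizeof(struct mei_msg_hdr) == 4.
 */
#include <stdint.h>
#include <assert.h>

#define SLOT_SIZE 4u
#define HDR_SIZE  4u

static unsigned char filled_slots(uint8_t wp, uint8_t rp)
{
	return (unsigned char)(wp - rp);	/* wraps mod 256, like the hw */
}

static uint32_t data2slots(uint32_t len)
{
	return (HDR_SIZE + len + SLOT_SIZE - 1) / SLOT_SIZE;
}

int main(void)
{
	assert(filled_slots(5, 3) == 2);	/* no wrap */
	assert(filled_slots(2, 250) == 8);	/* wrapped pointers */
	assert(data2slots(5) == 3);		/* 4 + 5 bytes -> 3 slots */
	return 0;
}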
+ * + * @dev: the device structure + * @header: mei HECI header of message + * @buf: message payload to be written + * + * This function returns -EIO if write has failed + */ +static int mei_me_write_message(struct mei_device *dev, + struct mei_msg_hdr *header, + unsigned char *buf) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long rem; + unsigned long length = header->length; + u32 *reg_buf = (u32 *)buf; + u32 hcsr; + u32 dw_cnt; + int i; + int empty_slots; + + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + + empty_slots = mei_hbuf_empty_slots(dev); + dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots); + + dw_cnt = mei_data2slots(length); + if (empty_slots < 0 || dw_cnt > empty_slots) + return -EMSGSIZE; + + mei_me_reg_write(hw, H_CB_WW, *((u32 *) header)); + + for (i = 0; i < length / 4; i++) + mei_me_reg_write(hw, H_CB_WW, reg_buf[i]); + + rem = length & 0x3; + if (rem > 0) { + u32 reg = 0; + memcpy(&reg, &buf[length - rem], rem); + mei_me_reg_write(hw, H_CB_WW, reg); + } + + hcsr = mei_hcsr_read(hw) | H_IG; + mei_hcsr_set(hw, hcsr); + if (!mei_me_hw_is_ready(dev)) + return -EIO; + + return 0; +} + +/** + * mei_me_count_full_read_slots - counts read full slots. + * + * @dev: the device structure + * + * returns -EOVERFLOW if overflow, otherwise filled slots count + */ +static int mei_me_count_full_read_slots(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + char read_ptr, write_ptr; + unsigned char buffer_depth, filled_slots; + + hw->me_hw_state = mei_me_mecsr_read(hw); + buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24); + read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8); + write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16); + filled_slots = (unsigned char) (write_ptr - read_ptr); + + /* check for overflow */ + if (filled_slots > buffer_depth) + return -EOVERFLOW; + + dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots); + return (int)filled_slots; +} + +/** + * mei_me_read_slots - reads a message from mei device. + * + * @dev: the device structure + * @buffer: output message buffer + * @buffer_length: message size to read + */ +static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, + unsigned long buffer_length) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 *reg_buf = (u32 *)buffer; + u32 hcsr; + + for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32)) + *reg_buf++ = mei_me_mecbrw_read(dev); + + if (buffer_length > 0) { + u32 reg = mei_me_mecbrw_read(dev); + memcpy(reg_buf, &reg, buffer_length); + } + + hcsr = mei_hcsr_read(hw) | H_IG; + mei_hcsr_set(hw, hcsr); + return 0; +} + +/** + * mei_me_pg_enter - write pg enter register to mei device. + * + * @dev: the device structure + */ +static void mei_me_pg_enter(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg = mei_me_reg_read(hw, H_HPG_CSR); + reg |= H_HPG_CSR_PGI; + mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_exit - write pg exit register to mei device.
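/*
 * Worked example, not part of the patch: the remainder handling in
 * mei_me_write_message() above. Payloads go through a 32-bit write
 * window, so a 1-3 byte tail is zero-padded into one final dword.
 * Standalone userspace; push_dword() stands in for the H_CB_WW write.
 */
#include <stdint.h>
#include <string.h>

static void push_dword(uint32_t dw) { (void)dw; /* H_CB_WW stand-in */ }

static void write_payload(const unsigned char *buf, unsigned long length)
{
	unsigned long i, rem = length & 0x3;
	uint32_t dw;

	for (i = 0; i + 4 <= length; i += 4) {
		memcpy(&dw, buf + i, 4);		/* whole slots */
		push_dword(dw);
	}
	if (rem) {
		dw = 0;
		memcpy(&dw, buf + length - rem, rem);	/* zero-padded tail */
		push_dword(dw);
	}
}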
+/**
+ * mei_me_pg_exit - write pg exit register to mei device.
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_exit(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+
+	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
+
+	reg |= H_HPG_CSR_PGIHEXR;
+	mei_me_reg_write(hw, H_HPG_CSR, reg);
+}
+
+/**
+ * mei_me_pg_set_sync - perform pg entry procedure
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, an error code otherwise
+ */
+int mei_me_pg_set_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+	int ret;
+
+	dev->pg_event = MEI_PG_EVENT_WAIT;
+
+	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
+	if (ret)
+		return ret;
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+	mutex_lock(&dev->device_lock);
+
+	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
+		mei_me_pg_enter(dev);
+		ret = 0;
+	} else {
+		ret = -ETIME;
+	}
+
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	hw->pg_state = MEI_PG_ON;
+
+	return ret;
+}
+
+/**
+ * mei_me_pg_unset_sync - perform pg exit procedure
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, an error code otherwise
+ */
+int mei_me_pg_unset_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+	int ret;
+
+	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
+		goto reply;
+
+	dev->pg_event = MEI_PG_EVENT_WAIT;
+
+	mei_me_pg_exit(dev);
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+	mutex_lock(&dev->device_lock);
+
+reply:
+	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
+		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+	else
+		ret = -ETIME;
+
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	hw->pg_state = MEI_PG_OFF;
+
+	return ret;
+}
+
+/**
+ * mei_me_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * returns: true if pg is supported, false otherwise
+ */
+static bool mei_me_pg_is_enabled(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
+
+	if ((reg & ME_PGIC_HRA) == 0)
+		goto notsupported;
+
+	if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
+		goto notsupported;
+
+	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
+	    dev->version.minor_version < HBM_MINOR_VERSION_PGI)
+		goto notsupported;
+
+	return true;
+
+notsupported:
+	dev_dbg(&dev->pdev->dev, "pg: not supported: PGIC_HRA = %d hbm version %d.%d ?= %d.%d\n",
+		!!(reg & ME_PGIC_HRA),
+		dev->version.major_version,
+		dev->version.minor_version,
+		HBM_MAJOR_VERSION_PGI,
+		HBM_MINOR_VERSION_PGI);
+
+	return false;
+}
+
+/**
+ * mei_me_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
+{
+	struct mei_device *dev = (struct mei_device *) dev_id;
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 csr_reg = mei_hcsr_read(hw);
+
+	if ((csr_reg & H_IS) != H_IS)
+		return IRQ_NONE;
+
+	/* clear H_IS bit in H_CSR */
+	mei_me_reg_write(hw, H_CSR, csr_reg);
+
+	return IRQ_WAKE_THREAD;
+}
+
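
Editor's aside: mei_me_pg_is_enabled() above gates power gating on both a hardware capability bit and the negotiated HBM protocol version. A minimal standalone sketch of the version comparison, reusing the constants this patch defines in hw.h:

    #include <stdbool.h>
    #include <stdio.h>

    #define HBM_MAJOR_VERSION_PGI 1
    #define HBM_MINOR_VERSION_PGI 1

    /* PG needs at least the HBM version that introduced the PGI commands */
    static bool hbm_version_supports_pg(int major, int minor)
    {
    	if (major < HBM_MAJOR_VERSION_PGI)
    		return false;
    	if (major == HBM_MAJOR_VERSION_PGI && minor < HBM_MINOR_VERSION_PGI)
    		return false;
    	return true;
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       hbm_version_supports_pg(1, 0),	/* 0: too old */
    	       hbm_version_supports_pg(1, 1),	/* 1: exact match */
    	       hbm_version_supports_pg(2, 0));	/* 1: newer major */
    	return 0;
    }
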
+/**
+ * mei_me_irq_thread_handler - function called after ISR to handle the
+ *	interrupt processing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+{
+	struct mei_device *dev = (struct mei_device *) dev_id;
+	struct mei_cl_cb complete_list;
+	s32 slots;
+	int rets = 0;
+
+	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
+	/* initialize our complete list */
+	mutex_lock(&dev->device_lock);
+	mei_io_list_init(&complete_list);
+
+	/* Ack the interrupt here
+	 * In case of MSI we don't go through the quick handler */
+	if (pci_dev_msi_enabled(dev->pdev))
+		mei_clear_interrupts(dev);
+
+	/* check if ME wants a reset */
+	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
+		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
+		schedule_work(&dev->reset_work);
+		goto end;
+	}
+
+	/* check if we need to start the dev */
+	if (!mei_host_is_ready(dev)) {
+		if (mei_hw_is_ready(dev)) {
+			mei_me_hw_reset_release(dev);
+			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+
+			dev->recvd_hw_ready = true;
+			wake_up_interruptible(&dev->wait_hw_ready);
+		} else {
+			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
+		}
+		goto end;
+	}
+	/* check slots available for reading */
+	slots = mei_count_full_read_slots(dev);
+	while (slots > 0) {
+		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
+		rets = mei_irq_read_handler(dev, &complete_list, &slots);
+		/* There is a race between ME write and interrupt delivery:
+		 * Not all data is always available immediately after the
+		 * interrupt, so try to read again on the next interrupt.
+		 */
+		if (rets == -ENODATA)
+			break;
+
+		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n",
+				rets);
+			schedule_work(&dev->reset_work);
+			goto end;
+		}
+	}
+
+	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+	/*
+	 * During the PG handshake the only allowed write is the reply to
+	 * the PG exit message, so block calling the write function
+	 * if the pg state is not idle
+	 */
+	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
+		rets = mei_irq_write_handler(dev, &complete_list);
+		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+	}
+
+	mei_irq_compl_handler(dev, &complete_list);
+
+end:
+	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
+	mutex_unlock(&dev->device_lock);
+	return IRQ_HANDLED;
+}
+
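
Editor's aside: the quick/thread handler pair above follows the kernel's threaded-IRQ split; the quick handler only acknowledges the interrupt in hard-irq context and returns IRQ_WAKE_THREAD, and the thread handler does the real work. A hedged sketch of how such a pair is typically registered (the actual registration lives in the driver's PCI probe code, not in this file; example_setup_irq() is hypothetical):

    #include <linux/interrupt.h>
    #include <linux/pci.h>
    #include "hw-me.h"

    static int example_setup_irq(struct pci_dev *pdev, void *dev)
    {
    	/* quick handler runs in hard-irq context, thread handler in a
    	 * kernel thread that may sleep and take mutexes */
    	return request_threaded_irq(pdev->irq,
    				    mei_me_irq_quick_handler,
    				    mei_me_irq_thread_handler,
    				    IRQF_SHARED, KBUILD_MODNAME, dev);
    }
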
+/**
+ * mei_me_fw_status - retrieve fw status from the pci config space
+ *
+ * @dev: the device structure
+ * @fw_status: fw status registers storage
+ *
+ * returns 0 on success, an error code otherwise
+ */
+static int mei_me_fw_status(struct mei_device *dev,
+			    struct mei_fw_status *fw_status)
+{
+	const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
+	int i;
+
+	if (!fw_status)
+		return -EINVAL;
+
+	switch (dev->pdev->device) {
+	case MEI_DEV_ID_IBXPK_1:
+	case MEI_DEV_ID_IBXPK_2:
+	case MEI_DEV_ID_CPT_1:
+	case MEI_DEV_ID_PBG_1:
+	case MEI_DEV_ID_PPT_1:
+	case MEI_DEV_ID_PPT_2:
+	case MEI_DEV_ID_PPT_3:
+	case MEI_DEV_ID_LPT_H:
+	case MEI_DEV_ID_LPT_W:
+	case MEI_DEV_ID_LPT_LP:
+	case MEI_DEV_ID_LPT_HR:
+	case MEI_DEV_ID_WPT_LP:
+		fw_status->count = 2;
+		break;
+	case MEI_DEV_ID_ICH10_1:
+	case MEI_DEV_ID_ICH10_2:
+	case MEI_DEV_ID_ICH10_3:
+	case MEI_DEV_ID_ICH10_4:
+		fw_status->count = 1;
+		break;
+	default:
+		fw_status->count = 0;
+		break;
+	}
+
+	for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
+		int ret;
+		ret = pci_read_config_dword(dev->pdev,
+			pci_cfg_reg[i], &fw_status->status[i]);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static const struct mei_hw_ops mei_me_hw_ops = {
+
+	.pg_state = mei_me_pg_state,
+
+	.fw_status = mei_me_fw_status,
+	.host_is_ready = mei_me_host_is_ready,
+
+	.hw_is_ready = mei_me_hw_is_ready,
+	.hw_reset = mei_me_hw_reset,
+	.hw_config = mei_me_hw_config,
+	.hw_start = mei_me_hw_start,
+
+	.pg_is_enabled = mei_me_pg_is_enabled,
+
+	.intr_clear = mei_me_intr_clear,
+	.intr_enable = mei_me_intr_enable,
+	.intr_disable = mei_me_intr_disable,
+
+	.hbuf_free_slots = mei_me_hbuf_empty_slots,
+	.hbuf_is_ready = mei_me_hbuf_is_empty,
+	.hbuf_max_len = mei_me_hbuf_max_len,
+
+	.write = mei_me_write_message,
+
+	.rdbuf_full_slots = mei_me_count_full_read_slots,
+	.read_hdr = mei_me_mecbrw_read,
+	.read = mei_me_read_slots
+};
+
+static bool mei_me_fw_type_nm(struct pci_dev *pdev)
+{
+	u32 reg;
+	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
+	return (reg & 0x600) == 0x200;
+}
+
+#define MEI_CFG_FW_NM                           \
+	.quirk_probe = mei_me_fw_type_nm
+
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+	u32 reg;
+	/* Read ME FW Status register to check for SPS Firmware */
+	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+	/* if bits [19:16] = 15, running SPS Firmware */
+	return (reg & 0xf0000) == 0xf0000;
+}
+
+#define MEI_CFG_FW_SPS                          \
+	.quirk_probe = mei_me_fw_type_sps
+
+
+#define MEI_CFG_LEGACY_HFS                      \
+	.fw_status.count = 0
+
+#define MEI_CFG_ICH_HFS                        \
+	.fw_status.count = 1,                   \
+	.fw_status.status[0] = PCI_CFG_HFS_1
+
+#define MEI_CFG_PCH_HFS                         \
+	.fw_status.count = 2,                   \
+	.fw_status.status[0] = PCI_CFG_HFS_1,   \
+	.fw_status.status[1] = PCI_CFG_HFS_2
+
+
+/* ICH Legacy devices */
+const struct mei_cfg mei_me_legacy_cfg = {
+	MEI_CFG_LEGACY_HFS,
+};
+
+/* ICH devices */
+const struct mei_cfg mei_me_ich_cfg = {
+	MEI_CFG_ICH_HFS,
+};
+
+/* PCH devices */
+const struct mei_cfg mei_me_pch_cfg = {
+	MEI_CFG_PCH_HFS,
+};
+
+
+/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
+const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
+	MEI_CFG_PCH_HFS,
+	MEI_CFG_FW_NM,
+};
+
+/* PCH Lynx Point with quirk for SPS Firmware exclusion */
+const struct mei_cfg mei_me_lpt_cfg = {
+	MEI_CFG_PCH_HFS,
+	MEI_CFG_FW_SPS,
+};
+
+/**
+ * mei_me_dev_init - allocates and initializes the mei device structure
+ *
+ * @pdev: The pci device structure
+ * @cfg: per device generation config
+ *
+ * returns The mei_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+				   const struct mei_cfg *cfg)
+{
+	struct mei_device *dev;
+
+	dev = kzalloc(sizeof(struct mei_device) +
+			 sizeof(struct mei_me_hw), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	mei_device_init(dev, cfg);
+
+	dev->ops = &mei_me_hw_ops;
+
+	dev->pdev = pdev;
+	return dev;
+}
+
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
new file mode 100644
index 00000000000..12b0f4bbe1f
--- /dev/null
+++ b/drivers/misc/mei/hw-me.h
@@ -0,0 +1,56 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for + * more details. + * + */ + + + +#ifndef _MEI_INTERFACE_H_ +#define _MEI_INTERFACE_H_ + +#include <linux/mei.h> +#include <linux/irqreturn.h> +#include "mei_dev.h" +#include "client.h" + +#define MEI_ME_RPM_TIMEOUT 500 /* ms */ + +struct mei_me_hw { + void __iomem *mem_addr; + /* + * hw states of host and fw(ME) + */ + u32 host_hw_state; + u32 me_hw_state; + enum mei_pg_state pg_state; +}; + +#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) + +extern const struct mei_cfg mei_me_legacy_cfg; +extern const struct mei_cfg mei_me_ich_cfg; +extern const struct mei_cfg mei_me_pch_cfg; +extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg; +extern const struct mei_cfg mei_me_lpt_cfg; + +struct mei_device *mei_me_dev_init(struct pci_dev *pdev, + const struct mei_cfg *cfg); + +int mei_me_pg_set_sync(struct mei_device *dev); +int mei_me_pg_unset_sync(struct mei_device *dev); + +irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); + +#endif /* _MEI_INTERFACE_H_ */ diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h new file mode 100644 index 00000000000..f19229c4e65 --- /dev/null +++ b/drivers/misc/mei/hw-txe-regs.h @@ -0,0 +1,294 @@ +/****************************************************************************** + * Intel Management Engine Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _MEI_HW_TXE_REGS_H_ +#define _MEI_HW_TXE_REGS_H_ + +#include "hw.h" + +#define SEC_ALIVENESS_TIMER_TIMEOUT (5 * MSEC_PER_SEC) +#define SEC_ALIVENESS_WAIT_TIMEOUT (1 * MSEC_PER_SEC) +#define SEC_RESET_WAIT_TIMEOUT (1 * MSEC_PER_SEC) +#define SEC_READY_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define RESET_CANCEL_WAIT_TIMEOUT (1 * MSEC_PER_SEC) + +enum { + SEC_BAR, + BRIDGE_BAR, + + NUM_OF_MEM_BARS +}; + +/* SeC FW Status Register + * + * FW uses this register in order to report its status to host. + * This register resides in PCI-E config space. + */ +#define PCI_CFG_TXE_FW_STS0 0x40 +# define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK 0x0000000F +# define PCI_CFG_TXE_FW_STS0_OP_ST_MSK 0x000001C0 +# define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200 +# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000 +# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000 +# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000 +#define PCI_CFG_TXE_FW_STS1 0x48 + +#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */ + +/* IPC Input Doorbell Register */ +#define SEC_IPC_INPUT_DOORBELL_REG (0x0000 + IPC_BASE_ADDR) + +/* IPC Input Status Register + * This register indicates whether or not processing of + * the most recent command has been completed by the SEC + * New commands and payloads should not be written by the Host + * until this indicates that the previous command has been processed. + */ +#define SEC_IPC_INPUT_STATUS_REG (0x0008 + IPC_BASE_ADDR) +# define SEC_IPC_INPUT_STATUS_RDY BIT(0) + +/* IPC Host Interrupt Status Register */ +#define SEC_IPC_HOST_INT_STATUS_REG (0x0010 + IPC_BASE_ADDR) +#define SEC_IPC_HOST_INT_STATUS_OUT_DB BIT(0) +#define SEC_IPC_HOST_INT_STATUS_IN_RDY BIT(1) +#define SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD BIT(5) +#define SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS BIT(17) +#define SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR BIT(18) +#define SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR BIT(19) +#define SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW BIT(21) + +/* Convenient mask for pending interrupts */ +#define SEC_IPC_HOST_INT_STATUS_PENDING \ + (SEC_IPC_HOST_INT_STATUS_OUT_DB| \ + SEC_IPC_HOST_INT_STATUS_IN_RDY) + +/* IPC Host Interrupt Mask Register */ +#define SEC_IPC_HOST_INT_MASK_REG (0x0014 + IPC_BASE_ADDR) + +# define SEC_IPC_HOST_INT_MASK_OUT_DB BIT(0) /* Output Doorbell Int Mask */ +# define SEC_IPC_HOST_INT_MASK_IN_RDY BIT(1) /* Input Ready Int Mask */ + +/* IPC Input Payload RAM */ +#define SEC_IPC_INPUT_PAYLOAD_REG (0x0100 + IPC_BASE_ADDR) +/* IPC Shared Payload RAM */ +#define IPC_SHARED_PAYLOAD_REG (0x0200 + IPC_BASE_ADDR) + +/* SeC Address Translation Table Entry 2 - Ctrl + * + * This register resides also in SeC's PCI-E Memory space. 
+ */ +#define SATT2_CTRL_REG 0x1040 +# define SATT2_CTRL_VALID_MSK BIT(0) +# define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8 +# define SATT2_CTRL_BRIDGE_HOST_EN_MSK BIT(12) + +/* SATT Table Entry 2 SAP Base Address Register */ +#define SATT2_SAP_BA_REG 0x1044 +/* SATT Table Entry 2 SAP Size Register. */ +#define SATT2_SAP_SIZE_REG 0x1048 + /* SATT Table Entry 2 SAP Bridge Address - LSB Register */ +#define SATT2_BRG_BA_LSB_REG 0x104C + +/* Host High-level Interrupt Status Register */ +#define HHISR_REG 0x2020 +/* Host High-level Interrupt Enable Register + * + * Resides in PCI memory space. This is the top hierarchy for + * interrupts from SeC to host, aggregating both interrupts that + * arrive through HICR registers as well as interrupts + * that arrive via IPC. + */ +#define HHIER_REG 0x2024 +#define IPC_HHIER_SEC BIT(0) +#define IPC_HHIER_BRIDGE BIT(1) +#define IPC_HHIER_MSK (IPC_HHIER_SEC | IPC_HHIER_BRIDGE) + +/* Host High-level Interrupt Mask Register. + * + * Resides in PCI memory space. + * This is the top hierarchy for masking interrupts from SeC to host. + */ +#define HHIMR_REG 0x2028 +#define IPC_HHIMR_SEC BIT(0) +#define IPC_HHIMR_BRIDGE BIT(1) + +/* Host High-level IRQ Status Register */ +#define HHIRQSR_REG 0x202C + +/* Host Interrupt Cause Register 0 - SeC IPC Readiness + * + * This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * This register is used by SeC's IPC driver in order + * to synchronize with host about IPC interface state. + */ +#define HICR_SEC_IPC_READINESS_REG 0x2040 +#define HICR_SEC_IPC_READINESS_HOST_RDY BIT(0) +#define HICR_SEC_IPC_READINESS_SEC_RDY BIT(1) +#define HICR_SEC_IPC_READINESS_SYS_RDY \ + (HICR_SEC_IPC_READINESS_HOST_RDY | \ + HICR_SEC_IPC_READINESS_SEC_RDY) +#define HICR_SEC_IPC_READINESS_RDY_CLR BIT(2) + +/* Host Interrupt Cause Register 1 - Aliveness Response */ +/* This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * The register may be used by SeC to ACK a host request for aliveness. + */ +#define HICR_HOST_ALIVENESS_RESP_REG 0x2044 +#define HICR_HOST_ALIVENESS_RESP_ACK BIT(0) + +/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */ +#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048 + +/* Host Interrupt Status Register. + * + * Resides in PCI memory space. + * This is the main register involved in generating interrupts + * from SeC to host via HICRs. + * The interrupt generation rules are as follows: + * An interrupt will be generated whenever for any i, + * there is a transition from a state where at least one of + * the following conditions did not hold, to a state where + * ALL the following conditions hold: + * A) HISR.INT[i]_STS == 1. + * B) HIER.INT[i]_EN == 1. + */ +#define HISR_REG 0x2060 +#define HISR_INT_0_STS BIT(0) +#define HISR_INT_1_STS BIT(1) +#define HISR_INT_2_STS BIT(2) +#define HISR_INT_3_STS BIT(3) +#define HISR_INT_4_STS BIT(4) +#define HISR_INT_5_STS BIT(5) +#define HISR_INT_6_STS BIT(6) +#define HISR_INT_7_STS BIT(7) +#define HISR_INT_STS_MSK \ + (HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS) + +/* Host Interrupt Enable Register. Resides in PCI memory space. 
 */
+#define HIER_REG                       0x2064
+#define HIER_INT_0_EN BIT(0)
+#define HIER_INT_1_EN BIT(1)
+#define HIER_INT_2_EN BIT(2)
+#define HIER_INT_3_EN BIT(3)
+#define HIER_INT_4_EN BIT(4)
+#define HIER_INT_5_EN BIT(5)
+#define HIER_INT_6_EN BIT(6)
+#define HIER_INT_7_EN BIT(7)
+
+#define HIER_INT_EN_MSK \
+	(HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN)
+
+
+/* SEC Memory Space IPC output payload.
+ *
+ * This register is part of the output payload which SEC provides to host.
+ */
+#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG  0x20C0
+
+/* SeC Interrupt Cause Register - Host Aliveness Request
+ * This register is both an ICR to SeC and it is also exposed
+ * in the host-visible PCI memory space.
+ * The register is used by host to request SeC aliveness.
+ */
+#define SICR_HOST_ALIVENESS_REQ_REG    0x214C
+#define SICR_HOST_ALIVENESS_REQ_REQUESTED BIT(0)
+
+
+/* SeC Interrupt Cause Register - Host IPC Readiness
+ *
+ * This register is both an ICR to SeC and it is also exposed
+ * in the host-visible PCI memory space.
+ * This register is used by the host's SeC driver in order
+ * to synchronize with SeC about IPC interface state.
+ */
+#define SICR_HOST_IPC_READINESS_REQ_REG 0x2150
+
+
+#define SICR_HOST_IPC_READINESS_HOST_RDY BIT(0)
+#define SICR_HOST_IPC_READINESS_SEC_RDY  BIT(1)
+#define SICR_HOST_IPC_READINESS_SYS_RDY \
+	(SICR_HOST_IPC_READINESS_HOST_RDY | \
+	 SICR_HOST_IPC_READINESS_SEC_RDY)
+#define SICR_HOST_IPC_READINESS_RDY_CLR  BIT(2)
+
+/* SeC Interrupt Cause Register - SeC IPC Output Status
+ *
+ * This register indicates whether or not processing of the most recent
+ * command has been completed by the Host.
+ * New commands and payloads should not be written by SeC until this
+ * register indicates that the previous command has been processed.
+ */
+#define SICR_SEC_IPC_OUTPUT_STATUS_REG 0x2154
+#  define SEC_IPC_OUTPUT_STATUS_RDY BIT(0)
+
+
+
+/* MEI IPC Message payload size 64 bytes */
+#define PAYLOAD_SIZE        64
+
+/* MAX size for SATT range 32MB */
+#define SATT_RANGE_MAX     (32 << 20)
+
+
+#endif /* _MEI_HW_TXE_REGS_H_ */
+
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
new file mode 100644
index 00000000000..93273783dec
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.c
@@ -0,0 +1,1190 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/irqreturn.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-txe.h"
+#include "client.h"
+#include "hbm.h"
+
+/**
+ * mei_txe_reg_read - Reads 32bit data from the device
+ *
+ * @base_addr: registers base address
+ * @offset: register offset
+ *
+ */
+static inline u32 mei_txe_reg_read(void __iomem *base_addr,
+					unsigned long offset)
+{
+	return ioread32(base_addr + offset);
+}
+
+/**
+ * mei_txe_reg_write - Writes 32bit data to the device
+ *
+ * @base_addr: registers base address
+ * @offset: register offset
+ * @value: the value to write
+ */
+static inline void mei_txe_reg_write(void __iomem *base_addr,
+				unsigned long offset, u32 value)
+{
+	iowrite32(value, base_addr + offset);
+}
+
+/**
+ * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ *
+ * Reads 32bit data from the SeC BAR without checking aliveness
+ */
+static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
+				unsigned long offset)
+{
+	return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
+}
+
+/**
+ * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ *
+ * Reads 32bit data from the SeC BAR and warns loudly if aliveness
+ * is not asserted
+ */
+static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
+				unsigned long offset)
+{
+	WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
+	return mei_txe_sec_reg_read_silent(hw, offset);
+}
+
+/**
+ * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ * @value: value to write
+ *
+ * Writes 32bit data to the SeC BAR without checking aliveness
+ */
+static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
+				unsigned long offset, u32 value)
+{
+	mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
+}
+
+/**
+ * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: register offset
+ * @value: value to write
+ *
+ * Writes 32bit data to the SeC BAR and warns loudly if aliveness
+ * is not asserted
+ */
+static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
+				unsigned long offset, u32 value)
+{
+	WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
+	mei_txe_sec_reg_write_silent(hw, offset, value);
+}
+
+/**
+ * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: offset from which to read the data
+ *
+ */
+static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
+				unsigned long offset)
+{
+	return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
+}
+
+/**
+ * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
+ *
+ * @hw: the txe hardware structure
+ * @offset: offset to which to write the data
+ * @value: the value to write
+ */
+static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
+				unsigned long offset, u32 value)
+{
+	mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
+}
+
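
Editor's aside: the helpers above split register access between two PCI BARs, indexed by the SEC_BAR/BRIDGE_BAR enum from hw-txe-regs.h. A hedged sketch of how hw->mem_addr[] might be filled in the driver's PCI probe path; the mapping code is not part of this file, and the assumption that the enum indices equal the PCI BAR numbers is mine:

    #include <linux/bitops.h>
    #include <linux/pci.h>
    #include "hw-txe.h"

    /* hypothetical helper, not part of the patch */
    static int example_map_bars(struct pci_dev *pdev, struct mei_txe_hw *hw)
    {
    	int ret;

    	/* managed mapping of both BARs at once */
    	ret = pcim_iomap_regions(pdev, BIT(SEC_BAR) | BIT(BRIDGE_BAR),
    				 KBUILD_MODNAME);
    	if (ret)
    		return ret;

    	hw->mem_addr[SEC_BAR] = pcim_iomap_table(pdev)[SEC_BAR];
    	hw->mem_addr[BRIDGE_BAR] = pcim_iomap_table(pdev)[BRIDGE_BAR];
    	return 0;
    }
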
+/**
+ * mei_txe_aliveness_set - request for aliveness change
+ *
+ * @dev: the device structure
+ * @req: requested aliveness value
+ *
+ * Request an aliveness change and return true if the change is
+ * really needed or false if aliveness is already in the requested state.
+ * Requires device lock to be held.
+ */
+static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
+{
+
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	bool do_req = hw->aliveness != req;
+
+	dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n",
+				hw->aliveness, req);
+	if (do_req) {
+		dev->pg_event = MEI_PG_EVENT_WAIT;
+		mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
+	}
+	return do_req;
+}
+
+
+/**
+ * mei_txe_aliveness_req_get - get aliveness requested register value
+ *
+ * @dev: the device structure
+ *
+ * Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit from
+ * the SICR_HOST_ALIVENESS_REQ register value
+ */
+static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 reg;
+	reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
+	return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
+}
+
+/**
+ * mei_txe_aliveness_get - get aliveness response register value
+ * @dev: the device structure
+ *
+ * Extract the HICR_HOST_ALIVENESS_RESP_ACK bit
+ * from the HICR_HOST_ALIVENESS_RESP register value
+ */
+static u32 mei_txe_aliveness_get(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 reg;
+	reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
+	return reg & HICR_HOST_ALIVENESS_RESP_ACK;
+}
+
+/**
+ * mei_txe_aliveness_poll - waits for aliveness to settle
+ *
+ * @dev: the device structure
+ * @expected: expected aliveness value
+ *
+ * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
+ * returns > 0 if the expected value was received, -ETIME otherwise
+ */
+static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	int t = 0;
+
+	do {
+		hw->aliveness = mei_txe_aliveness_get(dev);
+		if (hw->aliveness == expected) {
+			dev->pg_event = MEI_PG_EVENT_IDLE;
+			dev_dbg(&dev->pdev->dev,
+				"aliveness settled after %d msecs\n", t);
+			return t;
+		}
+		mutex_unlock(&dev->device_lock);
+		msleep(MSEC_PER_SEC / 5);
+		mutex_lock(&dev->device_lock);
+		t += MSEC_PER_SEC / 5;
+	} while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	dev_err(&dev->pdev->dev, "aliveness timed out\n");
+	return -ETIME;
+}
+
+/**
+ * mei_txe_aliveness_wait - waits for aliveness to settle
+ *
+ * @dev: the device structure
+ * @expected: expected aliveness value
+ *
+ * Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
+ * returns 0 on success and < 0 otherwise
+ */
+static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	const unsigned long timeout =
+			msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
+	long err;
+	int ret;
+
+	hw->aliveness = mei_txe_aliveness_get(dev);
+	if (hw->aliveness == expected)
+		return 0;
+
+	mutex_unlock(&dev->device_lock);
+	err = wait_event_timeout(hw->wait_aliveness_resp,
+			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+	mutex_lock(&dev->device_lock);
+
+	hw->aliveness = mei_txe_aliveness_get(dev);
+	ret = hw->aliveness == expected ? 0 : -ETIME;
+
+	if (ret)
+		dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
+			err, hw->aliveness, dev->pg_event);
+	else
+		dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
+			jiffies_to_msecs(timeout - err),
+			hw->aliveness, dev->pg_event);
+
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	return ret;
+}
+
+/**
+ * mei_txe_aliveness_set_sync - sets and waits for aliveness to settle
+ *
+ * @dev: the device structure
+ * @req: requested aliveness value
+ *
+ * returns 0 on success and < 0 otherwise
+ */
+int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
+{
+	if (mei_txe_aliveness_set(dev, req))
+		return mei_txe_aliveness_wait(dev, req);
+	return 0;
+}
+
+/**
+ * mei_txe_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * returns: true if pg is supported, false otherwise
+ */
+static bool mei_txe_pg_is_enabled(struct mei_device *dev)
+{
+	return true;
+}
+
+/**
+ * mei_txe_pg_state - translate aliveness register value
+ *	to the mei power gating state
+ *
+ * @dev: the device structure
+ *
+ * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
+ */
+static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
+}
+
+/**
+ * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 hintmsk;
+	/* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
+	hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
+	hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
+	mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
+}
+
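
Editor's aside: callers use mei_txe_aliveness_set_sync() to bracket IPC traffic, keeping SeC awake for the duration and then letting it power gate again. A hedged usage sketch; example_txe_transaction() is hypothetical and error handling is minimal:

    #include "mei_dev.h"
    #include "hw-txe.h"

    /* hypothetical caller, not part of the patch */
    static int example_txe_transaction(struct mei_device *dev)
    {
    	int ret;

    	ret = mei_txe_aliveness_set_sync(dev, 1);	/* wake SeC */
    	if (ret)
    		return ret;

    	/* ... IPC reads/writes would go here ... */

    	return mei_txe_aliveness_set_sync(dev, 0);	/* allow PG again */
    }
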
+/**
+ * mei_txe_input_doorbell_set - sets bit 0 in
+ *	SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL
+ *
+ * @hw: the txe hardware structure
+ */
+static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
+{
+	/* Clear the interrupt cause */
+	clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
+	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
+}
+
+/**
+ * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
+ *
+ * @hw: the txe hardware structure
+ */
+static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
+{
+	mei_txe_br_reg_write(hw,
+			SICR_SEC_IPC_OUTPUT_STATUS_REG,
+			SEC_IPC_OUTPUT_STATUS_RDY);
+}
+
+/**
+ * mei_txe_is_input_ready - check if TXE is ready for receiving data
+ *
+ * @dev: the device structure
+ */
+static bool mei_txe_is_input_ready(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 status;
+	status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
+	return !!(SEC_IPC_INPUT_STATUS_RDY & status);
+}
+
+/**
+ * mei_txe_intr_clear - clear all interrupts
+ *
+ * @dev: the device structure
+ */
+static inline void mei_txe_intr_clear(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
+		SEC_IPC_HOST_INT_STATUS_PENDING);
+	mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
+	mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
+}
+
+/**
+ * mei_txe_intr_disable - disable all interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_intr_disable(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	mei_txe_br_reg_write(hw, HHIER_REG, 0);
+	mei_txe_br_reg_write(hw, HIER_REG, 0);
+}
+
+/**
+ * mei_txe_intr_enable - enable all interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_intr_enable(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
+	mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
+}
+
+/**
+ * mei_txe_pending_interrupts - check if there are pending interrupts
+ *
+ * @dev: the device structure
+ *
+ * Checks if there are pending interrupts;
+ * only Aliveness, Readiness, Input Ready, and Output Doorbell are relevant
+ */
+static bool mei_txe_pending_interrupts(struct mei_device *dev)
+{
+
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
+				      TXE_INTR_ALIVENESS |
+				      TXE_INTR_IN_READY  |
+				      TXE_INTR_OUT_DB));
+
+	if (ret) {
+		dev_dbg(&dev->pdev->dev,
+			"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
+			!!(hw->intr_cause & TXE_INTR_IN_READY),
+			!!(hw->intr_cause & TXE_INTR_READINESS),
+			!!(hw->intr_cause & TXE_INTR_ALIVENESS),
+			!!(hw->intr_cause & TXE_INTR_OUT_DB));
+	}
+	return ret;
+}
+
+/**
+ * mei_txe_input_payload_write - write a dword to the host buffer
+ *	at offset idx
+ *
+ * @dev: the device structure
+ * @idx: index in the host buffer
+ * @value: value to write
+ */
+static void mei_txe_input_payload_write(struct mei_device *dev,
+			unsigned long idx, u32 value)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
+			(idx * sizeof(u32)), value);
+}
+
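
Editor's aside: the TXE code flattens several interrupt sources into one `intr_cause` bit field: the ISR ORs hardware status into it, and the interrupt thread consumes causes one at a time with test_and_clear_bit(). A simplified, hypothetical sketch of that producer/consumer shape (the real producer runs in the quick handler with further interrupts masked):

    #include <linux/bitops.h>
    #include "hw-txe.h"

    /* producer side: accumulate causes; several interrupts may arrive
     * before the thread gets to run */
    static void example_isr_side(struct mei_txe_hw *hw, unsigned long hw_status)
    {
    	hw->intr_cause |= hw_status;
    }

    /* consumer side: atomically claim exactly one cause bit */
    static bool example_thread_handles_output(struct mei_txe_hw *hw)
    {
    	return test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
    }
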
+/**
+ * mei_txe_out_data_read - read dword from the device buffer
+ *	at offset idx
+ *
+ * @dev: the device structure
+ * @idx: index in the device buffer
+ *
+ * returns register value at index
+ */
+static u32 mei_txe_out_data_read(const struct mei_device *dev,
+					unsigned long idx)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	return mei_txe_br_reg_read(hw,
+		BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
+}
+
+/* Readiness */
+
+/**
+ * mei_txe_readiness_set_host_rdy - set host readiness bit
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	mei_txe_br_reg_write(hw,
+		SICR_HOST_IPC_READINESS_REQ_REG,
+		SICR_HOST_IPC_READINESS_HOST_RDY);
+}
+
+/**
+ * mei_txe_readiness_clear - clear host readiness bit
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_readiness_clear(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
+				SICR_HOST_IPC_READINESS_RDY_CLR);
+}
+
+/**
+ * mei_txe_readiness_get - reads and returns
+ *	the HICR_SEC_IPC_READINESS register value
+ *
+ * @dev: the device structure
+ */
+static u32 mei_txe_readiness_get(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
+}
+
+
+/**
+ * mei_txe_readiness_is_sec_rdy - check readiness
+ *	for HICR_SEC_IPC_READINESS_SEC_RDY
+ *
+ * @readiness: cached readiness state
+ */
+static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
+{
+	return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
+}
+
+/**
+ * mei_txe_hw_is_ready - check if the hw is ready
+ *
+ * @dev: the device structure
+ */
+static bool mei_txe_hw_is_ready(struct mei_device *dev)
+{
+	u32 readiness = mei_txe_readiness_get(dev);
+	return mei_txe_readiness_is_sec_rdy(readiness);
+}
+
+/**
+ * mei_txe_host_is_ready - check if the host is ready
+ *
+ * @dev: the device structure
+ */
+static inline bool mei_txe_host_is_ready(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
+	return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
+}
+
+/**
+ * mei_txe_readiness_wait - wait till readiness settles
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and -ETIME on timeout
+ */
+static int mei_txe_readiness_wait(struct mei_device *dev)
+{
+	if (mei_txe_hw_is_ready(dev))
+		return 0;
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
+			msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
+	mutex_lock(&dev->device_lock);
+	if (!dev->recvd_hw_ready) {
+		dev_err(&dev->pdev->dev, "wait for readiness failed\n");
+		return -ETIME;
+	}
+
+	dev->recvd_hw_ready = false;
+	return 0;
+}
+
+/**
+ * mei_txe_hw_config - configure hardware at the start of the device
+ *
+ * @dev: the device structure
+ *
+ * Configuring hardware at the start of the device should be done only
+ * once at device probe time
+ */
+static void mei_txe_hw_config(struct mei_device *dev)
+{
+
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	/* Doesn't change in runtime */
+	dev->hbuf_depth = PAYLOAD_SIZE / 4;
+
+	hw->aliveness = mei_txe_aliveness_get(dev);
+	hw->readiness = mei_txe_readiness_get(dev);
+
+	dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
+		hw->aliveness, hw->readiness);
+}
+
+
+/**
+ * mei_txe_write - writes a message to device.
+ *
+ * @dev: the device structure
+ * @header: header of message
+ * @buf: message buffer to be written
+ *
+ * returns 0 on success, an error code otherwise
+ */
+static int mei_txe_write(struct mei_device *dev,
+		struct mei_msg_hdr *header, unsigned char *buf)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	unsigned long rem;
+	unsigned long length;
+	int slots = dev->hbuf_depth;
+	u32 *reg_buf = (u32 *)buf;
+	u32 dw_cnt;
+	int i;
+
+	if (WARN_ON(!header || !buf))
+		return -EINVAL;
+
+	length = header->length;
+
+	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+	dw_cnt = mei_data2slots(length);
+	if (dw_cnt > slots)
+		return -EMSGSIZE;
+
+	if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
+		return -EAGAIN;
+
+	/* Enable Input Ready Interrupt. */
+	mei_txe_input_ready_interrupt_enable(dev);
+
+	if (!mei_txe_is_input_ready(dev)) {
+		struct mei_fw_status fw_status;
+		mei_fw_status(dev, &fw_status);
+		dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n",
+			FW_STS_PRM(fw_status));
+		return -EAGAIN;
+	}
+
+	mei_txe_input_payload_write(dev, 0, *((u32 *)header));
+
+	for (i = 0; i < length / 4; i++)
+		mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);
+
+	rem = length & 0x3;
+	if (rem > 0) {
+		u32 reg = 0;
+		memcpy(&reg, &buf[length - rem], rem);
+		mei_txe_input_payload_write(dev, i + 1, reg);
+	}
+
+	/* after each write the whole buffer is consumed */
+	hw->slots = 0;
+
+	/* Set Input-Doorbell */
+	mei_txe_input_doorbell_set(hw);
+
+	return 0;
+}
+
+/**
+ * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer
+ *
+ * @dev: the device structure
+ *
+ * returns PAYLOAD_SIZE minus the message header size
+ */
+static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
+{
+	return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
+}
+
+/**
+ * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer
+ *
+ * @dev: the device structure
+ *
+ * returns the number of empty slots: the whole buffer depth after
+ *	an Input Ready interrupt, 0 after a write
+ */
+static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	return hw->slots;
+}
+
+/**
+ * mei_txe_count_full_read_slots - mimics the me device circular buffer
+ *
+ * @dev: the device structure
+ *
+ * returns always the buffer size in dwords
+ */
+static int mei_txe_count_full_read_slots(struct mei_device *dev)
+{
+	/* read buffer has a static size */
+	return PAYLOAD_SIZE / 4;
+}
+
+/**
+ * mei_txe_read_hdr - read message header which is always in the first 4 bytes
+ *
+ * @dev: the device structure
+ *
+ * returns mei message header
+ */
+
+static u32 mei_txe_read_hdr(const struct mei_device *dev)
+{
+	return mei_txe_out_data_read(dev, 0);
+}
+
+/**
+ * mei_txe_read - reads a message from the txe device.
+ *
+ * @dev: the device structure
+ * @buf: message buffer to be filled
+ * @len: length of message to read in bytes
+ *
+ * returns -EINVAL on wrong argument, 0 on success
+ */
+static int mei_txe_read(struct mei_device *dev,
+		unsigned char *buf, unsigned long len)
+{
+
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 i;
+	u32 *reg_buf = (u32 *)buf;
+	u32 rem = len & 0x3;
+
+	if (WARN_ON(!buf || !len))
+		return -EINVAL;
+
+	dev_dbg(&dev->pdev->dev,
+		"buffer-length = %lu buf[0]0x%08X\n",
+		len, mei_txe_out_data_read(dev, 0));
+
+	for (i = 0; i < len / 4; i++) {
+		/* skip header: index starts from 1 */
+		u32 reg = mei_txe_out_data_read(dev, i + 1);
+		dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg);
+		*reg_buf++ = reg;
+	}
+
+	if (rem) {
+		u32 reg = mei_txe_out_data_read(dev, i + 1);
+		memcpy(reg_buf, &reg, rem);
+	}
+
+	mei_txe_output_ready_set(hw);
+	return 0;
+}
+
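
Editor's aside: mei_txe_write() and mei_txe_read() above imply a fixed layout for the 64-byte IPC payload window: slot 0 carries the mei_msg_hdr and data dwords start at slot 1, which is why the read loop indexes `i + 1`. A standalone model of that layout (txe_put/txe_get stand in for the register accessors):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAYLOAD_SIZE 64

    static uint32_t payload[PAYLOAD_SIZE / 4];	/* models the IPC window */

    static void txe_put(unsigned long idx, uint32_t v) { payload[idx] = v; }
    static uint32_t txe_get(unsigned long idx) { return payload[idx]; }

    int main(void)
    {
    	const char msg[] = "hello";
    	uint32_t hdr = 5;	/* stands in for struct mei_msg_hdr, length 5 */
    	unsigned long i, len = 5, rem = len & 0x3;
    	uint32_t dw;

    	txe_put(0, hdr);			/* header in slot 0 */
    	for (i = 0; i < len / 4; i++) {
    		memcpy(&dw, &msg[i * 4], 4);
    		txe_put(i + 1, dw);		/* data from slot 1 on */
    	}
    	if (rem) {
    		dw = 0;
    		memcpy(&dw, &msg[len - rem], rem);
    		txe_put(i + 1, dw);
    	}
    	printf("header=%u first=0x%08x\n", txe_get(0), txe_get(1));
    	return 0;
    }
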
+/**
+ * mei_txe_hw_reset - resets host and fw.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * returns 0 on success and < 0 in case of error
+ */
+static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+
+	u32 aliveness_req;
+	/*
+	 * read input doorbell to ensure consistency between Bridge and SeC;
+	 * the return value might be garbage
+	 */
+	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);
+
+	aliveness_req = mei_txe_aliveness_req_get(dev);
+	hw->aliveness = mei_txe_aliveness_get(dev);
+
+	/* Disable interrupts in this stage we will poll */
+	mei_txe_intr_disable(dev);
+
+	/*
+	 * If Aliveness Request and Aliveness Response are not equal then
+	 * wait for them to become equal.
+	 * Since we might have interrupts disabled - poll for it
+	 */
+	if (aliveness_req != hw->aliveness)
+		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
+			dev_err(&dev->pdev->dev,
+				"wait for aliveness settle failed ... bailing out\n");
+			return -EIO;
+		}
+
+	/*
+	 * If Aliveness Request and Aliveness Response are set then clear them
+	 */
+	if (aliveness_req) {
+		mei_txe_aliveness_set(dev, 0);
+		if (mei_txe_aliveness_poll(dev, 0) < 0) {
+			dev_err(&dev->pdev->dev,
+				"wait for aliveness failed ... bailing out\n");
+			return -EIO;
+		}
+	}
+
+	/*
+	 * Set readiness RDY_CLR bit
+	 */
+	mei_txe_readiness_clear(dev);
+
+	return 0;
+}
+
+/**
+ * mei_txe_hw_start - start the hardware after reset
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success and < 0 in case of error
+ */
+static int mei_txe_hw_start(struct mei_device *dev)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	int ret;
+
+	u32 hisr;
+
+	/* bring back interrupts */
+	mei_txe_intr_enable(dev);
+
+	ret = mei_txe_readiness_wait(dev);
+	if (ret < 0) {
+		dev_err(&dev->pdev->dev, "waiting for readiness failed\n");
+		return ret;
+	}
+
+	/*
+	 * If HISR.INT2_STS interrupt status bit is set then clear it.
+	 */
+	hisr = mei_txe_br_reg_read(hw, HISR_REG);
+	if (hisr & HISR_INT_2_STS)
+		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);
+
+	/* Clear the interrupt cause of OutputDoorbell */
+	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
+
+	ret = mei_txe_aliveness_set_sync(dev, 1);
+	if (ret < 0) {
+		dev_err(&dev->pdev->dev, "wait for aliveness failed ... bailing out\n");
+		return ret;
+	}
+
+	/* enable input ready interrupts:
+	 * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
+	 */
+	mei_txe_input_ready_interrupt_enable(dev);
+
+
+	/* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
+	mei_txe_output_ready_set(hw);
+
+	/* Set bit SICR_HOST_IPC_READINESS.HOST_RDY
+	 */
+	mei_txe_readiness_set_host_rdy(dev);
+
+	return 0;
+}
+
+/**
+ * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
+ *	single bit mask and acknowledge the interrupts
+ *
+ * @dev: the device structure
+ * @do_ack: acknowledge interrupts
+ */
+static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	u32 hisr;
+	u32 hhisr;
+	u32 ipc_isr;
+	u32 aliveness;
+	bool generated;
+
+	/* read interrupt registers */
+	hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
+	generated = (hhisr & IPC_HHIER_MSK);
+	if (!generated)
+		goto out;
+
+	hisr = mei_txe_br_reg_read(hw, HISR_REG);
+
+	aliveness = mei_txe_aliveness_get(dev);
+	if (hhisr & IPC_HHIER_SEC && aliveness)
+		ipc_isr = mei_txe_sec_reg_read_silent(hw,
+				SEC_IPC_HOST_INT_STATUS_REG);
+	else
+		ipc_isr = 0;
+
+	generated = generated ||
+		(hisr & HISR_INT_STS_MSK) ||
+		(ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);
+
+	if (generated && do_ack) {
+		/* Save the interrupt causes */
+		hw->intr_cause |= hisr & HISR_INT_STS_MSK;
+		if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
+			hw->intr_cause |= TXE_INTR_IN_READY;
+
+
+		mei_txe_intr_disable(dev);
+		/* Clear the interrupts in hierarchy:
+		 * IPC and Bridge, then the top level */
+		mei_txe_sec_reg_write_silent(hw,
+			SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
+		mei_txe_br_reg_write(hw, HISR_REG, hisr);
+		mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
+	}
+
+out:
+	return generated;
+}
+
+/**
+ * mei_txe_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
+{
+	struct mei_device *dev = dev_id;
+
+	if (mei_txe_check_and_ack_intrs(dev, true))
+		return IRQ_WAKE_THREAD;
+	return IRQ_NONE;
+}
+
+
+/**
+ * mei_txe_irq_thread_handler - txe interrupt thread
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
+{
+	struct mei_device *dev = (struct mei_device *) dev_id;
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+	struct mei_cl_cb complete_list;
+	s32 slots;
+	int rets = 0;
+
+	dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
+		mei_txe_br_reg_read(hw, HHISR_REG),
+		mei_txe_br_reg_read(hw, HISR_REG),
+		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
+
+
+	/* initialize our complete list */
+	mutex_lock(&dev->device_lock);
+	mei_io_list_init(&complete_list);
+
+	if (pci_dev_msi_enabled(dev->pdev))
+		mei_txe_check_and_ack_intrs(dev, true);
+
+	/* show irq events */
+	mei_txe_pending_interrupts(dev);
+
+	hw->aliveness = mei_txe_aliveness_get(dev);
+	hw->readiness = mei_txe_readiness_get(dev);
+
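
Editor's aside: mei_txe_check_and_ack_intrs() above acknowledges status bottom-up: the low-level IPC status first, then the bridge HISR, then the top-level HHISR aggregate, so no lower-level cause is left pending behind an already-cleared aggregate bit. A hedged sketch of just the ack order, reusing the helpers from this file (the wrapper itself is hypothetical):

    /* hypothetical wrapper, not part of the patch */
    static void example_ack_bottom_up(struct mei_txe_hw *hw,
    				  u32 ipc_isr, u32 hisr, u32 hhisr)
    {
    	/* 1. IPC-level status (SeC BAR) */
    	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
    	/* 2. bridge-level status */
    	mei_txe_br_reg_write(hw, HISR_REG, hisr);
    	/* 3. top-level aggregate last */
    	mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
    }

+	/* Readiness:
+	 * Detection of TXE driver going through reset
+	 * or TXE driver resetting the HECI interface.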
+ */ + if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) { + dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n"); + + /* Check if SeC is going through reset */ + if (mei_txe_readiness_is_sec_rdy(hw->readiness)) { + dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + } else { + dev->recvd_hw_ready = false; + if (dev->dev_state != MEI_DEV_RESETTING) { + + dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; + + } + } + wake_up(&dev->wait_hw_ready); + } + + /************************************************************/ + /* Check interrupt cause: + * Aliveness: Detection of SeC acknowledge of host request that + * it remain alive or host cancellation of that request. + */ + + if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) { + /* Clear the interrupt cause */ + dev_dbg(&dev->pdev->dev, + "Aliveness Interrupt: Status: %d\n", hw->aliveness); + dev->pg_event = MEI_PG_EVENT_RECEIVED; + if (waitqueue_active(&hw->wait_aliveness_resp)) + wake_up(&hw->wait_aliveness_resp); + } + + + /* Output Doorbell: + * Detection of SeC having sent output to host + */ + slots = mei_count_full_read_slots(dev); + if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) { + /* Read from TXE */ + rets = mei_irq_read_handler(dev, &complete_list, &slots); + if (rets && dev->dev_state != MEI_DEV_RESETTING) { + dev_err(&dev->pdev->dev, + "mei_irq_read_handler ret = %d.\n", rets); + + schedule_work(&dev->reset_work); + goto end; + } + } + /* Input Ready: Detection if host can write to SeC */ + if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) { + dev->hbuf_is_ready = true; + hw->slots = dev->hbuf_depth; + } + + if (hw->aliveness && dev->hbuf_is_ready) { + /* get the real register value */ + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + rets = mei_irq_write_handler(dev, &complete_list); + if (rets && rets != -EMSGSIZE) + dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n", + rets); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + } + + mei_irq_compl_handler(dev, &complete_list); + +end: + dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); + + mutex_unlock(&dev->device_lock); + + mei_enable_interrupts(dev); + return IRQ_HANDLED; +} + + +/** + * mei_txe_fw_status - retrieve fw status from the pci config space + * + * @dev: the device structure + * @fw_status: fw status registers storage + * + * returns: 0 on success an error code otherwise + */ +static int mei_txe_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1}; + int i; + + if (!fw_status) + return -EINVAL; + + fw_status->count = 2; + + for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { + int ret; + ret = pci_read_config_dword(dev->pdev, + pci_cfg_reg[i], &fw_status->status[i]); + if (ret) + return ret; + } + + return 0; +} + +static const struct mei_hw_ops mei_txe_hw_ops = { + + .fw_status = mei_txe_fw_status, + .host_is_ready = mei_txe_host_is_ready, + + .pg_state = mei_txe_pg_state, + + .hw_is_ready = mei_txe_hw_is_ready, + .hw_reset = mei_txe_hw_reset, + .hw_config = mei_txe_hw_config, + .hw_start = mei_txe_hw_start, + + .pg_is_enabled = mei_txe_pg_is_enabled, + + .intr_clear = mei_txe_intr_clear, + .intr_enable = mei_txe_intr_enable, + .intr_disable = mei_txe_intr_disable, + + .hbuf_free_slots = mei_txe_hbuf_empty_slots, + .hbuf_is_ready = mei_txe_is_input_ready, + .hbuf_max_len = 
mei_txe_hbuf_max_len,
+
+	.write = mei_txe_write,
+
+	.rdbuf_full_slots = mei_txe_count_full_read_slots,
+	.read_hdr = mei_txe_read_hdr,
+
+	.read = mei_txe_read,
+
+};
+
+#define MEI_CFG_TXE_FW_STS                      \
+	.fw_status.count = 2,                   \
+	.fw_status.status[0] = PCI_CFG_TXE_FW_STS0, \
+	.fw_status.status[1] = PCI_CFG_TXE_FW_STS1
+
+const struct mei_cfg mei_txe_cfg = {
+	MEI_CFG_TXE_FW_STS,
+};
+
+
+/**
+ * mei_txe_dev_init - allocates and initializes txe hardware specific structure
+ *
+ * @pdev: pci device
+ * @cfg: per device generation config
+ *
+ * returns struct mei_device * on success or NULL
+ */
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev,
+				    const struct mei_cfg *cfg)
+{
+	struct mei_device *dev;
+	struct mei_txe_hw *hw;
+
+	dev = kzalloc(sizeof(struct mei_device) +
+			 sizeof(struct mei_txe_hw), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	mei_device_init(dev, cfg);
+
+	hw = to_txe_hw(dev);
+
+	init_waitqueue_head(&hw->wait_aliveness_resp);
+
+	dev->ops = &mei_txe_hw_ops;
+
+	dev->pdev = pdev;
+	return dev;
+}
+
+/**
+ * mei_txe_setup_satt2 - SATT2 configuration for DMA support.
+ *
+ * @dev: the device structure
+ * @addr: physical address start of the range
+ * @range: physical range size
+ */
+int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
+{
+	struct mei_txe_hw *hw = to_txe_hw(dev);
+
+	u32 lo32 = lower_32_bits(addr);
+	u32 hi32 = upper_32_bits(addr);
+	u32 ctrl;
+
+	/* SATT is limited to 36 Bits */
+	if (hi32 & ~0xF)
+		return -EINVAL;
+
+	/* SATT has to be 16Byte aligned */
+	if (lo32 & 0xF)
+		return -EINVAL;
+
+	/* SATT range has to be 4Byte aligned */
+	if (range & 0x3)
+		return -EINVAL;
+
+	/* SATT is limited to 32 MB range*/
+	if (range > SATT_RANGE_MAX)
+		return -EINVAL;
+
+	ctrl = SATT2_CTRL_VALID_MSK;
+	ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
+
+	mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
+	mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
+	mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
+	dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
+		range, lo32, ctrl);
+
+	return 0;
+}
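
Editor's aside: mei_txe_setup_satt2() above programs a DMA window limited to 36 address bits, 16-byte base alignment, and a 32 MB range. A hedged usage sketch with illustrative values (example_program_satt() is hypothetical):

    #include "mei_dev.h"
    #include "hw-txe.h"

    /* hypothetical caller, not part of the patch */
    static int example_program_satt(struct mei_device *dev)
    {
    	/* bit 33 set: within the 36-bit limit, 16-byte aligned */
    	phys_addr_t base = 0x200000000ULL;
    	u32 range = 4 << 20;		/* 4 MB, below SATT_RANGE_MAX */

    	return mei_txe_setup_satt2(dev, base, range);
    }

diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
new file mode 100644
index 00000000000..e244af79167
--- /dev/null
+++ b/drivers/misc/mei/hw-txe.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.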
+ *
+ */
+
+#ifndef _MEI_HW_TXE_H_
+#define _MEI_HW_TXE_H_
+
+#include <linux/irqreturn.h>
+
+#include "hw.h"
+#include "hw-txe-regs.h"
+
+#define MEI_TXI_RPM_TIMEOUT 500 /* ms */
+
+/* Flatten Hierarchy interrupt cause */
+#define TXE_INTR_READINESS_BIT  0 /* HISR_INT_0_STS */
+#define TXE_INTR_READINESS      HISR_INT_0_STS
+#define TXE_INTR_ALIVENESS_BIT  1 /* HISR_INT_1_STS */
+#define TXE_INTR_ALIVENESS      HISR_INT_1_STS
+#define TXE_INTR_OUT_DB_BIT     2 /* HISR_INT_2_STS */
+#define TXE_INTR_OUT_DB         HISR_INT_2_STS
+#define TXE_INTR_IN_READY_BIT   8 /* beyond HISR */
+#define TXE_INTR_IN_READY       BIT(8)
+
+/**
+ * struct mei_txe_hw - txe hardware specifics
+ *
+ * @mem_addr: SeC and BRIDGE bars
+ * @aliveness: aliveness (power gating) state of the hardware
+ * @readiness: readiness state of the hardware
+ * @slots: number of empty hbuf slots
+ * @wait_aliveness_resp: aliveness wait queue
+ * @intr_cause: translated interrupt cause
+ */
+struct mei_txe_hw {
+	void __iomem *mem_addr[NUM_OF_MEM_BARS];
+	u32 aliveness;
+	u32 readiness;
+	u32 slots;
+
+	wait_queue_head_t wait_aliveness_resp;
+
+	unsigned long intr_cause;
+};
+
+#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw)
+
+static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw)
+{
+	return container_of((void *)hw, struct mei_device, hw);
+}
+
+extern const struct mei_cfg mei_txe_cfg;
+
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev,
+				    const struct mei_cfg *cfg);
+
+irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
+
+int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req);
+
+int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range);
+
+
+#endif /* _MEI_HW_TXE_H_ */
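
Editor's aside: both back ends carve the generic mei_device and the hardware-specific state out of a single allocation; to_txe_hw() and hw_txe_to_mei() convert between the two views, relying on the hw area trailing the device structure. A hedged sketch of the round trip (example_alloc() is hypothetical; the layout assumption comes from the helpers above and the kzalloc in mei_txe_dev_init()):

    #include <linux/slab.h>
    #include "mei_dev.h"
    #include "hw-txe.h"

    /* hypothetical helper, not part of the patch */
    static struct mei_device *example_alloc(void)
    {
    	struct mei_device *dev;

    	dev = kzalloc(sizeof(struct mei_device) + sizeof(struct mei_txe_hw),
    		      GFP_KERNEL);
    	if (!dev)
    		return NULL;

    	/* dev->hw is the trailing area; converting there and back
    	 * must land on the same device */
    	WARN_ON(hw_txe_to_mei(to_txe_hw(dev)) != dev);
    	return dev;
    }

diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
new file mode 100644
index 00000000000..dd448e58cc8
--- /dev/null
+++ b/drivers/misc/mei/hw.h
@@ -0,0 +1,274 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.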
+ * + */ + +#ifndef _MEI_HW_TYPES_H_ +#define _MEI_HW_TYPES_H_ + +#include <linux/uuid.h> + +/* + * Timeouts in Seconds + */ +#define MEI_HW_READY_TIMEOUT 2 /* Timeout on ready message */ +#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */ + +#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ +#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ + +#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ +#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ + +#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ +#define MEI_HBM_TIMEOUT 1 /* 1 second */ + +/* + * MEI Version + */ +#define HBM_MINOR_VERSION 1 +#define HBM_MAJOR_VERSION 1 + +/* + * MEI version with PGI support + */ +#define HBM_MINOR_VERSION_PGI 1 +#define HBM_MAJOR_VERSION_PGI 1 + +/* Host bus message command opcode */ +#define MEI_HBM_CMD_OP_MSK 0x7f +/* Host bus message command RESPONSE */ +#define MEI_HBM_CMD_RES_MSK 0x80 + +/* + * MEI Bus Message Command IDs + */ +#define HOST_START_REQ_CMD 0x01 +#define HOST_START_RES_CMD 0x81 + +#define HOST_STOP_REQ_CMD 0x02 +#define HOST_STOP_RES_CMD 0x82 + +#define ME_STOP_REQ_CMD 0x03 + +#define HOST_ENUM_REQ_CMD 0x04 +#define HOST_ENUM_RES_CMD 0x84 + +#define HOST_CLIENT_PROPERTIES_REQ_CMD 0x05 +#define HOST_CLIENT_PROPERTIES_RES_CMD 0x85 + +#define CLIENT_CONNECT_REQ_CMD 0x06 +#define CLIENT_CONNECT_RES_CMD 0x86 + +#define CLIENT_DISCONNECT_REQ_CMD 0x07 +#define CLIENT_DISCONNECT_RES_CMD 0x87 + +#define MEI_FLOW_CONTROL_CMD 0x08 + +#define MEI_PG_ISOLATION_ENTRY_REQ_CMD 0x0a +#define MEI_PG_ISOLATION_ENTRY_RES_CMD 0x8a +#define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b +#define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b + +/* + * MEI Stop Reason + * used by hbm_host_stop_request.reason + */ +enum mei_stop_reason_types { + DRIVER_STOP_REQUEST = 0x00, + DEVICE_D1_ENTRY = 0x01, + DEVICE_D2_ENTRY = 0x02, + DEVICE_D3_ENTRY = 0x03, + SYSTEM_S1_ENTRY = 0x04, + SYSTEM_S2_ENTRY = 0x05, + SYSTEM_S3_ENTRY = 0x06, + SYSTEM_S4_ENTRY = 0x07, + SYSTEM_S5_ENTRY = 0x08 +}; + +/* + * Client Connect Status + * used by hbm_client_connect_response.status + */ +enum mei_cl_connect_status { + MEI_CL_CONN_SUCCESS = 0x00, + MEI_CL_CONN_NOT_FOUND = 0x01, + MEI_CL_CONN_ALREADY_STARTED = 0x02, + MEI_CL_CONN_OUT_OF_RESOURCES = 0x03, + MEI_CL_CONN_MESSAGE_SMALL = 0x04 +}; + +/* + * Client Disconnect Status + */ +enum mei_cl_disconnect_status { + MEI_CL_DISCONN_SUCCESS = 0x00 +}; + +/* + * MEI BUS Interface Section + */ +struct mei_msg_hdr { + u32 me_addr:8; + u32 host_addr:8; + u32 length:9; + u32 reserved:5; + u32 internal:1; + u32 msg_complete:1; +} __packed; + + +struct mei_bus_message { + u8 hbm_cmd; + u8 data[0]; +} __packed; + +/** + * struct mei_hbm_cl_cmd - client specific host bus command + * CONNECT, DISCONNECT, and FLOW CONTROL + * + * @hbm_cmd - bus message command header + * @me_addr - address of the client in ME + * @host_addr - address of the client in the driver + * @data - generic data + */ +struct mei_hbm_cl_cmd { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 data; +}; + +struct hbm_version { + u8 minor_version; + u8 major_version; +} __packed; + +struct hbm_host_version_request { + u8 hbm_cmd; + u8 reserved; + struct hbm_version host_version; +} __packed; + +struct hbm_host_version_response { + u8 hbm_cmd; + u8 host_version_supported; + struct hbm_version me_max_version; +} __packed; + +struct hbm_host_stop_request { + u8 hbm_cmd; + u8 reason; + u8 reserved[2]; +} __packed; + +struct hbm_host_stop_response { + u8 hbm_cmd; + u8 reserved[3]; +} __packed; + +struct hbm_me_stop_request { + u8 hbm_cmd; + u8 reason; + u8 reserved[2]; +} __packed; + +struct hbm_host_enum_request { + u8 hbm_cmd; + u8 reserved[3]; +} __packed; + +struct hbm_host_enum_response { + u8 hbm_cmd; + u8 reserved[3]; + u8 valid_addresses[32]; +} __packed; + +struct mei_client_properties { + uuid_le protocol_name; + u8 protocol_version; + u8 max_number_of_connections; + u8 fixed_address; + u8 single_recv_buf; + u32 max_msg_length; +} __packed; + +struct hbm_props_request { + u8 hbm_cmd; + u8 address; + u8 reserved[2]; +} __packed; + + +struct hbm_props_response { + u8 hbm_cmd; + u8 address; + u8 status; + u8 reserved[1]; + struct mei_client_properties client_properties; +} __packed; + +/** + * struct hbm_power_gate - power gate request/response + * + * @hbm_cmd - bus message command header + * @reserved - reserved + */ +struct hbm_power_gate { + u8 hbm_cmd; + u8 reserved[3]; +} __packed; + +/** + * struct hbm_client_connect_request - connect/disconnect request + * + * @hbm_cmd - bus message command header + * @me_addr - address of the client in ME + * @host_addr - address of the client in the driver + * @reserved - reserved + */ +struct hbm_client_connect_request { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 reserved; +} __packed; + +/** + * struct hbm_client_connect_response - connect/disconnect response + * + * @hbm_cmd - bus message command header + * @me_addr - address of the client in ME + * @host_addr - address of the client in the driver + * @status - status of the request + */ +struct hbm_client_connect_response { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 status; +} __packed; + + +#define MEI_FC_MESSAGE_RESERVED_LENGTH 5 + +struct hbm_flow_control { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH]; +} __packed; + + +#endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c new file mode 100644 index 00000000000..00692922248 --- /dev/null +++ b/drivers/misc/mei/init.c @@ -0,0 +1,395 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/export.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +const char *mei_dev_state_str(int state) +{ +#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state + switch (state) { + MEI_DEV_STATE(INITIALIZING); + MEI_DEV_STATE(INIT_CLIENTS); + MEI_DEV_STATE(ENABLED); + MEI_DEV_STATE(RESETTING); + MEI_DEV_STATE(DISABLED); + MEI_DEV_STATE(POWER_DOWN); + MEI_DEV_STATE(POWER_UP); + default: + return "unknown"; + } +#undef MEI_DEV_STATE +} + + +/** + * mei_cancel_work - cancel mei background jobs + * + * @dev: the device structure + */ +void mei_cancel_work(struct mei_device *dev) +{ + cancel_work_sync(&dev->init_work); + cancel_work_sync(&dev->reset_work); + + cancel_delayed_work(&dev->timer_work); +} +EXPORT_SYMBOL_GPL(mei_cancel_work); + +/** + * mei_reset - resets host and fw. + * + * @dev: the device structure + * + * returns 0 on success or <0 on failure + */ +int mei_reset(struct mei_device *dev) +{ + enum mei_dev_state state = dev->dev_state; + bool interrupts_enabled; + int ret; + + if (state != MEI_DEV_INITIALIZING && + state != MEI_DEV_DISABLED && + state != MEI_DEV_POWER_DOWN && + state != MEI_DEV_POWER_UP) { + struct mei_fw_status fw_status; + mei_fw_status(dev, &fw_status); + dev_warn(&dev->pdev->dev, + "unexpected reset: dev_state = %s " FW_STS_FMT "\n", + mei_dev_state_str(state), FW_STS_PRM(fw_status)); + } + + /* we're already in reset, cancel the init timer; + * if the reset was called due to the hbm protocol error + * we need to call it before hw start + * so the hbm watchdog won't kick in + */ + mei_hbm_idle(dev); + + /* enter reset flow */ + interrupts_enabled = state != MEI_DEV_POWER_DOWN; + dev->dev_state = MEI_DEV_RESETTING; + + dev->reset_count++; + if (dev->reset_count > MEI_MAX_CONSEC_RESET) { + dev_err(&dev->pdev->dev, "reset: reached maximum consecutive resets: disabling the device\n"); + dev->dev_state = MEI_DEV_DISABLED; + return -ENODEV; + } + + ret = mei_hw_reset(dev, interrupts_enabled); + /* fall through and remove the sw state even if hw reset has failed */ + + /* no need to clean up software state in case of power up */ + if (state != MEI_DEV_INITIALIZING && + state != MEI_DEV_POWER_UP) { + + /* remove all waiting requests */ + mei_cl_all_write_clear(dev); + + mei_cl_all_disconnect(dev); + + /* wake up all readers and writers so they can be interrupted */ + mei_cl_all_wakeup(dev); + + /* remove entry if already in list */ + dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); + mei_cl_unlink(&dev->wd_cl); + mei_cl_unlink(&dev->iamthif_cl); + mei_amthif_reset_params(dev); + } + + mei_hbm_reset(dev); + + dev->rd_msg_hdr = 0; + dev->wd_pending = false; + + if (ret) { + dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); + return ret; + } + + if (state == MEI_DEV_POWER_DOWN) { + dev_dbg(&dev->pdev->dev, "powering down: end of reset\n"); + dev->dev_state = MEI_DEV_DISABLED; + return 0; + } + + ret = mei_hw_start(dev); + if (ret) { + dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); + return ret; + } + + dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); + + dev->dev_state = MEI_DEV_INIT_CLIENTS; + ret = mei_hbm_start_req(dev); + if (ret) { + dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); + dev->dev_state = MEI_DEV_RESETTING; + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mei_reset); + +/** + * mei_start - initializes host and fw to start work. + * + * @dev: the device structure + * + * returns 0 on success, <0 on failure.
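+ * + * Locking: must be called without dev->device_lock held; the lock is taken + * and released internally.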
+ */ +int mei_start(struct mei_device *dev) +{ + int ret; + mutex_lock(&dev->device_lock); + + /* acknowledge interrupt and stop interrupts */ + mei_clear_interrupts(dev); + + mei_hw_config(dev); + + dev_dbg(&dev->pdev->dev, "resetting the mei device during start.\n"); + + dev->reset_count = 0; + do { + dev->dev_state = MEI_DEV_INITIALIZING; + ret = mei_reset(dev); + + if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "reset failed ret = %d", ret); + goto err; + } + } while (ret); + + /* we cannot start the device without the hbm start message completed */ + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "reset failed"); + goto err; + } + + if (mei_hbm_start_wait(dev)) { + dev_err(&dev->pdev->dev, "HBM has not started"); + goto err; + } + + if (!mei_host_is_ready(dev)) { + dev_err(&dev->pdev->dev, "host is not ready.\n"); + goto err; + } + + if (!mei_hw_is_ready(dev)) { + dev_err(&dev->pdev->dev, "ME is not ready.\n"); + goto err; + } + + if (!mei_hbm_version_is_supported(dev)) { + dev_dbg(&dev->pdev->dev, "MEI start failed.\n"); + goto err; + } + + dev_dbg(&dev->pdev->dev, "link layer has been established.\n"); + + mutex_unlock(&dev->device_lock); + return 0; +err: + dev_err(&dev->pdev->dev, "link layer initialization failed.\n"); + dev->dev_state = MEI_DEV_DISABLED; + mutex_unlock(&dev->device_lock); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(mei_start); + +/** + * mei_restart - restart device after suspend + * + * @dev: the device structure + * + * returns 0 on success or -ENODEV if the restart hasn't succeeded + */ +int mei_restart(struct mei_device *dev) +{ + int err; + + mutex_lock(&dev->device_lock); + + mei_clear_interrupts(dev); + + dev->dev_state = MEI_DEV_POWER_UP; + dev->reset_count = 0; + + err = mei_reset(dev); + + mutex_unlock(&dev->device_lock); + + if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "device disabled = %d\n", err); + return -ENODEV; + } + + /* try to start again */ + if (err) + schedule_work(&dev->reset_work); + + return 0; +} +EXPORT_SYMBOL_GPL(mei_restart); + +static void mei_reset_work(struct work_struct *work) +{ + struct mei_device *dev = + container_of(work, struct mei_device, reset_work); + int ret; + + mutex_lock(&dev->device_lock); + + ret = mei_reset(dev); + + mutex_unlock(&dev->device_lock); + + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "device disabled = %d\n", ret); + return; + } + + /* retry reset in case of failure */ + if (ret) + schedule_work(&dev->reset_work); +} + +void mei_stop(struct mei_device *dev) +{ + dev_dbg(&dev->pdev->dev, "stopping the device.\n"); + + mei_cancel_work(dev); + + mei_nfc_host_exit(dev); + + mei_cl_bus_remove_devices(dev); + + mutex_lock(&dev->device_lock); + + mei_wd_stop(dev); + + dev->dev_state = MEI_DEV_POWER_DOWN; + mei_reset(dev); + + mutex_unlock(&dev->device_lock); + + mei_watchdog_unregister(dev); +} +EXPORT_SYMBOL_GPL(mei_stop); + +/** + * mei_write_is_idle - check if the write queues are idle + * + * @dev: the device structure + * + * returns true if there is no pending write + */ +bool mei_write_is_idle(struct mei_device *dev) +{ + bool idle = (dev->dev_state == MEI_DEV_ENABLED && + list_empty(&dev->ctrl_wr_list.list) && + list_empty(&dev->write_list.list)); + + dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", + idle, + mei_dev_state_str(dev->dev_state), + list_empty(&dev->ctrl_wr_list.list), + list_empty(&dev->write_list.list)); + + return idle; +}
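The exported lifecycle entry points in this file (mei_device_init(), mei_start(), mei_reset(), mei_stop()) together with mei_register() from main.c are meant to be driven by a transport glue layer. A minimal probe-order sketch follows; my_probe(), my_cfg and my_ops are hypothetical names standing in for a hardware backend, not part of this patch:

/* editorial sketch: my_cfg (struct mei_cfg) and my_ops (struct mei_hw_ops)
 * are hypothetical, supplied by a hardware backend
 */
static int my_probe(struct pci_dev *pdev, struct mei_device *dev)
{
	mei_device_init(dev, &my_cfg);	/* queues, locks, client bitmaps */
	dev->ops = &my_ops;		/* hardware access vtable */
	dev->pdev = pdev;

	if (mei_start(dev))		/* hw reset + HBM link handshake */
		return -ENODEV;

	return mei_register(dev);	/* expose /dev/mei and debugfs */
}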
+EXPORT_SYMBOL_GPL(mei_write_is_idle); + +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) +{ + int i; + const struct mei_fw_status *fw_src = &dev->cfg->fw_status; + + if (!fw_status) + return -EINVAL; + + fw_status->count = fw_src->count; + for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { + int ret; + ret = pci_read_config_dword(dev->pdev, + fw_src->status[i], &fw_status->status[i]); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mei_fw_status); + +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg) +{ + /* setup our list array */ + INIT_LIST_HEAD(&dev->file_list); + INIT_LIST_HEAD(&dev->device_list); + mutex_init(&dev->device_lock); + init_waitqueue_head(&dev->wait_hw_ready); + init_waitqueue_head(&dev->wait_pg); + init_waitqueue_head(&dev->wait_recvd_msg); + init_waitqueue_head(&dev->wait_stop_wd); + dev->dev_state = MEI_DEV_INITIALIZING; + dev->reset_count = 0; + + mei_io_list_init(&dev->read_list); + mei_io_list_init(&dev->write_list); + mei_io_list_init(&dev->write_waiting_list); + mei_io_list_init(&dev->ctrl_wr_list); + mei_io_list_init(&dev->ctrl_rd_list); + + INIT_DELAYED_WORK(&dev->timer_work, mei_timer); + INIT_WORK(&dev->init_work, mei_host_client_init); + INIT_WORK(&dev->reset_work, mei_reset_work); + + INIT_LIST_HEAD(&dev->wd_cl.link); + INIT_LIST_HEAD(&dev->iamthif_cl.link); + mei_io_list_init(&dev->amthif_cmd_list); + mei_io_list_init(&dev->amthif_rd_complete_list); + + bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); + dev->open_handle_count = 0; + + /* + * Reserving the first client ID + * 0: Reserved for MEI Bus Message communications + */ + bitmap_set(dev->host_clients_map, 0, 1); + + dev->pg_event = MEI_PG_EVENT_IDLE; + dev->cfg = cfg; +} +EXPORT_SYMBOL_GPL(mei_device_init); + diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c new file mode 100644 index 00000000000..4e3cba6da3f --- /dev/null +++ b/drivers/misc/mei/interrupt.c @@ -0,0 +1,659 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + + +#include <linux/export.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/fs.h> +#include <linux/jiffies.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + + +/** + * mei_irq_compl_handler - dispatch complete handlers + * for the completed callbacks + * + * @dev - mei device + * @compl_list - list of completed cbs + */ +void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) +{ + struct mei_cl_cb *cb, *next; + struct mei_cl *cl; + + list_for_each_entry_safe(cb, next, &compl_list->list, list) { + cl = cb->cl; + list_del(&cb->list); + if (!cl) + continue; + + dev_dbg(&dev->pdev->dev, "completing call back.\n"); + if (cl == &dev->iamthif_cl) + mei_amthif_complete(dev, cb); + else + mei_cl_complete(cl, cb); + } +} +EXPORT_SYMBOL_GPL(mei_irq_compl_handler); + +/** + * mei_cl_hbm_equal - check if hbm is addressed to the client + * + * @cl: host client + * @mei_hdr: header of mei client message + * + * returns true if matches, false otherwise + */ +static inline int mei_cl_hbm_equal(struct mei_cl *cl, + struct mei_msg_hdr *mei_hdr) +{ + return cl->host_client_id == mei_hdr->host_addr && + cl->me_client_id == mei_hdr->me_addr; +} + +/** + * mei_cl_is_reading - checks if the client is the one to read this message + * + * @cl: mei client + * @mei_hdr: header of mei message + * + * returns true on match and false otherwise + */ +static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr) +{ + return mei_cl_hbm_equal(cl, mei_hdr) && + cl->state == MEI_FILE_CONNECTED && + cl->reading_state != MEI_READ_COMPLETE; +} + +/** + * mei_cl_irq_read_msg - process client message + * + * @dev: the device structure + * @mei_hdr: header of mei client message + * @complete_list: list of completed callbacks + * + * returns 0 on success, <0 on failure. + */ +static int mei_cl_irq_read_msg(struct mei_device *dev, + struct mei_msg_hdr *mei_hdr, + struct mei_cl_cb *complete_list) +{ + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + unsigned char *buffer = NULL; + + list_for_each_entry_safe(cb, next, &dev->read_list.list, list) { + cl = cb->cl; + if (!cl || !mei_cl_is_reading(cl, mei_hdr)) + continue; + + cl->reading_state = MEI_READING; + + if (cb->response_buffer.size == 0 || + cb->response_buffer.data == NULL) { + cl_err(dev, cl, "response buffer is not allocated.\n"); + list_del(&cb->list); + return -ENOMEM; + } + + if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) { + cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n", + cb->response_buffer.size, + mei_hdr->length, cb->buf_idx); + buffer = krealloc(cb->response_buffer.data, + mei_hdr->length + cb->buf_idx, + GFP_KERNEL); + + if (!buffer) { + cl_err(dev, cl, "allocation failed.\n"); + list_del(&cb->list); + return -ENOMEM; + } + cb->response_buffer.data = buffer; + cb->response_buffer.size = + mei_hdr->length + cb->buf_idx; + } + + buffer = cb->response_buffer.data + cb->buf_idx; + mei_read_slots(dev, buffer, mei_hdr->length); + + cb->buf_idx += mei_hdr->length; + if (mei_hdr->msg_complete) { + cl->status = 0; + list_del(&cb->list); + cl_dbg(dev, cl, "completed read length = %lu\n", + cb->buf_idx); + list_add_tail(&cb->list, &complete_list->list); + } + break; + } + + dev_dbg(&dev->pdev->dev, "message read\n"); + if (!buffer) { + mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); + dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n", + MEI_HDR_PRM(mei_hdr)); + } + + return 0; +} + +/** + * mei_cl_irq_disconnect_rsp - send disconnection response message + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + slots = mei_hbuf_empty_slots(dev); + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response)); + + if (slots < msg_slots) + return -EMSGSIZE; + + ret = mei_hbm_cl_disconnect_rsp(dev, cl); + + cl->state = MEI_FILE_DISCONNECTED; + cl->status = 0; + list_del(&cb->list); + mei_io_cb_free(cb); + + return ret; +} + +/** + * mei_cl_irq_close - processes close related operation from + * interrupt thread context - send disconnect request + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + + if (slots < msg_slots) + return -EMSGSIZE; + + if (mei_hbm_cl_disconnect_req(dev, cl)) { + cl->status = 0; + cb->buf_idx = 0; + list_move_tail(&cb->list, &cmpl_list->list); + return -EIO; + } + + cl->state = MEI_FILE_DISCONNECTING; + cl->status = 0; + cb->buf_idx = 0; + list_move_tail(&cb->list, &dev->ctrl_rd_list.list); + cl->timer_count = MEI_CONNECT_TIMEOUT; + + return 0; +} + + +/** + * mei_cl_irq_read - processes client read related operation from the + * interrupt thread context - request for flow control credits + * + * @cl: client + * @cb: callback block.
+ * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + + if (mei_cl_is_other_connecting(cl)) + return 0; + + if (slots < msg_slots) + return -EMSGSIZE; + + cl->state = MEI_FILE_CONNECTING; + + ret = mei_hbm_cl_connect_req(dev, cl); + if (ret) { + cl->status = ret; + cb->buf_idx = 0; + list_del(&cb->list); + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list.list); + cl->timer_count = MEI_CONNECT_TIMEOUT; + return 0; +} + + +/** + * mei_irq_read_handler - bottom half read routine after ISR to + * handle the read processing. + * + * @dev: the device structure + * @cmpl_list: An instance of our list structure + * @slots: slots to read. + * + * returns 0 on success, <0 on failure. + */ +int mei_irq_read_handler(struct mei_device *dev, + struct mei_cl_cb *cmpl_list, s32 *slots) +{ + struct mei_msg_hdr *mei_hdr; + struct mei_cl *cl; + int ret; + + if (!dev->rd_msg_hdr) { + dev->rd_msg_hdr = mei_read_hdr(dev); + (*slots)--; + dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); + } + mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr; + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); + + if (mei_hdr->reserved || !dev->rd_msg_hdr) { + dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n", + dev->rd_msg_hdr); + ret = -EBADMSG; + goto end; + } + + if (mei_slots2data(*slots) < mei_hdr->length) { + dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", + *slots); + /* we can't read the message */ + ret = -ENODATA; + goto end; + } + + /* HBM message */ + if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { + ret = mei_hbm_dispatch(dev, mei_hdr); + if (ret) { + dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n", + ret); + goto end; + } + goto reset_slots; + } + + /* find recipient cl */ + list_for_each_entry(cl, &dev->file_list, link) { + if (mei_cl_hbm_equal(cl, mei_hdr)) { + cl_dbg(dev, cl, "got a message\n"); + break; + } + } + + /* if no recipient cl was found we assume corrupted header */ + if (&cl->link == &dev->file_list) { + dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n", + dev->rd_msg_hdr); + ret = -EBADMSG; + goto end; + } + + if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && + MEI_FILE_CONNECTED == dev->iamthif_cl.state && + dev->iamthif_state == MEI_IAMTHIF_READING) { + + ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); + if (ret) { + dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n", + ret); + goto end; + } + } else { + ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); + if (ret) { + dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n", + ret); + goto end; + } + } + +reset_slots: + /* reset the number of slots and header */ + *slots = mei_count_full_read_slots(dev); + dev->rd_msg_hdr = 0; + + if (*slots == -EOVERFLOW) { + /* overflow - reset */ + dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n"); + /* set the event since message has been read */ + ret = -ERANGE; + goto end; + } +end: + return ret; +} +EXPORT_SYMBOL_GPL(mei_irq_read_handler); + + +/** + * mei_irq_write_handler - dispatch write requests + * after irq received + * + * @dev: the device structure + * @cmpl_list: An instance of our list structure + * + * returns 0 on success, <0 on 
failure. + */ +int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) +{ + + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + struct mei_cl_cb *list; + s32 slots; + int ret; + + + if (!mei_hbuf_acquire(dev)) + return 0; + + slots = mei_hbuf_empty_slots(dev); + if (slots <= 0) + return -EMSGSIZE; + + /* complete all waiting for write CB */ + dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n"); + + list = &dev->write_waiting_list; + list_for_each_entry_safe(cb, next, &list->list, list) { + cl = cb->cl; + if (cl == NULL) + continue; + + cl->status = 0; + list_del(&cb->list); + if (cb->fop_type == MEI_FOP_WRITE && + cl != &dev->iamthif_cl) { + cl_dbg(dev, cl, "MEI WRITE COMPLETE\n"); + cl->writing_state = MEI_WRITE_COMPLETE; + list_add_tail(&cb->list, &cmpl_list->list); + } + if (cl == &dev->iamthif_cl) { + cl_dbg(dev, cl, "check iamthif flow control.\n"); + if (dev->iamthif_flow_control_pending) { + ret = mei_amthif_irq_read(dev, &slots); + if (ret) + return ret; + } + } + } + + if (dev->wd_state == MEI_WD_STOPPING) { + dev->wd_state = MEI_WD_IDLE; + wake_up(&dev->wait_stop_wd); + } + + if (mei_cl_is_connected(&dev->wd_cl)) { + if (dev->wd_pending && + mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { + ret = mei_wd_send(dev); + if (ret) + return ret; + dev->wd_pending = false; + } + } + + /* complete control write list CB */ + dev_dbg(&dev->pdev->dev, "complete control write list cb.\n"); + list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { + cl = cb->cl; + if (!cl) { + list_del(&cb->list); + return -ENODEV; + } + switch (cb->fop_type) { + case MEI_FOP_CLOSE: + /* send disconnect message */ + ret = mei_cl_irq_close(cl, cb, cmpl_list); + if (ret) + return ret; + + break; + case MEI_FOP_READ: + /* send flow control message */ + ret = mei_cl_irq_read(cl, cb, cmpl_list); + if (ret) + return ret; + + break; + case MEI_FOP_CONNECT: + /* connect message */ + ret = mei_cl_irq_connect(cl, cb, cmpl_list); + if (ret) + return ret; + + break; + case MEI_FOP_DISCONNECT_RSP: + /* send disconnect resp */ + ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); + if (ret) + return ret; + break; + default: + BUG(); + } + + } + /* complete write list CB */ + dev_dbg(&dev->pdev->dev, "complete write list cb.\n"); + list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { + cl = cb->cl; + if (cl == NULL) + continue; + if (cl == &dev->iamthif_cl) + ret = mei_amthif_irq_write(cl, cb, cmpl_list); + else + ret = mei_cl_irq_write(cl, cb, cmpl_list); + if (ret) + return ret; + } + return 0; +} +EXPORT_SYMBOL_GPL(mei_irq_write_handler); + + + +/** + * mei_timer - timer function. 
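+ * + * Periodic housekeeping work: watches the HBM init handshake, the client + * connect/disconnect timers and the AMTHIF stall/read timers, and re-arms + * itself every 2 * HZ as long as the device is not disabled.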
+ * + * @work: pointer to the work_struct structure + * + */ +void mei_timer(struct work_struct *work) +{ + unsigned long timeout; + struct mei_cl *cl; + struct mei_cl_cb *cb_pos = NULL; + struct mei_cl_cb *cb_next = NULL; + + struct mei_device *dev = container_of(work, + struct mei_device, timer_work.work); + + + mutex_lock(&dev->device_lock); + + /* Catch interrupt stalls during HBM init handshake */ + if (dev->dev_state == MEI_DEV_INIT_CLIENTS && + dev->hbm_state != MEI_HBM_IDLE) { + + if (dev->init_clients_timer) { + if (--dev->init_clients_timer == 0) { + dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n", + dev->hbm_state); + mei_reset(dev); + goto out; + } + } + } + + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + + /*** connect/disconnect timeouts ***/ + list_for_each_entry(cl, &dev->file_list, link) { + if (cl->timer_count) { + if (--cl->timer_count == 0) { + dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); + mei_reset(dev); + goto out; + } + } + } + + if (!mei_cl_is_connected(&dev->iamthif_cl)) + goto out; + + if (dev->iamthif_stall_timer) { + if (--dev->iamthif_stall_timer == 0) { + dev_err(&dev->pdev->dev, "timer: amthif hanged.\n"); + mei_reset(dev); + dev->iamthif_msg_buf_size = 0; + dev->iamthif_msg_buf_index = 0; + dev->iamthif_canceled = false; + dev->iamthif_ioctl = true; + dev->iamthif_state = MEI_IAMTHIF_IDLE; + dev->iamthif_timer = 0; + + mei_io_cb_free(dev->iamthif_current_cb); + dev->iamthif_current_cb = NULL; + + dev->iamthif_file_object = NULL; + mei_amthif_run_next_cmd(dev); + } + } + + if (dev->iamthif_timer) { + + timeout = dev->iamthif_timer + + mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); + + dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", + dev->iamthif_timer); + dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout); + dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies); + if (time_after(jiffies, timeout)) { + /* + * User didn't read the AMTHI data on time (15sec) + * freeing AMTHI for other requests + */ + + dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n"); + + list_for_each_entry_safe(cb_pos, cb_next, + &dev->amthif_rd_complete_list.list, list) { + + cl = cb_pos->file_object->private_data; + + /* Finding the AMTHI entry. */ + if (cl == &dev->iamthif_cl) + list_del(&cb_pos->list); + } + mei_io_cb_free(dev->iamthif_current_cb); + dev->iamthif_current_cb = NULL; + + dev->iamthif_file_object->private_data = NULL; + dev->iamthif_file_object = NULL; + dev->iamthif_timer = 0; + mei_amthif_run_next_cmd(dev); + + } + } +out: + if (dev->dev_state != MEI_DEV_DISABLED) + schedule_delayed_work(&dev->timer_work, 2 * HZ); + mutex_unlock(&dev->device_lock); +} + diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c new file mode 100644 index 00000000000..66f0a1a0645 --- /dev/null +++ b/drivers/misc/mei/main.c @@ -0,0 +1,719 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/aio.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/compat.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" + +/** + * mei_open - the open function + * + * @inode: pointer to inode structure + * @file: pointer to file structure + * + * returns 0 on success, <0 on error + */ +static int mei_open(struct inode *inode, struct file *file) +{ + struct miscdevice *misc = file->private_data; + struct pci_dev *pdev; + struct mei_cl *cl; + struct mei_device *dev; + + int err; + + if (!misc->parent) + return -ENODEV; + + pdev = container_of(misc->parent, struct pci_dev, dev); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + cl = NULL; + + err = -ENODEV; + if (dev->dev_state != MEI_DEV_ENABLED) { + dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", + mei_dev_state_str(dev->dev_state)); + goto err_unlock; + } + + err = -ENOMEM; + cl = mei_cl_allocate(dev); + if (!cl) + goto err_unlock; + + /* open_handle_count check is handled in the mei_cl_link */ + err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY); + if (err) + goto err_unlock; + + file->private_data = cl; + + mutex_unlock(&dev->device_lock); + + return nonseekable_open(inode, file); + +err_unlock: + mutex_unlock(&dev->device_lock); + kfree(cl); + return err; +} + +/** + * mei_release - the release function + * + * @inode: pointer to inode structure + * @file: pointer to file structure + * + * returns 0 on success, <0 on error + */ +static int mei_release(struct inode *inode, struct file *file) +{ + struct mei_cl *cl = file->private_data; + struct mei_cl_cb *cb; + struct mei_device *dev; + int rets = 0; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + if (cl == &dev->iamthif_cl) { + rets = mei_amthif_release(dev, file); + goto out; + } + if (cl->state == MEI_FILE_CONNECTED) { + cl->state = MEI_FILE_DISCONNECTING; + cl_dbg(dev, cl, "disconnecting\n"); + rets = mei_cl_disconnect(cl); + } + mei_cl_flush_queues(cl); + cl_dbg(dev, cl, "removing\n"); + + mei_cl_unlink(cl); + + + /* free read cb */ + cb = NULL; + if (cl->read_cb) { + cb = mei_cl_find_read_cb(cl); + /* Remove entry from read list */ + if (cb) + list_del(&cb->list); + + cb = cl->read_cb; + cl->read_cb = NULL; + } + + file->private_data = NULL; + + mei_io_cb_free(cb); + + kfree(cl); +out: + mutex_unlock(&dev->device_lock); + return rets; +} + + +/** + * mei_read - the read function. 
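+ * + * Blocks until the read completes, unless the file was opened O_NONBLOCK, + * in which case -EAGAIN is returned while a read is still pending.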
+ * + * @file: pointer to file structure + * @ubuf: pointer to user buffer + * @length: buffer length + * @offset: data offset in buffer + * + * returns >=0 data length on success, <0 on error + */ +static ssize_t mei_read(struct file *file, char __user *ubuf, + size_t length, loff_t *offset) +{ + struct mei_cl *cl = file->private_data; + struct mei_cl_cb *cb_pos = NULL; + struct mei_cl_cb *cb = NULL; + struct mei_device *dev; + int rets; + int err; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + if (length == 0) { + rets = 0; + goto out; + } + + if (cl == &dev->iamthif_cl) { + rets = mei_amthif_read(dev, file, ubuf, length, offset); + goto out; + } + + if (cl->read_cb) { + cb = cl->read_cb; + /* read what is left */ + if (cb->buf_idx > *offset) + goto copy_buffer; + /* offset is beyond buf_idx, we have no more data, return 0 */ + if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { + rets = 0; + goto free; + } + /* offset needs to be cleaned for contiguous reads */ + if (cb->buf_idx == 0 && *offset > 0) + *offset = 0; + } else if (*offset > 0) { + *offset = 0; + } + + err = mei_cl_read_start(cl, length); + if (err && err != -EBUSY) { + dev_dbg(&dev->pdev->dev, + "mei start read failure with status = %d\n", err); + rets = err; + goto out; + } + + if (MEI_READ_COMPLETE != cl->reading_state && + !waitqueue_active(&cl->rx_wait)) { + if (file->f_flags & O_NONBLOCK) { + rets = -EAGAIN; + goto out; + } + + mutex_unlock(&dev->device_lock); + + if (wait_event_interruptible(cl->rx_wait, + MEI_READ_COMPLETE == cl->reading_state || + mei_cl_is_transitioning(cl))) { + + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } + + mutex_lock(&dev->device_lock); + if (mei_cl_is_transitioning(cl)) { + rets = -EBUSY; + goto out; + } + } + + cb = cl->read_cb; + + if (!cb) { + rets = -ENODEV; + goto out; + } + if (cl->reading_state != MEI_READ_COMPLETE) { + rets = 0; + goto out; + } + /* now copy the data to user space */ +copy_buffer: + dev_dbg(&dev->pdev->dev, "buf.size = %d buf.idx = %ld\n", + cb->response_buffer.size, cb->buf_idx); + if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { + rets = -EMSGSIZE; + goto free; + } + + /* length is being truncated to PAGE_SIZE, + * however buf_idx may point beyond that */ + length = min_t(size_t, length, cb->buf_idx - *offset); + + if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { + dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto free; + } + + rets = length; + *offset += length; + if ((unsigned long)*offset < cb->buf_idx) + goto out; + +free: + cb_pos = mei_cl_find_read_cb(cl); + /* remove entry from read list */ + if (cb_pos) + list_del(&cb_pos->list); + mei_io_cb_free(cb); + cl->reading_state = MEI_IDLE; + cl->read_cb = NULL; +out: + dev_dbg(&dev->pdev->dev, "end mei read rets = %d\n", rets); + mutex_unlock(&dev->device_lock); + return rets; +} + +/** + * mei_write - the write function.
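+ * + * Writes directed at the AMTHIF client are queued through + * mei_amthif_write(); all other clients go through mei_cl_write().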
+ * + * @file: pointer to file structure + * @ubuf: pointer to user buffer + * @length: buffer length + * @offset: data offset in buffer + * + * returns >=0 data length on success , <0 on error + */ +static ssize_t mei_write(struct file *file, const char __user *ubuf, + size_t length, loff_t *offset) +{ + struct mei_cl *cl = file->private_data; + struct mei_cl_cb *write_cb = NULL; + struct mei_device *dev; + unsigned long timeout = 0; + int rets; + int id; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + id = mei_me_cl_by_id(dev, cl->me_client_id); + if (id < 0) { + rets = -ENOTTY; + goto out; + } + + if (length == 0) { + rets = 0; + goto out; + } + + if (length > dev->me_clients[id].props.max_msg_length) { + rets = -EFBIG; + goto out; + } + + if (cl->state != MEI_FILE_CONNECTED) { + dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d", + cl->host_client_id, cl->me_client_id); + rets = -ENODEV; + goto out; + } + if (cl == &dev->iamthif_cl) { + write_cb = mei_amthif_find_read_list_entry(dev, file); + + if (write_cb) { + timeout = write_cb->read_time + + mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); + + if (time_after(jiffies, timeout) || + cl->reading_state == MEI_READ_COMPLETE) { + *offset = 0; + list_del(&write_cb->list); + mei_io_cb_free(write_cb); + write_cb = NULL; + } + } + } + + /* free entry used in read */ + if (cl->reading_state == MEI_READ_COMPLETE) { + *offset = 0; + write_cb = mei_cl_find_read_cb(cl); + if (write_cb) { + list_del(&write_cb->list); + mei_io_cb_free(write_cb); + write_cb = NULL; + cl->reading_state = MEI_IDLE; + cl->read_cb = NULL; + } + } else if (cl->reading_state == MEI_IDLE) + *offset = 0; + + + write_cb = mei_io_cb_init(cl, file); + if (!write_cb) { + dev_err(&dev->pdev->dev, "write cb allocation failed\n"); + rets = -ENOMEM; + goto out; + } + rets = mei_io_cb_alloc_req_buf(write_cb, length); + if (rets) + goto out; + + rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); + if (rets) { + dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } + + if (cl == &dev->iamthif_cl) { + rets = mei_amthif_write(dev, write_cb); + + if (rets) { + dev_err(&dev->pdev->dev, + "amthif write failed with status = %d\n", rets); + goto out; + } + mutex_unlock(&dev->device_lock); + return length; + } + + rets = mei_cl_write(cl, write_cb, false); +out: + mutex_unlock(&dev->device_lock); + if (rets < 0) + mei_io_cb_free(write_cb); + return rets; +} + +/** + * mei_ioctl_connect_client - the connect to fw client IOCTL function + * + * @dev: the device structure + * @data: IOCTL connect data, input and output parameters + * @file: private data of the file object + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. 
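+ * + * Note: a connect request for the AMTHIF UUID is satisfied by the shared + * dev->iamthif_cl; the caller's private client is unlinked and freed.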
+ */ +static int mei_ioctl_connect_client(struct file *file, + struct mei_connect_client_data *data) +{ + struct mei_device *dev; + struct mei_client *client; + struct mei_cl *cl; + int i; + int rets; + + cl = file->private_data; + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto end; + } + + if (cl->state != MEI_FILE_INITIALIZING && + cl->state != MEI_FILE_DISCONNECTED) { + rets = -EBUSY; + goto end; + } + + /* find ME client we're trying to connect to */ + i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); + if (i < 0 || dev->me_clients[i].props.fixed_address) { + dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", + &data->in_client_uuid); + rets = -ENOTTY; + goto end; + } + + cl->me_client_id = dev->me_clients[i].client_id; + + dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", + cl->me_client_id); + dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", + dev->me_clients[i].props.protocol_version); + dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n", + dev->me_clients[i].props.max_msg_length); + + /* if we're connecting to amthif client then we will use the + * existing connection + */ + if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { + dev_dbg(&dev->pdev->dev, "FW Client is amthi\n"); + if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { + rets = -ENODEV; + goto end; + } + mei_cl_unlink(cl); + + kfree(cl); + cl = NULL; + dev->iamthif_open_count++; + file->private_data = &dev->iamthif_cl; + + client = &data->out_client_properties; + client->max_msg_length = + dev->me_clients[i].props.max_msg_length; + client->protocol_version = + dev->me_clients[i].props.protocol_version; + rets = dev->iamthif_cl.status; + + goto end; + } + + + /* prepare the output buffer */ + client = &data->out_client_properties; + client->max_msg_length = dev->me_clients[i].props.max_msg_length; + client->protocol_version = dev->me_clients[i].props.protocol_version; + dev_dbg(&dev->pdev->dev, "Can connect?\n"); + + + rets = mei_cl_connect(cl, file); + +end: + return rets; +} + + +/** + * mei_ioctl - the IOCTL function + * + * @file: pointer to file structure + * @cmd: ioctl command + * @data: pointer to mei message structure + * + * returns 0 on success , <0 on error + */ +static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) +{ + struct mei_device *dev; + struct mei_cl *cl = file->private_data; + struct mei_connect_client_data *connect_data = NULL; + int rets; + + if (cmd != IOCTL_MEI_CONNECT_CLIENT) + return -EINVAL; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd); + + mutex_lock(&dev->device_lock); + if (dev->dev_state != MEI_DEV_ENABLED) { + rets = -ENODEV; + goto out; + } + + dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); + + connect_data = kzalloc(sizeof(struct mei_connect_client_data), + GFP_KERNEL); + if (!connect_data) { + rets = -ENOMEM; + goto out; + } + dev_dbg(&dev->pdev->dev, "copy connect data from user\n"); + if (copy_from_user(connect_data, (char __user *)data, + sizeof(struct mei_connect_client_data))) { + dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } + + rets = mei_ioctl_connect_client(file, connect_data); + + /* if all is ok, copying the data back to user. 
*/ + if (rets) + goto out; + + dev_dbg(&dev->pdev->dev, "copy connect data to user\n"); + if (copy_to_user((char __user *)data, connect_data, + sizeof(struct mei_connect_client_data))) { + dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto out; + } + +out: + kfree(connect_data); + mutex_unlock(&dev->device_lock); + return rets; +} + +/** + * mei_compat_ioctl - the compat IOCTL function + * + * @file: pointer to file structure + * @cmd: ioctl command + * @data: pointer to mei message structure + * + * returns 0 on success , <0 on error + */ +#ifdef CONFIG_COMPAT +static long mei_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long data) +{ + return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data)); +} +#endif + + +/** + * mei_poll - the poll function + * + * @file: pointer to file structure + * @wait: pointer to poll_table structure + * + * returns poll mask + */ +static unsigned int mei_poll(struct file *file, poll_table *wait) +{ + struct mei_cl *cl = file->private_data; + struct mei_device *dev; + unsigned int mask = 0; + + if (WARN_ON(!cl || !cl->dev)) + return POLLERR; + + dev = cl->dev; + + mutex_lock(&dev->device_lock); + + if (!mei_cl_is_connected(cl)) { + mask = POLLERR; + goto out; + } + + mutex_unlock(&dev->device_lock); + + + if (cl == &dev->iamthif_cl) + return mei_amthif_poll(dev, file, wait); + + poll_wait(file, &cl->tx_wait, wait); + + mutex_lock(&dev->device_lock); + + if (!mei_cl_is_connected(cl)) { + mask = POLLERR; + goto out; + } + + mask |= (POLLIN | POLLRDNORM); + +out: + mutex_unlock(&dev->device_lock); + return mask; +} + +/* + * file operations structure will be used for mei char device. + */ +static const struct file_operations mei_fops = { + .owner = THIS_MODULE, + .read = mei_read, + .unlocked_ioctl = mei_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mei_compat_ioctl, +#endif + .open = mei_open, + .release = mei_release, + .write = mei_write, + .poll = mei_poll, + .llseek = no_llseek +}; + +/* + * Misc Device Struct + */ +static struct miscdevice mei_misc_device = { + .name = "mei", + .fops = &mei_fops, + .minor = MISC_DYNAMIC_MINOR, +}; + + +int mei_register(struct mei_device *dev) +{ + int ret; + mei_misc_device.parent = &dev->pdev->dev; + ret = misc_register(&mei_misc_device); + if (ret) + return ret; + + if (mei_dbgfs_register(dev, mei_misc_device.name)) + dev_err(&dev->pdev->dev, "cannot register debugfs\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(mei_register); + +void mei_deregister(struct mei_device *dev) +{ + mei_dbgfs_deregister(dev); + misc_deregister(&mei_misc_device); + mei_misc_device.parent = NULL; +} +EXPORT_SYMBOL_GPL(mei_deregister); + +static int __init mei_init(void) +{ + return mei_cl_bus_init(); +} + +static void __exit mei_exit(void) +{ + mei_cl_bus_exit(); +} + +module_init(mei_init); +module_exit(mei_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h new file mode 100644 index 00000000000..5c7e990e2f2 --- /dev/null +++ b/drivers/misc/mei/mei_dev.h @@ -0,0 +1,752 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_DEV_H_ +#define _MEI_DEV_H_ + +#include <linux/types.h> +#include <linux/watchdog.h> +#include <linux/poll.h> +#include <linux/mei.h> +#include <linux/mei_cl_bus.h> + +#include "hw.h" +#include "hbm.h" + +/* + * watchdog definition + */ +#define MEI_WD_HDR_SIZE 4 +#define MEI_WD_STOP_MSG_SIZE MEI_WD_HDR_SIZE +#define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16) + +#define MEI_WD_DEFAULT_TIMEOUT 120 /* seconds */ +#define MEI_WD_MIN_TIMEOUT 120 /* seconds */ +#define MEI_WD_MAX_TIMEOUT 65535 /* seconds */ + +#define MEI_WD_STOP_TIMEOUT 10 /* msecs */ + +#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0) + +#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32)) + + +/* + * AMTHI Client UUID + */ +extern const uuid_le mei_amthif_guid; + +/* + * Watchdog Client UUID + */ +extern const uuid_le mei_wd_guid; + +/* + * Maximum number of MEI Clients + */ +#define MEI_CLIENTS_MAX 256 + +/* + * maximum number of consecutive resets + */ +#define MEI_MAX_CONSEC_RESET 3 + +/* + * Number of File descriptors/handles + * that can be opened to the driver. + * + * Limit to 255: 256 Total Clients + * minus internal client for MEI Bus Messages + */ +#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) + +/* + * Internal (fixed) host client numbers + */ +#define MEI_HOST_CLIENT_ID_ANY (-1) +#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */ +#define MEI_WD_HOST_CLIENT_ID 1 +#define MEI_IAMTHIF_HOST_CLIENT_ID 2 + + +/* File state */ +enum file_state { + MEI_FILE_INITIALIZING = 0, + MEI_FILE_CONNECTING, + MEI_FILE_CONNECTED, + MEI_FILE_DISCONNECTING, + MEI_FILE_DISCONNECTED +}; + +/* MEI device states */ +enum mei_dev_state { + MEI_DEV_INITIALIZING = 0, + MEI_DEV_INIT_CLIENTS, + MEI_DEV_ENABLED, + MEI_DEV_RESETTING, + MEI_DEV_DISABLED, + MEI_DEV_POWER_DOWN, + MEI_DEV_POWER_UP +}; + +const char *mei_dev_state_str(int state); + +enum iamthif_states { + MEI_IAMTHIF_IDLE, + MEI_IAMTHIF_WRITING, + MEI_IAMTHIF_FLOW_CONTROL, + MEI_IAMTHIF_READING, + MEI_IAMTHIF_READ_COMPLETE +}; + +enum mei_file_transaction_states { + MEI_IDLE, + MEI_WRITING, + MEI_WRITE_COMPLETE, + MEI_FLOW_CONTROL, + MEI_READING, + MEI_READ_COMPLETE +}; + +enum mei_wd_states { + MEI_WD_IDLE, + MEI_WD_RUNNING, + MEI_WD_STOPPING, +}; + +/** + * enum mei_cb_file_ops - file operation associated with the callback + * @MEI_FOP_READ - read + * @MEI_FOP_WRITE - write + * @MEI_FOP_CONNECT - connect + * @MEI_FOP_DISCONNECT_RSP - disconnect response + * @MEI_FOP_OPEN - open + * @MEI_FOP_CLOSE - close + */ +enum mei_cb_file_ops { + MEI_FOP_READ = 0, + MEI_FOP_WRITE, + MEI_FOP_CONNECT, + MEI_FOP_DISCONNECT_RSP, + MEI_FOP_OPEN, + MEI_FOP_CLOSE +}; + +/* + * Intel MEI message data struct + */ +struct mei_msg_data { + u32 size; + unsigned char *data; +}; + +/* Maximum number of processed FW status registers */ +#define MEI_FW_STATUS_MAX 2 + +/* + * struct mei_fw_status - storage of FW status data + * + * @count - number of available elements in the status array + * @status - FW status registers + */ +struct mei_fw_status { + int count; + u32 status[MEI_FW_STATUS_MAX]; +}; + +/** + * struct mei_me_client - representation of me (fw) client + * + * @props - client properties + * @client_id - me client id + * @mei_flow_ctrl_creds - flow control credits + */ +struct mei_me_client { + struct mei_client_properties props; + u8 client_id; + u8 mei_flow_ctrl_creds; +}; + + +struct mei_cl; + +/** + * struct mei_cl_cb - file operation callback structure + * + * @list - link in the callback queue + * @cl - file client who is running this operation + * @fop_type - file operation type + * @request_buffer - request message buffer + * @response_buffer - response message buffer + * @buf_idx - last read index + * @read_time - last read operation time stamp (iamthif) + * @file_object - pointer to file structure + * @internal - communication between driver and FW flag + */ +struct mei_cl_cb { + struct list_head list; + struct mei_cl *cl; + enum mei_cb_file_ops fop_type; + struct mei_msg_data request_buffer; + struct mei_msg_data response_buffer; + unsigned long buf_idx; + unsigned long read_time; + struct file *file_object; + u32 internal:1; +}; + +/* MEI client instance carried as file->private_data */ +struct mei_cl { + struct list_head link; + struct mei_device *dev; + enum file_state state; + wait_queue_head_t tx_wait; + wait_queue_head_t rx_wait; + wait_queue_head_t wait; + int status; + /* ID of client connected */ + u8 host_client_id; + u8 me_client_id; + u8 mei_flow_ctrl_creds; + u8 timer_count; + enum mei_file_transaction_states reading_state; + enum mei_file_transaction_states writing_state; + struct mei_cl_cb *read_cb; + + /* MEI CL bus data */ + struct mei_cl_device *device; + struct list_head device_link; + uuid_le device_uuid; +}; + +/** struct mei_hw_ops + * + * @fw_status - read FW status from PCI config space + * @host_is_ready - query for host readiness + * + * @hw_is_ready - query if hw is ready + * @hw_reset - reset hw + * @hw_start - start hw after reset + * @hw_config - configure hw + * + * @pg_state - power gating state of the device + * @pg_is_enabled - is power gating enabled + * + * @intr_clear - clear pending interrupts + * @intr_enable - enable interrupts + * @intr_disable - disable interrupts + * + * @hbuf_free_slots - query for write buffer empty slots + * @hbuf_is_ready - query if write buffer is empty + * @hbuf_max_len - query for write buffer max len + * + * @write - write a message to FW + * + * @rdbuf_full_slots - query how many slots are filled + * + * @read_hdr - get first 4 bytes (header) + * @read - read a buffer from the FW + */ +struct mei_hw_ops { + + int (*fw_status)(struct mei_device *dev, + struct mei_fw_status *fw_status); + bool (*host_is_ready)(struct mei_device *dev); + + bool (*hw_is_ready)(struct mei_device *dev); + int (*hw_reset)(struct mei_device *dev, bool enable); + int (*hw_start)(struct mei_device *dev); + void (*hw_config)(struct mei_device *dev); + + enum mei_pg_state (*pg_state)(struct mei_device *dev); + bool (*pg_is_enabled)(struct mei_device *dev); + + void (*intr_clear)(struct mei_device *dev); + void (*intr_enable)(struct mei_device *dev); + void (*intr_disable)(struct mei_device *dev); + + int (*hbuf_free_slots)(struct mei_device *dev); + bool (*hbuf_is_ready)(struct mei_device *dev); + size_t (*hbuf_max_len)(const struct mei_device *dev); + + int (*write)(struct mei_device *dev, + struct mei_msg_hdr *hdr, + unsigned char *buf); + + int (*rdbuf_full_slots)(struct mei_device *dev); + + u32 (*read_hdr)(const struct mei_device *dev); + int (*read)(struct mei_device *dev, + unsigned char *buf, unsigned long len); +}; + +/* MEI bus API */ + +/** + * struct mei_cl_ops - MEI CL device ops + * This structure allows ME host clients to implement technology + * specific operations. + * + * @enable: Enable an MEI CL device. Some devices require specific + * HECI commands to initialize completely. + * @disable: Disable an MEI CL device. + * @send: Tx hook for the device. This allows ME host clients to trap + * the device driver buffers before actually physically + * pushing them to the ME. + * @recv: Rx hook for the device. This allows ME host clients to trap the + * ME buffers before forwarding them to the device driver. + */ +struct mei_cl_ops { + int (*enable)(struct mei_cl_device *device); + int (*disable)(struct mei_cl_device *device); + int (*send)(struct mei_cl_device *device, u8 *buf, size_t length); + int (*recv)(struct mei_cl_device *device, u8 *buf, size_t length); +}; + +struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, + uuid_le uuid, char *name, + struct mei_cl_ops *ops); +void mei_cl_remove_device(struct mei_cl_device *device); + +int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length); +int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length); +int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); +void mei_cl_bus_rx_event(struct mei_cl *cl); +void mei_cl_bus_remove_devices(struct mei_device *dev); +int mei_cl_bus_init(void); +void mei_cl_bus_exit(void); + + +/** + * struct mei_cl_device - MEI device handle + * An mei_cl_device pointer is returned from mei_cl_add_device() + * and links MEI bus clients to their actual ME host client pointer. + * Drivers for MEI devices will get an mei_cl_device pointer + * when being probed and shall use it for doing ME bus I/O. + * + * @dev: linux driver model device pointer + * @uuid: me client uuid + * @cl: mei client + * @ops: ME transport ops + * @event_cb: Drivers register this callback to get asynchronous ME + * events (e.g. Rx buffer pending) notifications. + * @events: Events bitmask sent to the driver. + * @priv_data: client private data + */ +struct mei_cl_device { + struct device dev; + + struct mei_cl *cl; + + const struct mei_cl_ops *ops; + + struct work_struct event_work; + mei_cl_event_cb_t event_cb; + void *event_context; + unsigned long events; + + void *priv_data; +}; + +/** + * enum mei_pg_event - power gating transition events + * + * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition + * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete + * @MEI_PG_EVENT_RECEIVED: the driver received pg event + */ +enum mei_pg_event { + MEI_PG_EVENT_IDLE, + MEI_PG_EVENT_WAIT, + MEI_PG_EVENT_RECEIVED, +}; + +/** + * enum mei_pg_state - device internal power gating state + * + * @MEI_PG_OFF: device is not power gated - it is active + * @MEI_PG_ON: device is power gated - it is in lower power state + */ +enum mei_pg_state { + MEI_PG_OFF = 0, + MEI_PG_ON = 1, +}; + +/* + * struct mei_cfg - per device generation configuration + * + * @fw_status - FW status register offsets in PCI config space + * @quirk_probe - device exclusion quirk + */ +struct mei_cfg { + const struct mei_fw_status fw_status; + bool (*quirk_probe)(struct pci_dev *pdev); +}; + + +#define MEI_PCI_DEVICE(dev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ + .driver_data = (kernel_ulong_t)&(cfg) + + +/** + * struct mei_device - MEI private device struct + * + * @reset_count - limits the number of consecutive resets + * @hbm_state - state of host bus message protocol + * @pg_event - power gating event + * + * @hbuf_depth - depth of hardware host/write buffer in slots + * @hbuf_is_ready - query if the host/write buffer is ready + * @wr_msg - the buffer for hbm control messages + * @cfg - per device generation config and ops + */ +struct mei_device { + struct pci_dev *pdev; /* pointer to pci device struct */ + /* + * lists of queues + */ + /* callback queues */ + struct mei_cl_cb read_list; /* driver read queue */ + struct mei_cl_cb write_list; /* driver write queue */ + struct
mei_cl_cb write_waiting_list; /* write waiting queue */ + struct mei_cl_cb ctrl_wr_list; /* managed write IOCTL list */ + struct mei_cl_cb ctrl_rd_list; /* managed read IOCTL list */ + + /* + * list of files + */ + struct list_head file_list; + long open_handle_count; + + /* + * lock for the device + */ + struct mutex device_lock; /* device lock */ + struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */ + + bool recvd_hw_ready; + /* + * waiting queue for receive message from FW + */ + wait_queue_head_t wait_hw_ready; + wait_queue_head_t wait_pg; + wait_queue_head_t wait_recvd_msg; + wait_queue_head_t wait_stop_wd; + + /* + * mei device states + */ + unsigned long reset_count; + enum mei_dev_state dev_state; + enum mei_hbm_state hbm_state; + u16 init_clients_timer; + + /* + * Power Gating support + */ + enum mei_pg_event pg_event; +#ifdef CONFIG_PM_RUNTIME + struct dev_pm_domain pg_domain; +#endif /* CONFIG_PM_RUNTIME */ + + unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */ + u32 rd_msg_hdr; + + /* write buffer */ + u8 hbuf_depth; + bool hbuf_is_ready; + + /* used for control messages */ + struct { + struct mei_msg_hdr hdr; + unsigned char data[128]; + } wr_msg; + + struct hbm_version version; + + struct mei_me_client *me_clients; /* Note: memory has to be allocated */ + DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); + DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); + unsigned long me_clients_num; + unsigned long me_client_presentation_num; + unsigned long me_client_index; + + struct mei_cl wd_cl; + enum mei_wd_states wd_state; + bool wd_pending; + u16 wd_timeout; + unsigned char wd_data[MEI_WD_START_MSG_SIZE]; + + + /* amthif list for cmd waiting */ + struct mei_cl_cb amthif_cmd_list; + /* driver managed amthif list for reading completed amthif cmd data */ + struct mei_cl_cb amthif_rd_complete_list; + struct file *iamthif_file_object; + struct mei_cl iamthif_cl; + struct mei_cl_cb *iamthif_current_cb; + long iamthif_open_count; + int iamthif_mtu; + unsigned long iamthif_timer; + u32 iamthif_stall_timer; + unsigned char *iamthif_msg_buf; /* Note: memory has to be allocated */ + u32 iamthif_msg_buf_size; + u32 iamthif_msg_buf_index; + enum iamthif_states iamthif_state; + bool iamthif_flow_control_pending; + bool iamthif_ioctl; + bool iamthif_canceled; + + struct work_struct init_work; + struct work_struct reset_work; + + /* List of bus devices */ + struct list_head device_list; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ + + + const struct mei_hw_ops *ops; + const struct mei_cfg *cfg; + char hw[0] __aligned(sizeof(void *)); +}; + +static inline unsigned long mei_secs_to_jiffies(unsigned long sec) +{ + return msecs_to_jiffies(sec * MSEC_PER_SEC); +} + +/** + * mei_data2slots - get slots - number of (dwords) from a message length + * + size of the mei header + * @length - size of the messages in bytes + * returns - number of slots + */ +static inline u32 mei_data2slots(size_t length) +{ + return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); +} + +/** + * mei_slots2data- get data in slots - bytes from slots + * @slots - number of available slots + * returns - number of bytes in slots + */ +static inline u32 mei_slots2data(int slots) +{ + return slots * 4; +} + +/* + * mei init function prototypes + */ +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg); +int mei_reset(struct mei_device *dev); +int mei_start(struct mei_device *dev); +int mei_restart(struct mei_device *dev); 
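+ +/* + * Worked example for mei_data2slots()/mei_slots2data() above: the 4 byte + * mei_msg_hdr is charged to the message, so a 10 byte payload costs + * DIV_ROUND_UP(4 + 10, 4) = 4 slots, while mei_slots2data(4) = 16 is the + * byte capacity of 4 free slots. + */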
+void mei_stop(struct mei_device *dev); +void mei_cancel_work(struct mei_device *dev); + +/* + * MEI interrupt functions prototype + */ + +void mei_timer(struct work_struct *work); +int mei_irq_read_handler(struct mei_device *dev, + struct mei_cl_cb *cmpl_list, s32 *slots); + +int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list); +void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list); + +/* + * AMTHIF - AMT Host Interface Functions + */ +void mei_amthif_reset_params(struct mei_device *dev); + +int mei_amthif_host_init(struct mei_device *dev); + +int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb); + +int mei_amthif_read(struct mei_device *dev, struct file *file, + char __user *ubuf, size_t length, loff_t *offset); + +unsigned int mei_amthif_poll(struct mei_device *dev, + struct file *file, poll_table *wait); + +int mei_amthif_release(struct mei_device *dev, struct file *file); + +struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, + struct file *file); + +void mei_amthif_run_next_cmd(struct mei_device *dev); + +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list); + +void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); +int mei_amthif_irq_read_msg(struct mei_device *dev, + struct mei_msg_hdr *mei_hdr, + struct mei_cl_cb *complete_list); +int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); + +/* + * NFC functions + */ +int mei_nfc_host_init(struct mei_device *dev); +void mei_nfc_host_exit(struct mei_device *dev); + +/* + * NFC Client UUID + */ +extern const uuid_le mei_nfc_guid; + +int mei_wd_send(struct mei_device *dev); +int mei_wd_stop(struct mei_device *dev); +int mei_wd_host_init(struct mei_device *dev); +/* + * mei_watchdog_register - Registering watchdog interface + * once we got connection to the WD Client + * @dev - mei device + */ +int mei_watchdog_register(struct mei_device *dev); +/* + * mei_watchdog_unregister - Unregistering watchdog interface + * @dev - mei device + */ +void mei_watchdog_unregister(struct mei_device *dev); + +/* + * Register Access Function + */ + + +static inline void mei_hw_config(struct mei_device *dev) +{ + dev->ops->hw_config(dev); +} + +static inline enum mei_pg_state mei_pg_state(struct mei_device *dev) +{ + return dev->ops->pg_state(dev); +} + +static inline bool mei_pg_is_enabled(struct mei_device *dev) +{ + return dev->ops->pg_is_enabled(dev); +} + +static inline int mei_hw_reset(struct mei_device *dev, bool enable) +{ + return dev->ops->hw_reset(dev, enable); +} + +static inline int mei_hw_start(struct mei_device *dev) +{ + return dev->ops->hw_start(dev); +} + +static inline void mei_clear_interrupts(struct mei_device *dev) +{ + dev->ops->intr_clear(dev); +} + +static inline void mei_enable_interrupts(struct mei_device *dev) +{ + dev->ops->intr_enable(dev); +} + +static inline void mei_disable_interrupts(struct mei_device *dev) +{ + dev->ops->intr_disable(dev); +} + +static inline bool mei_host_is_ready(struct mei_device *dev) +{ + return dev->ops->host_is_ready(dev); +} +static inline bool mei_hw_is_ready(struct mei_device *dev) +{ + return dev->ops->hw_is_ready(dev); +} + +static inline bool mei_hbuf_is_ready(struct mei_device *dev) +{ + return dev->ops->hbuf_is_ready(dev); +} + +static inline int mei_hbuf_empty_slots(struct mei_device *dev) +{ + return dev->ops->hbuf_free_slots(dev); +} + +static inline size_t mei_hbuf_max_len(const struct mei_device *dev) +{ + return 
dev->ops->hbuf_max_len(dev); +} + +static inline int mei_write_message(struct mei_device *dev, + struct mei_msg_hdr *hdr, + unsigned char *buf) +{ + return dev->ops->write(dev, hdr, buf); +} + +static inline u32 mei_read_hdr(const struct mei_device *dev) +{ + return dev->ops->read_hdr(dev); +} + +static inline void mei_read_slots(struct mei_device *dev, + unsigned char *buf, unsigned long len) +{ + dev->ops->read(dev, buf, len); +} + +static inline int mei_count_full_read_slots(struct mei_device *dev) +{ + return dev->ops->rdbuf_full_slots(dev); +} + +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status); + +#define FW_STS_FMT "%08X %08X" +#define FW_STS_PRM(fw_status) \ + (fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \ + (fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF + +bool mei_hbuf_acquire(struct mei_device *dev); + +bool mei_write_is_idle(struct mei_device *dev); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +int mei_dbgfs_register(struct mei_device *dev, const char *name); +void mei_dbgfs_deregister(struct mei_device *dev); +#else +static inline int mei_dbgfs_register(struct mei_device *dev, const char *name) +{ + return 0; +} +static inline void mei_dbgfs_deregister(struct mei_device *dev) {} +#endif /* CONFIG_DEBUG_FS */ + +int mei_register(struct mei_device *dev); +void mei_deregister(struct mei_device *dev); + +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" +#define MEI_HDR_PRM(hdr) \ + (hdr)->host_addr, (hdr)->me_addr, \ + (hdr)->length, (hdr)->internal, (hdr)->msg_complete + +#endif diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c new file mode 100644 index 00000000000..3095fc514a6 --- /dev/null +++ b/drivers/misc/mei/nfc.c @@ -0,0 +1,558 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/mei_cl_bus.h> + +#include "mei_dev.h" +#include "client.h" + +struct mei_nfc_cmd { + u8 command; + u8 status; + u16 req_id; + u32 reserved; + u16 data_size; + u8 sub_command; + u8 data[]; +} __packed; + +struct mei_nfc_reply { + u8 command; + u8 status; + u16 req_id; + u32 reserved; + u16 data_size; + u8 sub_command; + u8 reply_status; + u8 data[]; +} __packed; + +struct mei_nfc_if_version { + u8 radio_version_sw[3]; + u8 reserved[3]; + u8 radio_version_hw[3]; + u8 i2c_addr; + u8 fw_ivn; + u8 vendor_id; + u8 radio_type; +} __packed; + +struct mei_nfc_connect { + u8 fw_ivn; + u8 vendor_id; +} __packed; + +struct mei_nfc_connect_resp { + u8 fw_ivn; + u8 vendor_id; + u16 me_major; + u16 me_minor; + u16 me_hotfix; + u16 me_build; +} __packed; + +struct mei_nfc_hci_hdr { + u8 cmd; + u8 status; + u16 req_id; + u32 reserved; + u16 data_size; +} __packed; + +#define MEI_NFC_CMD_MAINTENANCE 0x00 +#define MEI_NFC_CMD_HCI_SEND 0x01 +#define MEI_NFC_CMD_HCI_RECV 0x02 + +#define MEI_NFC_SUBCMD_CONNECT 0x00 +#define MEI_NFC_SUBCMD_IF_VERSION 0x01 + +#define MEI_NFC_HEADER_SIZE 10 + +/** mei_nfc_dev - NFC mei device + * + * @cl: NFC host client + * @cl_info: NFC info host client + * @init_work: perform connection to the info client + * @fw_ivn: NFC Interface Version Number + * @vendor_id: NFC manufacturer ID + * @radio_type: NFC radio type + */ +struct mei_nfc_dev { + struct mei_cl *cl; + struct mei_cl *cl_info; + struct work_struct init_work; + wait_queue_head_t send_wq; + u8 fw_ivn; + u8 vendor_id; + u8 radio_type; + char *bus_name; + + u16 req_id; + u16 recv_req_id; +}; + +static struct mei_nfc_dev nfc_dev; + +/* UUIDs for NFC F/W clients */ +const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, + 0x94, 0xd4, 0x50, 0x26, + 0x67, 0x23, 0x77, 0x5c); + +static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d, + 0x48, 0xa4, 0xef, 0xab, + 0xba, 0x8a, 0x12, 0x06); + +/* Vendors */ +#define MEI_NFC_VENDOR_INSIDE 0x00 +#define MEI_NFC_VENDOR_NXP 0x01 + +/* Radio types */ +#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00 +#define MEI_NFC_VENDOR_NXP_PN544 0x01 + +static void mei_nfc_free(struct mei_nfc_dev *ndev) +{ + if (ndev->cl) { + list_del(&ndev->cl->device_link); + mei_cl_unlink(ndev->cl); + kfree(ndev->cl); + } + + if (ndev->cl_info) { + list_del(&ndev->cl_info->device_link); + mei_cl_unlink(ndev->cl_info); + kfree(ndev->cl_info); + } + + memset(ndev, 0, sizeof(struct mei_nfc_dev)); +} + +static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) +{ + struct mei_device *dev; + + if (!ndev->cl) + return -ENODEV; + + dev = ndev->cl->dev; + + switch (ndev->vendor_id) { + case MEI_NFC_VENDOR_INSIDE: + switch (ndev->radio_type) { + case MEI_NFC_VENDOR_INSIDE_UREAD: + ndev->bus_name = "microread"; + return 0; + + default: + dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", + ndev->radio_type); + + return -EINVAL; + } + + case MEI_NFC_VENDOR_NXP: + switch (ndev->radio_type) { + case MEI_NFC_VENDOR_NXP_PN544: + ndev->bus_name = "pn544"; + return 0; + default: + dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", + ndev->radio_type); + + return -EINVAL; + } + + default: + dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n", + ndev->vendor_id); + + return -EINVAL; + } + + return 0; +} + +static int mei_nfc_connect(struct mei_nfc_dev *ndev) +{ + struct mei_device *dev; + struct mei_cl *cl; + struct 
mei_nfc_cmd *cmd, *reply; + struct mei_nfc_connect *connect; + struct mei_nfc_connect_resp *connect_resp; + size_t connect_length, connect_resp_length; + int bytes_recv, ret; + + cl = ndev->cl; + dev = cl->dev; + + connect_length = sizeof(struct mei_nfc_cmd) + + sizeof(struct mei_nfc_connect); + + connect_resp_length = sizeof(struct mei_nfc_cmd) + + sizeof(struct mei_nfc_connect_resp); + + cmd = kzalloc(connect_length, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + connect = (struct mei_nfc_connect *)cmd->data; + + reply = kzalloc(connect_resp_length, GFP_KERNEL); + if (!reply) { + kfree(cmd); + return -ENOMEM; + } + + connect_resp = (struct mei_nfc_connect_resp *)reply->data; + + cmd->command = MEI_NFC_CMD_MAINTENANCE; + cmd->data_size = 3; + cmd->sub_command = MEI_NFC_SUBCMD_CONNECT; + connect->fw_ivn = ndev->fw_ivn; + connect->vendor_id = ndev->vendor_id; + + ret = __mei_cl_send(cl, (u8 *)cmd, connect_length); + if (ret < 0) { + dev_err(&dev->pdev->dev, "Could not send connect cmd\n"); + goto err; + } + + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, connect_resp_length); + if (bytes_recv < 0) { + dev_err(&dev->pdev->dev, "Could not read connect response\n"); + ret = bytes_recv; + goto err; + } + + dev_info(&dev->pdev->dev, "IVN 0x%x Vendor ID 0x%x\n", + connect_resp->fw_ivn, connect_resp->vendor_id); + + dev_info(&dev->pdev->dev, "ME FW %d.%d.%d.%d\n", + connect_resp->me_major, connect_resp->me_minor, + connect_resp->me_hotfix, connect_resp->me_build); + + ret = 0; + +err: + kfree(reply); + kfree(cmd); + + return ret; +} + +static int mei_nfc_if_version(struct mei_nfc_dev *ndev) +{ + struct mei_device *dev; + struct mei_cl *cl; + + struct mei_nfc_cmd cmd; + struct mei_nfc_reply *reply = NULL; + struct mei_nfc_if_version *version; + size_t if_version_length; + int bytes_recv, ret; + + cl = ndev->cl_info; + dev = cl->dev; + + memset(&cmd, 0, sizeof(struct mei_nfc_cmd)); + cmd.command = MEI_NFC_CMD_MAINTENANCE; + cmd.data_size = 1; + cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION; + + ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd)); + if (ret < 0) { + dev_err(&dev->pdev->dev, "Could not send IF version cmd\n"); + return ret; + } + + /* to be sure on the stack we alloc memory */ + if_version_length = sizeof(struct mei_nfc_reply) + + sizeof(struct mei_nfc_if_version); + + reply = kzalloc(if_version_length, GFP_KERNEL); + if (!reply) + return -ENOMEM; + + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); + if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { + dev_err(&dev->pdev->dev, "Could not read IF version\n"); + ret = -EIO; + goto err; + } + + version = (struct mei_nfc_if_version *)reply->data; + + ndev->fw_ivn = version->fw_ivn; + ndev->vendor_id = version->vendor_id; + ndev->radio_type = version->radio_type; + +err: + kfree(reply); + return ret; +} + +static int mei_nfc_enable(struct mei_cl_device *cldev) +{ + struct mei_device *dev; + struct mei_nfc_dev *ndev = &nfc_dev; + int ret; + + dev = ndev->cl->dev; + + ret = mei_nfc_connect(ndev); + if (ret < 0) { + dev_err(&dev->pdev->dev, "Could not connect to NFC"); + return ret; + } + + return 0; +} + +static int mei_nfc_disable(struct mei_cl_device *cldev) +{ + return 0; +} + +static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ + struct mei_device *dev; + struct mei_nfc_dev *ndev; + struct mei_nfc_hci_hdr *hdr; + u8 *mei_buf; + int err; + + ndev = (struct mei_nfc_dev *) cldev->priv_data; + dev = ndev->cl->dev; + + mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, 
GFP_KERNEL); + if (!mei_buf) + return -ENOMEM; + + hdr = (struct mei_nfc_hci_hdr *) mei_buf; + hdr->cmd = MEI_NFC_CMD_HCI_SEND; + hdr->status = 0; + hdr->req_id = ndev->req_id; + hdr->reserved = 0; + hdr->data_size = length; + + memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length); + + err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE); + /* the buffer has been handed to the FW queue; free it in all cases so an + * error return does not leak it */ + kfree(mei_buf); + if (err < 0) + return err; + + if (!wait_event_interruptible_timeout(ndev->send_wq, + ndev->recv_req_id == ndev->req_id, HZ)) { + dev_err(&dev->pdev->dev, "NFC MEI command timeout\n"); + err = -ETIME; + } else { + ndev->req_id++; + } + + return err; +} + +static int mei_nfc_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ + struct mei_nfc_dev *ndev; + struct mei_nfc_hci_hdr *hci_hdr; + int received_length; + + ndev = (struct mei_nfc_dev *)cldev->priv_data; + + received_length = __mei_cl_recv(ndev->cl, buf, length); + if (received_length < 0) + return received_length; + + hci_hdr = (struct mei_nfc_hci_hdr *) buf; + + if (hci_hdr->cmd == MEI_NFC_CMD_HCI_SEND) { + ndev->recv_req_id = hci_hdr->req_id; + wake_up(&ndev->send_wq); + + return 0; + } + + return received_length; +} + +static struct mei_cl_ops nfc_ops = { + .enable = mei_nfc_enable, + .disable = mei_nfc_disable, + .send = mei_nfc_send, + .recv = mei_nfc_recv, +}; + +static void mei_nfc_init(struct work_struct *work) +{ + struct mei_device *dev; + struct mei_cl_device *cldev; + struct mei_nfc_dev *ndev; + struct mei_cl *cl_info; + + ndev = container_of(work, struct mei_nfc_dev, init_work); + + cl_info = ndev->cl_info; + dev = cl_info->dev; + + mutex_lock(&dev->device_lock); + + if (mei_cl_connect(cl_info, NULL) < 0) { + mutex_unlock(&dev->device_lock); + dev_err(&dev->pdev->dev, + "Could not connect to the NFC INFO ME client\n"); + + goto err; + } + + mutex_unlock(&dev->device_lock); + + if (mei_nfc_if_version(ndev) < 0) { + dev_err(&dev->pdev->dev, "Could not get the NFC interface version\n"); + + goto err; + } + + dev_info(&dev->pdev->dev, + "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", + ndev->fw_ivn, ndev->vendor_id, ndev->radio_type); + + mutex_lock(&dev->device_lock); + + if (mei_cl_disconnect(cl_info) < 0) { + mutex_unlock(&dev->device_lock); + dev_err(&dev->pdev->dev, + "Could not disconnect the NFC INFO ME client\n"); + + goto err; + } + + mutex_unlock(&dev->device_lock); + + if (mei_nfc_build_bus_name(ndev) < 0) { + dev_err(&dev->pdev->dev, + "Could not build the bus ID name\n"); + return; + } + + cldev = mei_cl_add_device(dev, mei_nfc_guid, ndev->bus_name, &nfc_ops); + if (!cldev) { + dev_err(&dev->pdev->dev, + "Could not add the NFC device to the MEI bus\n"); + + goto err; + } + + cldev->priv_data = ndev; + + + return; + +err: + mutex_lock(&dev->device_lock); + mei_nfc_free(ndev); + mutex_unlock(&dev->device_lock); + + return; +} + + +int mei_nfc_host_init(struct mei_device *dev) +{ + struct mei_nfc_dev *ndev = &nfc_dev; + struct mei_cl *cl_info, *cl = NULL; + int i, ret; + + /* already initialized */ + if (ndev->cl_info) + return 0; + + ndev->cl_info = mei_cl_allocate(dev); + ndev->cl = mei_cl_allocate(dev); + + cl = ndev->cl; + cl_info = ndev->cl_info; + + if (!cl || !cl_info) { + ret = -ENOMEM; + goto err; + } + + /* check for valid client id */ + i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); + if (i < 0) { + dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); + ret = -ENOTTY; + goto err; + } + + cl_info->me_client_id = dev->me_clients[i].client_id; + + ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY); + if (ret) + goto 
err; + + cl_info->device_uuid = mei_nfc_info_guid; + + list_add_tail(&cl_info->device_link, &dev->device_list); + + /* check for valid client id */ + i = mei_me_cl_by_uuid(dev, &mei_nfc_guid); + if (i < 0) { + dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); + ret = -ENOTTY; + goto err; + } + + cl->me_client_id = dev->me_clients[i].client_id; + + ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY); + if (ret) + goto err; + + cl->device_uuid = mei_nfc_guid; + + + list_add_tail(&cl->device_link, &dev->device_list); + + ndev->req_id = 1; + + INIT_WORK(&ndev->init_work, mei_nfc_init); + init_waitqueue_head(&ndev->send_wq); + schedule_work(&ndev->init_work); + + return 0; + +err: + mei_nfc_free(ndev); + + return ret; +} + +void mei_nfc_host_exit(struct mei_device *dev) +{ + struct mei_nfc_dev *ndev = &nfc_dev; + cancel_work_sync(&ndev->init_work); +} + + diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c new file mode 100644 index 00000000000..1b46c64a649 --- /dev/null +++ b/drivers/misc/mei/pci-me.c @@ -0,0 +1,488 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/aio.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/compat.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> + +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" +#include "hw-me-regs.h" +#include "hw-me.h" + +/* mei_pci_tbl - PCI Device ID Table */ +static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)}, + 
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)}, + + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl); + +#ifdef CONFIG_PM_RUNTIME +static inline void mei_me_set_pm_domain(struct mei_device *dev); +static inline void mei_me_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_me_set_pm_domain(struct mei_device *dev) {} +static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM_RUNTIME */ + +/** + * mei_me_quirk_probe - probe for devices that don't have a valid ME interface + * + * @pdev: PCI device structure + * @cfg: per generation config + * + * returns true if ME Interface is valid, false otherwise + */ +static bool mei_me_quirk_probe(struct pci_dev *pdev, + const struct mei_cfg *cfg) +{ + if (cfg->quirk_probe && cfg->quirk_probe(pdev)) { + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; + } + + return true; +} + +/** + * mei_me_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_me_pci_tbl + * + * returns 0 on success, <0 on failure. 
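+ * + * Note: on hardware that supports power gating the probe path below drops a + * runtime PM reference (pm_runtime_put_noidle()) once initialization + * succeeds, so the device can later auto-suspend through the PM domain + * callbacks set up by mei_me_set_pm_domain().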
+ */ +static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); + struct mei_device *dev; + struct mei_me_hw *hw; + int err; + + + if (!mei_me_quirk_probe(pdev, cfg)) + return -ENODEV; + + /* enable pci dev */ + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device.\n"); + goto end; + } + /* set PCI host mastering */ + pci_set_master(pdev); + /* pci request regions for mei driver */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "failed to get pci regions.\n"); + goto disable_device; + } + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + } + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto release_regions; + } + + + /* allocates and initializes the mei dev structure */ + dev = mei_me_dev_init(pdev, cfg); + if (!dev) { + err = -ENOMEM; + goto release_regions; + } + hw = to_me_hw(dev); + /* mapping IO device memory */ + hw->mem_addr = pci_iomap(pdev, 0, 0); + if (!hw->mem_addr) { + dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); + err = -ENOMEM; + goto free_device; + } + pci_enable_msi(pdev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_me_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", + pdev->irq); + goto disable_msi; + } + + if (mei_start(dev)) { + dev_err(&pdev->dev, "init hw failure.\n"); + err = -ENODEV; + goto release_irq; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + + err = mei_register(dev); + if (err) + goto release_irq; + + pci_set_drvdata(pdev, dev); + + schedule_delayed_work(&dev->timer_work, HZ); + + /* + * For not wake-able HW runtime pm framework + * can't be used on pci device level. + * Use domain runtime pm callbacks instead. + */ + if (!pci_dev_run_wake(pdev)) + mei_me_set_pm_domain(dev); + + if (mei_pg_is_enabled(dev)) + pm_runtime_put_noidle(&pdev->dev); + + dev_dbg(&pdev->dev, "initialization successful.\n"); + + return 0; + +release_irq: + mei_cancel_work(dev); + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); +disable_msi: + pci_disable_msi(pdev); + pci_iounmap(pdev, hw->mem_addr); +free_device: + kfree(dev); +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +end: + dev_err(&pdev->dev, "initialization failed.\n"); + return err; +} + +/** + * mei_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
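+ * + * The teardown mirrors mei_me_probe() in reverse: stop the device, drop the + * PM domain, disable and free the interrupt, unmap the registers and + * release the PCI resources.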
+ */ +static void mei_me_remove(struct pci_dev *pdev) +{ + struct mei_device *dev; + struct mei_me_hw *hw; + + dev = pci_get_drvdata(pdev); + if (!dev) + return; + + if (mei_pg_is_enabled(dev)) + pm_runtime_get_noresume(&pdev->dev); + + hw = to_me_hw(dev); + + + dev_dbg(&pdev->dev, "stop\n"); + mei_stop(dev); + + if (!pci_dev_run_wake(pdev)) + mei_me_unset_pm_domain(dev); + + /* disable interrupts */ + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + if (hw->mem_addr) + pci_iounmap(pdev, hw->mem_addr); + + mei_deregister(dev); + + kfree(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + +} +#ifdef CONFIG_PM_SLEEP +static int mei_me_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev = pci_get_drvdata(pdev); + + if (!dev) + return -ENODEV; + + dev_dbg(&pdev->dev, "suspend\n"); + + mei_stop(dev); + + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + return 0; +} + +static int mei_me_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int err; + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + pci_enable_msi(pdev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_me_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", + pdev->irq); + return err; + } + + err = mei_restart(dev); + if (err) + return err; + + /* Start timer if stopped in suspend */ + schedule_delayed_work(&dev->timer_work, HZ); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_me_pm_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + + dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + if (mei_write_is_idle(dev)) + pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2); + + return -EBUSY; +} + +static int mei_me_pm_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (mei_write_is_idle(dev)) + ret = mei_me_pg_set_sync(dev); + else + ret = -EAGAIN; + + mutex_unlock(&dev->device_lock); + + dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret); + + return ret; +} + +static int mei_me_pm_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: me: runtime resume\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + ret = mei_me_pg_unset_sync(dev); + + mutex_unlock(&dev->device_lock); + + dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret); + + return ret; +} + +/** + * mei_me_set_pm_domain - fill and set pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_me_set_pm_domain(struct mei_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (pdev->dev.bus && pdev->dev.bus->pm) { + dev->pg_domain.ops = *pdev->dev.bus->pm; + + 
dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend; + dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume; + dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle; + + pdev->dev.pm_domain = &dev->pg_domain; + } +} + +/** + * mei_me_unset_pm_domain - clean pm domain structure for the device + * + * @dev: mei_device + */ +static inline void mei_me_unset_pm_domain(struct mei_device *dev) +{ + /* stop using pm callbacks if any */ + dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops mei_me_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, + mei_me_pci_resume) + SET_RUNTIME_PM_OPS( + mei_me_pm_runtime_suspend, + mei_me_pm_runtime_resume, + mei_me_pm_runtime_idle) +}; + +#define MEI_ME_PM_OPS (&mei_me_pm_ops) +#else +#define MEI_ME_PM_OPS NULL +#endif /* CONFIG_PM */ +/* + * PCI driver structure + */ +static struct pci_driver mei_me_driver = { + .name = KBUILD_MODNAME, + .id_table = mei_me_pci_tbl, + .probe = mei_me_probe, + .remove = mei_me_remove, + .shutdown = mei_me_remove, + .driver.pm = MEI_ME_PM_OPS, +}; + +module_pci_driver(mei_me_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c new file mode 100644 index 00000000000..2343c6236df --- /dev/null +++ b/drivers/misc/mei/pci-txe.c @@ -0,0 +1,436 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + + +#include "mei_dev.h" +#include "hw-txe.h" + +static const struct pci_device_id mei_txe_pci_tbl[] = { + {MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); + +#ifdef CONFIG_PM_RUNTIME +static inline void mei_txe_set_pm_domain(struct mei_device *dev); +static inline void mei_txe_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM_RUNTIME */ + +static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) +{ + int i; + for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { + if (hw->mem_addr[i]) { + pci_iounmap(pdev, hw->mem_addr[i]); + hw->mem_addr[i] = NULL; + } + } +} +/** + * mei_txe_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_txe_pci_tbl + * + * returns 0 on success, <0 on failure. 
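+ * + * Unlike the ME variant, which maps a single register BAR, the TXE probe + * below maps every BAR from SEC_BAR up to NUM_OF_MEM_BARS and uses a + * 36-bit DMA mask, falling back to a 32-bit one.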
+ */ +static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); + struct mei_device *dev; + struct mei_txe_hw *hw; + int err; + int i; + + /* enable pci dev */ + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device.\n"); + goto end; + } + /* set PCI host mastering */ + pci_set_master(pdev); + /* pci request regions for mei driver */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "failed to get pci regions.\n"); + goto disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No suitable DMA available.\n"); + goto release_regions; + } + } + + /* allocates and initializes the mei dev structure */ + dev = mei_txe_dev_init(pdev, cfg); + if (!dev) { + err = -ENOMEM; + goto release_regions; + } + hw = to_txe_hw(dev); + + /* mapping IO device memory */ + for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { + hw->mem_addr[i] = pci_iomap(pdev, i, 0); + if (!hw->mem_addr[i]) { + dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); + err = -ENOMEM; + goto free_device; + } + } + + + pci_enable_msi(pdev); + + /* clear spurious interrupts */ + mei_clear_interrupts(dev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_txe_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_txe_irq_quick_handler, + mei_txe_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n", + pdev->irq); + goto free_device; + } + + if (mei_start(dev)) { + dev_err(&pdev->dev, "init hw failure.\n"); + err = -ENODEV; + goto release_irq; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + + err = mei_register(dev); + if (err) + goto release_irq; + + pci_set_drvdata(pdev, dev); + + /* + * For not wake-able HW runtime pm framework + * can't be used on pci device level. + * Use domain runtime pm callbacks instead. + */ + if (!pci_dev_run_wake(pdev)) + mei_txe_set_pm_domain(dev); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +release_irq: + + mei_cancel_work(dev); + + /* disable interrupts */ + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + +free_device: + mei_txe_pci_iounmap(pdev, hw); + + kfree(dev); +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +end: + dev_err(&pdev->dev, "initialization failed.\n"); + return err; +} + +/** + * mei_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
+ */ +static void mei_txe_remove(struct pci_dev *pdev) +{ + struct mei_device *dev; + struct mei_txe_hw *hw; + + dev = pci_get_drvdata(pdev); + if (!dev) { + dev_err(&pdev->dev, "mei: dev =NULL\n"); + return; + } + + pm_runtime_get_noresume(&pdev->dev); + + hw = to_txe_hw(dev); + + mei_stop(dev); + + if (!pci_dev_run_wake(pdev)) + mei_txe_unset_pm_domain(dev); + + /* disable interrupts */ + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + pci_set_drvdata(pdev, NULL); + + mei_txe_pci_iounmap(pdev, hw); + + mei_deregister(dev); + + kfree(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + + +#ifdef CONFIG_PM_SLEEP +static int mei_txe_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev = pci_get_drvdata(pdev); + + if (!dev) + return -ENODEV; + + dev_dbg(&pdev->dev, "suspend\n"); + + mei_stop(dev); + + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + return 0; +} + +static int mei_txe_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int err; + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + pci_enable_msi(pdev); + + mei_clear_interrupts(dev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_txe_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_txe_irq_quick_handler, + mei_txe_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", + pdev->irq); + return err; + } + + err = mei_restart(dev); + + return err; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_txe_pm_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + + dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + if (mei_write_is_idle(dev)) + pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); + + return -EBUSY; +} +static int mei_txe_pm_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (mei_write_is_idle(dev)) + ret = mei_txe_aliveness_set_sync(dev, 0); + else + ret = -EAGAIN; + + /* + * If everything is okay we're about to enter PCI low + * power state (D3) therefor we need to disable the + * interrupts towards host. 
+ * However if device is not wakeable we do not enter + * D-low state and we need to keep the interrupt kicking + */ + if (!ret && pci_dev_run_wake(pdev)) + mei_disable_interrupts(dev); + + dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); + + mutex_unlock(&dev->device_lock); + return ret; +} + +static int mei_txe_pm_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + mei_enable_interrupts(dev); + + ret = mei_txe_aliveness_set_sync(dev, 1); + + mutex_unlock(&dev->device_lock); + + dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); + + return ret; +} + +/** + * mei_txe_set_pm_domain - fill and set pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_set_pm_domain(struct mei_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (pdev->dev.bus && pdev->dev.bus->pm) { + dev->pg_domain.ops = *pdev->dev.bus->pm; + + dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend; + dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume; + dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle; + + pdev->dev.pm_domain = &dev->pg_domain; + } +} + +/** + * mei_txe_unset_pm_domain - clean pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) +{ + /* stop using pm callbacks if any */ + dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops mei_txe_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, + mei_txe_pci_resume) + SET_RUNTIME_PM_OPS( + mei_txe_pm_runtime_suspend, + mei_txe_pm_runtime_resume, + mei_txe_pm_runtime_idle) +}; + +#define MEI_TXE_PM_OPS (&mei_txe_pm_ops) +#else +#define MEI_TXE_PM_OPS NULL +#endif /* CONFIG_PM */ + +/* + * PCI driver structure + */ +static struct pci_driver mei_txe_driver = { + .name = KBUILD_MODNAME, + .id_table = mei_txe_pci_tbl, + .probe = mei_txe_probe, + .remove = mei_txe_remove, + .shutdown = mei_txe_remove, + .driver.pm = MEI_TXE_PM_OPS, +}; + +module_pci_driver(mei_txe_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c new file mode 100644 index 00000000000..a84a664dfcc --- /dev/null +++ b/drivers/misc/mei/wd.c @@ -0,0 +1,401 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/watchdog.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; +static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 }; + +/* + * AMT Watchdog Device + */ +#define INTEL_AMT_WATCHDOG_ID "INTCAMT" + +/* UUIDs for AMT F/W clients */ +const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89, + 0x9D, 0xA9, 0x15, 0x14, 0xCB, + 0x32, 0xAB); + +static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) +{ + dev_dbg(&dev->pdev->dev, "wd: set timeout=%d.\n", timeout); + memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE); + memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16)); +} + +/** + * mei_wd_host_init - connect to the watchdog client + * + * @dev: the device structure + * + * returns -ENOTTY if wd client cannot be found + * -EIO if write has failed + * 0 on success + */ +int mei_wd_host_init(struct mei_device *dev) +{ + struct mei_cl *cl = &dev->wd_cl; + int id; + int ret; + + mei_cl_init(cl, dev); + + dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT; + dev->wd_state = MEI_WD_IDLE; + + + /* check for valid client id */ + id = mei_me_cl_by_uuid(dev, &mei_wd_guid); + if (id < 0) { + dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); + return -ENOTTY; + } + + cl->me_client_id = dev->me_clients[id].client_id; + + ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); + + if (ret < 0) { + dev_info(&dev->pdev->dev, "wd: failed link client\n"); + return ret; + } + + ret = mei_cl_connect(cl, NULL); + + if (ret) { + dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret); + mei_cl_unlink(cl); + return ret; + } + + ret = mei_watchdog_register(dev); + if (ret) { + mei_cl_disconnect(cl); + mei_cl_unlink(cl); + } + return ret; +} + +/** + * mei_wd_send - sends watch dog message to fw. + * + * @dev: the device structure + * + * returns 0 if success, + * -EIO when message send fails + * -EINVAL when invalid message is to be sent + * -ENODEV on flow control failure + */ +int mei_wd_send(struct mei_device *dev) +{ + struct mei_cl *cl = &dev->wd_cl; + struct mei_msg_hdr hdr; + int ret; + + hdr.host_addr = cl->host_client_id; + hdr.me_addr = cl->me_client_id; + hdr.msg_complete = 1; + hdr.reserved = 0; + hdr.internal = 0; + + if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) + hdr.length = MEI_WD_START_MSG_SIZE; + else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) + hdr.length = MEI_WD_STOP_MSG_SIZE; + else { + dev_err(&dev->pdev->dev, "wd: invalid message is to be sent, aborting\n"); + return -EINVAL; + } + + ret = mei_write_message(dev, &hdr, dev->wd_data); + if (ret) { + dev_err(&dev->pdev->dev, "wd: write message failed\n"); + return ret; + } + + ret = mei_cl_flow_ctrl_reduce(cl); + if (ret) { + dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n"); + return ret; + } + + return 0; +} + +/** + * mei_wd_stop - sends watchdog stop message to fw. 
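+ * + * Must be called with dev->device_lock held; the lock is dropped and + * re-acquired internally while waiting for the firmware to acknowledge the + * stop (bounded by MEI_WD_STOP_TIMEOUT).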
+ * + * @dev: the device structure + * + * returns 0 if success + * on error: + * -EIO when message send fails + * -EINVAL when invalid message is to be sent + * -ETIME on message timeout + */ +int mei_wd_stop(struct mei_device *dev) +{ + int ret; + + if (dev->wd_cl.state != MEI_FILE_CONNECTED || + dev->wd_state != MEI_WD_RUNNING) + return 0; + + memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE); + + dev->wd_state = MEI_WD_STOPPING; + + ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); + if (ret < 0) + goto err; + + if (ret && mei_hbuf_acquire(dev)) { + ret = mei_wd_send(dev); + if (ret) + goto err; + dev->wd_pending = false; + } else { + dev->wd_pending = true; + } + + mutex_unlock(&dev->device_lock); + + ret = wait_event_timeout(dev->wait_stop_wd, + dev->wd_state == MEI_WD_IDLE, + msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); + mutex_lock(&dev->device_lock); + if (dev->wd_state != MEI_WD_IDLE) { + /* timeout */ + ret = -ETIME; + dev_warn(&dev->pdev->dev, + "wd: stop failed to complete ret=%d.\n", ret); + goto err; + } + dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n", + MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); + return 0; +err: + return ret; +} + +/* + * mei_wd_ops_start - wd start command from the watchdog core. + * + * @wd_dev - watchdog device struct + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_start(struct watchdog_device *wd_dev) +{ + int err = -ENODEV; + struct mei_device *dev; + + dev = watchdog_get_drvdata(wd_dev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (dev->dev_state != MEI_DEV_ENABLED) { + dev_dbg(&dev->pdev->dev, + "wd: dev_state != MEI_DEV_ENABLED dev_state = %s\n", + mei_dev_state_str(dev->dev_state)); + goto end_unlock; + } + + if (dev->wd_cl.state != MEI_FILE_CONNECTED) { + dev_dbg(&dev->pdev->dev, + "MEI Driver is not connected to Watchdog Client\n"); + goto end_unlock; + } + + mei_wd_set_start_timeout(dev, dev->wd_timeout); + + err = 0; +end_unlock: + mutex_unlock(&dev->device_lock); + return err; +} + +/* + * mei_wd_ops_stop - wd stop command from the watchdog core. + * + * @wd_dev - watchdog device struct + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_stop(struct watchdog_device *wd_dev) +{ + struct mei_device *dev; + + dev = watchdog_get_drvdata(wd_dev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + mei_wd_stop(dev); + mutex_unlock(&dev->device_lock); + + return 0; +} + +/* + * mei_wd_ops_ping - wd ping command from the watchdog core. 
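+ * The ping reaches the firmware only when a flow control credit and the + * host buffer are available; otherwise it is marked wd_pending and sent + * later from the interrupt path.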
+ * + * @wd_dev - watchdog device struct + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_ping(struct watchdog_device *wd_dev) +{ + struct mei_device *dev; + int ret; + + dev = watchdog_get_drvdata(wd_dev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (dev->wd_cl.state != MEI_FILE_CONNECTED) { + dev_err(&dev->pdev->dev, "wd: not connected.\n"); + ret = -ENODEV; + goto end; + } + + dev->wd_state = MEI_WD_RUNNING; + + ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); + if (ret < 0) + goto end; + /* Check if we can send the ping to HW */ + if (ret && mei_hbuf_acquire(dev)) { + + dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); + + ret = mei_wd_send(dev); + if (ret) + goto end; + dev->wd_pending = false; + } else { + dev->wd_pending = true; + } + +end: + mutex_unlock(&dev->device_lock); + return ret; +} + +/* + * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core. + * + * @wd_dev - watchdog device struct + * @timeout - timeout value to set + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, + unsigned int timeout) +{ + struct mei_device *dev; + + dev = watchdog_get_drvdata(wd_dev); + if (!dev) + return -ENODEV; + + /* Check Timeout value */ + if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT) + return -EINVAL; + + mutex_lock(&dev->device_lock); + + dev->wd_timeout = timeout; + wd_dev->timeout = timeout; + mei_wd_set_start_timeout(dev, dev->wd_timeout); + + mutex_unlock(&dev->device_lock); + + return 0; +} + +/* + * Watchdog Device structs + */ +static const struct watchdog_ops wd_ops = { + .owner = THIS_MODULE, + .start = mei_wd_ops_start, + .stop = mei_wd_ops_stop, + .ping = mei_wd_ops_ping, + .set_timeout = mei_wd_ops_set_timeout, +}; +static const struct watchdog_info wd_info = { + .identity = INTEL_AMT_WATCHDOG_ID, + .options = WDIOF_KEEPALIVEPING | + WDIOF_SETTIMEOUT | + WDIOF_ALARMONLY, +}; + +static struct watchdog_device amt_wd_dev = { + .info = &wd_info, + .ops = &wd_ops, + .timeout = MEI_WD_DEFAULT_TIMEOUT, + .min_timeout = MEI_WD_MIN_TIMEOUT, + .max_timeout = MEI_WD_MAX_TIMEOUT, +}; + + +int mei_watchdog_register(struct mei_device *dev) +{ + + int ret; + + /* unlock to preserve correct locking order */ + mutex_unlock(&dev->device_lock); + ret = watchdog_register_device(&amt_wd_dev); + mutex_lock(&dev->device_lock); + if (ret) { + dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n", + ret); + return ret; + } + + dev_dbg(&dev->pdev->dev, + "wd: successfully registered watchdog interface.\n"); + watchdog_set_drvdata(&amt_wd_dev, dev); + return 0; +} + +void mei_watchdog_unregister(struct mei_device *dev) +{ + if (watchdog_get_drvdata(&amt_wd_dev) == NULL) + return; + + watchdog_set_drvdata(&amt_wd_dev, NULL); + watchdog_unregister_device(&amt_wd_dev); +} + diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig new file mode 100644 index 00000000000..462a5b1d865 --- /dev/null +++ b/drivers/misc/mic/Kconfig @@ -0,0 +1,37 @@ +comment "Intel MIC Host Driver" + +config INTEL_MIC_HOST + tristate "Intel MIC Host Driver" + depends on 64BIT && PCI && X86 + select VHOST_RING + help + This enables Host Driver support for the Intel Many Integrated + Core (MIC) family of PCIe form factor coprocessor devices that + run a 64 bit Linux OS. The driver manages card OS state and + enables communication between host and card. Intel MIC X100 + devices are currently supported. 
+ + If you are building a host kernel with an Intel MIC device then + say M (recommended) or Y, else say N. If unsure say N. + + More information about the Intel MIC family as well as the Linux + OS and tools for MIC to use with this driver are available from + <http://software.intel.com/en-us/mic-developer>. + +comment "Intel MIC Card Driver" + +config INTEL_MIC_CARD + tristate "Intel MIC Card Driver" + depends on 64BIT && X86 + select VIRTIO + help + This enables card driver support for the Intel Many Integrated + Core (MIC) device family. The card driver communicates shutdown/ + crash events to the host and allows registration/configuration of + virtio devices. Intel MIC X100 devices are currently supported. + + If you are building a card kernel for an Intel MIC device then + say M (recommended) or Y, else say N. If unsure say N. + + For more information see + <http://software.intel.com/en-us/mic-developer>. diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile new file mode 100644 index 00000000000..05b34d683a5 --- /dev/null +++ b/drivers/misc/mic/Makefile @@ -0,0 +1,6 @@ +# +# Makefile - Intel MIC Linux driver. +# Copyright(c) 2013, Intel Corporation. +# +obj-$(CONFIG_INTEL_MIC_HOST) += host/ +obj-$(CONFIG_INTEL_MIC_CARD) += card/ diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile new file mode 100644 index 00000000000..69d58bef92c --- /dev/null +++ b/drivers/misc/mic/card/Makefile @@ -0,0 +1,11 @@ +# +# Makefile - Intel MIC Linux driver. +# Copyright(c) 2013, Intel Corporation. +# +ccflags-y += -DINTEL_MIC_CARD + +obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o +mic_card-y += mic_x100.o +mic_card-y += mic_device.o +mic_card-y += mic_debugfs.o +mic_card-y += mic_virtio.o diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c new file mode 100644 index 00000000000..421b3d7911d --- /dev/null +++ b/drivers/misc/mic/card/mic_debugfs.c @@ -0,0 +1,130 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. + * + */ +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/seq_file.h> +#include <linux/interrupt.h> +#include <linux/device.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" + +/* Debugfs parent dir */ +static struct dentry *mic_dbg; + +/** + * mic_intr_test - Send interrupts to host. 
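+ * + * Reading the debugfs file fires doorbells 0 through 3 towards the host, one + * second apart, so the host side interrupt counters can be checked. A minimal + * usage sketch (the debugfs mount point and module dir name are assumptions): + * + *	cat /sys/kernel/debug/mic_card/<card name>/intr_test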
+ */ +static int mic_intr_test(struct seq_file *s, void *unused) +{ + struct mic_driver *mdrv = s->private; + struct mic_device *mdev = &mdrv->mdev; + + mic_send_intr(mdev, 0); + msleep(1000); + mic_send_intr(mdev, 1); + msleep(1000); + mic_send_intr(mdev, 2); + msleep(1000); + mic_send_intr(mdev, 3); + msleep(1000); + + return 0; +} + +static int mic_intr_test_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_intr_test, inode->i_private); +} + +static int mic_intr_test_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations intr_test_ops = { + .owner = THIS_MODULE, + .open = mic_intr_test_open, + .read = seq_read, + .llseek = seq_lseek, + .release = mic_intr_test_release +}; + +/** + * mic_create_card_debug_dir - Initialize MIC debugfs entries. + */ +void __init mic_create_card_debug_dir(struct mic_driver *mdrv) +{ + struct dentry *d; + + if (!mic_dbg) + return; + + mdrv->dbg_dir = debugfs_create_dir(mdrv->name, mic_dbg); + if (!mdrv->dbg_dir) { + dev_err(mdrv->dev, "Cant create dbg_dir %s\n", mdrv->name); + return; + } + + d = debugfs_create_file("intr_test", 0444, mdrv->dbg_dir, + mdrv, &intr_test_ops); + + if (!d) { + dev_err(mdrv->dev, + "Cant create dbg intr_test %s\n", mdrv->name); + return; + } +} + +/** + * mic_delete_card_debug_dir - Uninitialize MIC debugfs entries. + */ +void mic_delete_card_debug_dir(struct mic_driver *mdrv) +{ + if (!mdrv->dbg_dir) + return; + + debugfs_remove_recursive(mdrv->dbg_dir); +} + +/** + * mic_init_card_debugfs - Initialize global debugfs entry. + */ +void __init mic_init_card_debugfs(void) +{ + mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!mic_dbg) + pr_err("can't create debugfs dir\n"); +} + +/** + * mic_exit_card_debugfs - Uninitialize global debugfs entry + */ +void mic_exit_card_debugfs(void) +{ + debugfs_remove(mic_dbg); +} diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c new file mode 100644 index 00000000000..d0980ff9683 --- /dev/null +++ b/drivers/misc/mic/card/mic_device.c @@ -0,0 +1,305 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. 
+ * + */ +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/reboot.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_virtio.h" + +static struct mic_driver *g_drv; +static struct mic_irq *shutdown_cookie; + +static void mic_notify_host(u8 state) +{ + struct mic_driver *mdrv = g_drv; + struct mic_bootparam __iomem *bootparam = mdrv->dp; + + iowrite8(state, &bootparam->shutdown_status); + dev_dbg(mdrv->dev, "%s %d system_state %d\n", + __func__, __LINE__, state); + mic_send_intr(&mdrv->mdev, ioread8(&bootparam->c2h_shutdown_db)); +} + +static int mic_panic_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct mic_driver *mdrv = g_drv; + struct mic_bootparam __iomem *bootparam = mdrv->dp; + + iowrite8(-1, &bootparam->h2c_config_db); + iowrite8(-1, &bootparam->h2c_shutdown_db); + mic_notify_host(MIC_CRASHED); + return NOTIFY_DONE; +} + +static struct notifier_block mic_panic = { + .notifier_call = mic_panic_event, +}; + +static irqreturn_t mic_shutdown_isr(int irq, void *data) +{ + struct mic_driver *mdrv = g_drv; + struct mic_bootparam __iomem *bootparam = mdrv->dp; + + mic_ack_interrupt(&g_drv->mdev); + if (ioread8(&bootparam->shutdown_card)) + orderly_poweroff(true); + return IRQ_HANDLED; +} + +static int mic_shutdown_init(void) +{ + int rc = 0; + struct mic_driver *mdrv = g_drv; + struct mic_bootparam __iomem *bootparam = mdrv->dp; + int shutdown_db; + + shutdown_db = mic_next_card_db(); + shutdown_cookie = mic_request_card_irq(mic_shutdown_isr, + "Shutdown", mdrv, shutdown_db); + if (IS_ERR(shutdown_cookie)) + rc = PTR_ERR(shutdown_cookie); + else + iowrite8(shutdown_db, &bootparam->h2c_shutdown_db); + return rc; +} + +static void mic_shutdown_uninit(void) +{ + struct mic_driver *mdrv = g_drv; + struct mic_bootparam __iomem *bootparam = mdrv->dp; + + iowrite8(-1, &bootparam->h2c_shutdown_db); + mic_free_card_irq(shutdown_cookie, mdrv); +} + +static int __init mic_dp_init(void) +{ + struct mic_driver *mdrv = g_drv; + struct mic_device *mdev = &mdrv->mdev; + struct mic_bootparam __iomem *bootparam; + u64 lo, hi, dp_dma_addr; + u32 magic; + + lo = mic_read_spad(&mdrv->mdev, MIC_DPLO_SPAD); + hi = mic_read_spad(&mdrv->mdev, MIC_DPHI_SPAD); + + dp_dma_addr = lo | (hi << 32); + mdrv->dp = mic_card_map(mdev, dp_dma_addr, MIC_DP_SIZE); + if (!mdrv->dp) { + dev_err(mdrv->dev, "Cannot remap Aperture BAR\n"); + return -ENOMEM; + } + bootparam = mdrv->dp; + magic = ioread32(&bootparam->magic); + if (MIC_MAGIC != magic) { + dev_err(mdrv->dev, "bootparam magic mismatch 0x%x\n", magic); + return -EIO; + } + return 0; +} + +/* Uninitialize the device page */ +static void mic_dp_uninit(void) +{ + mic_card_unmap(&g_drv->mdev, g_drv->dp); +} + +/** + * mic_request_card_irq - request an irq. + * + * @func: The callback function that handles the interrupt. + * @name: The ASCII name of the callee requesting the irq. + * @data: private data that is returned back when calling the + * function handler. + * @index: The doorbell index of the requester. + * + * returns: The cookie that is transparent to the caller. Passed + * back when calling mic_free_irq. An appropriate error code + * is returned on failure. Caller needs to use IS_ERR(return_val) + * to check for failure and PTR_ERR(return_val) to obtained the + * error code. 
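+ * + * A minimal usage sketch, mirroring mic_shutdown_init() below (my_isr and + * my_data are hypothetical placeholders): + * + *	int db = mic_next_card_db(); + *	struct mic_irq *cookie = mic_request_card_irq(my_isr, "mydev", my_data, db); + *	if (IS_ERR(cookie)) + *		return PTR_ERR(cookie);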
+ *
+ */
+struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data),
+	const char *name, void *data, int index)
+{
+	int rc = 0;
+	unsigned long cookie;
+	struct mic_driver *mdrv = g_drv;
+
+	rc = request_irq(mic_db_to_irq(mdrv, index), func,
+		0, name, data);
+	if (rc) {
+		dev_err(mdrv->dev, "request_irq failed rc = %d\n", rc);
+		goto err;
+	}
+	mdrv->irq_info.irq_usage_count[index]++;
+	cookie = index;
+	return (struct mic_irq *)cookie;
+err:
+	return ERR_PTR(rc);
+}
+
+/**
+ * mic_free_card_irq - free irq.
+ *
+ * @cookie: cookie obtained during a successful call to
+ * mic_request_card_irq
+ * @data: private data specified by the calling function during the
+ * mic_request_card_irq
+ *
+ * returns: none.
+ */
+void mic_free_card_irq(struct mic_irq *cookie, void *data)
+{
+	int index;
+	struct mic_driver *mdrv = g_drv;
+
+	index = (unsigned long)cookie & 0xFFFFU;
+	free_irq(mic_db_to_irq(mdrv, index), data);
+	mdrv->irq_info.irq_usage_count[index]--;
+}
+
+/**
+ * mic_next_card_db - Get the doorbell with minimum usage count.
+ *
+ * Returns the doorbell index.
+ */
+int mic_next_card_db(void)
+{
+	int i;
+	int index = 0;
+	struct mic_driver *mdrv = g_drv;
+
+	for (i = 0; i < mdrv->intr_info.num_intr; i++) {
+		if (mdrv->irq_info.irq_usage_count[i] <
+			mdrv->irq_info.irq_usage_count[index])
+			index = i;
+	}
+
+	return index;
+}
+
+/**
+ * mic_init_irq - Initialize irq information.
+ *
+ * Returns 0 on success. Appropriate error code on failure.
+ */
+static int mic_init_irq(void)
+{
+	struct mic_driver *mdrv = g_drv;
+
+	mdrv->irq_info.irq_usage_count = kzalloc((sizeof(u32) *
+			mdrv->intr_info.num_intr),
+			GFP_KERNEL);
+	if (!mdrv->irq_info.irq_usage_count)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * mic_uninit_irq - Uninitialize irq information.
+ *
+ * Returns: none.
+ */
+static void mic_uninit_irq(void)
+{
+	struct mic_driver *mdrv = g_drv;
+
+	kfree(mdrv->irq_info.irq_usage_count);
+}
+
+/*
+ * mic_driver_init - MIC driver initialization tasks.
+ *
+ * Returns 0 on success. Appropriate error code on failure.
+ */
+int __init mic_driver_init(struct mic_driver *mdrv)
+{
+	int rc;
+
+	g_drv = mdrv;
+	/*
+	 * Unloading the card module is not supported. The MIC card module
+	 * handles fundamental operations like host/card initiated shutdowns
+	 * and informing the host about card crashes and cannot be unloaded.
+	 */
+	if (!try_module_get(mdrv->dev->driver->owner)) {
+		rc = -ENODEV;
+		goto done;
+	}
+	rc = mic_dp_init();
+	if (rc)
+		goto put;
+	rc = mic_init_irq();
+	if (rc)
+		goto dp_uninit;
+	rc = mic_shutdown_init();
+	if (rc)
+		goto irq_uninit;
+	rc = mic_devices_init(mdrv);
+	if (rc)
+		goto shutdown_uninit;
+	mic_create_card_debug_dir(mdrv);
+	atomic_notifier_chain_register(&panic_notifier_list, &mic_panic);
done:
+	return rc;
shutdown_uninit:
+	mic_shutdown_uninit();
irq_uninit:
+	mic_uninit_irq();
dp_uninit:
+	mic_dp_uninit();
put:
+	module_put(mdrv->dev->driver->owner);
+	return rc;
+}
+
+/*
+ * mic_driver_uninit - MIC driver uninitialization tasks.
+ *
+ * Returns: none.
+ */
+void mic_driver_uninit(struct mic_driver *mdrv)
+{
+	mic_delete_card_debug_dir(mdrv);
+	mic_devices_uninit(mdrv);
+	/*
+	 * Inform the host about the shutdown status i.e. poweroff/restart etc.
+	 * The module cannot be unloaded so the only code path to call
+	 * mic_devices_uninit(..) is the shutdown callback.
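+	 * system_state below is the kernel's global state variable; in the
+	 * shutdown path it holds values such as SYSTEM_POWER_OFF or
+	 * SYSTEM_RESTART, which the host reads back as the shutdown status.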
+ */ + mic_notify_host(system_state); + mic_shutdown_uninit(); + mic_uninit_irq(); + mic_dp_uninit(); + module_put(mdrv->dev->driver->owner); +} diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h new file mode 100644 index 00000000000..306f502be95 --- /dev/null +++ b/drivers/misc/mic/card/mic_device.h @@ -0,0 +1,134 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. + * + */ +#ifndef _MIC_CARD_DEVICE_H_ +#define _MIC_CARD_DEVICE_H_ + +#include <linux/workqueue.h> +#include <linux/io.h> +#include <linux/irqreturn.h> + +/** + * struct mic_intr_info - Contains h/w specific interrupt sources info + * + * @num_intr: The number of irqs available + */ +struct mic_intr_info { + u32 num_intr; +}; + +/** + * struct mic_irq_info - OS specific irq information + * + * @irq_usage_count: usage count array tracking the number of sources + * assigned for each irq. + */ +struct mic_irq_info { + int *irq_usage_count; +}; + +/** + * struct mic_device - MIC device information. + * + * @mmio: MMIO bar information. + */ +struct mic_device { + struct mic_mw mmio; +}; + +/** + * struct mic_driver - MIC card driver information. + * + * @name: Name for MIC driver. + * @dbg_dir: debugfs directory of this MIC device. + * @dev: The device backing this MIC. + * @dp: The pointer to the virtio device page. + * @mdev: MIC device information for the host. + * @hotplug_work: Hot plug work for adding/removing virtio devices. + * @irq_info: The OS specific irq information + * @intr_info: H/W specific interrupt information. + */ +struct mic_driver { + char name[20]; + struct dentry *dbg_dir; + struct device *dev; + void __iomem *dp; + struct mic_device mdev; + struct work_struct hotplug_work; + struct mic_irq_info irq_info; + struct mic_intr_info intr_info; +}; + +/** + * struct mic_irq - opaque pointer used as cookie + */ +struct mic_irq; + +/** + * mic_mmio_read - read from an MMIO register. + * @mw: MMIO register base virtual address. + * @offset: register offset. + * + * RETURNS: register value. + */ +static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset) +{ + return ioread32(mw->va + offset); +} + +/** + * mic_mmio_write - write to an MMIO register. + * @mw: MMIO register base virtual address. + * @val: the data value to put into the register + * @offset: register offset. + * + * RETURNS: none. 
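+ *
+ * A usage sketch (mirrors mic_read_spad() in mic_x100.c; the
+ * MIC_X100_* offsets are defined in mic_x100.h):
+ *
+ *	u32 val = mic_mmio_read(&mdev->mmio,
+ *			MIC_X100_SBOX_BASE_ADDRESS +
+ *			MIC_X100_SBOX_SPAD0 + idx * 4);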
+ */ +static inline void +mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset) +{ + iowrite32(val, mw->va + offset); +} + +int mic_driver_init(struct mic_driver *mdrv); +void mic_driver_uninit(struct mic_driver *mdrv); +int mic_next_card_db(void); +struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data), + const char *name, void *data, int intr_src); +void mic_free_card_irq(struct mic_irq *cookie, void *data); +u32 mic_read_spad(struct mic_device *mdev, unsigned int idx); +void mic_send_intr(struct mic_device *mdev, int doorbell); +int mic_db_to_irq(struct mic_driver *mdrv, int db); +u32 mic_ack_interrupt(struct mic_device *mdev); +void mic_hw_intr_init(struct mic_driver *mdrv); +void __iomem * +mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size); +void mic_card_unmap(struct mic_device *mdev, void __iomem *addr); +void __init mic_create_card_debug_dir(struct mic_driver *mdrv); +void mic_delete_card_debug_dir(struct mic_driver *mdrv); +void __init mic_init_card_debugfs(void); +void mic_exit_card_debugfs(void); +#endif diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c new file mode 100644 index 00000000000..653799b96bf --- /dev/null +++ b/drivers/misc/mic/card/mic_virtio.c @@ -0,0 +1,633 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Adapted from: + * + * virtio for kvm on s390 + * + * Copyright IBM Corp. 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> + * + * Intel MIC Card driver. + * + */ +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/virtio_config.h> + +#include "../common/mic_dev.h" +#include "mic_virtio.h" + +#define VIRTIO_SUBCODE_64 0x0D00 + +#define MIC_MAX_VRINGS 4 +struct mic_vdev { + struct virtio_device vdev; + struct mic_device_desc __iomem *desc; + struct mic_device_ctrl __iomem *dc; + struct mic_device *mdev; + void __iomem *vr[MIC_MAX_VRINGS]; + int used_size[MIC_MAX_VRINGS]; + struct completion reset_done; + struct mic_irq *virtio_cookie; + int c2h_vdev_db; +}; + +static struct mic_irq *virtio_config_cookie; +#define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev) + +/* Helper API to obtain the parent of the virtio device */ +static inline struct device *mic_dev(struct mic_vdev *mvdev) +{ + return mvdev->vdev.dev.parent; +} + +/* This gets the device's feature bits. 
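+ * (The descriptor stores feature_len bytes of host-offered features,
+ * followed by feature_len bytes that the card writes back with the
+ * features it accepts; see mic_finalize_features() below.)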
 */
+static u32 mic_get_features(struct virtio_device *vdev)
+{
+	unsigned int i, bits;
+	u32 features = 0;
+	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+	u8 __iomem *in_features = mic_vq_features(desc);
+	int feature_len = ioread8(&desc->feature_len);
+
+	bits = min_t(unsigned, feature_len,
+		sizeof(vdev->features)) * 8;
+	for (i = 0; i < bits; i++)
+		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
+			features |= BIT(i);
+
+	return features;
+}
+
+static void mic_finalize_features(struct virtio_device *vdev)
+{
+	unsigned int i, bits;
+	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+	u8 feature_len = ioread8(&desc->feature_len);
+	/* Second half of bitmap is features we accept. */
+	u8 __iomem *out_features =
+		mic_vq_features(desc) + feature_len;
+
+	/* Give virtio_ring a chance to accept features. */
+	vring_transport_features(vdev);
+
+	memset_io(out_features, 0, feature_len);
+	bits = min_t(unsigned, feature_len,
+		sizeof(vdev->features)) * 8;
+	for (i = 0; i < bits; i++) {
+		if (test_bit(i, vdev->features))
+			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
+				&out_features[i / 8]);
+	}
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void mic_get(struct virtio_device *vdev, unsigned int offset,
+		void *buf, unsigned len)
+{
+	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+
+	if (offset + len > ioread8(&desc->config_len))
+		return;
+	memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len);
+}
+
+static void mic_set(struct virtio_device *vdev, unsigned int offset,
+		const void *buf, unsigned len)
+{
+	struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
+
+	if (offset + len > ioread8(&desc->config_len))
+		return;
+	memcpy_toio(mic_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access the status
+ * field of the device descriptor. set_status also interrupts the host
+ * to tell about status changes.
+ */
+static u8 mic_get_status(struct virtio_device *vdev)
+{
+	return ioread8(&to_micvdev(vdev)->desc->status);
+}
+
+static void mic_set_status(struct virtio_device *vdev, u8 status)
+{
+	struct mic_vdev *mvdev = to_micvdev(vdev);
+
+	if (!status)
+		return;
+	iowrite8(status, &mvdev->desc->status);
+	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+}
+
+/* Inform host on a virtio device reset and wait for ack from host */
+static void mic_reset_inform_host(struct virtio_device *vdev)
+{
+	struct mic_vdev *mvdev = to_micvdev(vdev);
+	struct mic_device_ctrl __iomem *dc = mvdev->dc;
+	int retry;
+
+	iowrite8(0, &dc->host_ack);
+	iowrite8(1, &dc->vdev_reset);
+	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+
+	/* Wait till host completes all card accesses and acks the reset */
+	for (retry = 100; retry--;) {
+		if (ioread8(&dc->host_ack))
+			break;
+		msleep(100);
+	}
+
+	dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
+
+	/* Reset status to 0 in case we timed out */
+	iowrite8(0, &mvdev->desc->status);
+}
+
+static void mic_reset(struct virtio_device *vdev)
+{
+	struct mic_vdev *mvdev = to_micvdev(vdev);
+
+	dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n",
+		__func__, vdev->id.device);
+
+	mic_reset_inform_host(vdev);
+	complete_all(&mvdev->reset_done);
+}
+
+/*
+ * The virtio_ring code calls this API when it wants to notify the Host.
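+ * On MIC, notification amounts to ringing the per-device doorbell
+ * (c2h_vdev_db) via mic_send_intr(), as mic_notify() below does.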
+ */
+static bool mic_notify(struct virtqueue *vq)
+{
+	struct mic_vdev *mvdev = vq->priv;
+
+	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+	return true;
+}
+
+static void mic_del_vq(struct virtqueue *vq, int n)
+{
+	struct mic_vdev *mvdev = to_micvdev(vq->vdev);
+	struct vring *vr = (struct vring *)(vq + 1);
+
+	free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n]));
+	vring_del_virtqueue(vq);
+	mic_card_unmap(mvdev->mdev, mvdev->vr[n]);
+	mvdev->vr[n] = NULL;
+}
+
+static void mic_del_vqs(struct virtio_device *vdev)
+{
+	struct mic_vdev *mvdev = to_micvdev(vdev);
+	struct virtqueue *vq, *n;
+	int idx = 0;
+
+	dev_dbg(mic_dev(mvdev), "%s\n", __func__);
+
+	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+		mic_del_vq(vq, idx++);
+}
+
+/*
+ * This routine assigns vrings allocated in host/IO memory. Code in
+ * virtio_ring.c, however, continues to access this IO memory as if it
+ * were local memory, without IO accessors.
+ */
+static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
+				unsigned index,
+				void (*callback)(struct virtqueue *vq),
+				const char *name)
+{
+	struct mic_vdev *mvdev = to_micvdev(vdev);
+	struct mic_vqconfig __iomem *vqconfig;
+	struct mic_vqconfig config;
+	struct virtqueue *vq;
+	void __iomem *va;
+	struct _mic_vring_info __iomem *info;
+	void *used;
+	int vr_size, _vr_size, err, magic;
+	struct vring *vr;
+	u8 type = ioread8(&mvdev->desc->type);
+
+	if (index >= ioread8(&mvdev->desc->num_vq))
+		return ERR_PTR(-ENOENT);
+
+	if (!name)
+		return ERR_PTR(-ENOENT);
+
+	/* First assign the vrings allocated in host memory */
+	vqconfig = mic_vq_config(mvdev->desc) + index;
+	memcpy_fromio(&config, vqconfig, sizeof(config));
+	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
+	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+	va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
+	if (!va)
+		return ERR_PTR(-ENOMEM);
+	mvdev->vr[index] = va;
+	memset_io(va, 0x0, _vr_size);
+	vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
+				MIC_VIRTIO_RING_ALIGN, vdev, false,
+				(void __force *)va, mic_notify, callback,
+				name);
+	if (!vq) {
+		err = -ENOMEM;
+		goto unmap;
+	}
+	info = va + _vr_size;
+	magic = ioread32(&info->magic);
+
+	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
+		err = -EIO;
+		goto unmap;
+	}
+
+	/* Allocate and reassign used ring now */
+	mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
+			sizeof(struct vring_used_elem) *
+			le16_to_cpu(config.num));
+	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+			get_order(mvdev->used_size[index]));
+	if (!used) {
+		err = -ENOMEM;
+		dev_err(mic_dev(mvdev), "%s %d err %d\n",
+			__func__, __LINE__, err);
+		goto del_vq;
+	}
+	iowrite64(virt_to_phys(used), &vqconfig->used_address);
+
+	/*
+	 * To reassign the used ring here we are directly accessing
+	 * struct vring_virtqueue which is a private data structure
+	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
+	 * vring_new_virtqueue() would ensure that
+	 * (&vq->vring == (struct vring *) (&vq->vq + 1));
+	 */
+	vr = (struct vring *)(vq + 1);
+	vr->used = used;
+
+	vq->priv = mvdev;
+	return vq;
del_vq:
+	vring_del_virtqueue(vq);
unmap:
+	mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
+	return ERR_PTR(err);
+}
+
+static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+			struct virtqueue *vqs[],
+			vq_callback_t *callbacks[],
+			const char *names[])
+{
+	struct mic_vdev *mvdev = to_micvdev(vdev);
+	struct mic_device_ctrl __iomem *dc = mvdev->dc;
+	int i, err, retry;
+
+	/* We must have this many virtqueues. */
+	if (nvqs > ioread8(&mvdev->desc->num_vq))
+		return -ENOENT;
+
+	for (i = 0; i < nvqs; ++i) {
+		dev_dbg(mic_dev(mvdev), "%s: %d: %s\n",
+			__func__, i, names[i]);
+		vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]);
+		if (IS_ERR(vqs[i])) {
+			err = PTR_ERR(vqs[i]);
+			goto error;
+		}
+	}
+
+	iowrite8(1, &dc->used_address_updated);
+	/*
+	 * Send an interrupt to the host to inform it that used
+	 * rings have been re-assigned.
+	 */
+	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
+	for (retry = 100; retry--;) {
+		if (!ioread8(&dc->used_address_updated))
+			break;
+		msleep(100);
+	}
+
+	dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
+	/* retry underflows to -1 only if the host never acked the update */
+	if (retry < 0) {
+		err = -ENODEV;
+		goto error;
+	}
+
+	return 0;
error:
+	mic_del_vqs(vdev);
+	return err;
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops mic_vq_config_ops = {
+	.get_features = mic_get_features,
+	.finalize_features = mic_finalize_features,
+	.get = mic_get,
+	.set = mic_set,
+	.get_status = mic_get_status,
+	.set_status = mic_set_status,
+	.reset = mic_reset,
+	.find_vqs = mic_find_vqs,
+	.del_vqs = mic_del_vqs,
+};
+
+static irqreturn_t
+mic_virtio_intr_handler(int irq, void *data)
+{
+	struct mic_vdev *mvdev = data;
+	struct virtqueue *vq;
+
+	mic_ack_interrupt(mvdev->mdev);
+	list_for_each_entry(vq, &mvdev->vdev.vqs, list)
+		vring_interrupt(0, vq);
+
+	return IRQ_HANDLED;
+}
+
+static void mic_virtio_release_dev(struct device *_d)
+{
+	/*
+	 * No need for a release method similar to virtio PCI.
+	 * Provide an empty one to avoid getting a warning from core.
+	 */
+}
+
+/*
+ * Adds a new device and registers it with virtio; appropriate drivers
+ * are loaded by the device model.
+ */
+static int mic_add_device(struct mic_device_desc __iomem *d,
+	unsigned int offset, struct mic_driver *mdrv)
+{
+	struct mic_vdev *mvdev;
+	int ret;
+	int virtio_db;
+	u8 type = ioread8(&d->type);
+
+	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
+	if (!mvdev) {
+		dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n",
+			offset, type);
+		return -ENOMEM;
+	}
+
+	mvdev->mdev = &mdrv->mdev;
+	mvdev->vdev.dev.parent = mdrv->dev;
+	mvdev->vdev.dev.release = mic_virtio_release_dev;
+	mvdev->vdev.id.device = type;
+	mvdev->vdev.config = &mic_vq_config_ops;
+	mvdev->desc = d;
+	mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d);
+	init_completion(&mvdev->reset_done);
+
+	virtio_db = mic_next_card_db();
+	mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
+			"virtio intr", mvdev, virtio_db);
+	if (IS_ERR(mvdev->virtio_cookie)) {
+		ret = PTR_ERR(mvdev->virtio_cookie);
+		goto kfree;
+	}
+	iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db);
+	mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db);
+
+	ret = register_virtio_device(&mvdev->vdev);
+	if (ret) {
+		dev_err(mic_dev(mvdev),
+			"Failed to register mic device %u type %u\n",
+			offset, type);
+		goto free_irq;
+	}
+	iowrite64((u64)mvdev, &mvdev->dc->vdev);
+	dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n",
+		__func__, offset, type, mvdev);
+
+	return 0;
+
free_irq:
+	mic_free_card_irq(mvdev->virtio_cookie, mvdev);
kfree:
+	kfree(mvdev);
+	return ret;
+}
+
+/*
+ * match for a mic device with a specific desc pointer
+ */
+static int mic_match_desc(struct device *dev, void *data)
+{
+	struct virtio_device *vdev = dev_to_virtio(dev);
+	struct mic_vdev *mvdev = to_micvdev(vdev);
+
+	return mvdev->desc == (void __iomem *)data;
+}
+
+static void mic_handle_config_change(struct mic_device_desc __iomem *d,
+	unsigned int offset, struct mic_driver *mdrv)
+{
+	struct mic_device_ctrl __iomem *dc
+		= (void __iomem *)d + mic_aligned_desc_size(d);
+	struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
+	struct virtio_driver *drv;
+
+	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
+		return;
+
+	dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__);
+	drv = container_of(mvdev->vdev.dev.driver,
+			struct virtio_driver, driver);
+	if (drv->config_changed)
+		drv->config_changed(&mvdev->vdev);
+	iowrite8(1, &dc->guest_ack);
+}
+
+/*
+ * Removes a virtio device if a hot remove event has been
+ * requested by the host.
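+ * The handshake, as implemented below: the host sets
+ * MIC_VIRTIO_PARAM_DEV_REMOVE in config_change, the card unregisters
+ * the virtio device, waits for the reset handshake to finish if a
+ * driver had attached (VIRTIO_CONFIG_S_DRIVER_OK), and then writes
+ * guest_ack so the host can reclaim the descriptor.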
+ */
+static int mic_remove_device(struct mic_device_desc __iomem *d,
+	unsigned int offset, struct mic_driver *mdrv)
+{
+	struct mic_device_ctrl __iomem *dc
+		= (void __iomem *)d + mic_aligned_desc_size(d);
+	struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
+	u8 status;
+	int ret = -1;
+
+	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+		dev_dbg(mdrv->dev,
+			"%s %d config_change %d type %d mvdev %p\n",
+			__func__, __LINE__,
+			ioread8(&dc->config_change), ioread8(&d->type), mvdev);
+
+		status = ioread8(&d->status);
+		reinit_completion(&mvdev->reset_done);
+		unregister_virtio_device(&mvdev->vdev);
+		mic_free_card_irq(mvdev->virtio_cookie, mvdev);
+		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+			wait_for_completion(&mvdev->reset_done);
+		kfree(mvdev);
+		iowrite8(1, &dc->guest_ack);
+		dev_dbg(mdrv->dev, "%s %d guest_ack %d\n",
+			__func__, __LINE__, ioread8(&dc->guest_ack));
+		ret = 0;
+	}
+
+	return ret;
+}
+
+#define REMOVE_DEVICES true
+
+static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
+{
+	s8 type;
+	unsigned int i;
+	struct mic_device_desc __iomem *d;
+	struct mic_device_ctrl __iomem *dc;
+	struct device *dev;
+	int ret;
+
+	for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
+		i += mic_total_desc_size(d)) {
+		d = mdrv->dp + i;
+		dc = (void __iomem *)d + mic_aligned_desc_size(d);
+		/*
+		 * This read barrier is paired with the corresponding write
+		 * barrier on the host, which is inserted before a virtio
+		 * device descriptor is added or removed by updating its type.
+		 */
+		rmb();
+		type = ioread8(&d->type);
+
+		/* end of list */
+		if (type == 0)
+			break;
+
+		if (type == -1)
+			continue;
+
+		/* device already exists */
+		dev = device_find_child(mdrv->dev, (void __force *)d,
+				mic_match_desc);
+		if (dev) {
+			if (remove)
+				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
+					&dc->config_change);
+			put_device(dev);
+			mic_handle_config_change(d, i, mdrv);
+			ret = mic_remove_device(d, i, mdrv);
+			if (!ret && !remove)
+				iowrite8(-1, &d->type);
+			if (remove) {
+				iowrite8(0, &dc->config_change);
+				iowrite8(0, &dc->guest_ack);
+			}
+			continue;
+		}
+
+		/* new device */
+		dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n",
+			__func__, __LINE__, d);
+		if (!remove)
+			mic_add_device(d, i, mdrv);
+	}
+}
+
+/*
+ * mic_hotplug_devices tries to find changes in the device page.
+ */
+static void mic_hotplug_devices(struct work_struct *work)
+{
+	struct mic_driver *mdrv = container_of(work,
+			struct mic_driver, hotplug_work);
+
+	mic_scan_devices(mdrv, !REMOVE_DEVICES);
+}
+
+/*
+ * Interrupt handler for hot plug/config changes etc.
+ */ +static irqreturn_t +mic_extint_handler(int irq, void *data) +{ + struct mic_driver *mdrv = (struct mic_driver *)data; + + dev_dbg(mdrv->dev, "%s %d hotplug work\n", + __func__, __LINE__); + mic_ack_interrupt(&mdrv->mdev); + schedule_work(&mdrv->hotplug_work); + return IRQ_HANDLED; +} + +/* + * Init function for virtio + */ +int mic_devices_init(struct mic_driver *mdrv) +{ + int rc; + struct mic_bootparam __iomem *bootparam; + int config_db; + + INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices); + mic_scan_devices(mdrv, !REMOVE_DEVICES); + + config_db = mic_next_card_db(); + virtio_config_cookie = mic_request_card_irq(mic_extint_handler, + "virtio_config_intr", mdrv, config_db); + if (IS_ERR(virtio_config_cookie)) { + rc = PTR_ERR(virtio_config_cookie); + goto exit; + } + + bootparam = mdrv->dp; + iowrite8(config_db, &bootparam->h2c_config_db); + return 0; +exit: + return rc; +} + +/* + * Uninit function for virtio + */ +void mic_devices_uninit(struct mic_driver *mdrv) +{ + struct mic_bootparam __iomem *bootparam = mdrv->dp; + iowrite8(-1, &bootparam->h2c_config_db); + mic_free_card_irq(virtio_config_cookie, mdrv); + flush_work(&mdrv->hotplug_work); + mic_scan_devices(mdrv, REMOVE_DEVICES); +} diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h new file mode 100644 index 00000000000..d0407ba53bb --- /dev/null +++ b/drivers/misc/mic/card/mic_virtio.h @@ -0,0 +1,76 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. 
+ *
+ */
+#ifndef __MIC_CARD_VIRTIO_H
+#define __MIC_CARD_VIRTIO_H
+
+#include <linux/mic_common.h>
+#include "mic_device.h"
+
+/*
+ * 64 bit I/O access
+ */
+#ifndef ioread64
+#define ioread64 readq
+#endif
+#ifndef iowrite64
+#define iowrite64 writeq
+#endif
+
+static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
+{
+	return sizeof(*desc)
+		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
+		+ ioread8(&desc->feature_len) * 2
+		+ ioread8(&desc->config_len);
+}
+
+static inline struct mic_vqconfig __iomem *
+mic_vq_config(struct mic_device_desc __iomem *desc)
+{
+	return (struct mic_vqconfig __iomem *)(desc + 1);
+}
+
+static inline __u8 __iomem *
+mic_vq_features(struct mic_device_desc __iomem *desc)
+{
+	return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq));
+}
+
+static inline __u8 __iomem *
+mic_vq_configspace(struct mic_device_desc __iomem *desc)
+{
+	return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2;
+}
+
+static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
+{
+	return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
+}
+
+int mic_devices_init(struct mic_driver *mdrv);
+void mic_devices_uninit(struct mic_driver *mdrv);
+
+#endif
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
new file mode 100644
index 00000000000..2868945c9a4
--- /dev/null
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -0,0 +1,256 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Disclaimer: The codes contained in these modules may be specific to
+ * the Intel Software Development Platform codenamed: Knights Ferry, and
+ * the Intel product codenamed: Knights Corner, and are not backward
+ * compatible with other Intel products. Additionally, Intel will NOT
+ * support the codes or instruction set in future products.
+ *
+ * Intel MIC Card driver.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_x100.h"
+
+static const char mic_driver_name[] = "mic";
+
+static struct mic_driver g_drv;
+
+/**
+ * mic_read_spad - read from the scratchpad register
+ * @mdev: pointer to mic_device instance
+ * @idx: index to scratchpad register, 0 based
+ *
+ * This function allows reading of the 32bit scratchpad register.
+ *
+ * RETURNS: The value read from the scratchpad register.
+ */
+u32 mic_read_spad(struct mic_device *mdev, unsigned int idx)
+{
+	return mic_mmio_read(&mdev->mmio,
+		MIC_X100_SBOX_BASE_ADDRESS +
+		MIC_X100_SBOX_SPAD0 + idx * 4);
+}
+
+/**
+ * mic_send_intr - Send interrupt to Host.
+ * @mdev: pointer to mic_device instance
+ * @doorbell: Doorbell number.
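+ *
+ * Register math, as a sketch: doorbell i maps to the SDBIC register at
+ * MIC_X100_SBOX_SDBIC0 + 4 * i, and setting
+ * MIC_X100_SBOX_SDBIC0_DBREQ_BIT in it raises the corresponding
+ * interrupt on the host.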
+ */ +void mic_send_intr(struct mic_device *mdev, int doorbell) +{ + struct mic_mw *mw = &mdev->mmio; + + if (doorbell > MIC_X100_MAX_DOORBELL_IDX) + return; + /* Ensure that the interrupt is ordered w.r.t previous stores. */ + wmb(); + mic_mmio_write(mw, MIC_X100_SBOX_SDBIC0_DBREQ_BIT, + MIC_X100_SBOX_BASE_ADDRESS + + (MIC_X100_SBOX_SDBIC0 + (4 * doorbell))); +} + +/** + * mic_ack_interrupt - Device specific interrupt handling. + * @mdev: pointer to mic_device instance + * + * Returns: bitmask of doorbell events triggered. + */ +u32 mic_ack_interrupt(struct mic_device *mdev) +{ + return 0; +} + +static inline int mic_get_sbox_irq(int db) +{ + return MIC_X100_IRQ_BASE + db; +} + +static inline int mic_get_rdmasr_irq(int index) +{ + return MIC_X100_RDMASR_IRQ_BASE + index; +} + +/** + * mic_hw_intr_init - Initialize h/w specific interrupt + * information. + * @mdrv: pointer to mic_driver + */ +void mic_hw_intr_init(struct mic_driver *mdrv) +{ + mdrv->intr_info.num_intr = MIC_X100_NUM_SBOX_IRQ + + MIC_X100_NUM_RDMASR_IRQ; +} + +/** + * mic_db_to_irq - Retrieve irq number corresponding to a doorbell. + * @mdrv: pointer to mic_driver + * @db: The doorbell obtained for which the irq is needed. Doorbell + * may correspond to an sbox doorbell or an rdmasr index. + * + * Returns the irq corresponding to the doorbell. + */ +int mic_db_to_irq(struct mic_driver *mdrv, int db) +{ + int rdmasr_index; + if (db < MIC_X100_NUM_SBOX_IRQ) { + return mic_get_sbox_irq(db); + } else { + rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ + + MIC_X100_RDMASR_IRQ_BASE; + return mic_get_rdmasr_irq(rdmasr_index); + } +} + +/* + * mic_card_map - Allocate virtual address for a remote memory region. + * @mdev: pointer to mic_device instance. + * @addr: Remote DMA address. + * @size: Size of the region. + * + * Returns: Virtual address backing the remote memory region. + */ +void __iomem * +mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size) +{ + return ioremap(addr, size); +} + +/* + * mic_card_unmap - Unmap the virtual address for a remote memory region. + * @mdev: pointer to mic_device instance. + * @addr: Virtual address for remote memory region. + * + * Returns: None. 
+ */
+void mic_card_unmap(struct mic_device *mdev, void __iomem *addr)
+{
+	iounmap(addr);
+}
+
+static int __init mic_probe(struct platform_device *pdev)
+{
+	struct mic_driver *mdrv = &g_drv;
+	struct mic_device *mdev = &mdrv->mdev;
+	int rc = 0;
+
+	mdrv->dev = &pdev->dev;
+	snprintf(mdrv->name, sizeof(mdrv->name), "%s", mic_driver_name);
+
+	mdev->mmio.pa = MIC_X100_MMIO_BASE;
+	mdev->mmio.len = MIC_X100_MMIO_LEN;
+	mdev->mmio.va = ioremap(MIC_X100_MMIO_BASE, MIC_X100_MMIO_LEN);
+	if (!mdev->mmio.va) {
+		dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
+		rc = -EIO;
+		goto done;
+	}
+	mic_hw_intr_init(mdrv);
+	rc = mic_driver_init(mdrv);
+	if (rc) {
+		dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc);
+		goto iounmap;
+	}
done:
+	return rc;
iounmap:
+	iounmap(mdev->mmio.va);
+	return rc;
+}
+
+static int mic_remove(struct platform_device *pdev)
+{
+	struct mic_driver *mdrv = &g_drv;
+	struct mic_device *mdev = &mdrv->mdev;
+
+	mic_driver_uninit(mdrv);
+	iounmap(mdev->mmio.va);
+	return 0;
+}
+
+static void mic_platform_shutdown(struct platform_device *pdev)
+{
+	mic_remove(pdev);
+}
+
+static struct platform_device mic_platform_dev = {
+	.name = mic_driver_name,
+	.id = 0,
+	.num_resources = 0,
+};
+
+static struct platform_driver __refdata mic_platform_driver = {
+	.probe = mic_probe,
+	.remove = mic_remove,
+	.shutdown = mic_platform_shutdown,
+	.driver = {
+		.name = mic_driver_name,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init mic_init(void)
+{
+	int ret;
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (!(c->x86 == 11 && c->x86_model == 1)) {
+		ret = -ENODEV;
+		pr_err("%s not running on X100 ret %d\n", __func__, ret);
+		goto done;
+	}
+
+	mic_init_card_debugfs();
+	ret = platform_device_register(&mic_platform_dev);
+	if (ret) {
+		pr_err("platform_device_register ret %d\n", ret);
+		goto cleanup_debugfs;
+	}
+	ret = platform_driver_register(&mic_platform_driver);
+	if (ret) {
+		pr_err("platform_driver_register ret %d\n", ret);
+		goto device_unregister;
+	}
+	return ret;
+
device_unregister:
+	platform_device_unregister(&mic_platform_dev);
cleanup_debugfs:
+	mic_exit_card_debugfs();
done:
+	return ret;
+}
+
+static void __exit mic_exit(void)
+{
+	platform_driver_unregister(&mic_platform_driver);
+	platform_device_unregister(&mic_platform_dev);
+	mic_exit_card_debugfs();
+}
+
+module_init(mic_init);
+module_exit(mic_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) MIC X100 Card driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h
new file mode 100644
index 00000000000..d66ea55639c
--- /dev/null
+++ b/drivers/misc/mic/card/mic_x100.h
@@ -0,0 +1,48 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ * + * Disclaimer: The codes contained in these modules may be specific to + * the Intel Software Development Platform codenamed: Knights Ferry, and + * the Intel product codenamed: Knights Corner, and are not backward + * compatible with other Intel products. Additionally, Intel will NOT + * support the codes or instruction set in future products. + * + * Intel MIC Card driver. + * + */ +#ifndef _MIC_X100_CARD_H_ +#define _MIC_X100_CARD_H_ + +#define MIC_X100_MMIO_BASE 0x08007C0000ULL +#define MIC_X100_MMIO_LEN 0x00020000ULL +#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000ULL + +#define MIC_X100_SBOX_SPAD0 0x0000AB20 +#define MIC_X100_SBOX_SDBIC0 0x0000CC90 +#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000 +#define MIC_X100_SBOX_RDMASR0 0x0000B180 + +#define MIC_X100_MAX_DOORBELL_IDX 8 + +#define MIC_X100_NUM_SBOX_IRQ 8 +#define MIC_X100_NUM_RDMASR_IRQ 8 +#define MIC_X100_SBOX_IRQ_BASE 0 +#define MIC_X100_RDMASR_IRQ_BASE 17 + +#define MIC_X100_IRQ_BASE 26 + +#endif diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h new file mode 100644 index 00000000000..92999c2bbf8 --- /dev/null +++ b/drivers/misc/mic/common/mic_dev.h @@ -0,0 +1,51 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC driver. + * + */ +#ifndef __MIC_DEV_H__ +#define __MIC_DEV_H__ + +/** + * struct mic_mw - MIC memory window + * + * @pa: Base physical address. + * @va: Base ioremap'd virtual address. + * @len: Size of the memory window. + */ +struct mic_mw { + phys_addr_t pa; + void __iomem *va; + resource_size_t len; +}; + +/* + * Scratch pad register offsets used by the host to communicate + * device page DMA address to the card. + */ +#define MIC_DPLO_SPAD 14 +#define MIC_DPHI_SPAD 15 + +/* + * These values are supposed to be in the config_change field of the + * device page when the host sends a config change interrupt to the card. + */ +#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1 +#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2 + +#endif diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile new file mode 100644 index 00000000000..c2197f99939 --- /dev/null +++ b/drivers/misc/mic/host/Makefile @@ -0,0 +1,14 @@ +# +# Makefile - Intel MIC Linux driver. +# Copyright(c) 2013, Intel Corporation. +# +obj-$(CONFIG_INTEL_MIC_HOST) += mic_host.o +mic_host-objs := mic_main.o +mic_host-objs += mic_x100.o +mic_host-objs += mic_sysfs.o +mic_host-objs += mic_smpt.o +mic_host-objs += mic_intr.o +mic_host-objs += mic_boot.o +mic_host-objs += mic_debugfs.o +mic_host-objs += mic_fops.o +mic_host-objs += mic_virtio.o diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c new file mode 100644 index 00000000000..b75c6b5cc20 --- /dev/null +++ b/drivers/misc/mic/host/mic_boot.c @@ -0,0 +1,300 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Host driver.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_smpt.h"
+#include "mic_virtio.h"
+
+/**
+ * mic_reset - Reset the MIC device.
+ * @mdev: pointer to mic_device instance
+ */
+static void mic_reset(struct mic_device *mdev)
+{
+	int i;
+
+#define MIC_RESET_TO (45)
+
+	reinit_completion(&mdev->reset_wait);
+	mdev->ops->reset_fw_ready(mdev);
+	mdev->ops->reset(mdev);
+
+	for (i = 0; i < MIC_RESET_TO; i++) {
+		if (mdev->ops->is_fw_ready(mdev))
+			goto done;
+		/*
+		 * Resets typically take tens of seconds to complete.
+		 * Since an MMIO read is required to check if the
+		 * firmware is ready or not, a 1 second delay works nicely.
+		 */
+		msleep(1000);
+	}
+	mic_set_state(mdev, MIC_RESET_FAILED);
done:
+	complete_all(&mdev->reset_wait);
+}
+
+/* Initialize the MIC bootparams */
+void mic_bootparam_init(struct mic_device *mdev)
+{
+	struct mic_bootparam *bootparam = mdev->dp;
+
+	bootparam->magic = cpu_to_le32(MIC_MAGIC);
+	bootparam->c2h_shutdown_db = mdev->shutdown_db;
+	bootparam->h2c_shutdown_db = -1;
+	bootparam->h2c_config_db = -1;
+	bootparam->shutdown_status = 0;
+	bootparam->shutdown_card = 0;
+}
+
+/**
+ * mic_start - Start the MIC.
+ * @mdev: pointer to mic_device instance
+ * @buf: buffer containing boot string including firmware/ramdisk path.
+ *
+ * This function prepares an MIC for boot and initiates boot.
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int mic_start(struct mic_device *mdev, const char *buf)
+{
+	int rc;
+
+	mutex_lock(&mdev->mic_mutex);
retry:
+	if (MIC_OFFLINE != mdev->state) {
+		rc = -EINVAL;
+		goto unlock_ret;
+	}
+	if (!mdev->ops->is_fw_ready(mdev)) {
+		mic_reset(mdev);
+		/*
+		 * The state will either be MIC_OFFLINE if the reset succeeded
+		 * or MIC_RESET_FAILED if the firmware reset failed.
+		 */
+		goto retry;
+	}
+	rc = mdev->ops->load_mic_fw(mdev, buf);
+	if (rc)
+		goto unlock_ret;
+	mic_smpt_restore(mdev);
+	mic_intr_restore(mdev);
+	mdev->intr_ops->enable_interrupts(mdev);
+	mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
+	mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
+	mdev->ops->send_firmware_intr(mdev);
+	mic_set_state(mdev, MIC_ONLINE);
unlock_ret:
+	mutex_unlock(&mdev->mic_mutex);
+	return rc;
+}
+
+/**
+ * mic_stop - Prepare the MIC for reset and trigger reset.
+ * @mdev: pointer to mic_device instance
+ * @force: force a MIC to reset even if it is already offline.
+ *
+ * RETURNS: None.
+ */ +void mic_stop(struct mic_device *mdev, bool force) +{ + mutex_lock(&mdev->mic_mutex); + if (MIC_OFFLINE != mdev->state || force) { + mic_virtio_reset_devices(mdev); + mic_bootparam_init(mdev); + mic_reset(mdev); + if (MIC_RESET_FAILED == mdev->state) + goto unlock; + mic_set_shutdown_status(mdev, MIC_NOP); + if (MIC_SUSPENDED != mdev->state) + mic_set_state(mdev, MIC_OFFLINE); + } +unlock: + mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_shutdown - Initiate MIC shutdown. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. + */ +void mic_shutdown(struct mic_device *mdev) +{ + struct mic_bootparam *bootparam = mdev->dp; + s8 db = bootparam->h2c_shutdown_db; + + mutex_lock(&mdev->mic_mutex); + if (MIC_ONLINE == mdev->state && db != -1) { + bootparam->shutdown_card = 1; + mdev->ops->send_intr(mdev, db); + mic_set_state(mdev, MIC_SHUTTING_DOWN); + } + mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_shutdown_work - Handle shutdown interrupt from MIC. + * @work: The work structure. + * + * This work is scheduled whenever the host has received a shutdown + * interrupt from the MIC. + */ +void mic_shutdown_work(struct work_struct *work) +{ + struct mic_device *mdev = container_of(work, struct mic_device, + shutdown_work); + struct mic_bootparam *bootparam = mdev->dp; + + mutex_lock(&mdev->mic_mutex); + mic_set_shutdown_status(mdev, bootparam->shutdown_status); + bootparam->shutdown_status = 0; + + /* + * if state is MIC_SUSPENDED, OSPM suspend is in progress. We do not + * change the state here so as to prevent users from booting the card + * during and after the suspend operation. + */ + if (MIC_SHUTTING_DOWN != mdev->state && + MIC_SUSPENDED != mdev->state) + mic_set_state(mdev, MIC_SHUTTING_DOWN); + mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_reset_trigger_work - Trigger MIC reset. + * @work: The work structure. + * + * This work is scheduled whenever the host wants to reset the MIC. + */ +void mic_reset_trigger_work(struct work_struct *work) +{ + struct mic_device *mdev = container_of(work, struct mic_device, + reset_trigger_work); + + mic_stop(mdev, false); +} + +/** + * mic_complete_resume - Complete MIC Resume after an OSPM suspend/hibernate + * event. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. + */ +void mic_complete_resume(struct mic_device *mdev) +{ + if (mdev->state != MIC_SUSPENDED) { + dev_warn(mdev->sdev->parent, "state %d should be %d\n", + mdev->state, MIC_SUSPENDED); + return; + } + + /* Make sure firmware is ready */ + if (!mdev->ops->is_fw_ready(mdev)) + mic_stop(mdev, true); + + mutex_lock(&mdev->mic_mutex); + mic_set_state(mdev, MIC_OFFLINE); + mutex_unlock(&mdev->mic_mutex); +} + +/** + * mic_prepare_suspend - Handle suspend notification for the MIC device. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. + */ +void mic_prepare_suspend(struct mic_device *mdev) +{ + int rc; + +#define MIC_SUSPEND_TIMEOUT (60 * HZ) + + mutex_lock(&mdev->mic_mutex); + switch (mdev->state) { + case MIC_OFFLINE: + /* + * Card is already offline. Set state to MIC_SUSPENDED + * to prevent users from booting the card. + */ + mic_set_state(mdev, MIC_SUSPENDED); + mutex_unlock(&mdev->mic_mutex); + break; + case MIC_ONLINE: + /* + * Card is online. Set state to MIC_SUSPENDING and notify + * MIC user space daemon which will issue card + * shutdown and reset. 
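+		 * (The daemon is assumed to observe this transition via the
+		 * sysfs "state" entry and to perform the shutdown, which
+		 * completes reset_wait; see the timeout handling below.)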
+ */ + mic_set_state(mdev, MIC_SUSPENDING); + mutex_unlock(&mdev->mic_mutex); + rc = wait_for_completion_timeout(&mdev->reset_wait, + MIC_SUSPEND_TIMEOUT); + /* Force reset the card if the shutdown completion timed out */ + if (!rc) { + mutex_lock(&mdev->mic_mutex); + mic_set_state(mdev, MIC_SUSPENDED); + mutex_unlock(&mdev->mic_mutex); + mic_stop(mdev, true); + } + break; + case MIC_SHUTTING_DOWN: + /* + * Card is shutting down. Set state to MIC_SUSPENDED + * to prevent further boot of the card. + */ + mic_set_state(mdev, MIC_SUSPENDED); + mutex_unlock(&mdev->mic_mutex); + rc = wait_for_completion_timeout(&mdev->reset_wait, + MIC_SUSPEND_TIMEOUT); + /* Force reset the card if the shutdown completion timed out */ + if (!rc) + mic_stop(mdev, true); + break; + default: + mutex_unlock(&mdev->mic_mutex); + break; + } +} + +/** + * mic_suspend - Initiate MIC suspend. Suspend merely issues card shutdown. + * @mdev: pointer to mic_device instance + * + * RETURNS: None. + */ +void mic_suspend(struct mic_device *mdev) +{ + struct mic_bootparam *bootparam = mdev->dp; + s8 db = bootparam->h2c_shutdown_db; + + mutex_lock(&mdev->mic_mutex); + if (MIC_SUSPENDING == mdev->state && db != -1) { + bootparam->shutdown_card = 1; + mdev->ops->send_intr(mdev, db); + mic_set_state(mdev, MIC_SUSPENDED); + } + mutex_unlock(&mdev->mic_mutex); +} diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c new file mode 100644 index 00000000000..028ba5d6fd1 --- /dev/null +++ b/drivers/misc/mic/host/mic_debugfs.c @@ -0,0 +1,491 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/debugfs.h> +#include <linux/pci.h> +#include <linux/seq_file.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_smpt.h" +#include "mic_virtio.h" + +/* Debugfs parent dir */ +static struct dentry *mic_dbg; + +/** + * mic_log_buf_show - Display MIC kernel log buffer. + * + * log_buf addr/len is read from System.map by user space + * and populated in sysfs entries. + */ +static int mic_log_buf_show(struct seq_file *s, void *unused) +{ + void __iomem *log_buf_va; + int __iomem *log_buf_len_va; + struct mic_device *mdev = s->private; + void *kva; + int size; + unsigned long aper_offset; + + if (!mdev || !mdev->log_buf_addr || !mdev->log_buf_len) + goto done; + /* + * Card kernel will never be relocated and any kernel text/data mapping + * can be translated to phys address by subtracting __START_KERNEL_map. 
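+	 * For example (illustrative addresses): a card-side symbol at
+	 * 0xffffffff81001234 with __START_KERNEL_map at 0xffffffff80000000
+	 * yields aperture offset 0x1001234, which is then read through
+	 * mdev->aper.va as done below.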
+ */ + aper_offset = (unsigned long)mdev->log_buf_len - __START_KERNEL_map; + log_buf_len_va = mdev->aper.va + aper_offset; + aper_offset = (unsigned long)mdev->log_buf_addr - __START_KERNEL_map; + log_buf_va = mdev->aper.va + aper_offset; + size = ioread32(log_buf_len_va); + + kva = kmalloc(size, GFP_KERNEL); + if (!kva) + goto done; + mutex_lock(&mdev->mic_mutex); + memcpy_fromio(kva, log_buf_va, size); + switch (mdev->state) { + case MIC_ONLINE: + /* Fall through */ + case MIC_SHUTTING_DOWN: + seq_write(s, kva, size); + break; + default: + break; + } + mutex_unlock(&mdev->mic_mutex); + kfree(kva); +done: + return 0; +} + +static int mic_log_buf_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_log_buf_show, inode->i_private); +} + +static int mic_log_buf_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations log_buf_ops = { + .owner = THIS_MODULE, + .open = mic_log_buf_open, + .read = seq_read, + .llseek = seq_lseek, + .release = mic_log_buf_release +}; + +static int mic_smpt_show(struct seq_file *s, void *pos) +{ + int i; + struct mic_device *mdev = s->private; + unsigned long flags; + + seq_printf(s, "MIC %-2d |%-10s| %-14s %-10s\n", + mdev->id, "SMPT entry", "SW DMA addr", "RefCount"); + seq_puts(s, "====================================================\n"); + + if (mdev->smpt) { + struct mic_smpt_info *smpt_info = mdev->smpt; + spin_lock_irqsave(&smpt_info->smpt_lock, flags); + for (i = 0; i < smpt_info->info.num_reg; i++) { + seq_printf(s, "%9s|%-10d| %-#14llx %-10lld\n", + " ", i, smpt_info->entry[i].dma_addr, + smpt_info->entry[i].ref_count); + } + spin_unlock_irqrestore(&smpt_info->smpt_lock, flags); + } + seq_puts(s, "====================================================\n"); + return 0; +} + +static int mic_smpt_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_smpt_show, inode->i_private); +} + +static int mic_smpt_debug_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations smpt_file_ops = { + .owner = THIS_MODULE, + .open = mic_smpt_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = mic_smpt_debug_release +}; + +static int mic_soft_reset_show(struct seq_file *s, void *pos) +{ + struct mic_device *mdev = s->private; + + mic_stop(mdev, true); + return 0; +} + +static int mic_soft_reset_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_soft_reset_show, inode->i_private); +} + +static int mic_soft_reset_debug_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations soft_reset_ops = { + .owner = THIS_MODULE, + .open = mic_soft_reset_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = mic_soft_reset_debug_release +}; + +static int mic_post_code_show(struct seq_file *s, void *pos) +{ + struct mic_device *mdev = s->private; + u32 reg = mdev->ops->get_postcode(mdev); + + seq_printf(s, "%c%c", reg & 0xff, (reg >> 8) & 0xff); + return 0; +} + +static int mic_post_code_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_post_code_show, inode->i_private); +} + +static int mic_post_code_debug_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations post_code_ops = { + .owner = THIS_MODULE, + .open = mic_post_code_debug_open, + .read 
= seq_read, + .llseek = seq_lseek, + .release = mic_post_code_debug_release +}; + +static int mic_dp_show(struct seq_file *s, void *pos) +{ + struct mic_device *mdev = s->private; + struct mic_device_desc *d; + struct mic_device_ctrl *dc; + struct mic_vqconfig *vqconfig; + __u32 *features; + __u8 *config; + struct mic_bootparam *bootparam = mdev->dp; + int i, j; + + seq_printf(s, "Bootparam: magic 0x%x\n", + bootparam->magic); + seq_printf(s, "Bootparam: h2c_shutdown_db %d\n", + bootparam->h2c_shutdown_db); + seq_printf(s, "Bootparam: h2c_config_db %d\n", + bootparam->h2c_config_db); + seq_printf(s, "Bootparam: c2h_shutdown_db %d\n", + bootparam->c2h_shutdown_db); + seq_printf(s, "Bootparam: shutdown_status %d\n", + bootparam->shutdown_status); + seq_printf(s, "Bootparam: shutdown_card %d\n", + bootparam->shutdown_card); + + for (i = sizeof(*bootparam); i < MIC_DP_SIZE; + i += mic_total_desc_size(d)) { + d = mdev->dp + i; + dc = (void *)d + mic_aligned_desc_size(d); + + /* end of list */ + if (d->type == 0) + break; + + if (d->type == -1) + continue; + + seq_printf(s, "Type %d ", d->type); + seq_printf(s, "Num VQ %d ", d->num_vq); + seq_printf(s, "Feature Len %d\n", d->feature_len); + seq_printf(s, "Config Len %d ", d->config_len); + seq_printf(s, "Shutdown Status %d\n", d->status); + + for (j = 0; j < d->num_vq; j++) { + vqconfig = mic_vq_config(d) + j; + seq_printf(s, "vqconfig[%d]: ", j); + seq_printf(s, "address 0x%llx ", vqconfig->address); + seq_printf(s, "num %d ", vqconfig->num); + seq_printf(s, "used address 0x%llx\n", + vqconfig->used_address); + } + + features = (__u32 *)mic_vq_features(d); + seq_printf(s, "Features: Host 0x%x ", features[0]); + seq_printf(s, "Guest 0x%x\n", features[1]); + + config = mic_vq_configspace(d); + for (j = 0; j < d->config_len; j++) + seq_printf(s, "config[%d]=%d\n", j, config[j]); + + seq_puts(s, "Device control:\n"); + seq_printf(s, "Config Change %d ", dc->config_change); + seq_printf(s, "Vdev reset %d\n", dc->vdev_reset); + seq_printf(s, "Guest Ack %d ", dc->guest_ack); + seq_printf(s, "Host ack %d\n", dc->host_ack); + seq_printf(s, "Used address updated %d ", + dc->used_address_updated); + seq_printf(s, "Vdev 0x%llx\n", dc->vdev); + seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db); + seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db); + } + + return 0; +} + +static int mic_dp_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_dp_show, inode->i_private); +} + +static int mic_dp_debug_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations dp_ops = { + .owner = THIS_MODULE, + .open = mic_dp_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = mic_dp_debug_release +}; + +static int mic_vdev_info_show(struct seq_file *s, void *unused) +{ + struct mic_device *mdev = s->private; + struct list_head *pos, *tmp; + struct mic_vdev *mvdev; + int i, j; + + mutex_lock(&mdev->mic_mutex); + list_for_each_safe(pos, tmp, &mdev->vdev_list) { + mvdev = list_entry(pos, struct mic_vdev, list); + seq_printf(s, "VDEV type %d state %s in %ld out %ld\n", + mvdev->virtio_id, + mic_vdevup(mvdev) ? 
"UP" : "DOWN", + mvdev->in_bytes, + mvdev->out_bytes); + for (i = 0; i < MIC_MAX_VRINGS; i++) { + struct vring_desc *desc; + struct vring_avail *avail; + struct vring_used *used; + struct mic_vringh *mvr = &mvdev->mvr[i]; + struct vringh *vrh = &mvr->vrh; + int num = vrh->vring.num; + if (!num) + continue; + desc = vrh->vring.desc; + seq_printf(s, "vring i %d avail_idx %d", + i, mvr->vring.info->avail_idx & (num - 1)); + seq_printf(s, " vring i %d avail_idx %d\n", + i, mvr->vring.info->avail_idx); + seq_printf(s, "vrh i %d weak_barriers %d", + i, vrh->weak_barriers); + seq_printf(s, " last_avail_idx %d last_used_idx %d", + vrh->last_avail_idx, vrh->last_used_idx); + seq_printf(s, " completed %d\n", vrh->completed); + for (j = 0; j < num; j++) { + seq_printf(s, "desc[%d] addr 0x%llx len %d", + j, desc->addr, desc->len); + seq_printf(s, " flags 0x%x next %d\n", + desc->flags, desc->next); + desc++; + } + avail = vrh->vring.avail; + seq_printf(s, "avail flags 0x%x idx %d\n", + avail->flags, avail->idx & (num - 1)); + seq_printf(s, "avail flags 0x%x idx %d\n", + avail->flags, avail->idx); + for (j = 0; j < num; j++) + seq_printf(s, "avail ring[%d] %d\n", + j, avail->ring[j]); + used = vrh->vring.used; + seq_printf(s, "used flags 0x%x idx %d\n", + used->flags, used->idx & (num - 1)); + seq_printf(s, "used flags 0x%x idx %d\n", + used->flags, used->idx); + for (j = 0; j < num; j++) + seq_printf(s, "used ring[%d] id %d len %d\n", + j, used->ring[j].id, + used->ring[j].len); + } + } + mutex_unlock(&mdev->mic_mutex); + + return 0; +} + +static int mic_vdev_info_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_vdev_info_show, inode->i_private); +} + +static int mic_vdev_info_debug_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations vdev_info_ops = { + .owner = THIS_MODULE, + .open = mic_vdev_info_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = mic_vdev_info_debug_release +}; + +static int mic_msi_irq_info_show(struct seq_file *s, void *pos) +{ + struct mic_device *mdev = s->private; + int reg; + int i, j; + u16 entry; + u16 vector; + struct pci_dev *pdev = container_of(mdev->sdev->parent, + struct pci_dev, dev); + + if (pci_dev_msi_enabled(pdev)) { + for (i = 0; i < mdev->irq_info.num_vectors; i++) { + if (pdev->msix_enabled) { + entry = mdev->irq_info.msix_entries[i].entry; + vector = mdev->irq_info.msix_entries[i].vector; + } else { + entry = 0; + vector = pdev->irq; + } + + reg = mdev->intr_ops->read_msi_to_src_map(mdev, entry); + + seq_printf(s, "%s %-10d %s %-10d MXAR[%d]: %08X\n", + "IRQ:", vector, "Entry:", entry, i, reg); + + seq_printf(s, "%-10s", "offset:"); + for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--) + seq_printf(s, "%4d ", j); + seq_puts(s, "\n"); + + + seq_printf(s, "%-10s", "count:"); + for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--) + seq_printf(s, "%4d ", + (mdev->irq_info.mic_msi_map[i] & + BIT(j)) ? 
1 : 0); + seq_puts(s, "\n\n"); + } + } else { + seq_puts(s, "MSI/MSIx interrupts not enabled\n"); + } + + return 0; +} + +static int mic_msi_irq_info_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, mic_msi_irq_info_show, inode->i_private); +} + +static int +mic_msi_irq_info_debug_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations msi_irq_info_ops = { + .owner = THIS_MODULE, + .open = mic_msi_irq_info_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = mic_msi_irq_info_debug_release +}; + +/** + * mic_create_debug_dir - Initialize MIC debugfs entries. + */ +void mic_create_debug_dir(struct mic_device *mdev) +{ + if (!mic_dbg) + return; + + mdev->dbg_dir = debugfs_create_dir(dev_name(mdev->sdev), mic_dbg); + if (!mdev->dbg_dir) + return; + + debugfs_create_file("log_buf", 0444, mdev->dbg_dir, mdev, &log_buf_ops); + + debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev, &smpt_file_ops); + + debugfs_create_file("soft_reset", 0444, mdev->dbg_dir, mdev, + &soft_reset_ops); + + debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, + &post_code_ops); + + debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops); + + debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev, + &vdev_info_ops); + + debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev, + &msi_irq_info_ops); +} + +/** + * mic_delete_debug_dir - Uninitialize MIC debugfs entries. + */ +void mic_delete_debug_dir(struct mic_device *mdev) +{ + if (!mdev->dbg_dir) + return; + + debugfs_remove_recursive(mdev->dbg_dir); +} + +/** + * mic_init_debugfs - Initialize global debugfs entry. + */ +void __init mic_init_debugfs(void) +{ + mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!mic_dbg) + pr_err("can't create debugfs dir\n"); +} + +/** + * mic_exit_debugfs - Uninitialize global debugfs entry + */ +void mic_exit_debugfs(void) +{ + debugfs_remove(mic_dbg); +} diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h new file mode 100644 index 00000000000..0398c696d25 --- /dev/null +++ b/drivers/misc/mic/host/mic_device.h @@ -0,0 +1,207 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef _MIC_DEVICE_H_ +#define _MIC_DEVICE_H_ + +#include <linux/cdev.h> +#include <linux/idr.h> +#include <linux/notifier.h> +#include <linux/irqreturn.h> + +#include "mic_intr.h" + +/* The maximum number of MIC devices supported in a single host system. */ +#define MIC_MAX_NUM_DEVS 256 + +/** + * enum mic_hw_family - The hardware family to which a device belongs. + */ +enum mic_hw_family { + MIC_FAMILY_X100 = 0, + MIC_FAMILY_UNKNOWN +}; + +/** + * enum mic_stepping - MIC stepping ids. 
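+ *
+ * The stepping is read from the PCI revision id at probe time
+ * (mic_probe() sets mdev->stepping = pdev->revision); a C0 part,
+ * for example, reports 0x20.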
+ */ +enum mic_stepping { + MIC_A0_STEP = 0x0, + MIC_B0_STEP = 0x10, + MIC_B1_STEP = 0x11, + MIC_C0_STEP = 0x20, +}; + +/** + * struct mic_device - MIC device information for each card. + * + * @mmio: MMIO bar information. + * @aper: Aperture bar information. + * @family: The MIC family to which this device belongs. + * @ops: MIC HW specific operations. + * @id: The unique device id for this MIC device. + * @stepping: Stepping ID. + * @attr_group: Pointer to list of sysfs attribute groups. + * @sdev: Device for sysfs entries. + * @mic_mutex: Mutex for synchronizing access to mic_device. + * @intr_ops: HW specific interrupt operations. + * @smpt_ops: Hardware specific SMPT operations. + * @smpt: MIC SMPT information. + * @intr_info: H/W specific interrupt information. + * @irq_info: The OS specific irq information + * @dbg_dir: debugfs directory of this MIC device. + * @cmdline: Kernel command line. + * @firmware: Firmware file name. + * @ramdisk: Ramdisk file name. + * @bootmode: Boot mode i.e. "linux" or "elf" for flash updates. + * @bootaddr: MIC boot address. + * @reset_trigger_work: Work for triggering reset requests. + * @shutdown_work: Work for handling shutdown interrupts. + * @state: MIC state. + * @shutdown_status: MIC status reported by card for shutdown/crashes. + * @state_sysfs: Sysfs dirent for notifying ring 3 about MIC state changes. + * @reset_wait: Waitqueue for sleeping while reset completes. + * @log_buf_addr: Log buffer address for MIC. + * @log_buf_len: Log buffer length address for MIC. + * @dp: virtio device page + * @dp_dma_addr: virtio device page DMA address. + * @shutdown_db: shutdown doorbell. + * @shutdown_cookie: shutdown cookie. + * @cdev: Character device for MIC. + * @vdev_list: list of virtio devices. + * @pm_notifier: Handles PM notifications from the OS. + */ +struct mic_device { + struct mic_mw mmio; + struct mic_mw aper; + enum mic_hw_family family; + struct mic_hw_ops *ops; + int id; + enum mic_stepping stepping; + const struct attribute_group **attr_group; + struct device *sdev; + struct mutex mic_mutex; + struct mic_hw_intr_ops *intr_ops; + struct mic_smpt_ops *smpt_ops; + struct mic_smpt_info *smpt; + struct mic_intr_info *intr_info; + struct mic_irq_info irq_info; + struct dentry *dbg_dir; + char *cmdline; + char *firmware; + char *ramdisk; + char *bootmode; + u32 bootaddr; + struct work_struct reset_trigger_work; + struct work_struct shutdown_work; + u8 state; + u8 shutdown_status; + struct kernfs_node *state_sysfs; + struct completion reset_wait; + void *log_buf_addr; + int *log_buf_len; + void *dp; + dma_addr_t dp_dma_addr; + int shutdown_db; + struct mic_irq *shutdown_cookie; + struct cdev cdev; + struct list_head vdev_list; + struct notifier_block pm_notifier; +}; + +/** + * struct mic_hw_ops - MIC HW specific operations. + * @aper_bar: Aperture bar resource number. + * @mmio_bar: MMIO bar resource number. + * @read_spad: Read from scratch pad register. + * @write_spad: Write to scratch pad register. + * @send_intr: Send an interrupt for a particular doorbell on the card. + * @ack_interrupt: Hardware specific operations to ack the h/w on + * receipt of an interrupt. + * @intr_workarounds: Hardware specific workarounds needed after + * handling an interrupt. + * @reset: Reset the remote processor. + * @reset_fw_ready: Reset firmware ready field. + * @is_fw_ready: Check if firmware is ready for OS download. + * @send_firmware_intr: Send an interrupt to the card firmware. 
+ * @load_mic_fw: Load firmware segments required to boot the card + * into card memory. This includes the kernel, command line, ramdisk etc. + * @get_postcode: Get post code status from firmware. + */ +struct mic_hw_ops { + u8 aper_bar; + u8 mmio_bar; + u32 (*read_spad)(struct mic_device *mdev, unsigned int idx); + void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val); + void (*send_intr)(struct mic_device *mdev, int doorbell); + u32 (*ack_interrupt)(struct mic_device *mdev); + void (*intr_workarounds)(struct mic_device *mdev); + void (*reset)(struct mic_device *mdev); + void (*reset_fw_ready)(struct mic_device *mdev); + bool (*is_fw_ready)(struct mic_device *mdev); + void (*send_firmware_intr)(struct mic_device *mdev); + int (*load_mic_fw)(struct mic_device *mdev, const char *buf); + u32 (*get_postcode)(struct mic_device *mdev); +}; + +/** + * mic_mmio_read - read from an MMIO register. + * @mw: MMIO register base virtual address. + * @offset: register offset. + * + * RETURNS: register value. + */ +static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset) +{ + return ioread32(mw->va + offset); +} + +/** + * mic_mmio_write - write to an MMIO register. + * @mw: MMIO register base virtual address. + * @val: the data value to put into the register + * @offset: register offset. + * + * RETURNS: none. + */ +static inline void +mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset) +{ + iowrite32(val, mw->va + offset); +} + +void mic_sysfs_init(struct mic_device *mdev); +int mic_start(struct mic_device *mdev, const char *buf); +void mic_stop(struct mic_device *mdev, bool force); +void mic_shutdown(struct mic_device *mdev); +void mic_reset_delayed_work(struct work_struct *work); +void mic_reset_trigger_work(struct work_struct *work); +void mic_shutdown_work(struct work_struct *work); +void mic_bootparam_init(struct mic_device *mdev); +void mic_set_state(struct mic_device *mdev, u8 state); +void mic_set_shutdown_status(struct mic_device *mdev, u8 status); +void mic_create_debug_dir(struct mic_device *dev); +void mic_delete_debug_dir(struct mic_device *dev); +void __init mic_init_debugfs(void); +void mic_exit_debugfs(void); +void mic_prepare_suspend(struct mic_device *mdev); +void mic_complete_resume(struct mic_device *mdev); +void mic_suspend(struct mic_device *mdev); +#endif diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c new file mode 100644 index 00000000000..85776d7327f --- /dev/null +++ b/drivers/misc/mic/host/mic_fops.c @@ -0,0 +1,222 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ *
+ */
+#include <linux/poll.h>
+#include <linux/pci.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+#include "mic_device.h"
+#include "mic_fops.h"
+#include "mic_virtio.h"
+
+int mic_open(struct inode *inode, struct file *f)
+{
+	struct mic_vdev *mvdev;
+	struct mic_device *mdev = container_of(inode->i_cdev,
+		struct mic_device, cdev);
+
+	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
+	if (!mvdev)
+		return -ENOMEM;
+
+	init_waitqueue_head(&mvdev->waitq);
+	INIT_LIST_HEAD(&mvdev->list);
+	mvdev->mdev = mdev;
+	mvdev->virtio_id = -1;
+
+	f->private_data = mvdev;
+	return 0;
+}
+
+int mic_release(struct inode *inode, struct file *f)
+{
+	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+
+	if (-1 != mvdev->virtio_id)
+		mic_virtio_del_device(mvdev);
+	f->private_data = NULL;
+	kfree(mvdev);
+	return 0;
+}
+
+long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+	void __user *argp = (void __user *)arg;
+	int ret;
+
+	switch (cmd) {
+	case MIC_VIRTIO_ADD_DEVICE:
+	{
+		ret = mic_virtio_add_device(mvdev, argp);
+		if (ret < 0) {
+			dev_err(mic_dev(mvdev),
+				"%s %d errno ret %d\n",
+				__func__, __LINE__, ret);
+			return ret;
+		}
+		break;
+	}
+	case MIC_VIRTIO_COPY_DESC:
+	{
+		struct mic_copy_desc copy;
+
+		ret = mic_vdev_inited(mvdev);
+		if (ret)
+			return ret;
+
+		if (copy_from_user(&copy, argp, sizeof(copy)))
+			return -EFAULT;
+
+		dev_dbg(mic_dev(mvdev),
+			"%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n",
+			__func__, __LINE__, copy.iovcnt, copy.vr_idx,
+			copy.update_used);
+
+		ret = mic_virtio_copy_desc(mvdev, &copy);
+		if (ret < 0) {
+			dev_err(mic_dev(mvdev),
+				"%s %d errno ret %d\n",
+				__func__, __LINE__, ret);
+			return ret;
+		}
+		if (copy_to_user(
+			&((struct mic_copy_desc __user *)argp)->out_len,
+			&copy.out_len, sizeof(copy.out_len))) {
+			dev_err(mic_dev(mvdev), "%s %d errno ret %d\n",
+				__func__, __LINE__, -EFAULT);
+			return -EFAULT;
+		}
+		break;
+	}
+	case MIC_VIRTIO_CONFIG_CHANGE:
+	{
+		ret = mic_vdev_inited(mvdev);
+		if (ret)
+			return ret;
+
+		ret = mic_virtio_config_change(mvdev, argp);
+		if (ret < 0) {
+			dev_err(mic_dev(mvdev),
+				"%s %d errno ret %d\n",
+				__func__, __LINE__, ret);
+			return ret;
+		}
+		break;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+
+/*
+ * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
+ * not when previously enqueued buffers may be available. This means that
+ * in the card->host (TX) path, when userspace is unblocked by poll it
+ * must drain all available descriptors or it can stall.
+ */
+unsigned int mic_poll(struct file *f, poll_table *wait)
+{
+	struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
+	int mask = 0;
+
+	poll_wait(f, &mvdev->waitq, wait);
+
+	if (mic_vdev_inited(mvdev)) {
+		mask = POLLERR;
+	} else if (mvdev->poll_wake) {
+		mvdev->poll_wake = 0;
+		mask = POLLIN | POLLOUT;
+	}
+
+	return mask;
+}
+
+static inline int
+mic_query_offset(struct mic_vdev *mvdev, unsigned long offset,
+		unsigned long *size, unsigned long *pa)
+{
+	struct mic_device *mdev = mvdev->mdev;
+	unsigned long start = MIC_DP_SIZE;
+	int i;
+
+	/*
+	 * MMAP interface is as follows:
+	 * offset				region
+	 * 0x0					virtio device_page
+	 * 0x1000				first vring
+	 * 0x1000 + size of 1st vring		second vring
+	 * ....
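+	 *
+	 * Each region starts where the previous one ends, so userspace
+	 * must compute its mmap offsets with the same running sum of
+	 * vring lengths that this function walks.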
+ */ + if (!offset) { + *pa = virt_to_phys(mdev->dp); + *size = MIC_DP_SIZE; + return 0; + } + + for (i = 0; i < mvdev->dd->num_vq; i++) { + struct mic_vringh *mvr = &mvdev->mvr[i]; + if (offset == start) { + *pa = virt_to_phys(mvr->vring.va); + *size = mvr->vring.len; + return 0; + } + start += mvr->vring.len; + } + return -1; +} + +/* + * Maps the device page and virtio rings to user space for readonly access. + */ +int +mic_mmap(struct file *f, struct vm_area_struct *vma) +{ + struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; + int i, err; + + err = mic_vdev_inited(mvdev); + if (err) + return err; + + if (vma->vm_flags & VM_WRITE) + return -EACCES; + + while (size_rem) { + i = mic_query_offset(mvdev, offset, &size, &pa); + if (i < 0) + return -EINVAL; + err = remap_pfn_range(vma, vma->vm_start + offset, + pa >> PAGE_SHIFT, size, vma->vm_page_prot); + if (err) + return err; + dev_dbg(mic_dev(mvdev), + "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n", + __func__, __LINE__, mvdev->virtio_id, size, offset, + pa, vma->vm_start + offset); + size_rem -= size; + offset += size; + } + return 0; +} diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h new file mode 100644 index 00000000000..dc3893dff66 --- /dev/null +++ b/drivers/misc/mic/host/mic_fops.h @@ -0,0 +1,32 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef _MIC_FOPS_H_ +#define _MIC_FOPS_H_ + +int mic_open(struct inode *inode, struct file *filp); +int mic_release(struct inode *inode, struct file *filp); +ssize_t mic_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos); +long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +int mic_mmap(struct file *f, struct vm_area_struct *vma); +unsigned int mic_poll(struct file *f, poll_table *wait); + +#endif diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c new file mode 100644 index 00000000000..dbc5afde139 --- /dev/null +++ b/drivers/misc/mic/host/mic_intr.c @@ -0,0 +1,630 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ * + */ +#include <linux/pci.h> +#include <linux/interrupt.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" + +/* + * mic_invoke_callback - Invoke callback functions registered for + * the corresponding source id. + * + * @mdev: pointer to the mic_device instance + * @idx: The interrupt source id. + * + * Returns none. + */ +static inline void mic_invoke_callback(struct mic_device *mdev, int idx) +{ + struct mic_intr_cb *intr_cb; + struct pci_dev *pdev = container_of(mdev->sdev->parent, + struct pci_dev, dev); + + spin_lock(&mdev->irq_info.mic_intr_lock); + list_for_each_entry(intr_cb, &mdev->irq_info.cb_list[idx], list) + if (intr_cb->func) + intr_cb->func(pdev->irq, intr_cb->data); + spin_unlock(&mdev->irq_info.mic_intr_lock); +} + +/** + * mic_interrupt - Generic interrupt handler for + * MSI and INTx based interrupts. + */ +static irqreturn_t mic_interrupt(int irq, void *dev) +{ + struct mic_device *mdev = dev; + struct mic_intr_info *info = mdev->intr_info; + u32 mask; + int i; + + mask = mdev->ops->ack_interrupt(mdev); + if (!mask) + return IRQ_NONE; + + for (i = info->intr_start_idx[MIC_INTR_DB]; + i < info->intr_len[MIC_INTR_DB]; i++) + if (mask & BIT(i)) + mic_invoke_callback(mdev, i); + + return IRQ_HANDLED; +} + +/* Return the interrupt offset from the index. Index is 0 based. */ +static u16 mic_map_src_to_offset(struct mic_device *mdev, + int intr_src, enum mic_intr_type type) +{ + if (type >= MIC_NUM_INTR_TYPES) + return MIC_NUM_OFFSETS; + if (intr_src >= mdev->intr_info->intr_len[type]) + return MIC_NUM_OFFSETS; + + return mdev->intr_info->intr_start_idx[type] + intr_src; +} + +/* Return next available msix_entry. */ +static struct msix_entry *mic_get_available_vector(struct mic_device *mdev) +{ + int i; + struct mic_irq_info *info = &mdev->irq_info; + + for (i = 0; i < info->num_vectors; i++) + if (!info->mic_msi_map[i]) + return &info->msix_entries[i]; + return NULL; +} + +/** + * mic_register_intr_callback - Register a callback handler for the + * given source id. + * + * @mdev: pointer to the mic_device instance + * @idx: The source id to be registered. + * @func: The function to be called when the source id receives + * the interrupt. + * @data: Private data of the requester. + * Return the callback structure that was registered or an + * appropriate error on failure. + */ +static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev, + u8 idx, irqreturn_t (*func) (int irq, void *dev), + void *data) +{ + struct mic_intr_cb *intr_cb; + unsigned long flags; + int rc; + intr_cb = kmalloc(sizeof(*intr_cb), GFP_KERNEL); + + if (!intr_cb) + return ERR_PTR(-ENOMEM); + + intr_cb->func = func; + intr_cb->data = data; + intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida, + 0, 0, GFP_KERNEL); + if (intr_cb->cb_id < 0) { + rc = intr_cb->cb_id; + goto ida_fail; + } + + spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags); + list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]); + spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags); + + return intr_cb; +ida_fail: + kfree(intr_cb); + return ERR_PTR(rc); +} + +/** + * mic_unregister_intr_callback - Unregister the callback handler + * identified by its callback id. + * + * @mdev: pointer to the mic_device instance + * @idx: The callback structure id to be unregistered. + * Return the source id that was unregistered or MIC_NUM_OFFSETS if no + * such callback handler was found. 
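+ * Callers must therefore treat any return value >= MIC_NUM_OFFSETS
+ * as failure, as mic_free_irq() does.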
+ */ +static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx) +{ + struct list_head *pos, *tmp; + struct mic_intr_cb *intr_cb; + unsigned long flags; + int i; + + for (i = 0; i < MIC_NUM_OFFSETS; i++) { + spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags); + list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) { + intr_cb = list_entry(pos, struct mic_intr_cb, list); + if (intr_cb->cb_id == idx) { + list_del(pos); + ida_simple_remove(&mdev->irq_info.cb_ida, + intr_cb->cb_id); + kfree(intr_cb); + spin_unlock_irqrestore( + &mdev->irq_info.mic_intr_lock, flags); + return i; + } + } + spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags); + } + return MIC_NUM_OFFSETS; +} + +/** + * mic_setup_msix - Initializes MSIx interrupts. + * + * @mdev: pointer to mic_device instance + * + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev) +{ + int rc, i; + int entry_size = sizeof(*mdev->irq_info.msix_entries); + + mdev->irq_info.msix_entries = kmalloc_array(MIC_MIN_MSIX, + entry_size, GFP_KERNEL); + if (!mdev->irq_info.msix_entries) { + rc = -ENOMEM; + goto err_nomem1; + } + + for (i = 0; i < MIC_MIN_MSIX; i++) + mdev->irq_info.msix_entries[i].entry = i; + + rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries, + MIC_MIN_MSIX); + if (rc) { + dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc); + goto err_enable_msix; + } + + mdev->irq_info.num_vectors = MIC_MIN_MSIX; + mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) * + mdev->irq_info.num_vectors), GFP_KERNEL); + + if (!mdev->irq_info.mic_msi_map) { + rc = -ENOMEM; + goto err_nomem2; + } + + dev_dbg(mdev->sdev->parent, + "%d MSIx irqs setup\n", mdev->irq_info.num_vectors); + return 0; +err_nomem2: + pci_disable_msix(pdev); +err_enable_msix: + kfree(mdev->irq_info.msix_entries); +err_nomem1: + mdev->irq_info.num_vectors = 0; + return rc; +} + +/** + * mic_setup_callbacks - Initialize data structures needed + * to handle callbacks. + * + * @mdev: pointer to mic_device instance + */ +static int mic_setup_callbacks(struct mic_device *mdev) +{ + int i; + + mdev->irq_info.cb_list = kmalloc_array(MIC_NUM_OFFSETS, + sizeof(*mdev->irq_info.cb_list), + GFP_KERNEL); + if (!mdev->irq_info.cb_list) + return -ENOMEM; + + for (i = 0; i < MIC_NUM_OFFSETS; i++) + INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]); + ida_init(&mdev->irq_info.cb_ida); + spin_lock_init(&mdev->irq_info.mic_intr_lock); + return 0; +} + +/** + * mic_release_callbacks - Uninitialize data structures needed + * to handle callbacks. + * + * @mdev: pointer to mic_device instance + */ +static void mic_release_callbacks(struct mic_device *mdev) +{ + unsigned long flags; + struct list_head *pos, *tmp; + struct mic_intr_cb *intr_cb; + int i; + + for (i = 0; i < MIC_NUM_OFFSETS; i++) { + spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags); + + if (list_empty(&mdev->irq_info.cb_list[i])) { + spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, + flags); + break; + } + + list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) { + intr_cb = list_entry(pos, struct mic_intr_cb, list); + list_del(pos); + ida_simple_remove(&mdev->irq_info.cb_ida, + intr_cb->cb_id); + kfree(intr_cb); + } + spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags); + } + ida_destroy(&mdev->irq_info.cb_ida); + kfree(mdev->irq_info.cb_list); +} + +/** + * mic_setup_msi - Initializes MSI interrupts. 
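+ * Used as a fallback when MSI-x setup fails; the single MSI vector
+ * is demultiplexed through the per-source callback lists.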
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: PCI device structure
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
+{
+	int rc;
+
+	rc = pci_enable_msi(pdev);
+	if (rc) {
+		dev_dbg(&pdev->dev, "Error enabling MSI. rc = %d\n", rc);
+		return rc;
+	}
+
+	mdev->irq_info.num_vectors = 1;
+	mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
+		mdev->irq_info.num_vectors), GFP_KERNEL);
+
+	if (!mdev->irq_info.mic_msi_map) {
+		rc = -ENOMEM;
+		goto err_nomem1;
+	}
+
+	rc = mic_setup_callbacks(mdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Error setting up callbacks\n");
+		goto err_nomem2;
+	}
+
+	rc = request_irq(pdev->irq, mic_interrupt, 0, "mic-msi", mdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
+		goto err_irq_req_fail;
+	}
+
+	dev_dbg(&pdev->dev, "%d MSI irqs setup\n", mdev->irq_info.num_vectors);
+	return 0;
+err_irq_req_fail:
+	mic_release_callbacks(mdev);
+err_nomem2:
+	kfree(mdev->irq_info.mic_msi_map);
+err_nomem1:
+	pci_disable_msi(pdev);
+	mdev->irq_info.num_vectors = 0;
+	return rc;
+}
+
+/**
+ * mic_setup_intx - Initializes legacy interrupts.
+ *
+ * @mdev: pointer to mic_device instance
+ * @pdev: PCI device structure
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
+{
+	int rc;
+
+	pci_msi_off(pdev);
+
+	/* Enable intx */
+	pci_intx(pdev, 1);
+	rc = mic_setup_callbacks(mdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Error setting up callbacks\n");
+		goto err_nomem;
+	}
+
+	rc = request_irq(pdev->irq, mic_interrupt,
+		IRQF_SHARED, "mic-intx", mdev);
+	if (rc)
+		goto err;
+
+	dev_dbg(&pdev->dev, "intx irq setup\n");
+	return 0;
+err:
+	mic_release_callbacks(mdev);
+err_nomem:
+	return rc;
+}
+
+/**
+ * mic_next_db - Retrieve the next doorbell interrupt source id.
+ * The id is picked sequentially from the available pool of
+ * doorbell ids.
+ *
+ * @mdev: pointer to the mic_device instance.
+ *
+ * Returns the next doorbell interrupt source.
+ */
+int mic_next_db(struct mic_device *mdev)
+{
+	int next_db;
+
+	next_db = mdev->irq_info.next_avail_src %
+		mdev->intr_info->intr_len[MIC_INTR_DB];
+	mdev->irq_info.next_avail_src++;
+	return next_db;
+}
+
+#define COOKIE_ID_SHIFT 16
+#define GET_ENTRY(cookie) ((cookie) & 0xFFFF)
+#define GET_OFFSET(cookie) ((cookie) >> COOKIE_ID_SHIFT)
+#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT)
+
+/**
+ * mic_request_irq - request an irq. mic_mutex needs
+ * to be held before calling this function.
+ *
+ * @mdev: pointer to mic_device instance
+ * @func: The callback function that handles the interrupt.
+ * The function needs to call ack_interrupts
+ * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts.
+ * @name: The ASCII name of the callee requesting the irq.
+ * @data: private data that is returned back when calling the
+ * function handler.
+ * @intr_src: The source id of the requester. It's the doorbell id
+ * for Doorbell interrupts and DMA channel id for DMA interrupts.
+ * @type: The type of interrupt. Values defined in mic_intr_type
+ *
+ * returns: The cookie that is transparent to the caller. Passed
+ * back when calling mic_free_irq. An appropriate error code
+ * is returned on failure. Caller needs to use IS_ERR(return_val)
+ * to check for failure and PTR_ERR(return_val) to obtain the
+ * error code.
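+ *
+ * Illustrative call sequence (my_db_handler and the "mydb" name are
+ * hypothetical, not part of this driver); mic_mutex must be held:
+ *
+ *	int db = mic_next_db(mdev);
+ *	struct mic_irq *cookie = mic_request_irq(mdev, my_db_handler,
+ *						 "mydb", mdev, db,
+ *						 MIC_INTR_DB);
+ *	if (IS_ERR(cookie))
+ *		return PTR_ERR(cookie);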
+ * + */ +struct mic_irq *mic_request_irq(struct mic_device *mdev, + irqreturn_t (*func)(int irq, void *dev), + const char *name, void *data, int intr_src, + enum mic_intr_type type) +{ + u16 offset; + int rc = 0; + struct msix_entry *msix = NULL; + unsigned long cookie = 0; + u16 entry; + struct mic_intr_cb *intr_cb; + struct pci_dev *pdev = container_of(mdev->sdev->parent, + struct pci_dev, dev); + + offset = mic_map_src_to_offset(mdev, intr_src, type); + if (offset >= MIC_NUM_OFFSETS) { + dev_err(mdev->sdev->parent, + "Error mapping index %d to a valid source id.\n", + intr_src); + rc = -EINVAL; + goto err; + } + + if (mdev->irq_info.num_vectors > 1) { + msix = mic_get_available_vector(mdev); + if (!msix) { + dev_err(mdev->sdev->parent, + "No MSIx vectors available for use.\n"); + rc = -ENOSPC; + goto err; + } + + rc = request_irq(msix->vector, func, 0, name, data); + if (rc) { + dev_dbg(mdev->sdev->parent, + "request irq failed rc = %d\n", rc); + goto err; + } + entry = msix->entry; + mdev->irq_info.mic_msi_map[entry] |= BIT(offset); + mdev->intr_ops->program_msi_to_src_map(mdev, + entry, offset, true); + cookie = MK_COOKIE(entry, offset); + dev_dbg(mdev->sdev->parent, "irq: %d assigned for src: %d\n", + msix->vector, intr_src); + } else { + intr_cb = mic_register_intr_callback(mdev, + offset, func, data); + if (IS_ERR(intr_cb)) { + dev_err(mdev->sdev->parent, + "No available callback entries for use\n"); + rc = PTR_ERR(intr_cb); + goto err; + } + + entry = 0; + if (pci_dev_msi_enabled(pdev)) { + mdev->irq_info.mic_msi_map[entry] |= (1 << offset); + mdev->intr_ops->program_msi_to_src_map(mdev, + entry, offset, true); + } + cookie = MK_COOKIE(entry, intr_cb->cb_id); + dev_dbg(mdev->sdev->parent, "callback %d registered for src: %d\n", + intr_cb->cb_id, intr_src); + } + return (struct mic_irq *)cookie; +err: + return ERR_PTR(rc); +} + +/** + * mic_free_irq - free irq. mic_mutex + * needs to be held before calling this function. + * + * @mdev: pointer to mic_device instance + * @cookie: cookie obtained during a successful call to mic_request_irq + * @data: private data specified by the calling function during the + * mic_request_irq + * + * returns: none. + */ +void mic_free_irq(struct mic_device *mdev, + struct mic_irq *cookie, void *data) +{ + u32 offset; + u32 entry; + u8 src_id; + unsigned int irq; + struct pci_dev *pdev = container_of(mdev->sdev->parent, + struct pci_dev, dev); + + entry = GET_ENTRY((unsigned long)cookie); + offset = GET_OFFSET((unsigned long)cookie); + if (mdev->irq_info.num_vectors > 1) { + if (entry >= mdev->irq_info.num_vectors) { + dev_warn(mdev->sdev->parent, + "entry %d should be < num_irq %d\n", + entry, mdev->irq_info.num_vectors); + return; + } + irq = mdev->irq_info.msix_entries[entry].vector; + free_irq(irq, data); + mdev->irq_info.mic_msi_map[entry] &= ~(BIT(offset)); + mdev->intr_ops->program_msi_to_src_map(mdev, + entry, offset, false); + + dev_dbg(mdev->sdev->parent, "irq: %d freed\n", irq); + } else { + irq = pdev->irq; + src_id = mic_unregister_intr_callback(mdev, offset); + if (src_id >= MIC_NUM_OFFSETS) { + dev_warn(mdev->sdev->parent, "Error unregistering callback\n"); + return; + } + if (pci_dev_msi_enabled(pdev)) { + mdev->irq_info.mic_msi_map[entry] &= ~(BIT(src_id)); + mdev->intr_ops->program_msi_to_src_map(mdev, + entry, src_id, false); + } + dev_dbg(mdev->sdev->parent, "callback %d unregistered for src: %d\n", + offset, src_id); + } +} + +/** + * mic_setup_interrupts - Initializes interrupts. 
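+ * Tries MSI-x first, then falls back to MSI and finally to
+ * legacy INTx.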
+ * + * @mdev: pointer to mic_device instance + * @pdev: PCI device structure + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev) +{ + int rc; + + rc = mic_setup_msix(mdev, pdev); + if (!rc) + goto done; + + rc = mic_setup_msi(mdev, pdev); + if (!rc) + goto done; + + rc = mic_setup_intx(mdev, pdev); + if (rc) { + dev_err(mdev->sdev->parent, "no usable interrupts\n"); + return rc; + } +done: + mdev->intr_ops->enable_interrupts(mdev); + return 0; +} + +/** + * mic_free_interrupts - Frees interrupts setup by mic_setup_interrupts + * + * @mdev: pointer to mic_device instance + * @pdev: PCI device structure + * + * returns none. + */ +void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev) +{ + int i; + + mdev->intr_ops->disable_interrupts(mdev); + if (mdev->irq_info.num_vectors > 1) { + for (i = 0; i < mdev->irq_info.num_vectors; i++) { + if (mdev->irq_info.mic_msi_map[i]) + dev_warn(&pdev->dev, "irq %d may still be in use.\n", + mdev->irq_info.msix_entries[i].vector); + } + kfree(mdev->irq_info.mic_msi_map); + kfree(mdev->irq_info.msix_entries); + pci_disable_msix(pdev); + } else { + if (pci_dev_msi_enabled(pdev)) { + free_irq(pdev->irq, mdev); + kfree(mdev->irq_info.mic_msi_map); + pci_disable_msi(pdev); + } else { + free_irq(pdev->irq, mdev); + } + mic_release_callbacks(mdev); + } +} + +/** + * mic_intr_restore - Restore MIC interrupt registers. + * + * @mdev: pointer to mic_device instance. + * + * Restore the interrupt registers to values previously + * stored in the SW data structures. mic_mutex needs to + * be held before calling this function. + * + * returns None. + */ +void mic_intr_restore(struct mic_device *mdev) +{ + int entry, offset; + struct pci_dev *pdev = container_of(mdev->sdev->parent, + struct pci_dev, dev); + + if (!pci_dev_msi_enabled(pdev)) + return; + + for (entry = 0; entry < mdev->irq_info.num_vectors; entry++) { + for (offset = 0; offset < MIC_NUM_OFFSETS; offset++) { + if (mdev->irq_info.mic_msi_map[entry] & BIT(offset)) + mdev->intr_ops->program_msi_to_src_map(mdev, + entry, offset, true); + } + } +} diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h new file mode 100644 index 00000000000..6091aa97e11 --- /dev/null +++ b/drivers/misc/mic/host/mic_intr.h @@ -0,0 +1,137 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef _MIC_INTR_H_ +#define _MIC_INTR_H_ + +/* + * The minimum number of msix vectors required for normal operation. + * 3 for virtio network, console and block devices. + * 1 for card shutdown notifications. 
+ */
+#define MIC_MIN_MSIX 4
+#define MIC_NUM_OFFSETS 32
+
+/**
+ * enum mic_intr_type - The type of source that will generate
+ * the interrupt. The number of types needs to be in sync with
+ * MIC_NUM_INTR_TYPES.
+ *
+ * MIC_INTR_DB: The source is a doorbell
+ * MIC_INTR_DMA: The source is a DMA channel
+ * MIC_INTR_ERR: The source is an error interrupt e.g. SBOX ERR
+ * MIC_NUM_INTR_TYPES: Total number of interrupt sources.
+ */
+enum mic_intr_type {
+	MIC_INTR_DB = 0,
+	MIC_INTR_DMA,
+	MIC_INTR_ERR,
+	MIC_NUM_INTR_TYPES
+};
+
+/**
+ * struct mic_intr_info - Contains h/w specific interrupt sources
+ * information.
+ *
+ * @intr_start_idx: Contains the starting indexes of the
+ * interrupt types.
+ * @intr_len: Contains the length of the interrupt types.
+ */
+struct mic_intr_info {
+	u16 intr_start_idx[MIC_NUM_INTR_TYPES];
+	u16 intr_len[MIC_NUM_INTR_TYPES];
+};
+
+/**
+ * struct mic_irq_info - OS specific irq information
+ *
+ * @next_avail_src: next available doorbell that can be assigned.
+ * @msix_entries: msix entries allocated while setting up MSI-x
+ * @mic_msi_map: The MSI/MSI-x mapping information.
+ * @num_vectors: The number of MSI/MSI-x vectors that have been allocated.
+ * @cb_ida: callback ID allocator to track the callbacks registered.
+ * @mic_intr_lock: spinlock to protect the interrupt callback list.
+ * @cb_list: Array of callback lists one for each source.
+ */
+struct mic_irq_info {
+	int next_avail_src;
+	struct msix_entry *msix_entries;
+	u32 *mic_msi_map;
+	u16 num_vectors;
+	struct ida cb_ida;
+	spinlock_t mic_intr_lock;
+	struct list_head *cb_list;
+};
+
+/**
+ * struct mic_intr_cb - Interrupt callback structure.
+ *
+ * @func: The callback function
+ * @data: Private data of the requester.
+ * @cb_id: The callback id. Identifies this callback.
+ * @list: list head pointing to the next callback structure.
+ */
+struct mic_intr_cb {
+	irqreturn_t (*func) (int irq, void *data);
+	void *data;
+	int cb_id;
+	struct list_head list;
+};
+
+/**
+ * struct mic_irq - opaque pointer used as cookie
+ */
+struct mic_irq;
+
+/* Forward declaration */
+struct mic_device;
+
+/**
+ * struct mic_hw_intr_ops - MIC HW specific interrupt operations
+ * @intr_init: Initialize H/W specific interrupt information.
+ * @enable_interrupts: Enable interrupts from the hardware.
+ * @disable_interrupts: Disable interrupts from the hardware.
+ * @program_msi_to_src_map: Update MSI mapping registers with
+ * irq information.
+ * @read_msi_to_src_map: Read MSI mapping registers containing
+ * irq information.
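+ *
+ * For the X100 family these callbacks are supplied by
+ * mic_x100_intr_ops (assigned in mic_ops_init()).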
+ */ +struct mic_hw_intr_ops { + void (*intr_init)(struct mic_device *mdev); + void (*enable_interrupts)(struct mic_device *mdev); + void (*disable_interrupts)(struct mic_device *mdev); + void (*program_msi_to_src_map) (struct mic_device *mdev, + int idx, int intr_src, bool set); + u32 (*read_msi_to_src_map) (struct mic_device *mdev, + int idx); +}; + +int mic_next_db(struct mic_device *mdev); +struct mic_irq *mic_request_irq(struct mic_device *mdev, + irqreturn_t (*func)(int irq, void *data), + const char *name, void *data, int intr_src, + enum mic_intr_type type); + +void mic_free_irq(struct mic_device *mdev, + struct mic_irq *cookie, void *data); +int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev); +void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev); +void mic_intr_restore(struct mic_device *mdev); +#endif diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c new file mode 100644 index 00000000000..c04a021e20c --- /dev/null +++ b/drivers/misc/mic/host/mic_main.c @@ -0,0 +1,536 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + * Global TODO's across the driver to be added after initial base + * patches are accepted upstream: + * 1) Enable DMA support. + * 2) Enable per vring interrupt support. + */ +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/suspend.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_x100.h" +#include "mic_smpt.h" +#include "mic_fops.h" +#include "mic_virtio.h" + +static const char mic_driver_name[] = "mic"; + +static DEFINE_PCI_DEVICE_TABLE(mic_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2253)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2254)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2255)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2256)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2257)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2258)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2259)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225a)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225b)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225c)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225d)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225e)}, + + /* required last entry */ + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, mic_pci_tbl); + +/* ID allocator for MIC devices */ +static struct ida g_mic_ida; +/* Class of MIC devices for sysfs accessibility. 
*/ +static struct class *g_mic_class; +/* Base device node number for MIC devices */ +static dev_t g_mic_devno; + +static const struct file_operations mic_fops = { + .open = mic_open, + .release = mic_release, + .unlocked_ioctl = mic_ioctl, + .poll = mic_poll, + .mmap = mic_mmap, + .owner = THIS_MODULE, +}; + +/* Initialize the device page */ +static int mic_dp_init(struct mic_device *mdev) +{ + mdev->dp = kzalloc(MIC_DP_SIZE, GFP_KERNEL); + if (!mdev->dp) { + dev_err(mdev->sdev->parent, "%s %d err %d\n", + __func__, __LINE__, -ENOMEM); + return -ENOMEM; + } + + mdev->dp_dma_addr = mic_map_single(mdev, + mdev->dp, MIC_DP_SIZE); + if (mic_map_error(mdev->dp_dma_addr)) { + kfree(mdev->dp); + dev_err(mdev->sdev->parent, "%s %d err %d\n", + __func__, __LINE__, -ENOMEM); + return -ENOMEM; + } + mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr); + mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); + return 0; +} + +/* Uninitialize the device page */ +static void mic_dp_uninit(struct mic_device *mdev) +{ + mic_unmap_single(mdev, mdev->dp_dma_addr, MIC_DP_SIZE); + kfree(mdev->dp); +} + +/** + * mic_shutdown_db - Shutdown doorbell interrupt handler. + */ +static irqreturn_t mic_shutdown_db(int irq, void *data) +{ + struct mic_device *mdev = data; + struct mic_bootparam *bootparam = mdev->dp; + + mdev->ops->intr_workarounds(mdev); + + switch (bootparam->shutdown_status) { + case MIC_HALTED: + case MIC_POWER_OFF: + case MIC_RESTART: + /* Fall through */ + case MIC_CRASHED: + schedule_work(&mdev->shutdown_work); + break; + default: + break; + }; + return IRQ_HANDLED; +} + +/** + * mic_ops_init: Initialize HW specific operation tables. + * + * @mdev: pointer to mic_device instance + * + * returns none. + */ +static void mic_ops_init(struct mic_device *mdev) +{ + switch (mdev->family) { + case MIC_FAMILY_X100: + mdev->ops = &mic_x100_ops; + mdev->intr_ops = &mic_x100_intr_ops; + mdev->smpt_ops = &mic_x100_smpt_ops; + break; + default: + break; + } +} + +/** + * mic_get_family - Determine hardware family to which this MIC belongs. + * + * @pdev: The pci device structure + * + * returns family. + */ +static enum mic_hw_family mic_get_family(struct pci_dev *pdev) +{ + enum mic_hw_family family; + + switch (pdev->device) { + case MIC_X100_PCI_DEVICE_2250: + case MIC_X100_PCI_DEVICE_2251: + case MIC_X100_PCI_DEVICE_2252: + case MIC_X100_PCI_DEVICE_2253: + case MIC_X100_PCI_DEVICE_2254: + case MIC_X100_PCI_DEVICE_2255: + case MIC_X100_PCI_DEVICE_2256: + case MIC_X100_PCI_DEVICE_2257: + case MIC_X100_PCI_DEVICE_2258: + case MIC_X100_PCI_DEVICE_2259: + case MIC_X100_PCI_DEVICE_225a: + case MIC_X100_PCI_DEVICE_225b: + case MIC_X100_PCI_DEVICE_225c: + case MIC_X100_PCI_DEVICE_225d: + case MIC_X100_PCI_DEVICE_225e: + family = MIC_FAMILY_X100; + break; + default: + family = MIC_FAMILY_UNKNOWN; + break; + } + return family; +} + +/** +* mic_pm_notifier: Notifier callback function that handles +* PM notifications. +* +* @notifier_block: The notifier structure. +* @pm_event: The event for which the driver was notified. +* @unused: Meaningless. Always NULL. 
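+*
+* Calls mic_prepare_suspend() before suspend or hibernation and
+* mic_complete_resume() on the corresponding resume events.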
+* +* returns NOTIFY_DONE +*/ +static int mic_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, void *unused) +{ + struct mic_device *mdev = container_of(notifier, + struct mic_device, pm_notifier); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + /* Fall through */ + case PM_SUSPEND_PREPARE: + mic_prepare_suspend(mdev); + break; + case PM_POST_HIBERNATION: + /* Fall through */ + case PM_POST_SUSPEND: + /* Fall through */ + case PM_POST_RESTORE: + mic_complete_resume(mdev); + break; + case PM_RESTORE_PREPARE: + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * mic_device_init - Allocates and initializes the MIC device structure + * + * @mdev: pointer to mic_device instance + * @pdev: The pci device structure + * + * returns none. + */ +static int +mic_device_init(struct mic_device *mdev, struct pci_dev *pdev) +{ + int rc; + + mdev->family = mic_get_family(pdev); + mdev->stepping = pdev->revision; + mic_ops_init(mdev); + mic_sysfs_init(mdev); + mutex_init(&mdev->mic_mutex); + mdev->irq_info.next_avail_src = 0; + INIT_WORK(&mdev->reset_trigger_work, mic_reset_trigger_work); + INIT_WORK(&mdev->shutdown_work, mic_shutdown_work); + init_completion(&mdev->reset_wait); + INIT_LIST_HEAD(&mdev->vdev_list); + mdev->pm_notifier.notifier_call = mic_pm_notifier; + rc = register_pm_notifier(&mdev->pm_notifier); + if (rc) { + dev_err(&pdev->dev, "register_pm_notifier failed rc %d\n", + rc); + goto register_pm_notifier_fail; + } + return 0; +register_pm_notifier_fail: + flush_work(&mdev->shutdown_work); + flush_work(&mdev->reset_trigger_work); + return rc; +} + +/** + * mic_device_uninit - Frees resources allocated during mic_device_init(..) + * + * @mdev: pointer to mic_device instance + * + * returns none + */ +static void mic_device_uninit(struct mic_device *mdev) +{ + /* The cmdline sysfs entry might have allocated cmdline */ + kfree(mdev->cmdline); + kfree(mdev->firmware); + kfree(mdev->ramdisk); + kfree(mdev->bootmode); + flush_work(&mdev->reset_trigger_work); + flush_work(&mdev->shutdown_work); + unregister_pm_notifier(&mdev->pm_notifier); +} + +/** + * mic_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mic_pci_tbl + * + * returns 0 on success, < 0 on failure. 
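+ *
+ * Probe order: enable the PCI device, map the MMIO and aperture
+ * BARs, set up interrupts and the SMPT, create the sysfs device,
+ * device page and shutdown doorbell, then add the character device;
+ * each failure path unwinds in reverse order.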
+ */ +static int mic_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc; + struct mic_device *mdev; + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) { + rc = -ENOMEM; + dev_err(&pdev->dev, "mdev kmalloc failed rc %d\n", rc); + goto mdev_alloc_fail; + } + mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL); + if (mdev->id < 0) { + rc = mdev->id; + dev_err(&pdev->dev, "ida_simple_get failed rc %d\n", rc); + goto ida_fail; + } + + rc = mic_device_init(mdev, pdev); + if (rc) { + dev_err(&pdev->dev, "mic_device_init failed rc %d\n", rc); + goto device_init_fail; + } + + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "failed to enable pci device.\n"); + goto uninit_device; + } + + pci_set_master(pdev); + + rc = pci_request_regions(pdev, mic_driver_name); + if (rc) { + dev_err(&pdev->dev, "failed to get pci regions.\n"); + goto disable_device; + } + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) { + dev_err(&pdev->dev, "Cannot set DMA mask\n"); + goto release_regions; + } + + mdev->mmio.pa = pci_resource_start(pdev, mdev->ops->mmio_bar); + mdev->mmio.len = pci_resource_len(pdev, mdev->ops->mmio_bar); + mdev->mmio.va = pci_ioremap_bar(pdev, mdev->ops->mmio_bar); + if (!mdev->mmio.va) { + dev_err(&pdev->dev, "Cannot remap MMIO BAR\n"); + rc = -EIO; + goto release_regions; + } + + mdev->aper.pa = pci_resource_start(pdev, mdev->ops->aper_bar); + mdev->aper.len = pci_resource_len(pdev, mdev->ops->aper_bar); + mdev->aper.va = ioremap_wc(mdev->aper.pa, mdev->aper.len); + if (!mdev->aper.va) { + dev_err(&pdev->dev, "Cannot remap Aperture BAR\n"); + rc = -EIO; + goto unmap_mmio; + } + + mdev->intr_ops->intr_init(mdev); + rc = mic_setup_interrupts(mdev, pdev); + if (rc) { + dev_err(&pdev->dev, "mic_setup_interrupts failed %d\n", rc); + goto unmap_aper; + } + rc = mic_smpt_init(mdev); + if (rc) { + dev_err(&pdev->dev, "smpt_init failed %d\n", rc); + goto free_interrupts; + } + + pci_set_drvdata(pdev, mdev); + + mdev->sdev = device_create_with_groups(g_mic_class, &pdev->dev, + MKDEV(MAJOR(g_mic_devno), mdev->id), NULL, + mdev->attr_group, "mic%d", mdev->id); + if (IS_ERR(mdev->sdev)) { + rc = PTR_ERR(mdev->sdev); + dev_err(&pdev->dev, + "device_create_with_groups failed rc %d\n", rc); + goto smpt_uninit; + } + mdev->state_sysfs = sysfs_get_dirent(mdev->sdev->kobj.sd, "state"); + if (!mdev->state_sysfs) { + rc = -ENODEV; + dev_err(&pdev->dev, "sysfs_get_dirent failed rc %d\n", rc); + goto destroy_device; + } + + rc = mic_dp_init(mdev); + if (rc) { + dev_err(&pdev->dev, "mic_dp_init failed rc %d\n", rc); + goto sysfs_put; + } + mutex_lock(&mdev->mic_mutex); + + mdev->shutdown_db = mic_next_db(mdev); + mdev->shutdown_cookie = mic_request_irq(mdev, mic_shutdown_db, + "shutdown-interrupt", mdev, mdev->shutdown_db, MIC_INTR_DB); + if (IS_ERR(mdev->shutdown_cookie)) { + rc = PTR_ERR(mdev->shutdown_cookie); + mutex_unlock(&mdev->mic_mutex); + goto dp_uninit; + } + mutex_unlock(&mdev->mic_mutex); + mic_bootparam_init(mdev); + + mic_create_debug_dir(mdev); + cdev_init(&mdev->cdev, &mic_fops); + mdev->cdev.owner = THIS_MODULE; + rc = cdev_add(&mdev->cdev, MKDEV(MAJOR(g_mic_devno), mdev->id), 1); + if (rc) { + dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc); + goto cleanup_debug_dir; + } + return 0; +cleanup_debug_dir: + mic_delete_debug_dir(mdev); + mutex_lock(&mdev->mic_mutex); + mic_free_irq(mdev, mdev->shutdown_cookie, mdev); + mutex_unlock(&mdev->mic_mutex); +dp_uninit: + mic_dp_uninit(mdev); +sysfs_put: + 
sysfs_put(mdev->state_sysfs); +destroy_device: + device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id)); +smpt_uninit: + mic_smpt_uninit(mdev); +free_interrupts: + mic_free_interrupts(mdev, pdev); +unmap_aper: + iounmap(mdev->aper.va); +unmap_mmio: + iounmap(mdev->mmio.va); +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +uninit_device: + mic_device_uninit(mdev); +device_init_fail: + ida_simple_remove(&g_mic_ida, mdev->id); +ida_fail: + kfree(mdev); +mdev_alloc_fail: + dev_err(&pdev->dev, "Probe failed rc %d\n", rc); + return rc; +} + +/** + * mic_remove - Device Removal Routine + * mic_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. + * + * @pdev: PCI device structure + */ +static void mic_remove(struct pci_dev *pdev) +{ + struct mic_device *mdev; + + mdev = pci_get_drvdata(pdev); + if (!mdev) + return; + + mic_stop(mdev, false); + cdev_del(&mdev->cdev); + mic_delete_debug_dir(mdev); + mutex_lock(&mdev->mic_mutex); + mic_free_irq(mdev, mdev->shutdown_cookie, mdev); + mutex_unlock(&mdev->mic_mutex); + flush_work(&mdev->shutdown_work); + mic_dp_uninit(mdev); + sysfs_put(mdev->state_sysfs); + device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id)); + mic_smpt_uninit(mdev); + mic_free_interrupts(mdev, pdev); + iounmap(mdev->mmio.va); + iounmap(mdev->aper.va); + mic_device_uninit(mdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + ida_simple_remove(&g_mic_ida, mdev->id); + kfree(mdev); +} +static struct pci_driver mic_driver = { + .name = mic_driver_name, + .id_table = mic_pci_tbl, + .probe = mic_probe, + .remove = mic_remove +}; + +static int __init mic_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&g_mic_devno, 0, + MIC_MAX_NUM_DEVS, mic_driver_name); + if (ret) { + pr_err("alloc_chrdev_region failed ret %d\n", ret); + goto error; + } + + g_mic_class = class_create(THIS_MODULE, mic_driver_name); + if (IS_ERR(g_mic_class)) { + ret = PTR_ERR(g_mic_class); + pr_err("class_create failed ret %d\n", ret); + goto cleanup_chrdev; + } + + mic_init_debugfs(); + ida_init(&g_mic_ida); + ret = pci_register_driver(&mic_driver); + if (ret) { + pr_err("pci_register_driver failed ret %d\n", ret); + goto cleanup_debugfs; + } + return ret; +cleanup_debugfs: + mic_exit_debugfs(); + class_destroy(g_mic_class); +cleanup_chrdev: + unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); +error: + return ret; +} + +static void __exit mic_exit(void) +{ + pci_unregister_driver(&mic_driver); + ida_destroy(&g_mic_ida); + mic_exit_debugfs(); + class_destroy(g_mic_class); + unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); +} + +module_init(mic_init); +module_exit(mic_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) MIC X100 Host driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c new file mode 100644 index 00000000000..fae474c4899 --- /dev/null +++ b/drivers/misc/mic/host/mic_smpt.c @@ -0,0 +1,442 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/pci.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_smpt.h" + +static inline u64 mic_system_page_mask(struct mic_device *mdev) +{ + return (1ULL << mdev->smpt->info.page_shift) - 1ULL; +} + +static inline u8 mic_sys_addr_to_smpt(struct mic_device *mdev, dma_addr_t pa) +{ + return (pa - mdev->smpt->info.base) >> mdev->smpt->info.page_shift; +} + +static inline u64 mic_smpt_to_pa(struct mic_device *mdev, u8 index) +{ + return mdev->smpt->info.base + (index * mdev->smpt->info.page_size); +} + +static inline u64 mic_smpt_offset(struct mic_device *mdev, dma_addr_t pa) +{ + return pa & mic_system_page_mask(mdev); +} + +static inline u64 mic_smpt_align_low(struct mic_device *mdev, dma_addr_t pa) +{ + return ALIGN(pa - mic_system_page_mask(mdev), + mdev->smpt->info.page_size); +} + +static inline u64 mic_smpt_align_high(struct mic_device *mdev, dma_addr_t pa) +{ + return ALIGN(pa, mdev->smpt->info.page_size); +} + +/* Total Cumulative system memory accessible by MIC across all SMPT entries */ +static inline u64 mic_max_system_memory(struct mic_device *mdev) +{ + return mdev->smpt->info.num_reg * mdev->smpt->info.page_size; +} + +/* Maximum system memory address accessible by MIC */ +static inline u64 mic_max_system_addr(struct mic_device *mdev) +{ + return mdev->smpt->info.base + mic_max_system_memory(mdev) - 1ULL; +} + +/* Check if the DMA address is a MIC system memory address */ +static inline bool +mic_is_system_addr(struct mic_device *mdev, dma_addr_t pa) +{ + return pa >= mdev->smpt->info.base && pa <= mic_max_system_addr(mdev); +} + +/* Populate an SMPT entry and update the reference counts. */ +static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr, + int entries, struct mic_device *mdev) +{ + struct mic_smpt_info *smpt_info = mdev->smpt; + int i; + + for (i = spt; i < spt + entries; i++, + addr += smpt_info->info.page_size) { + if (!smpt_info->entry[i].ref_count && + (smpt_info->entry[i].dma_addr != addr)) { + mdev->smpt_ops->set(mdev, addr, i); + smpt_info->entry[i].dma_addr = addr; + } + smpt_info->entry[i].ref_count += ref[i - spt]; + } +} + +/* + * Find an available MIC address in MIC SMPT address space + * for a given DMA address and size. + */ +static dma_addr_t mic_smpt_op(struct mic_device *mdev, u64 dma_addr, + int entries, s64 *ref, size_t size) +{ + int spt; + int ae = 0; + int i; + unsigned long flags; + dma_addr_t mic_addr = 0; + dma_addr_t addr = dma_addr; + struct mic_smpt_info *smpt_info = mdev->smpt; + + spin_lock_irqsave(&smpt_info->smpt_lock, flags); + + /* find existing entries */ + for (i = 0; i < smpt_info->info.num_reg; i++) { + if (smpt_info->entry[i].dma_addr == addr) { + ae++; + addr += smpt_info->info.page_size; + } else if (ae) /* cannot find contiguous entries */ + goto not_found; + + if (ae == entries) + goto found; + } + + /* find free entry */ + for (ae = 0, i = 0; i < smpt_info->info.num_reg; i++) { + ae = (smpt_info->entry[i].ref_count == 0) ? 
ae + 1 : 0;
+		if (ae == entries)
+			goto found;
+	}
+
+not_found:
+	spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
+	return mic_addr;
+
+found:
+	spt = i - entries + 1;
+	mic_addr = mic_smpt_to_pa(mdev, spt);
+	mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev);
+	smpt_info->map_count++;
+	smpt_info->ref_count += (s64)size;
+	spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
+	return mic_addr;
+}
+
+/*
+ * Returns the number of smpt entries needed for dma_addr to dma_addr + size;
+ * also returns the reference count array for each of those entries
+ * and the starting smpt address.
+ */
+static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
+				size_t size, s64 *ref, u64 *smpt_start)
+{
+	u64 start = dma_addr;
+	u64 end = dma_addr + size;
+	int i = 0;
+
+	while (start < end) {
+		ref[i++] = min(mic_smpt_align_high(mdev, start + 1),
+			end) - start;
+		start = mic_smpt_align_high(mdev, start + 1);
+	}
+
+	if (smpt_start)
+		*smpt_start = mic_smpt_align_low(mdev, dma_addr);
+
+	return i;
+}
+
+/*
+ * mic_to_dma_addr - Converts a MIC address to a DMA address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @mic_addr: MIC address.
+ *
+ * returns a DMA address.
+ */
+static dma_addr_t
+mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
+{
+	struct mic_smpt_info *smpt_info = mdev->smpt;
+	int spt;
+	dma_addr_t dma_addr;
+
+	if (!mic_is_system_addr(mdev, mic_addr)) {
+		dev_err(mdev->sdev->parent,
+			"mic_addr is invalid. mic_addr = 0x%llx\n", mic_addr);
+		return -EINVAL;
+	}
+	spt = mic_sys_addr_to_smpt(mdev, mic_addr);
+	dma_addr = smpt_info->entry[spt].dma_addr +
+		mic_smpt_offset(mdev, mic_addr);
+	return dma_addr;
+}
+
+/**
+ * mic_map - Maps a DMA address to a MIC physical address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @dma_addr: DMA address.
+ * @size: Size of the region to be mapped.
+ *
+ * This API converts the DMA address provided to a DMA address understood
+ * by MIC. Caller should check for errors by calling mic_map_error(..).
+ *
+ * returns DMA address as required by MIC.
+ */
+dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
+{
+	dma_addr_t mic_addr = 0;
+	int num_entries;
+	s64 *ref;
+	u64 smpt_start;
+
+	if (!size || size > mic_max_system_memory(mdev))
+		return mic_addr;
+
+	ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL);
+	if (!ref)
+		return mic_addr;
+
+	num_entries = mic_get_smpt_ref_count(mdev, dma_addr, size,
+		ref, &smpt_start);
+
+	/* Set the smpt table appropriately and get 16G aligned mic address */
+	mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size);
+
+	kfree(ref);
+
+	/*
+	 * If mic_addr is zero then it's an error case,
+	 * since a valid mic_addr can never be zero.
+	 * Else generate mic_addr by adding the 16G offset in dma_addr.
+	 */
+	if (!mic_addr && MIC_FAMILY_X100 == mdev->family) {
+		dev_err(mdev->sdev->parent,
+			"mic_map failed dma_addr 0x%llx size 0x%lx\n",
+			dma_addr, size);
+		return mic_addr;
+	} else {
+		return mic_addr + mic_smpt_offset(mdev, dma_addr);
+	}
+}
+
+/**
+ * mic_unmap - Unmaps a MIC physical address.
+ *
+ * @mdev: pointer to mic_device instance.
+ * @mic_addr: MIC physical address.
+ * @size: Size of the region to be unmapped.
+ *
+ * This API unmaps the mappings created by mic_map(..).
+ *
+ * returns None.
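+ *
+ * Reference counts on the underlying SMPT entries are decremented;
+ * an entry is only reprogrammed for a new address once its count
+ * drops back to zero (see mic_add_smpt_entry()).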
+ */ +void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size) +{ + struct mic_smpt_info *smpt_info = mdev->smpt; + s64 *ref; + int num_smpt; + int spt; + int i; + unsigned long flags; + + if (!size) + return; + + if (!mic_is_system_addr(mdev, mic_addr)) { + dev_err(mdev->sdev->parent, + "invalid address: 0x%llx\n", mic_addr); + return; + } + + spt = mic_sys_addr_to_smpt(mdev, mic_addr); + ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL); + if (!ref) + return; + + /* Get number of smpt entries to be mapped, ref count array */ + num_smpt = mic_get_smpt_ref_count(mdev, mic_addr, size, ref, NULL); + + spin_lock_irqsave(&smpt_info->smpt_lock, flags); + smpt_info->unmap_count++; + smpt_info->ref_count -= (s64)size; + + for (i = spt; i < spt + num_smpt; i++) { + smpt_info->entry[i].ref_count -= ref[i - spt]; + if (smpt_info->entry[i].ref_count < 0) + dev_warn(mdev->sdev->parent, + "ref count for entry %d is negative\n", i); + } + spin_unlock_irqrestore(&smpt_info->smpt_lock, flags); + kfree(ref); +} + +/** + * mic_map_single - Maps a virtual address to a MIC physical address. + * + * @mdev: pointer to mic_device instance. + * @va: Kernel direct mapped virtual address. + * @size: Size of the region to be mapped. + * + * This API calls pci_map_single(..) for the direct mapped virtual address + * and then converts the DMA address provided to a DMA address understood + * by MIC. Caller should check for errors by calling mic_map_error(..). + * + * returns DMA address as required by MIC. + */ +dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size) +{ + dma_addr_t mic_addr = 0; + struct pci_dev *pdev = container_of(mdev->sdev->parent, + struct pci_dev, dev); + dma_addr_t dma_addr = + pci_map_single(pdev, va, size, PCI_DMA_BIDIRECTIONAL); + + if (!pci_dma_mapping_error(pdev, dma_addr)) { + mic_addr = mic_map(mdev, dma_addr, size); + if (!mic_addr) { + dev_err(mdev->sdev->parent, + "mic_map failed dma_addr 0x%llx size 0x%lx\n", + dma_addr, size); + pci_unmap_single(pdev, dma_addr, + size, PCI_DMA_BIDIRECTIONAL); + } + } + return mic_addr; +} + +/** + * mic_unmap_single - Unmaps a MIC physical address. + * + * @mdev: pointer to mic_device instance. + * @mic_addr: MIC physical address. + * @size: Size of the region to be unmapped. + * + * This API unmaps the mappings created by mic_map_single(..). + * + * returns None. + */ +void +mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size) +{ + struct pci_dev *pdev = container_of(mdev->sdev->parent, + struct pci_dev, dev); + dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr); + mic_unmap(mdev, mic_addr, size); + pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); +} + +/** + * mic_smpt_init - Initialize MIC System Memory Page Tables. + * + * @mdev: pointer to mic_device instance. + * + * returns 0 for success and -errno for error. 
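+ *
+ * A hypothetical probe-time caller would pair this with
+ * mic_smpt_uninit(..):
+ *
+ *	err = mic_smpt_init(mdev);
+ *	if (err)
+ *		goto free_resources;
+ *	...
+ *	mic_smpt_uninit(mdev);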
+ */ +int mic_smpt_init(struct mic_device *mdev) +{ + int i, err = 0; + dma_addr_t dma_addr; + struct mic_smpt_info *smpt_info; + + mdev->smpt = kmalloc(sizeof(*mdev->smpt), GFP_KERNEL); + if (!mdev->smpt) + return -ENOMEM; + + smpt_info = mdev->smpt; + mdev->smpt_ops->init(mdev); + smpt_info->entry = kmalloc_array(smpt_info->info.num_reg, + sizeof(*smpt_info->entry), GFP_KERNEL); + if (!smpt_info->entry) { + err = -ENOMEM; + goto free_smpt; + } + spin_lock_init(&smpt_info->smpt_lock); + for (i = 0; i < smpt_info->info.num_reg; i++) { + dma_addr = i * smpt_info->info.page_size; + smpt_info->entry[i].dma_addr = dma_addr; + smpt_info->entry[i].ref_count = 0; + mdev->smpt_ops->set(mdev, dma_addr, i); + } + smpt_info->ref_count = 0; + smpt_info->map_count = 0; + smpt_info->unmap_count = 0; + return 0; +free_smpt: + kfree(smpt_info); + return err; +} + +/** + * mic_smpt_uninit - Uninitialize MIC System Memory Page Tables. + * + * @mdev: pointer to mic_device instance. + * + * returns None. + */ +void mic_smpt_uninit(struct mic_device *mdev) +{ + struct mic_smpt_info *smpt_info = mdev->smpt; + int i; + + dev_dbg(mdev->sdev->parent, + "nodeid %d SMPT ref count %lld map %lld unmap %lld\n", + mdev->id, smpt_info->ref_count, + smpt_info->map_count, smpt_info->unmap_count); + + for (i = 0; i < smpt_info->info.num_reg; i++) { + dev_dbg(mdev->sdev->parent, + "SMPT entry[%d] dma_addr = 0x%llx ref_count = %lld\n", + i, smpt_info->entry[i].dma_addr, + smpt_info->entry[i].ref_count); + if (smpt_info->entry[i].ref_count) + dev_warn(mdev->sdev->parent, + "ref count for entry %d is not zero\n", i); + } + kfree(smpt_info->entry); + kfree(smpt_info); +} + +/** + * mic_smpt_restore - Restore MIC System Memory Page Tables. + * + * @mdev: pointer to mic_device instance. + * + * Restore the SMPT registers to values previously stored in the + * SW data structures. Some MIC steppings lose register state + * across resets and this API should be called for performing + * a restore operation if required. + * + * returns None. + */ +void mic_smpt_restore(struct mic_device *mdev) +{ + int i; + dma_addr_t dma_addr; + + for (i = 0; i < mdev->smpt->info.num_reg; i++) { + dma_addr = mdev->smpt->entry[i].dma_addr; + mdev->smpt_ops->set(mdev, dma_addr, i); + } +} diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h new file mode 100644 index 00000000000..51970abfe7d --- /dev/null +++ b/drivers/misc/mic/host/mic_smpt.h @@ -0,0 +1,98 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef MIC_SMPT_H +#define MIC_SMPT_H +/** + * struct mic_smpt_ops - MIC HW specific SMPT operations. + * @init: Initialize hardware specific SMPT information in mic_smpt_hw_info. + * @set: Set the value for a particular SMPT entry.
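+ *
+ * A hardware backend supplies an instance of these ops; the X100
+ * implementation later in this patch provides:
+ *
+ *	struct mic_smpt_ops mic_x100_smpt_ops = {
+ *		.init = mic_x100_smpt_hw_init,
+ *		.set = mic_x100_smpt_set,
+ *	};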
+ */ +struct mic_smpt_ops { + void (*init)(struct mic_device *mdev); + void (*set)(struct mic_device *mdev, dma_addr_t dma_addr, u8 index); +}; + +/** + * struct mic_smpt - MIC SMPT entry information. + * @dma_addr: Base DMA address for this SMPT entry. + * @ref_count: Number of active mappings for this SMPT entry, counted + * in bytes. + */ +struct mic_smpt { + dma_addr_t dma_addr; + s64 ref_count; +}; + +/** + * struct mic_smpt_hw_info - MIC SMPT hardware specific information. + * @num_reg: Number of SMPT registers. + * @page_shift: System memory page shift. + * @page_size: System memory page size. + * @base: System address base. + */ +struct mic_smpt_hw_info { + u8 num_reg; + u8 page_shift; + u64 page_size; + u64 base; +}; + +/** + * struct mic_smpt_info - MIC SMPT information. + * @entry: Array of SMPT entries. + * @smpt_lock: Spin lock protecting access to SMPT data structures. + * @info: Hardware specific SMPT information. + * @ref_count: Total bytes of active SMPT mappings (for debug). + * @map_count: Number of SMPT mappings created (for debug). + * @unmap_count: Number of SMPT mappings destroyed (for debug). + */ +struct mic_smpt_info { + struct mic_smpt *entry; + spinlock_t smpt_lock; + struct mic_smpt_hw_info info; + s64 ref_count; + s64 map_count; + s64 unmap_count; +}; + +dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size); +void mic_unmap_single(struct mic_device *mdev, + dma_addr_t mic_addr, size_t size); +dma_addr_t mic_map(struct mic_device *mdev, + dma_addr_t dma_addr, size_t size); +void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size); + +/** + * mic_map_error - Check a MIC address for errors. + * + * @mic_addr: MIC address returned by one of the mic_map..(..) APIs. + * + * returns Whether there was an error during mic_map..(..) APIs. + */ +static inline bool mic_map_error(dma_addr_t mic_addr) +{ + return !mic_addr; +} + +int mic_smpt_init(struct mic_device *mdev); +void mic_smpt_uninit(struct mic_device *mdev); +void mic_smpt_restore(struct mic_device *mdev); + +#endif diff --git a/drivers/misc/mic/host/mic_sysfs.c b/drivers/misc/mic/host/mic_sysfs.c new file mode 100644 index 00000000000..6dd864e4a61 --- /dev/null +++ b/drivers/misc/mic/host/mic_sysfs.c @@ -0,0 +1,459 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/pci.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" + +/* + * A state-to-string lookup table, for exposing a human readable state + * via sysfs. Always keep in sync with enum mic_states + */ +static const char * const mic_state_string[] = { + [MIC_OFFLINE] = "offline", + [MIC_ONLINE] = "online", + [MIC_SHUTTING_DOWN] = "shutting_down", + [MIC_RESET_FAILED] = "reset_failed", + [MIC_SUSPENDING] = "suspending", + [MIC_SUSPENDED] = "suspended", +}; + +/* + * A shutdown-status-to-string lookup table, for exposing a human + * readable state via sysfs.
Always keep in sync with enum mic_shutdown_status + */ +static const char * const mic_shutdown_status_string[] = { + [MIC_NOP] = "nop", + [MIC_CRASHED] = "crashed", + [MIC_HALTED] = "halted", + [MIC_POWER_OFF] = "poweroff", + [MIC_RESTART] = "restart", +}; + +void mic_set_shutdown_status(struct mic_device *mdev, u8 shutdown_status) +{ + dev_dbg(mdev->sdev->parent, "Shutdown Status %s -> %s\n", + mic_shutdown_status_string[mdev->shutdown_status], + mic_shutdown_status_string[shutdown_status]); + mdev->shutdown_status = shutdown_status; +} + +void mic_set_state(struct mic_device *mdev, u8 state) +{ + dev_dbg(mdev->sdev->parent, "State %s -> %s\n", + mic_state_string[mdev->state], + mic_state_string[state]); + mdev->state = state; + sysfs_notify_dirent(mdev->state_sysfs); +} + +static ssize_t +family_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + static const char x100[] = "x100"; + static const char unknown[] = "Unknown"; + const char *card = NULL; + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev) + return -EINVAL; + + switch (mdev->family) { + case MIC_FAMILY_X100: + card = x100; + break; + default: + card = unknown; + break; + } + return scnprintf(buf, PAGE_SIZE, "%s\n", card); +} +static DEVICE_ATTR_RO(family); + +static ssize_t +stepping_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + char *string = "??"; + + if (!mdev) + return -EINVAL; + + switch (mdev->stepping) { + case MIC_A0_STEP: + string = "A0"; + break; + case MIC_B0_STEP: + string = "B0"; + break; + case MIC_B1_STEP: + string = "B1"; + break; + case MIC_C0_STEP: + string = "C0"; + break; + default: + break; + } + return scnprintf(buf, PAGE_SIZE, "%s\n", string); +} +static DEVICE_ATTR_RO(stepping); + +static ssize_t +state_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev || mdev->state >= MIC_LAST) + return -EINVAL; + + return scnprintf(buf, PAGE_SIZE, "%s\n", + mic_state_string[mdev->state]); +} + +static ssize_t +state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc = 0; + struct mic_device *mdev = dev_get_drvdata(dev->parent); + if (!mdev) + return -EINVAL; + if (sysfs_streq(buf, "boot")) { + rc = mic_start(mdev, buf); + if (rc) { + dev_err(mdev->sdev->parent, + "mic_boot failed rc %d\n", rc); + count = rc; + } + goto done; + } + + if (sysfs_streq(buf, "reset")) { + schedule_work(&mdev->reset_trigger_work); + goto done; + } + + if (sysfs_streq(buf, "shutdown")) { + mic_shutdown(mdev); + goto done; + } + + if (sysfs_streq(buf, "suspend")) { + mic_suspend(mdev); + goto done; + } + + count = -EINVAL; +done: + return count; +} +static DEVICE_ATTR_RW(state); + +static ssize_t shutdown_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev || mdev->shutdown_status >= MIC_STATUS_LAST) + return -EINVAL; + + return scnprintf(buf, PAGE_SIZE, "%s\n", + mic_shutdown_status_string[mdev->shutdown_status]); +} +static DEVICE_ATTR_RO(shutdown_status); + +static ssize_t +cmdline_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + char *cmdline; + + if (!mdev) + return -EINVAL; + + cmdline = mdev->cmdline; + + if (cmdline) + return scnprintf(buf, PAGE_SIZE, "%s\n", cmdline); + return 0; +} + +static 
ssize_t +cmdline_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev) + return -EINVAL; + + mutex_lock(&mdev->mic_mutex); + kfree(mdev->cmdline); + + mdev->cmdline = kmalloc(count + 1, GFP_KERNEL); + if (!mdev->cmdline) { + count = -ENOMEM; + goto unlock; + } + + strncpy(mdev->cmdline, buf, count); + + if (mdev->cmdline[count - 1] == '\n') + mdev->cmdline[count - 1] = '\0'; + else + mdev->cmdline[count] = '\0'; +unlock: + mutex_unlock(&mdev->mic_mutex); + return count; +} +static DEVICE_ATTR_RW(cmdline); + +static ssize_t +firmware_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + char *firmware; + + if (!mdev) + return -EINVAL; + + firmware = mdev->firmware; + + if (firmware) + return scnprintf(buf, PAGE_SIZE, "%s\n", firmware); + return 0; +} + +static ssize_t +firmware_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev) + return -EINVAL; + + mutex_lock(&mdev->mic_mutex); + kfree(mdev->firmware); + + mdev->firmware = kmalloc(count + 1, GFP_KERNEL); + if (!mdev->firmware) { + count = -ENOMEM; + goto unlock; + } + strncpy(mdev->firmware, buf, count); + + if (mdev->firmware[count - 1] == '\n') + mdev->firmware[count - 1] = '\0'; + else + mdev->firmware[count] = '\0'; +unlock: + mutex_unlock(&mdev->mic_mutex); + return count; +} +static DEVICE_ATTR_RW(firmware); + +static ssize_t +ramdisk_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + char *ramdisk; + + if (!mdev) + return -EINVAL; + + ramdisk = mdev->ramdisk; + + if (ramdisk) + return scnprintf(buf, PAGE_SIZE, "%s\n", ramdisk); + return 0; +} + +static ssize_t +ramdisk_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev) + return -EINVAL; + + mutex_lock(&mdev->mic_mutex); + kfree(mdev->ramdisk); + + mdev->ramdisk = kmalloc(count + 1, GFP_KERNEL); + if (!mdev->ramdisk) { + count = -ENOMEM; + goto unlock; + } + + strncpy(mdev->ramdisk, buf, count); + + if (mdev->ramdisk[count - 1] == '\n') + mdev->ramdisk[count - 1] = '\0'; + else + mdev->ramdisk[count] = '\0'; +unlock: + mutex_unlock(&mdev->mic_mutex); + return count; +} +static DEVICE_ATTR_RW(ramdisk); + +static ssize_t +bootmode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + char *bootmode; + + if (!mdev) + return -EINVAL; + + bootmode = mdev->bootmode; + + if (bootmode) + return scnprintf(buf, PAGE_SIZE, "%s\n", bootmode); + return 0; +} + +static ssize_t +bootmode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev) + return -EINVAL; + + if (!sysfs_streq(buf, "linux") && !sysfs_streq(buf, "elf")) + return -EINVAL; + + mutex_lock(&mdev->mic_mutex); + kfree(mdev->bootmode); + + mdev->bootmode = kmalloc(count + 1, GFP_KERNEL); + if (!mdev->bootmode) { + count = -ENOMEM; + goto unlock; + } + + strncpy(mdev->bootmode, buf, count); + + if (mdev->bootmode[count - 1] == '\n') + mdev->bootmode[count - 1] = '\0'; + else + mdev->bootmode[count] = '\0'; +unlock: + mutex_unlock(&mdev->mic_mutex); + return count; +} 
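+/*
+ * Illustrative user space sequence for these boot attributes (the sysfs
+ * path assumes a hypothetical mic0 instance):
+ *	echo "linux" > /sys/class/mic/mic0/bootmode
+ *	echo "boot" > /sys/class/mic/mic0/state
+ */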
+static DEVICE_ATTR_RW(bootmode); + +static ssize_t +log_buf_addr_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev) + return -EINVAL; + + return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_addr); +} + +static ssize_t +log_buf_addr_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + int ret; + unsigned long addr; + + if (!mdev) + return -EINVAL; + + ret = kstrtoul(buf, 16, &addr); + if (ret) + goto exit; + + mdev->log_buf_addr = (void *)addr; + ret = count; +exit: + return ret; +} +static DEVICE_ATTR_RW(log_buf_addr); + +static ssize_t +log_buf_len_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + + if (!mdev) + return -EINVAL; + + return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_len); +} + +static ssize_t +log_buf_len_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mic_device *mdev = dev_get_drvdata(dev->parent); + int ret; + unsigned long addr; + + if (!mdev) + return -EINVAL; + + ret = kstrtoul(buf, 16, &addr); + if (ret) + goto exit; + + mdev->log_buf_len = (int *)addr; + ret = count; +exit: + return ret; +} +static DEVICE_ATTR_RW(log_buf_len); + +static struct attribute *mic_default_attrs[] = { + &dev_attr_family.attr, + &dev_attr_stepping.attr, + &dev_attr_state.attr, + &dev_attr_shutdown_status.attr, + &dev_attr_cmdline.attr, + &dev_attr_firmware.attr, + &dev_attr_ramdisk.attr, + &dev_attr_bootmode.attr, + &dev_attr_log_buf_addr.attr, + &dev_attr_log_buf_len.attr, + + NULL +}; + +ATTRIBUTE_GROUPS(mic_default); + +void mic_sysfs_init(struct mic_device *mdev) +{ + mdev->attr_group = mic_default_groups; +} diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c new file mode 100644 index 00000000000..7e1ef0ebbb8 --- /dev/null +++ b/drivers/misc/mic/host/mic_virtio.c @@ -0,0 +1,701 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/uaccess.h> + +#include <linux/mic_common.h> +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_smpt.h" +#include "mic_virtio.h" + +/* + * Initiates the copies across the PCIe bus from card memory to + * a user space buffer. + */ +static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, + void __user *ubuf, size_t len, u64 addr) +{ + int err; + void __iomem *dbuf = mvdev->mdev->aper.va + addr; + /* + * We are copying from IO below and should ideally use something + * like copy_to_user_fromio(..) if it existed.
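+ *
+ * A bounce-buffer equivalent (sketch of what such a helper might do,
+ * with kbuf a hypothetical kernel scratch buffer):
+ *	memcpy_fromio(kbuf, dbuf, len);
+ *	err = copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;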
+ */ + if (copy_to_user(ubuf, (void __force *)dbuf, len)) { + err = -EFAULT; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, err); + goto err; + } + mvdev->in_bytes += len; + err = 0; +err: + return err; +} + +/* + * Initiates copies across the PCIe bus from a user space + * buffer to card memory. + */ +static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, + void __user *ubuf, size_t len, u64 addr) +{ + int err; + void __iomem *dbuf = mvdev->mdev->aper.va + addr; + /* + * We are copying to IO below and should ideally use something + * like copy_from_user_toio(..) if it existed. + */ + if (copy_from_user((void __force *)dbuf, ubuf, len)) { + err = -EFAULT; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, err); + goto err; + } + mvdev->out_bytes += len; + err = 0; +err: + return err; +} + +#define MIC_VRINGH_READ true + +/* The function to call to notify the card about added buffers */ +static void mic_notify(struct vringh *vrh) +{ + struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh); + struct mic_vdev *mvdev = mvrh->mvdev; + s8 db = mvdev->dc->h2c_vdev_db; + + if (db != -1) + mvdev->mdev->ops->send_intr(mvdev->mdev, db); +} + +/* Determine the total number of bytes consumed in a VRINGH KIOV */ +static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov) +{ + int i; + u32 total = iov->consumed; + + for (i = 0; i < iov->i; i++) + total += iov->iov[i].iov_len; + return total; +} + +/* + * Traverse the VRINGH KIOV and issue the APIs to trigger the copies. + * This API is heavily based on the vringh_iov_xfer(..) implementation + * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..) + * and vringh_iov_push_kern(..) directly is because there is no + * way to override the VRINGH xfer(..) routines as of v3.10. + */ +static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, + void __user *ubuf, size_t len, bool read, size_t *out_len) +{ + int ret = 0; + size_t partlen, tot_len = 0; + + while (len && iov->i < iov->used) { + partlen = min(iov->iov[iov->i].iov_len, len); + if (read) + ret = mic_virtio_copy_to_user(mvdev, + ubuf, partlen, + (u64)iov->iov[iov->i].iov_base); + else + ret = mic_virtio_copy_from_user(mvdev, + ubuf, partlen, + (u64)iov->iov[iov->i].iov_base); + if (ret) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + break; + } + len -= partlen; + ubuf += partlen; + tot_len += partlen; + iov->consumed += partlen; + iov->iov[iov->i].iov_len -= partlen; + iov->iov[iov->i].iov_base += partlen; + if (!iov->iov[iov->i].iov_len) { + /* Fix up old iov element then increment. */ + iov->iov[iov->i].iov_len = iov->consumed; + iov->iov[iov->i].iov_base -= iov->consumed; + + iov->consumed = 0; + iov->i++; + } + } + *out_len = tot_len; + return ret; +} + +/* + * Use the standard VRINGH infrastructure in the kernel to fetch new + * descriptors, initiate the copies and update the used ring. 
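+ *
+ * Per user space iovec, the flow below is roughly: vringh_getdesc_kern(..)
+ * fetches a descriptor chain, the read (riov) side is copied out to user
+ * space, the write (wiov) side is filled from user space, and
+ * vringh_complete_kern(..) publishes the used entry once the caller asks
+ * for a used ring update.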
+ */ +static int _mic_virtio_copy(struct mic_vdev *mvdev, + struct mic_copy_desc *copy) +{ + int ret = 0; + u32 iovcnt = copy->iovcnt; + struct iovec iov; + struct iovec __user *u_iov = copy->iov; + void __user *ubuf = NULL; + struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; + struct vringh_kiov *riov = &mvr->riov; + struct vringh_kiov *wiov = &mvr->wiov; + struct vringh *vrh = &mvr->vrh; + u16 *head = &mvr->head; + struct mic_vring *vr = &mvr->vring; + size_t len = 0, out_len; + + copy->out_len = 0; + /* Fetch a new IOVEC if all previous elements have been processed */ + if (riov->i == riov->used && wiov->i == wiov->used) { + ret = vringh_getdesc_kern(vrh, riov, wiov, + head, GFP_KERNEL); + /* Check if there are available descriptors */ + if (ret <= 0) + return ret; + } + while (iovcnt) { + if (!len) { + /* Copy over a new iovec from user space. */ + ret = copy_from_user(&iov, u_iov, sizeof(*u_iov)); + if (ret) { + ret = -EINVAL; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + break; + } + len = iov.iov_len; + ubuf = iov.iov_base; + } + /* Issue all the read descriptors first */ + ret = mic_vringh_copy(mvdev, riov, ubuf, len, + MIC_VRINGH_READ, &out_len); + if (ret) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + break; + } + len -= out_len; + ubuf += out_len; + copy->out_len += out_len; + /* Issue the write descriptors next */ + ret = mic_vringh_copy(mvdev, wiov, ubuf, len, + !MIC_VRINGH_READ, &out_len); + if (ret) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + break; + } + len -= out_len; + ubuf += out_len; + copy->out_len += out_len; + if (!len) { + /* One user space iovec is now completed */ + iovcnt--; + u_iov++; + } + /* Exit loop if all elements in KIOVs have been processed. */ + if (riov->i == riov->used && wiov->i == wiov->used) + break; + } + /* + * Update the used ring if a descriptor was available and some data was + * copied in/out and the user asked for a used ring update. 
+ */ + if (*head != USHRT_MAX && copy->out_len && copy->update_used) { + u32 total = 0; + + /* Determine the total data consumed */ + total += mic_vringh_iov_consumed(riov); + total += mic_vringh_iov_consumed(wiov); + vringh_complete_kern(vrh, *head, total); + *head = USHRT_MAX; + if (vringh_need_notify_kern(vrh) > 0) + vringh_notify(vrh); + vringh_kiov_cleanup(riov); + vringh_kiov_cleanup(wiov); + /* Update avail idx for user space */ + vr->info->avail_idx = vrh->last_avail_idx; + } + return ret; +} + +static inline int mic_verify_copy_args(struct mic_vdev *mvdev, + struct mic_copy_desc *copy) +{ + if (copy->vr_idx >= mvdev->dd->num_vq) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, -EINVAL); + return -EINVAL; + } + return 0; +} + +/* Copy a specified number of virtio descriptors in a chain */ +int mic_virtio_copy_desc(struct mic_vdev *mvdev, + struct mic_copy_desc *copy) +{ + int err; + struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; + + err = mic_verify_copy_args(mvdev, copy); + if (err) + return err; + + mutex_lock(&mvr->vr_mutex); + if (!mic_vdevup(mvdev)) { + err = -ENODEV; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, err); + goto err; + } + err = _mic_virtio_copy(mvdev, copy); + if (err) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, err); + } +err: + mutex_unlock(&mvr->vr_mutex); + return err; +} + +static void mic_virtio_init_post(struct mic_vdev *mvdev) +{ + struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd); + int i; + + for (i = 0; i < mvdev->dd->num_vq; i++) { + if (!le64_to_cpu(vqconfig[i].used_address)) { + dev_warn(mic_dev(mvdev), "used_address zero??\n"); + continue; + } + mvdev->mvr[i].vrh.vring.used = + (void __force *)mvdev->mdev->aper.va + + le64_to_cpu(vqconfig[i].used_address); + } + + mvdev->dc->used_address_updated = 0; + + dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n", + __func__, mvdev->virtio_id); +} + +static inline void mic_virtio_device_reset(struct mic_vdev *mvdev) +{ + int i; + + dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n", + __func__, mvdev->dd->status, mvdev->virtio_id); + + for (i = 0; i < mvdev->dd->num_vq; i++) + /* + * Avoid lockdep false positive. The + 1 is for the mic + * mutex which is held in the reset devices code path. 
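+ * In other words, vr_mutex[i] is taken with lockdep subclass i + 1,
+ * leaving subclass 0 for the outer mic mutex.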
+ */ + mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); + + /* 0 status means "reset" */ + mvdev->dd->status = 0; + mvdev->dc->vdev_reset = 0; + mvdev->dc->host_ack = 1; + + for (i = 0; i < mvdev->dd->num_vq; i++) { + struct vringh *vrh = &mvdev->mvr[i].vrh; + mvdev->mvr[i].vring.info->avail_idx = 0; + vrh->completed = 0; + vrh->last_avail_idx = 0; + vrh->last_used_idx = 0; + } + + for (i = 0; i < mvdev->dd->num_vq; i++) + mutex_unlock(&mvdev->mvr[i].vr_mutex); +} + +void mic_virtio_reset_devices(struct mic_device *mdev) +{ + struct list_head *pos, *tmp; + struct mic_vdev *mvdev; + + dev_dbg(mdev->sdev->parent, "%s\n", __func__); + + list_for_each_safe(pos, tmp, &mdev->vdev_list) { + mvdev = list_entry(pos, struct mic_vdev, list); + mic_virtio_device_reset(mvdev); + mvdev->poll_wake = 1; + wake_up(&mvdev->waitq); + } +} + +void mic_bh_handler(struct work_struct *work) +{ + struct mic_vdev *mvdev = container_of(work, struct mic_vdev, + virtio_bh_work); + + if (mvdev->dc->used_address_updated) + mic_virtio_init_post(mvdev); + + if (mvdev->dc->vdev_reset) + mic_virtio_device_reset(mvdev); + + mvdev->poll_wake = 1; + wake_up(&mvdev->waitq); +} + +static irqreturn_t mic_virtio_intr_handler(int irq, void *data) +{ + struct mic_vdev *mvdev = data; + struct mic_device *mdev = mvdev->mdev; + + mdev->ops->intr_workarounds(mdev); + schedule_work(&mvdev->virtio_bh_work); + return IRQ_HANDLED; +} + +int mic_virtio_config_change(struct mic_vdev *mvdev, + void __user *argp) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); + int ret = 0, retry, i; + struct mic_bootparam *bootparam = mvdev->mdev->dp; + s8 db = bootparam->h2c_config_db; + + mutex_lock(&mvdev->mdev->mic_mutex); + for (i = 0; i < mvdev->dd->num_vq; i++) + mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); + + if (db == -1 || mvdev->dd->type == -1) { + ret = -EIO; + goto exit; + } + + if (copy_from_user(mic_vq_configspace(mvdev->dd), + argp, mvdev->dd->config_len)) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, -EFAULT); + ret = -EFAULT; + goto exit; + } + mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; + mvdev->mdev->ops->send_intr(mvdev->mdev, db); + + for (retry = 100; retry--;) { + ret = wait_event_timeout(wake, + mvdev->dc->guest_ack, msecs_to_jiffies(100)); + if (ret) + break; + } + + dev_dbg(mic_dev(mvdev), + "%s %d retry: %d\n", __func__, __LINE__, retry); + mvdev->dc->config_change = 0; + mvdev->dc->guest_ack = 0; +exit: + for (i = 0; i < mvdev->dd->num_vq; i++) + mutex_unlock(&mvdev->mvr[i].vr_mutex); + mutex_unlock(&mvdev->mdev->mic_mutex); + return ret; +} + +static int mic_copy_dp_entry(struct mic_vdev *mvdev, + void __user *argp, + __u8 *type, + struct mic_device_desc **devpage) +{ + struct mic_device *mdev = mvdev->mdev; + struct mic_device_desc dd, *dd_config, *devp; + struct mic_vqconfig *vqconfig; + int ret = 0, i; + bool slot_found = false; + + if (copy_from_user(&dd, argp, sizeof(dd))) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, -EFAULT); + return -EFAULT; + } + + if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE || + dd.num_vq > MIC_MAX_VRINGS) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, -EINVAL); + return -EINVAL; + } + + dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL); + if (dd_config == NULL) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, -ENOMEM); + return -ENOMEM; + } + if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) { + ret = -EFAULT; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, 
ret); + goto exit; + } + + vqconfig = mic_vq_config(dd_config); + for (i = 0; i < dd.num_vq; i++) { + if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) { + ret = -EINVAL; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + goto exit; + } + } + + /* Find the first free device page entry */ + for (i = sizeof(struct mic_bootparam); + i < MIC_DP_SIZE - mic_total_desc_size(dd_config); + i += mic_total_desc_size(devp)) { + devp = mdev->dp + i; + if (devp->type == 0 || devp->type == -1) { + slot_found = true; + break; + } + } + if (!slot_found) { + ret = -EINVAL; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + goto exit; + } + /* + * Save off the type before doing the memcpy. Type will be set in the + * end after completing all initialization for the new device. + */ + *type = dd_config->type; + dd_config->type = 0; + memcpy(devp, dd_config, mic_desc_size(dd_config)); + + *devpage = devp; +exit: + kfree(dd_config); + return ret; +} + +static void mic_init_device_ctrl(struct mic_vdev *mvdev, + struct mic_device_desc *devpage) +{ + struct mic_device_ctrl *dc; + + dc = (void *)devpage + mic_aligned_desc_size(devpage); + + dc->config_change = 0; + dc->guest_ack = 0; + dc->vdev_reset = 0; + dc->host_ack = 0; + dc->used_address_updated = 0; + dc->c2h_vdev_db = -1; + dc->h2c_vdev_db = -1; + mvdev->dc = dc; +} + +int mic_virtio_add_device(struct mic_vdev *mvdev, + void __user *argp) +{ + struct mic_device *mdev = mvdev->mdev; + struct mic_device_desc *dd = NULL; + struct mic_vqconfig *vqconfig; + int vr_size, i, j, ret; + u8 type = 0; + s8 db; + char irqname[10]; + struct mic_bootparam *bootparam = mdev->dp; + u16 num; + dma_addr_t vr_addr; + + mutex_lock(&mdev->mic_mutex); + + ret = mic_copy_dp_entry(mvdev, argp, &type, &dd); + if (ret) { + mutex_unlock(&mdev->mic_mutex); + return ret; + } + + mic_init_device_ctrl(mvdev, dd); + + mvdev->dd = dd; + mvdev->virtio_id = type; + vqconfig = mic_vq_config(dd); + INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler); + + for (i = 0; i < dd->num_vq; i++) { + struct mic_vringh *mvr = &mvdev->mvr[i]; + struct mic_vring *vr = &mvdev->mvr[i].vring; + num = le16_to_cpu(vqconfig[i].num); + mutex_init(&mvr->vr_mutex); + vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + + sizeof(struct _mic_vring_info)); + vr->va = (void *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(vr_size)); + if (!vr->va) { + ret = -ENOMEM; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + goto err; + } + vr->len = vr_size; + vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); + vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); + vr_addr = mic_map_single(mdev, vr->va, vr_size); + if (mic_map_error(vr_addr)) { + free_pages((unsigned long)vr->va, get_order(vr_size)); + ret = -ENOMEM; + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + goto err; + } + vqconfig[i].address = cpu_to_le64(vr_addr); + + vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); + ret = vringh_init_kern(&mvr->vrh, + *(u32 *)mic_vq_features(mvdev->dd), num, false, + vr->vr.desc, vr->vr.avail, vr->vr.used); + if (ret) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, ret); + goto err; + } + vringh_kiov_init(&mvr->riov, NULL, 0); + vringh_kiov_init(&mvr->wiov, NULL, 0); + mvr->head = USHRT_MAX; + mvr->mvdev = mvdev; + mvr->vrh.notify = mic_notify; + dev_dbg(mdev->sdev->parent, + "%s %d index %d va %p info %p vr_size 0x%x\n", + __func__, __LINE__, i, vr->va, vr->info, 
vr_size); + } + + snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id, + mvdev->virtio_id); + mvdev->virtio_db = mic_next_db(mdev); + mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler, + irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB); + if (IS_ERR(mvdev->virtio_cookie)) { + ret = PTR_ERR(mvdev->virtio_cookie); + dev_dbg(mdev->sdev->parent, "request irq failed\n"); + goto err; + } + + mvdev->dc->c2h_vdev_db = mvdev->virtio_db; + + list_add_tail(&mvdev->list, &mdev->vdev_list); + /* + * Order the type update with previous stores. This write barrier + * is paired with the corresponding read barrier before the uncached + * system memory read of the type, on the card while scanning the + * device page. + */ + smp_wmb(); + dd->type = type; + + dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type); + + db = bootparam->h2c_config_db; + if (db != -1) + mdev->ops->send_intr(mdev, db); + mutex_unlock(&mdev->mic_mutex); + return 0; +err: + vqconfig = mic_vq_config(dd); + for (j = 0; j < i; j++) { + struct mic_vringh *mvr = &mvdev->mvr[j]; + mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address), + mvr->vring.len); + free_pages((unsigned long)mvr->vring.va, + get_order(mvr->vring.len)); + } + mutex_unlock(&mdev->mic_mutex); + return ret; +} + +void mic_virtio_del_device(struct mic_vdev *mvdev) +{ + struct list_head *pos, *tmp; + struct mic_vdev *tmp_mvdev; + struct mic_device *mdev = mvdev->mdev; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); + int i, ret, retry; + struct mic_vqconfig *vqconfig; + struct mic_bootparam *bootparam = mdev->dp; + s8 db; + + mutex_lock(&mdev->mic_mutex); + db = bootparam->h2c_config_db; + if (db == -1) + goto skip_hot_remove; + dev_dbg(mdev->sdev->parent, + "Requesting hot remove id %d\n", mvdev->virtio_id); + mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; + mdev->ops->send_intr(mdev, db); + for (retry = 100; retry--;) { + ret = wait_event_timeout(wake, + mvdev->dc->guest_ack, msecs_to_jiffies(100)); + if (ret) + break; + } + dev_dbg(mdev->sdev->parent, + "Device id %d config_change %d guest_ack %d retry %d\n", + mvdev->virtio_id, mvdev->dc->config_change, + mvdev->dc->guest_ack, retry); + mvdev->dc->config_change = 0; + mvdev->dc->guest_ack = 0; +skip_hot_remove: + mic_free_irq(mdev, mvdev->virtio_cookie, mvdev); + flush_work(&mvdev->virtio_bh_work); + vqconfig = mic_vq_config(mvdev->dd); + for (i = 0; i < mvdev->dd->num_vq; i++) { + struct mic_vringh *mvr = &mvdev->mvr[i]; + vringh_kiov_cleanup(&mvr->riov); + vringh_kiov_cleanup(&mvr->wiov); + mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address), + mvr->vring.len); + free_pages((unsigned long)mvr->vring.va, + get_order(mvr->vring.len)); + } + + list_for_each_safe(pos, tmp, &mdev->vdev_list) { + tmp_mvdev = list_entry(pos, struct mic_vdev, list); + if (tmp_mvdev == mvdev) { + list_del(pos); + dev_dbg(mdev->sdev->parent, + "Removing virtio device id %d\n", + mvdev->virtio_id); + break; + } + } + /* + * Order the type update with previous stores. This write barrier + * is paired with the corresponding read barrier before the uncached + * system memory read of the type, on the card while scanning the + * device page. 
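+ *
+ * Card side sketch (hypothetical, mirroring the pairing described
+ * above):
+ *	smp_rmb();
+ *	type = ioread8(&devp->type);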
+ */ + smp_wmb(); + mvdev->dd->type = -1; + mutex_unlock(&mdev->mic_mutex); +} diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/host/mic_virtio.h new file mode 100644 index 00000000000..184f3c84805 --- /dev/null +++ b/drivers/misc/mic/host/mic_virtio.h @@ -0,0 +1,138 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#ifndef MIC_VIRTIO_H +#define MIC_VIRTIO_H + +#include <linux/virtio_config.h> +#include <linux/mic_ioctl.h> + +/* + * Note on endianness. + * 1. Host can be both BE or LE + * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail + * rings and ioreadXX/iowriteXX to access used ring. + * 3. Device page exposed by host to guest contains LE values. Guest + * accesses these using ioreadXX/iowriteXX etc. This way in general we + * obey the virtio spec according to which guest works with native + * endianness and host is aware of guest endianness and does all + * required endianness conversion. + * 4. Data provided from user space to guest (in ADD_DEVICE and + * CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be + * in guest endianness. + */ + +/** + * struct mic_vringh - Virtio ring host information. + * + * @vring: The MIC vring used for setting up user space mappings. + * @vrh: The host VRINGH used for accessing the card vrings. + * @riov: The VRINGH read kernel IOV. + * @wiov: The VRINGH write kernel IOV. + * @head: The VRINGH head index address passed to vringh_getdesc_kern(..). + * @vr_mutex: Mutex for synchronizing access to the VRING. + * @mvdev: Back pointer to MIC virtio device for vringh_notify(..). + */ +struct mic_vringh { + struct mic_vring vring; + struct vringh vrh; + struct vringh_kiov riov; + struct vringh_kiov wiov; + u16 head; + struct mutex vr_mutex; + struct mic_vdev *mvdev; +}; + +/** + * struct mic_vdev - Host information for a card Virtio device. + * + * @virtio_id - Virtio device id. + * @waitq - Waitqueue to allow ring3 apps to poll. + * @mdev - Back pointer to host MIC device. + * @poll_wake - Used for waking up threads blocked in poll. + * @out_bytes - Debug stats for number of bytes copied from host to card. + * @in_bytes - Debug stats for number of bytes copied from card to host. + * @mvr - Store per VRING data structures. + * @virtio_bh_work - Work struct used to schedule virtio bottom half handling. + * @dd - Virtio device descriptor. + * @dc - Virtio device control fields. + * @list - List of Virtio devices. + * @virtio_db - The doorbell used by the card to interrupt the host. + * @virtio_cookie - The cookie returned while requesting interrupts. 
+ */ +struct mic_vdev { + int virtio_id; + wait_queue_head_t waitq; + struct mic_device *mdev; + int poll_wake; + unsigned long out_bytes; + unsigned long in_bytes; + struct mic_vringh mvr[MIC_MAX_VRINGS]; + struct work_struct virtio_bh_work; + struct mic_device_desc *dd; + struct mic_device_ctrl *dc; + struct list_head list; + int virtio_db; + struct mic_irq *virtio_cookie; +}; + +void mic_virtio_uninit(struct mic_device *mdev); +int mic_virtio_add_device(struct mic_vdev *mvdev, + void __user *argp); +void mic_virtio_del_device(struct mic_vdev *mvdev); +int mic_virtio_config_change(struct mic_vdev *mvdev, + void __user *argp); +int mic_virtio_copy_desc(struct mic_vdev *mvdev, + struct mic_copy_desc *request); +void mic_virtio_reset_devices(struct mic_device *mdev); +void mic_bh_handler(struct work_struct *work); + +/* Helper API to obtain the MIC PCIe device */ +static inline struct device *mic_dev(struct mic_vdev *mvdev) +{ + return mvdev->mdev->sdev->parent; +} + +/* Helper API to check if a virtio device is initialized */ +static inline int mic_vdev_inited(struct mic_vdev *mvdev) +{ + /* Device has not been created yet */ + if (!mvdev->dd || !mvdev->dd->type) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, -EINVAL); + return -EINVAL; + } + + /* Device has been removed/deleted */ + if (mvdev->dd->type == -1) { + dev_err(mic_dev(mvdev), "%s %d err %d\n", + __func__, __LINE__, -ENODEV); + return -ENODEV; + } + + return 0; +} + +/* Helper API to check if a virtio device is running */ +static inline bool mic_vdevup(struct mic_vdev *mvdev) +{ + return !!mvdev->dd->status; +} +#endif diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c new file mode 100644 index 00000000000..5562fdd3ef4 --- /dev/null +++ b/drivers/misc/mic/host/mic_x100.c @@ -0,0 +1,574 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. + * + */ +#include <linux/fs.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/firmware.h> +#include <linux/delay.h> + +#include "../common/mic_dev.h" +#include "mic_device.h" +#include "mic_x100.h" +#include "mic_smpt.h" + +/** + * mic_x100_write_spad - write to the scratchpad register + * @mdev: pointer to mic_device instance + * @idx: index to the scratchpad register, 0 based + * @val: the data value to put into the register + * + * This function allows writing of a 32bit value to the indexed scratchpad + * register. + * + * RETURNS: none. 
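+ *
+ * Used, for example, later in this patch to publish the firmware size
+ * to the card:
+ *	mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);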
+ */ +static void +mic_x100_write_spad(struct mic_device *mdev, unsigned int idx, u32 val) +{ + dev_dbg(mdev->sdev->parent, "Writing 0x%x to scratch pad index %d\n", + val, idx); + mic_mmio_write(&mdev->mmio, val, + MIC_X100_SBOX_BASE_ADDRESS + + MIC_X100_SBOX_SPAD0 + idx * 4); +} + +/** + * mic_x100_read_spad - read from the scratchpad register + * @mdev: pointer to mic_device instance + * @idx: index to scratchpad register, 0 based + * + * This function allows reading of the 32bit scratchpad register. + * + * RETURNS: the value read from the scratchpad register. + */ +static u32 +mic_x100_read_spad(struct mic_device *mdev, unsigned int idx) +{ + u32 val = mic_mmio_read(&mdev->mmio, + MIC_X100_SBOX_BASE_ADDRESS + + MIC_X100_SBOX_SPAD0 + idx * 4); + + dev_dbg(mdev->sdev->parent, + "Reading 0x%x from scratch pad index %d\n", val, idx); + return val; +} + +/** + * mic_x100_enable_interrupts - Enable interrupts. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_enable_interrupts(struct mic_device *mdev) +{ + u32 reg; + struct mic_mw *mw = &mdev->mmio; + u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0; + u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0; + + reg = mic_mmio_read(mw, sice0); + reg |= MIC_X100_SBOX_DBR_BITS(0xf) | MIC_X100_SBOX_DMA_BITS(0xff); + mic_mmio_write(mw, reg, sice0); + + /* + * Enable auto-clear when enabling interrupts. Applicable only for + * MSI-x. Legacy and MSI mode cannot have auto-clear enabled. + */ + if (mdev->irq_info.num_vectors > 1) { + reg = mic_mmio_read(mw, siac0); + reg |= MIC_X100_SBOX_DBR_BITS(0xf) | + MIC_X100_SBOX_DMA_BITS(0xff); + mic_mmio_write(mw, reg, siac0); + } +} + +/** + * mic_x100_disable_interrupts - Disable interrupts. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_disable_interrupts(struct mic_device *mdev) +{ + u32 reg; + struct mic_mw *mw = &mdev->mmio; + u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0; + u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0; + u32 sicc0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICC0; + + reg = mic_mmio_read(mw, sice0); + mic_mmio_write(mw, reg, sicc0); + + if (mdev->irq_info.num_vectors > 1) { + reg = mic_mmio_read(mw, siac0); + reg &= ~(MIC_X100_SBOX_DBR_BITS(0xf) | + MIC_X100_SBOX_DMA_BITS(0xff)); + mic_mmio_write(mw, reg, siac0); + } +} + +/** + * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC. + * @mdev: pointer to mic_device instance + * @doorbell: doorbell index, 0 based + */ +static void mic_x100_send_sbox_intr(struct mic_device *mdev, + int doorbell) +{ + struct mic_mw *mw = &mdev->mmio; + u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8; + u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS + + apic_icr_offset); + + /* for MIC we need to make sure we "hit" the send_icr bit (13) */ + apicicr_low = (apicicr_low | (1 << 13)); + + /* Ensure that the interrupt is ordered w.r.t. previous stores. */ + wmb(); + mic_mmio_write(mw, apicicr_low, + MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset); +} + +/** + * mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC. + * @mdev: pointer to mic_device instance + * @doorbell: RDMASR doorbell index, 0 based + */ +static void mic_x100_send_rdmasr_intr(struct mic_device *mdev, + int doorbell) +{ + int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2); + /* Ensure that the interrupt is ordered w.r.t. previous stores. */ + wmb(); + mic_mmio_write(&mdev->mmio, 0, + MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset); +} + +/** + * mic_x100_send_intr - Send interrupt to MIC.
+ * @mdev: pointer to mic_device instance + * @doorbell: doorbell number. + */ +static void mic_x100_send_intr(struct mic_device *mdev, int doorbell) +{ + int rdmasr_db; + if (doorbell < MIC_X100_NUM_SBOX_IRQ) { + mic_x100_send_sbox_intr(mdev, doorbell); + } else { + rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ + + MIC_X100_RDMASR_IRQ_BASE; + mic_x100_send_rdmasr_intr(mdev, rdmasr_db); + } +} + +/** + * mic_x100_ack_interrupt - Read the interrupt sources register and + * clear it. This function will be called in the MSI/INTx case. + * @mdev: Pointer to mic_device instance. + * + * Returns: bitmask of interrupt sources triggered. + */ +static u32 mic_x100_ack_interrupt(struct mic_device *mdev) +{ + u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0; + u32 reg = mic_mmio_read(&mdev->mmio, sicr0); + mic_mmio_write(&mdev->mmio, reg, sicr0); + return reg; +} + +/** + * mic_x100_intr_workarounds - These hardware specific workarounds are + * to be invoked every time an interrupt is handled. + * @mdev: Pointer to mic_device instance. + * + * Returns: none + */ +static void mic_x100_intr_workarounds(struct mic_device *mdev) +{ + struct mic_mw *mw = &mdev->mmio; + + /* Clear pending bit array. */ + if (MIC_A0_STEP == mdev->stepping) + mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS + + MIC_X100_SBOX_MSIXPBACR); + + if (mdev->stepping >= MIC_B0_STEP) + mdev->intr_ops->enable_interrupts(mdev); +} + +/** + * mic_x100_hw_intr_init - Initialize h/w specific interrupt + * information. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_hw_intr_init(struct mic_device *mdev) +{ + mdev->intr_info = (struct mic_intr_info *)mic_x100_intr_init; +} + +/** + * mic_x100_read_msi_to_src_map - read from the MSI mapping registers + * @mdev: pointer to mic_device instance + * @idx: index to the mapping register, 0 based + * + * This function allows reading of the 32bit MSI mapping register. + * + * RETURNS: The value in the register. + */ +static u32 +mic_x100_read_msi_to_src_map(struct mic_device *mdev, int idx) +{ + return mic_mmio_read(&mdev->mmio, + MIC_X100_SBOX_BASE_ADDRESS + + MIC_X100_SBOX_MXAR0 + idx * 4); +} + +/** + * mic_x100_program_msi_to_src_map - program the MSI mapping registers + * @mdev: pointer to mic_device instance + * @idx: index to the mapping register, 0 based + * @offset: The bit offset in the register that needs to be updated. + * @set: boolean specifying if the bit in the specified offset needs + * to be set or cleared. + * + * RETURNS: None. + */ +static void +mic_x100_program_msi_to_src_map(struct mic_device *mdev, + int idx, int offset, bool set) +{ + unsigned long reg; + struct mic_mw *mw = &mdev->mmio; + u32 mxar = MIC_X100_SBOX_BASE_ADDRESS + + MIC_X100_SBOX_MXAR0 + idx * 4; + + reg = mic_mmio_read(mw, mxar); + if (set) + __set_bit(offset, &reg); + else + __clear_bit(offset, &reg); + mic_mmio_write(mw, reg, mxar); +} + +/* + * mic_x100_reset_fw_ready - Reset firmware ready status field. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_reset_fw_ready(struct mic_device *mdev) +{ + mdev->ops->write_spad(mdev, MIC_X100_DOWNLOAD_INFO, 0); +} + +/* + * mic_x100_is_fw_ready - Check if firmware is ready. + * @mdev: pointer to mic_device instance + */ +static bool mic_x100_is_fw_ready(struct mic_device *mdev) +{ + u32 scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO); + return MIC_X100_SPAD2_DOWNLOAD_STATUS(scratch2) ? true : false; +} + +/** + * mic_x100_get_apic_id - Get bootstrap APIC ID.
+ * @mdev: pointer to mic_device instance + */ +static u32 mic_x100_get_apic_id(struct mic_device *mdev) +{ + u32 scratch2 = 0; + + scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO); + return MIC_X100_SPAD2_APIC_ID(scratch2); +} + +/** + * mic_x100_send_firmware_intr - Send an interrupt to the firmware on MIC. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_send_firmware_intr(struct mic_device *mdev) +{ + u32 apicicr_low; + u64 apic_icr_offset = MIC_X100_SBOX_APICICR7; + int vector = MIC_X100_BSP_INTERRUPT_VECTOR; + struct mic_mw *mw = &mdev->mmio; + + /* + * For MIC we need to make sure we "hit" + * the send_icr bit (13). + */ + apicicr_low = (vector | (1 << 13)); + + mic_mmio_write(mw, mic_x100_get_apic_id(mdev), + MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset + 4); + + /* Ensure that the interrupt is ordered w.r.t. previous stores. */ + wmb(); + mic_mmio_write(mw, apicicr_low, + MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset); +} + +/** + * mic_x100_hw_reset - Reset the MIC device. + * @mdev: pointer to mic_device instance + */ +static void mic_x100_hw_reset(struct mic_device *mdev) +{ + u32 reset_reg; + u32 rgcr = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_RGCR; + struct mic_mw *mw = &mdev->mmio; + + /* Ensure that the reset is ordered w.r.t. previous loads and stores */ + mb(); + /* Trigger reset */ + reset_reg = mic_mmio_read(mw, rgcr); + reset_reg |= 0x1; + mic_mmio_write(mw, reset_reg, rgcr); + /* + * It seems we really want to delay at least 1 second + * after touching reset to prevent a lot of problems. + */ + msleep(1000); +} + +/** + * mic_x100_load_command_line - Load command line to MIC. + * @mdev: pointer to mic_device instance + * @fw: the firmware image + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int +mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw) +{ + u32 len = 0; + u32 boot_mem; + char *buf; + void __iomem *cmd_line_va = mdev->aper.va + mdev->bootaddr + fw->size; +#define CMDLINE_SIZE 2048 + + boot_mem = mdev->aper.len >> 20; + buf = kzalloc(CMDLINE_SIZE, GFP_KERNEL); + if (!buf) { + dev_err(mdev->sdev->parent, + "%s %d allocation failed\n", __func__, __LINE__); + return -ENOMEM; + } + len += snprintf(buf, CMDLINE_SIZE - len, + " mem=%dM", boot_mem); + if (mdev->cmdline) + snprintf(buf + len, CMDLINE_SIZE - len, " %s", mdev->cmdline); + memcpy_toio(cmd_line_va, buf, strlen(buf) + 1); + kfree(buf); + return 0; +} + +/** + * mic_x100_load_ramdisk - Load ramdisk to MIC. + * @mdev: pointer to mic_device instance + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int +mic_x100_load_ramdisk(struct mic_device *mdev) +{ + const struct firmware *fw; + int rc; + struct boot_params __iomem *bp = mdev->aper.va + mdev->bootaddr; + + rc = request_firmware(&fw, + mdev->ramdisk, mdev->sdev->parent); + if (rc < 0) { + dev_err(mdev->sdev->parent, + "ramdisk request_firmware failed: %d %s\n", + rc, mdev->ramdisk); + goto error; + } + /* + * Typically the bootaddr for card OS is 64M + * so copy over the ramdisk @ 128M. + */ + memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); + iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image); + iowrite32(fw->size, &bp->hdr.ramdisk_size); + release_firmware(fw); +error: + return rc; +} + +/** + * mic_x100_get_boot_addr - Get MIC boot address. 
+ * @mdev: pointer to mic_device instance + * + * This function is called during firmware load to determine + * the address at which the OS should be downloaded in card + * memory, i.e. GDDR. + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int +mic_x100_get_boot_addr(struct mic_device *mdev) +{ + u32 scratch2, boot_addr; + int rc = 0; + + scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO); + boot_addr = MIC_X100_SPAD2_DOWNLOAD_ADDR(scratch2); + dev_dbg(mdev->sdev->parent, "%s %d boot_addr 0x%x\n", + __func__, __LINE__, boot_addr); + if (boot_addr > (1U << 31)) { + dev_err(mdev->sdev->parent, + "incorrect bootaddr 0x%x\n", + boot_addr); + rc = -EINVAL; + goto error; + } + mdev->bootaddr = boot_addr; +error: + return rc; +} + +/** + * mic_x100_load_firmware - Load firmware to MIC. + * @mdev: pointer to mic_device instance + * @buf: buffer containing boot string including firmware/ramdisk path. + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +static int +mic_x100_load_firmware(struct mic_device *mdev, const char *buf) +{ + int rc; + const struct firmware *fw; + + rc = mic_x100_get_boot_addr(mdev); + if (rc) + goto error; + /* load OS */ + rc = request_firmware(&fw, mdev->firmware, mdev->sdev->parent); + if (rc < 0) { + dev_err(mdev->sdev->parent, + "firmware request_firmware failed: %d %s\n", + rc, mdev->firmware); + goto error; + } + if (mdev->bootaddr > mdev->aper.len - fw->size) { + rc = -EINVAL; + dev_err(mdev->sdev->parent, "%s %d rc %d bootaddr 0x%x\n", + __func__, __LINE__, rc, mdev->bootaddr); + release_firmware(fw); + goto error; + } + memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size); + mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size); + if (!strcmp(mdev->bootmode, "elf")) { + release_firmware(fw); + goto done; + } + /* load command line */ + rc = mic_x100_load_command_line(mdev, fw); + if (rc) { + dev_err(mdev->sdev->parent, "%s %d rc %d\n", + __func__, __LINE__, rc); + release_firmware(fw); + goto error; + } + release_firmware(fw); + /* load ramdisk */ + if (mdev->ramdisk) + rc = mic_x100_load_ramdisk(mdev); +error: + dev_dbg(mdev->sdev->parent, "%s %d rc %d\n", __func__, __LINE__, rc); +done: + return rc; +} + +/** + * mic_x100_get_postcode - Get postcode status from firmware. + * @mdev: pointer to mic_device instance + * + * RETURNS: postcode. + */ +static u32 mic_x100_get_postcode(struct mic_device *mdev) +{ + return mic_mmio_read(&mdev->mmio, MIC_X100_POSTCODE); +} + +/** + * mic_x100_smpt_set - Update an SMPT entry with a DMA address. + * @mdev: pointer to mic_device instance + * @dma_addr: DMA address to populate the SMPT entry with + * @index: SMPT entry index, 0 based + * + * RETURNS: none. + */ +static void +mic_x100_smpt_set(struct mic_device *mdev, dma_addr_t dma_addr, u8 index) +{ +#define SNOOP_ON (0 << 0) +#define SNOOP_OFF (1 << 0) +/* + * Sbox Smpt Reg Bits: + * Bits 31:2 Host address + * Bits 1 RSVD + * Bits 0 No snoop + */ +#define BUILD_SMPT(NO_SNOOP, HOST_ADDR) \ + (u32)(((HOST_ADDR) << 2) | ((NO_SNOOP) & 0x01)) + + u32 smpt_reg_val = BUILD_SMPT(SNOOP_ON, + dma_addr >> mdev->smpt->info.page_shift); + mic_mmio_write(&mdev->mmio, smpt_reg_val, + MIC_X100_SBOX_BASE_ADDRESS + + MIC_X100_SBOX_SMPT00 + (4 * index)); +} + +/** + * mic_x100_smpt_hw_init - Initialize SMPT X100 specific fields. + * @mdev: pointer to mic_device instance + * + * RETURNS: none.
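+ *
+ * With the values below, each SMPT entry spans 1ULL << 34 = 16G, so
+ * the 32 entries cover 32 * 16G = 512G of host memory, mapped at MIC
+ * addresses starting at the 0x8000000000 (512G) base.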
+ */ +static void mic_x100_smpt_hw_init(struct mic_device *mdev) +{ + struct mic_smpt_hw_info *info = &mdev->smpt->info; + + info->num_reg = 32; + info->page_shift = 34; + info->page_size = (1ULL << info->page_shift); + info->base = 0x8000000000ULL; +} + +struct mic_smpt_ops mic_x100_smpt_ops = { + .init = mic_x100_smpt_hw_init, + .set = mic_x100_smpt_set, +}; + +struct mic_hw_ops mic_x100_ops = { + .aper_bar = MIC_X100_APER_BAR, + .mmio_bar = MIC_X100_MMIO_BAR, + .read_spad = mic_x100_read_spad, + .write_spad = mic_x100_write_spad, + .send_intr = mic_x100_send_intr, + .ack_interrupt = mic_x100_ack_interrupt, + .intr_workarounds = mic_x100_intr_workarounds, + .reset = mic_x100_hw_reset, + .reset_fw_ready = mic_x100_reset_fw_ready, + .is_fw_ready = mic_x100_is_fw_ready, + .send_firmware_intr = mic_x100_send_firmware_intr, + .load_mic_fw = mic_x100_load_firmware, + .get_postcode = mic_x100_get_postcode, +}; + +struct mic_hw_intr_ops mic_x100_intr_ops = { + .intr_init = mic_x100_hw_intr_init, + .enable_interrupts = mic_x100_enable_interrupts, + .disable_interrupts = mic_x100_disable_interrupts, + .program_msi_to_src_map = mic_x100_program_msi_to_src_map, + .read_msi_to_src_map = mic_x100_read_msi_to_src_map, +}; diff --git a/drivers/misc/mic/host/mic_x100.h b/drivers/misc/mic/host/mic_x100.h new file mode 100644 index 00000000000..8b7daa182e5 --- /dev/null +++ b/drivers/misc/mic/host/mic_x100.h @@ -0,0 +1,98 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Host driver. 
+ * + */ +#ifndef _MIC_X100_HW_H_ +#define _MIC_X100_HW_H_ + +#define MIC_X100_PCI_DEVICE_2250 0x2250 +#define MIC_X100_PCI_DEVICE_2251 0x2251 +#define MIC_X100_PCI_DEVICE_2252 0x2252 +#define MIC_X100_PCI_DEVICE_2253 0x2253 +#define MIC_X100_PCI_DEVICE_2254 0x2254 +#define MIC_X100_PCI_DEVICE_2255 0x2255 +#define MIC_X100_PCI_DEVICE_2256 0x2256 +#define MIC_X100_PCI_DEVICE_2257 0x2257 +#define MIC_X100_PCI_DEVICE_2258 0x2258 +#define MIC_X100_PCI_DEVICE_2259 0x2259 +#define MIC_X100_PCI_DEVICE_225a 0x225a +#define MIC_X100_PCI_DEVICE_225b 0x225b +#define MIC_X100_PCI_DEVICE_225c 0x225c +#define MIC_X100_PCI_DEVICE_225d 0x225d +#define MIC_X100_PCI_DEVICE_225e 0x225e + +#define MIC_X100_APER_BAR 0 +#define MIC_X100_MMIO_BAR 4 + +#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000 +#define MIC_X100_SBOX_SPAD0 0x0000AB20 +#define MIC_X100_SBOX_SICR0_DBR(x) ((x) & 0xf) +#define MIC_X100_SBOX_SICR0_DMA(x) (((x) >> 8) & 0xff) +#define MIC_X100_SBOX_SICE0_DBR(x) ((x) & 0xf) +#define MIC_X100_SBOX_DBR_BITS(x) ((x) & 0xf) +#define MIC_X100_SBOX_SICE0_DMA(x) (((x) >> 8) & 0xff) +#define MIC_X100_SBOX_DMA_BITS(x) (((x) & 0xff) << 8) + +#define MIC_X100_SBOX_APICICR0 0x0000A9D0 +#define MIC_X100_SBOX_SICR0 0x00009004 +#define MIC_X100_SBOX_SICE0 0x0000900C +#define MIC_X100_SBOX_SICC0 0x00009010 +#define MIC_X100_SBOX_SIAC0 0x00009014 +#define MIC_X100_SBOX_MSIXPBACR 0x00009084 +#define MIC_X100_SBOX_MXAR0 0x00009044 +#define MIC_X100_SBOX_SMPT00 0x00003100 +#define MIC_X100_SBOX_RDMASR0 0x0000B180 + +#define MIC_X100_DOORBELL_IDX_START 0 +#define MIC_X100_NUM_DOORBELL 4 +#define MIC_X100_DMA_IDX_START 8 +#define MIC_X100_NUM_DMA 8 +#define MIC_X100_ERR_IDX_START 30 +#define MIC_X100_NUM_ERR 1 + +#define MIC_X100_NUM_SBOX_IRQ 8 +#define MIC_X100_NUM_RDMASR_IRQ 8 +#define MIC_X100_RDMASR_IRQ_BASE 17 +#define MIC_X100_SPAD2_DOWNLOAD_STATUS(x) ((x) & 0x1) +#define MIC_X100_SPAD2_APIC_ID(x) (((x) >> 1) & 0x1ff) +#define MIC_X100_SPAD2_DOWNLOAD_ADDR(x) ((x) & 0xfffff000) +#define MIC_X100_SBOX_APICICR7 0x0000AA08 +#define MIC_X100_SBOX_RGCR 0x00004010 +#define MIC_X100_SBOX_SDBIC0 0x0000CC90 +#define MIC_X100_DOWNLOAD_INFO 2 +#define MIC_X100_FW_SIZE 5 +#define MIC_X100_POSTCODE 0x242c + +static const u16 mic_x100_intr_init[] = { + MIC_X100_DOORBELL_IDX_START, + MIC_X100_DMA_IDX_START, + MIC_X100_ERR_IDX_START, + MIC_X100_NUM_DOORBELL, + MIC_X100_NUM_DMA, + MIC_X100_NUM_ERR, +}; + +/* Host->Card(bootstrap) Interrupt Vector */ +#define MIC_X100_BSP_INTERRUPT_VECTOR 229 + +extern struct mic_hw_ops mic_x100_ops; +extern struct mic_smpt_ops mic_x100_smpt_ops; +extern struct mic_hw_intr_ops mic_x100_intr_ops; + +#endif diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c new file mode 100644 index 00000000000..956597321d2 --- /dev/null +++ b/drivers/misc/pch_phub.c @@ -0,0 +1,896 @@ +/* + * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/if_ether.h> +#include <linux/ctype.h> +#include <linux/dmi.h> + +#define PHUB_STATUS 0x00 /* Status Register offset */ +#define PHUB_CONTROL 0x04 /* Control Register offset */ +#define PHUB_TIMEOUT 0x05 /* Time out value for Status Register */ +#define PCH_PHUB_ROM_WRITE_ENABLE 0x01 /* Enabling for writing ROM */ +#define PCH_PHUB_ROM_WRITE_DISABLE 0x00 /* Disabling for writing ROM */ +#define PCH_PHUB_MAC_START_ADDR_EG20T 0x14 /* MAC data area start address + offset */ +#define PCH_PHUB_MAC_START_ADDR_ML7223 0x20C /* MAC data area start address + offset */ +#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset + (Intel EG20T PCH)*/ +#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address + offset(LAPIS Semicon ML7213) + */ +#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address + offset(LAPIS Semicon ML7223) + */ + +/* MAX number of INT_REDUCE_CONTROL registers */ +#define MAX_NUM_INT_REDUCE_CONTROL_REG 128 +#define PCI_DEVICE_ID_PCH1_PHUB 0x8801 +#define PCH_MINOR_NOS 1 +#define CLKCFG_CAN_50MHZ 0x12000000 +#define CLKCFG_CANCLK_MASK 0xFF000000 +#define CLKCFG_UART_MASK 0xFFFFFF + +/* CM-iTC */ +#define CLKCFG_UART_48MHZ (1 << 16) +#define CLKCFG_BAUDDIV (2 << 20) +#define CLKCFG_PLL2VCO (8 << 9) +#define CLKCFG_UARTCLKSEL (1 << 18) + +/* Macros for ML7213 */ +#define PCI_VENDOR_ID_ROHM 0x10db +#define PCI_DEVICE_ID_ROHM_ML7213_PHUB 0x801A + +/* Macros for ML7223 */ +#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ +#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ + +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801 + +/* SROM ACCESS Macro */ +#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) + +/* Registers address offset */ +#define PCH_PHUB_ID_REG 0x0000 +#define PCH_PHUB_QUEUE_PRI_VAL_REG 0x0004 +#define PCH_PHUB_RC_QUEUE_MAXSIZE_REG 0x0008 +#define PCH_PHUB_BRI_QUEUE_MAXSIZE_REG 0x000C +#define PCH_PHUB_COMP_RESP_TIMEOUT_REG 0x0010 +#define PCH_PHUB_BUS_SLAVE_CONTROL_REG 0x0014 +#define PCH_PHUB_DEADLOCK_AVOID_TYPE_REG 0x0018 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG0 0x0020 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG1 0x0024 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG2 0x0028 +#define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C +#define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040 +#define CLKCFG_REG_OFFSET 0x500 +#define FUNCSEL_REG_OFFSET 0x508 + +#define PCH_PHUB_OROM_SIZE 15360 + +/** + * struct pch_phub_reg - PHUB register structure + * @phub_id_reg: PHUB_ID register val + * @q_pri_val_reg: QUEUE_PRI_VAL register val + * @rc_q_maxsize_reg: RC_QUEUE_MAXSIZE register val + * @bri_q_maxsize_reg: BRI_QUEUE_MAXSIZE register val + * @comp_resp_timeout_reg: COMP_RESP_TIMEOUT register val + * @bus_slave_control_reg: BUS_SLAVE_CONTROL_REG register val + * @deadlock_avoid_type_reg: DEADLOCK_AVOID_TYPE register val + * @intpin_reg_wpermit_reg0: INTPIN_REG_WPERMIT register 0 val + * @intpin_reg_wpermit_reg1: INTPIN_REG_WPERMIT register 1 val + * @intpin_reg_wpermit_reg2: INTPIN_REG_WPERMIT register 2 val + * @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val + * @int_reduce_control_reg: INT_REDUCE_CONTROL registers val + * @clkcfg_reg: CLK CFG register val + * @funcsel_reg: Function select register value + 
* @pch_phub_base_address: Register base address + * @pch_phub_extrom_base_address: external rom base address + * @pch_mac_start_address: MAC address area start address + * @pch_opt_rom_start_address: Option ROM start address + * @ioh_type: Save IOH type + * @pdev: pointer to pci device struct + */ +struct pch_phub_reg { + u32 phub_id_reg; + u32 q_pri_val_reg; + u32 rc_q_maxsize_reg; + u32 bri_q_maxsize_reg; + u32 comp_resp_timeout_reg; + u32 bus_slave_control_reg; + u32 deadlock_avoid_type_reg; + u32 intpin_reg_wpermit_reg0; + u32 intpin_reg_wpermit_reg1; + u32 intpin_reg_wpermit_reg2; + u32 intpin_reg_wpermit_reg3; + u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG]; + u32 clkcfg_reg; + u32 funcsel_reg; + void __iomem *pch_phub_base_address; + void __iomem *pch_phub_extrom_base_address; + u32 pch_mac_start_address; + u32 pch_opt_rom_start_address; + int ioh_type; + struct pci_dev *pdev; +}; + +/* SROM SPEC for MAC address assignment offset */ +static const int pch_phub_mac_offset[ETH_ALEN] = {0x3, 0x2, 0x1, 0x0, 0xb, 0xa}; + +static DEFINE_MUTEX(pch_phub_mutex); + +/** + * pch_phub_read_modify_write_reg() - Reading modifying and writing register + * @reg_addr_offset: Register offset address value. + * @data: Writing value. + * @mask: Mask value. + */ +static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip, + unsigned int reg_addr_offset, + unsigned int data, unsigned int mask) +{ + void __iomem *reg_addr = chip->pch_phub_base_address + reg_addr_offset; + iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr); +} + +/* pch_phub_save_reg_conf - saves register configuration */ +static void pch_phub_save_reg_conf(struct pci_dev *pdev) +{ + unsigned int i; + struct pch_phub_reg *chip = pci_get_drvdata(pdev); + + void __iomem *p = chip->pch_phub_base_address; + + chip->phub_id_reg = ioread32(p + PCH_PHUB_ID_REG); + chip->q_pri_val_reg = ioread32(p + PCH_PHUB_QUEUE_PRI_VAL_REG); + chip->rc_q_maxsize_reg = ioread32(p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG); + chip->bri_q_maxsize_reg = ioread32(p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG); + chip->comp_resp_timeout_reg = + ioread32(p + PCH_PHUB_COMP_RESP_TIMEOUT_REG); + chip->bus_slave_control_reg = + ioread32(p + PCH_PHUB_BUS_SLAVE_CONTROL_REG); + chip->deadlock_avoid_type_reg = + ioread32(p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG); + chip->intpin_reg_wpermit_reg0 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0); + chip->intpin_reg_wpermit_reg1 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1); + chip->intpin_reg_wpermit_reg2 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2); + chip->intpin_reg_wpermit_reg3 = + ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3); + dev_dbg(&pdev->dev, "%s : " + "chip->phub_id_reg=%x, " + "chip->q_pri_val_reg=%x, " + "chip->rc_q_maxsize_reg=%x, " + "chip->bri_q_maxsize_reg=%x, " + "chip->comp_resp_timeout_reg=%x, " + "chip->bus_slave_control_reg=%x, " + "chip->deadlock_avoid_type_reg=%x, " + "chip->intpin_reg_wpermit_reg0=%x, " + "chip->intpin_reg_wpermit_reg1=%x, " + "chip->intpin_reg_wpermit_reg2=%x, " + "chip->intpin_reg_wpermit_reg3=%x\n", __func__, + chip->phub_id_reg, + chip->q_pri_val_reg, + chip->rc_q_maxsize_reg, + chip->bri_q_maxsize_reg, + chip->comp_resp_timeout_reg, + chip->bus_slave_control_reg, + chip->deadlock_avoid_type_reg, + chip->intpin_reg_wpermit_reg0, + chip->intpin_reg_wpermit_reg1, + chip->intpin_reg_wpermit_reg2, + chip->intpin_reg_wpermit_reg3); + for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) { + chip->int_reduce_control_reg[i] = + ioread32(p + 
PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i); + dev_dbg(&pdev->dev, "%s : " + "chip->int_reduce_control_reg[%d]=%x\n", + __func__, i, chip->int_reduce_control_reg[i]); + } + chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET); + if ((chip->ioh_type == 2) || (chip->ioh_type == 4)) + chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET); +} + +/* pch_phub_restore_reg_conf - restore register configuration */ +static void pch_phub_restore_reg_conf(struct pci_dev *pdev) +{ + unsigned int i; + struct pch_phub_reg *chip = pci_get_drvdata(pdev); + void __iomem *p; + p = chip->pch_phub_base_address; + + iowrite32(chip->phub_id_reg, p + PCH_PHUB_ID_REG); + iowrite32(chip->q_pri_val_reg, p + PCH_PHUB_QUEUE_PRI_VAL_REG); + iowrite32(chip->rc_q_maxsize_reg, p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG); + iowrite32(chip->bri_q_maxsize_reg, p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG); + iowrite32(chip->comp_resp_timeout_reg, + p + PCH_PHUB_COMP_RESP_TIMEOUT_REG); + iowrite32(chip->bus_slave_control_reg, + p + PCH_PHUB_BUS_SLAVE_CONTROL_REG); + iowrite32(chip->deadlock_avoid_type_reg, + p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG); + iowrite32(chip->intpin_reg_wpermit_reg0, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0); + iowrite32(chip->intpin_reg_wpermit_reg1, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1); + iowrite32(chip->intpin_reg_wpermit_reg2, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2); + iowrite32(chip->intpin_reg_wpermit_reg3, + p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3); + dev_dbg(&pdev->dev, "%s : " + "chip->phub_id_reg=%x, " + "chip->q_pri_val_reg=%x, " + "chip->rc_q_maxsize_reg=%x, " + "chip->bri_q_maxsize_reg=%x, " + "chip->comp_resp_timeout_reg=%x, " + "chip->bus_slave_control_reg=%x, " + "chip->deadlock_avoid_type_reg=%x, " + "chip->intpin_reg_wpermit_reg0=%x, " + "chip->intpin_reg_wpermit_reg1=%x, " + "chip->intpin_reg_wpermit_reg2=%x, " + "chip->intpin_reg_wpermit_reg3=%x\n", __func__, + chip->phub_id_reg, + chip->q_pri_val_reg, + chip->rc_q_maxsize_reg, + chip->bri_q_maxsize_reg, + chip->comp_resp_timeout_reg, + chip->bus_slave_control_reg, + chip->deadlock_avoid_type_reg, + chip->intpin_reg_wpermit_reg0, + chip->intpin_reg_wpermit_reg1, + chip->intpin_reg_wpermit_reg2, + chip->intpin_reg_wpermit_reg3); + for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) { + iowrite32(chip->int_reduce_control_reg[i], + p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i); + dev_dbg(&pdev->dev, "%s : " + "chip->int_reduce_control_reg[%d]=%x\n", + __func__, i, chip->int_reduce_control_reg[i]); + } + + iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET); + if ((chip->ioh_type == 2) || (chip->ioh_type == 4)) + iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET); +} + +/** + * pch_phub_read_serial_rom() - Reading Serial ROM + * @offset_address: Serial ROM offset address to read. + * @data: Read buffer for specified Serial ROM value. + */ +static void pch_phub_read_serial_rom(struct pch_phub_reg *chip, + unsigned int offset_address, u8 *data) +{ + void __iomem *mem_addr = chip->pch_phub_extrom_base_address + + offset_address; + + *data = ioread8(mem_addr); +} + +/** + * pch_phub_write_serial_rom() - Writing Serial ROM + * @offset_address: Serial ROM offset address. + * @data: Serial ROM value to write. 
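+ *
+ * The byte is written with a read-modify-write of the containing 32-bit
+ * word; e.g. a write to offset 0x403 rewrites the word at 0x400 with
+ * byte lane 3 replaced. Returns 0 on success, or -ETIMEDOUT if the ROM
+ * status register does not clear within PHUB_TIMEOUT polls (1 ms apart).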
+ */ +static int pch_phub_write_serial_rom(struct pch_phub_reg *chip, + unsigned int offset_address, u8 data) +{ + void __iomem *mem_addr = chip->pch_phub_extrom_base_address + + (offset_address & PCH_WORD_ADDR_MASK); + int i; + unsigned int word_data; + unsigned int pos; + unsigned int mask; + pos = (offset_address % 4) * 8; + mask = ~(0xFF << pos); + + iowrite32(PCH_PHUB_ROM_WRITE_ENABLE, + chip->pch_phub_extrom_base_address + PHUB_CONTROL); + + word_data = ioread32(mem_addr); + iowrite32((word_data & mask) | (u32)data << pos, mem_addr); + + i = 0; + while (ioread8(chip->pch_phub_extrom_base_address + + PHUB_STATUS) != 0x00) { + msleep(1); + if (i == PHUB_TIMEOUT) + return -ETIMEDOUT; + i++; + } + + iowrite32(PCH_PHUB_ROM_WRITE_DISABLE, + chip->pch_phub_extrom_base_address + PHUB_CONTROL); + + return 0; +} + +/** + * pch_phub_read_serial_rom_val() - Read Serial ROM value + * @offset_address: Serial ROM address offset value. + * @data: Serial ROM value to read. + */ +static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip, + unsigned int offset_address, u8 *data) +{ + unsigned int mem_addr; + + mem_addr = chip->pch_mac_start_address + + pch_phub_mac_offset[offset_address]; + + pch_phub_read_serial_rom(chip, mem_addr, data); +} + +/** + * pch_phub_write_serial_rom_val() - writing Serial ROM value + * @offset_address: Serial ROM address offset value. + * @data: Serial ROM value. + */ +static int pch_phub_write_serial_rom_val(struct pch_phub_reg *chip, + unsigned int offset_address, u8 data) +{ + int retval; + unsigned int mem_addr; + + mem_addr = chip->pch_mac_start_address + + pch_phub_mac_offset[offset_address]; + + retval = pch_phub_write_serial_rom(chip, mem_addr, data); + + return retval; +} + +/* pch_phub_gbe_serial_rom_conf - makes Serial ROM header format configuration + * for Gigabit Ethernet MAC address + */ +static int pch_phub_gbe_serial_rom_conf(struct pch_phub_reg *chip) +{ + int retval; + + retval = pch_phub_write_serial_rom(chip, 0x0b, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x0a, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x09, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x08, 0x02); + + retval |= pch_phub_write_serial_rom(chip, 0x0f, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x0e, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x0d, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x0c, 0x80); + + retval |= pch_phub_write_serial_rom(chip, 0x13, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x12, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x11, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x10, 0x18); + + retval |= pch_phub_write_serial_rom(chip, 0x1b, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x1a, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x19, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x18, 0x19); + + retval |= pch_phub_write_serial_rom(chip, 0x23, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x22, 0x10); + retval |= pch_phub_write_serial_rom(chip, 0x21, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x20, 0x3a); + + retval |= pch_phub_write_serial_rom(chip, 0x27, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x26, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x25, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x24, 0x00); + + return retval; +} + +/* pch_phub_gbe_serial_rom_conf_mp - makes SerialROM header format configuration + * for Gigabit Ethernet MAC address + */ +static int pch_phub_gbe_serial_rom_conf_mp(struct pch_phub_reg *chip) +{ + int 
retval; + u32 offset_addr; + + offset_addr = 0x200; + retval = pch_phub_write_serial_rom(chip, 0x03 + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x02 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x01 + offset_addr, 0x40); + retval |= pch_phub_write_serial_rom(chip, 0x00 + offset_addr, 0x02); + + retval |= pch_phub_write_serial_rom(chip, 0x07 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x06 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x05 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x04 + offset_addr, 0x80); + + retval |= pch_phub_write_serial_rom(chip, 0x0b + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x0a + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x09 + offset_addr, 0x40); + retval |= pch_phub_write_serial_rom(chip, 0x08 + offset_addr, 0x18); + + retval |= pch_phub_write_serial_rom(chip, 0x13 + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x12 + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x11 + offset_addr, 0x40); + retval |= pch_phub_write_serial_rom(chip, 0x10 + offset_addr, 0x19); + + retval |= pch_phub_write_serial_rom(chip, 0x1b + offset_addr, 0xbc); + retval |= pch_phub_write_serial_rom(chip, 0x1a + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x19 + offset_addr, 0x40); + retval |= pch_phub_write_serial_rom(chip, 0x18 + offset_addr, 0x3a); + + retval |= pch_phub_write_serial_rom(chip, 0x1f + offset_addr, 0x01); + retval |= pch_phub_write_serial_rom(chip, 0x1e + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x1d + offset_addr, 0x00); + retval |= pch_phub_write_serial_rom(chip, 0x1c + offset_addr, 0x00); + + return retval; +} + +/** + * pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address + * @offset_address: Gigabit Ethernet MAC address offset value. + * @data: Buffer of the Gigabit Ethernet MAC address value. + */ +static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data) +{ + int i; + for (i = 0; i < ETH_ALEN; i++) + pch_phub_read_serial_rom_val(chip, i, &data[i]); +} + +/** + * pch_phub_write_gbe_mac_addr() - Write MAC address + * @offset_address: Gigabit Ethernet MAC address offset value. + * @data: Gigabit Ethernet MAC address value. 
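+ *
+ * The serial ROM header layout for the IOH type is written first, then
+ * the six MAC bytes (remapped through pch_phub_mac_offset[]). Returns 0
+ * on success or a negative errno from the underlying serial ROM writes.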
+ */
+static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
+{
+	int retval;
+	int i;
+
+	if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
+		retval = pch_phub_gbe_serial_rom_conf(chip);
+	else	/* ML7223 */
+		retval = pch_phub_gbe_serial_rom_conf_mp(chip);
+	if (retval)
+		return retval;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		retval = pch_phub_write_serial_rom_val(chip, i, data[i]);
+		if (retval)
+			return retval;
+	}
+
+	return retval;
+}
+
+static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
+				 struct bin_attribute *attr, char *buf,
+				 loff_t off, size_t count)
+{
+	unsigned int rom_signature;
+	unsigned char rom_length;
+	unsigned int tmp;
+	unsigned int addr_offset;
+	unsigned int orom_size;
+	int ret;
+	int err;
+	ssize_t rom_size;
+
+	struct pch_phub_reg *chip =
+		dev_get_drvdata(container_of(kobj, struct device, kobj));
+
+	ret = mutex_lock_interruptible(&pch_phub_mutex);
+	if (ret) {
+		err = -ERESTARTSYS;
+		goto return_err_nomutex;
+	}
+
+	/* Get ROM signature */
+	chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+	if (!chip->pch_phub_extrom_base_address) {
+		err = -ENOMEM;
+		goto exrom_map_err;
+	}
+
+	pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
+				(unsigned char *)&rom_signature);
+	rom_signature &= 0xff;
+	pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address + 1,
+				(unsigned char *)&tmp);
+	rom_signature |= (tmp & 0xff) << 8;
+	if (rom_signature == 0xAA55) {
+		pch_phub_read_serial_rom(chip,
+					 chip->pch_opt_rom_start_address + 2,
+					 &rom_length);
+		orom_size = rom_length * 512;
+		if (orom_size < off) {
+			addr_offset = 0;
+			goto return_ok;
+		}
+		if (orom_size < count) {
+			addr_offset = 0;
+			goto return_ok;
+		}
+
+		for (addr_offset = 0; addr_offset < count; addr_offset++) {
+			pch_phub_read_serial_rom(chip,
+			    chip->pch_opt_rom_start_address + addr_offset + off,
+			    &buf[addr_offset]);
+		}
+	} else {
+		err = -ENODATA;
+		goto return_err;
+	}
+return_ok:
+	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+	mutex_unlock(&pch_phub_mutex);
+	return addr_offset;
+
+return_err:
+	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+exrom_map_err:
+	mutex_unlock(&pch_phub_mutex);
+return_err_nomutex:
+	return err;
+}
+
+static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
+				  struct bin_attribute *attr,
+				  char *buf, loff_t off, size_t count)
+{
+	int err;
+	unsigned int addr_offset;
+	int ret;
+	ssize_t rom_size;
+	struct pch_phub_reg *chip =
+		dev_get_drvdata(container_of(kobj, struct device, kobj));
+
+	ret = mutex_lock_interruptible(&pch_phub_mutex);
+	if (ret)
+		return -ERESTARTSYS;
+
+	if (off > PCH_PHUB_OROM_SIZE) {
+		addr_offset = 0;
+		goto return_ok;
+	}
+	if (count > PCH_PHUB_OROM_SIZE) {
+		addr_offset = 0;
+		goto return_ok;
+	}
+
+	chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+	if (!chip->pch_phub_extrom_base_address) {
+		err = -ENOMEM;
+		goto exrom_map_err;
+	}
+
+	for (addr_offset = 0; addr_offset < count; addr_offset++) {
+		if (PCH_PHUB_OROM_SIZE < off + addr_offset)
+			goto return_ok;
+
+		ret = pch_phub_write_serial_rom(chip,
+			    chip->pch_opt_rom_start_address + addr_offset + off,
+			    buf[addr_offset]);
+		if (ret) {
+			err = ret;
+			goto return_err;
+		}
+	}
+
+return_ok:
+	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+	mutex_unlock(&pch_phub_mutex);
+	return addr_offset;
+
+return_err:
+	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+exrom_map_err:
+	mutex_unlock(&pch_phub_mutex);
+	return err;
+}
+
+static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	u8 mac[8];
+	struct pch_phub_reg *chip = dev_get_drvdata(dev);
+	ssize_t rom_size;
+
+	chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+	if (!chip->pch_phub_extrom_base_address)
+		return -ENOMEM;
+
+	pch_phub_read_gbe_mac_addr(chip, mac);
+	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+	return sprintf(buf, "%pM\n", mac);
+}
+
+static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	u8 mac[ETH_ALEN];
+	ssize_t rom_size;
+	struct pch_phub_reg *chip = dev_get_drvdata(dev);
+	int ret;
+
+	if (!mac_pton(buf, mac))
+		return -EINVAL;
+
+	chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+	if (!chip->pch_phub_extrom_base_address)
+		return -ENOMEM;
+
+	ret = pch_phub_write_gbe_mac_addr(chip, mac);
+	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static DEVICE_ATTR(pch_mac, S_IRUGO | S_IWUSR, show_pch_mac, store_pch_mac);
+
+static struct bin_attribute pch_bin_attr = {
+	.attr = {
+		.name = "pch_firmware",
+		.mode = S_IRUGO | S_IWUSR,
+	},
+	.size = PCH_PHUB_OROM_SIZE + 1,
+	.read = pch_phub_bin_read,
+	.write = pch_phub_bin_write,
+};
+
+static int pch_phub_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *id)
+{
+	int ret;
+	struct pch_phub_reg *chip;
+
+	chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
+	if (chip == NULL)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s : pci_enable_device FAILED(ret=%d)", __func__, ret);
+		goto err_pci_enable_dev;
+	}
+	dev_dbg(&pdev->dev, "%s : pci_enable_device returns %d\n", __func__,
+		ret);
+
+	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s : pci_request_regions FAILED(ret=%d)", __func__, ret);
+		goto err_req_regions;
+	}
+	dev_dbg(&pdev->dev, "%s : "
+		"pci_request_regions returns %d\n", __func__, ret);
+
+	chip->pch_phub_base_address = pci_iomap(pdev, 1, 0);
+
+	if (chip->pch_phub_base_address == NULL) {
+		dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
+		ret = -ENOMEM;
+		goto err_pci_iomap;
+	}
+	dev_dbg(&pdev->dev, "%s : pci_iomap SUCCESS and value "
+		"in pch_phub_base_address variable is %p\n", __func__,
+		chip->pch_phub_base_address);
+
+	chip->pdev = pdev; /* Save pci device struct */
+
+	if (id->driver_data == 1) { /* EG20T PCH */
+		const char *board_name;
+
+		ret = sysfs_create_file(&pdev->dev.kobj,
+					&dev_attr_pch_mac.attr);
+		if (ret)
+			goto err_sysfs_create;
+
+		ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+		if (ret)
+			goto exit_bin_attr;
+
+		pch_phub_read_modify_write_reg(chip,
+					       (unsigned int)CLKCFG_REG_OFFSET,
+					       CLKCFG_CAN_50MHZ,
+					       CLKCFG_CANCLK_MASK);
+
+		/* quirk for CM-iTC board */
+		board_name = dmi_get_system_info(DMI_BOARD_NAME);
+		if (board_name && strstr(board_name, "CM-iTC"))
+			pch_phub_read_modify_write_reg(chip,
+					(unsigned int)CLKCFG_REG_OFFSET,
+					CLKCFG_UART_48MHZ | CLKCFG_BAUDDIV |
+					CLKCFG_PLL2VCO | CLKCFG_UARTCLKSEL,
+					CLKCFG_UART_MASK);
+
+		/* set the prefetch value */
+		iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+		/* set the interrupt delay value */
+		iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+		chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
+	} else if (id->driver_data == 2) { /* ML7213 IOH */
+		ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+		if (ret)
+			goto err_sysfs_create;
+		/* set the prefetch value
+		 * Device2(USB OHCI #1/ USB EHCI #1/ USB Device):a
+		 * Device4(SDIO #0,1,2):f
+		 * Device6(SATA 2):f
+		 * Device8(USB OHCI #0/ USB EHCI #0):a
+		 */
+		iowrite32(0x000affa0, chip->pch_phub_base_address + 0x14);
+		chip->pch_opt_rom_start_address =
+						PCH_PHUB_ROM_START_ADDR_ML7213;
+	} else if (id->driver_data == 3) { /* ML7223 IOH Bus-m*/
+		/* set the prefetch value
+		 * Device8(GbE)
+		 */
+		iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14);
+		/* set the interrupt delay value */
+		iowrite32(0x25, chip->pch_phub_base_address + 0x140);
+		chip->pch_opt_rom_start_address =
+						PCH_PHUB_ROM_START_ADDR_ML7223;
+		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+	} else if (id->driver_data == 4) { /* ML7223 IOH Bus-n*/
+		ret = sysfs_create_file(&pdev->dev.kobj,
+					&dev_attr_pch_mac.attr);
+		if (ret)
+			goto err_sysfs_create;
+		ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+		if (ret)
+			goto exit_bin_attr;
+		/* set the prefetch value
+		 * Device2(USB OHCI #0,1,2,3/ USB EHCI #0):a
+		 * Device4(SDIO #0,1):f
+		 * Device6(SATA 2):f
+		 */
+		iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14);
+		chip->pch_opt_rom_start_address =
+						PCH_PHUB_ROM_START_ADDR_ML7223;
+		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+	} else if (id->driver_data == 5) { /* ML7831 */
+		ret = sysfs_create_file(&pdev->dev.kobj,
+					&dev_attr_pch_mac.attr);
+		if (ret)
+			goto err_sysfs_create;
+
+		ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+		if (ret)
+			goto exit_bin_attr;
+
+		/* set the prefetch value */
+		iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+		/* set the interrupt delay value */
+		iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+		chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+		chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
+	}
+
+	chip->ioh_type = id->driver_data;
+	pci_set_drvdata(pdev, chip);
+
+	return 0;
+exit_bin_attr:
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
+
+err_sysfs_create:
+	pci_iounmap(pdev, chip->pch_phub_base_address);
+err_pci_iomap:
+	pci_release_regions(pdev);
+err_req_regions:
+	pci_disable_device(pdev);
+err_pci_enable_dev:
+	kfree(chip);
+	dev_err(&pdev->dev, "%s returns %d\n", __func__, ret);
+	return ret;
+}
+
+static void pch_phub_remove(struct pci_dev *pdev)
+{
+	struct pch_phub_reg *chip = pci_get_drvdata(pdev);
+
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
+	sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+	pci_iounmap(pdev, chip->pch_phub_base_address);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	kfree(chip);
+}
+
+#ifdef CONFIG_PM
+
+static int pch_phub_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int ret;
+
+	pch_phub_save_reg_conf(pdev);
+	ret = pci_save_state(pdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			" %s -pci_save_state returns %d\n", __func__, ret);
+		return ret;
+	}
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int pch_phub_resume(struct pci_dev *pdev)
+{
+	int ret;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s-pci_enable_device failed(ret=%d) ", __func__, ret);
+		return ret;
+	}
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pch_phub_restore_reg_conf(pdev);
+
+	return 0;
+}
+#else
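+/* Without CONFIG_PM the PCI core is given no suspend/resume handlers. */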
+#define pch_phub_suspend NULL +#define pch_phub_resume NULL +#endif /* CONFIG_PM */ + +static struct pci_device_id pch_phub_pcidev_id[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, }, + { } +}; +MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); + +static struct pci_driver pch_phub_driver = { + .name = "pch_phub", + .id_table = pch_phub_pcidev_id, + .probe = pch_phub_probe, + .remove = pch_phub_remove, + .suspend = pch_phub_suspend, + .resume = pch_phub_resume +}; + +module_pci_driver(pch_phub_driver); + +MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 04c27266f56..30754927fd8 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -21,11 +21,12 @@ #include <linux/poll.h> #include <linux/interrupt.h> #include <linux/cdev.h> +#include <linux/slab.h> #include <linux/phantom.h> #include <linux/sched.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/io.h> #define PHANTOM_VERSION "n0.9.8" @@ -37,6 +38,7 @@ #define PHB_RUNNING 1 #define PHB_NOT_OH 2 +static DEFINE_MUTEX(phantom_mutex); static struct class *phantom_class; static int phantom_major; @@ -214,17 +216,17 @@ static int phantom_open(struct inode *inode, struct file *file) struct phantom_device *dev = container_of(inode->i_cdev, struct phantom_device, cdev); - lock_kernel(); + mutex_lock(&phantom_mutex); nonseekable_open(inode, file); if (mutex_lock_interruptible(&dev->open_lock)) { - unlock_kernel(); + mutex_unlock(&phantom_mutex); return -ERESTARTSYS; } if (dev->opened) { mutex_unlock(&dev->open_lock); - unlock_kernel(); + mutex_unlock(&phantom_mutex); return -EINVAL; } @@ -235,7 +237,7 @@ static int phantom_open(struct inode *inode, struct file *file) atomic_set(&dev->counter, 0); dev->opened++; mutex_unlock(&dev->open_lock); - unlock_kernel(); + mutex_unlock(&phantom_mutex); return 0; } @@ -278,6 +280,7 @@ static const struct file_operations phantom_file_ops = { .unlocked_ioctl = phantom_ioctl, .compat_ioctl = phantom_compat_ioctl, .poll = phantom_poll, + .llseek = no_llseek, }; static irqreturn_t phantom_isr(int irq, void *data) @@ -321,7 +324,7 @@ static irqreturn_t phantom_isr(int irq, void *data) * Init and deinit driver */ -static unsigned int __devinit phantom_get_free(void) +static unsigned int phantom_get_free(void) { unsigned int i; @@ -332,7 +335,7 @@ static unsigned int __devinit phantom_get_free(void) return i; } -static int __devinit phantom_probe(struct pci_dev *pdev, +static int phantom_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct phantom_device *pht; @@ -340,8 +343,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev, int retval; retval = pci_enable_device(pdev); - if (retval) + if (retval) { + dev_err(&pdev->dev, "pci_enable_device failed!\n"); goto err; + } minor = phantom_get_free(); if (minor == PHANTOM_MAX_MINORS) { @@ -353,8 +358,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev, phantom_devices[minor] = 1; retval = pci_request_regions(pdev, "phantom"); - if (retval) + if (retval) { + dev_err(&pdev->dev, "pci_request_regions failed!\n"); goto err_null; + } retval = -ENOMEM; pht = kzalloc(sizeof(*pht), 
 					GFP_KERNEL);
@@ -388,7 +395,7 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
 	iowrite32(0, pht->caddr + PHN_IRQCTL);
 	ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
 	retval = request_irq(pdev->irq, phantom_isr,
-			IRQF_SHARED | IRQF_DISABLED, "phantom", pht);
+			IRQF_SHARED, "phantom", pht);
 	if (retval) {
 		dev_err(&pdev->dev, "can't establish ISR\n");
 		goto err_unmo;
@@ -428,7 +435,7 @@ err:
 	return retval;
 }
 
-static void __devexit phantom_remove(struct pci_dev *pdev)
+static void phantom_remove(struct pci_dev *pdev)
 {
 	struct phantom_device *pht = pci_get_drvdata(pdev);
 	unsigned int minor = MINOR(pht->cdev.dev);
@@ -480,7 +487,7 @@ static int phantom_resume(struct pci_dev *pdev)
 #define phantom_resume NULL
 #endif
 
-static struct pci_device_id phantom_pci_tbl[] __devinitdata = {
+static struct pci_device_id phantom_pci_tbl[] = {
 	{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
 	  .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050,
 	  .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
@@ -492,17 +499,12 @@ static struct pci_driver phantom_pci_driver = {
 	.name = "phantom",
 	.id_table = phantom_pci_tbl,
 	.probe = phantom_probe,
-	.remove = __devexit_p(phantom_remove),
+	.remove = phantom_remove,
 	.suspend = phantom_suspend,
 	.resume = phantom_resume
 };
 
-static ssize_t phantom_show_version(struct class *cls, char *buf)
-{
-	return sprintf(buf, PHANTOM_VERSION "\n");
-}
-
-static CLASS_ATTR(version, 0444, phantom_show_version, NULL);
+static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION);
 
 static int __init phantom_init(void)
 {
@@ -515,7 +517,7 @@ static int __init phantom_init(void)
 		printk(KERN_ERR "phantom: can't register phantom class\n");
 		goto err;
 	}
-	retval = class_create_file(phantom_class, &class_attr_version);
+	retval = class_create_file(phantom_class, &class_attr_version.attr);
 	if (retval) {
 		printk(KERN_ERR "phantom: can't create sysfs version file\n");
 		goto err_class;
@@ -541,7 +543,7 @@ static int __init phantom_init(void)
 err_unchr:
 	unregister_chrdev_region(dev, PHANTOM_MAX_MINORS);
 err_attr:
-	class_remove_file(phantom_class, &class_attr_version);
+	class_remove_file(phantom_class, &class_attr_version.attr);
 err_class:
 	class_destroy(phantom_class);
 err:
@@ -554,7 +556,7 @@ static void __exit phantom_exit(void)
 	unregister_chrdev_region(MKDEV(phantom_major, 0),
 			PHANTOM_MAX_MINORS);
 
-	class_remove_file(phantom_class, &class_attr_version);
+	class_remove_file(phantom_class, &class_attr_version.attr);
 	class_destroy(phantom_class);
 
 	pr_debug("phantom: module successfully removed\n");
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
new file mode 100644
index 00000000000..eda38cbe853
--- /dev/null
+++ b/drivers/misc/pti.c
@@ -0,0 +1,988 @@
+/*
+ * pti.c - PTI driver for cJTAG data extraction
+ *
+ * Copyright (C) Intel 2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The PTI (Parallel Trace Interface) driver directs trace data routed from + * various parts in the system out through the Intel Penwell PTI port and + * out of the mobile device for analysis with a debugging tool + * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, + * compact JTAG, standard. + */ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/console.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/pci.h> +#include <linux/mutex.h> +#include <linux/miscdevice.h> +#include <linux/pti.h> +#include <linux/slab.h> +#include <linux/uaccess.h> + +#define DRIVERNAME "pti" +#define PCINAME "pciPTI" +#define TTYNAME "ttyPTI" +#define CHARNAME "pti" +#define PTITTY_MINOR_START 0 +#define PTITTY_MINOR_NUM 2 +#define MAX_APP_IDS 16 /* 128 channel ids / u8 bit size */ +#define MAX_OS_IDS 16 /* 128 channel ids / u8 bit size */ +#define MAX_MODEM_IDS 16 /* 128 channel ids / u8 bit size */ +#define MODEM_BASE_ID 71 /* modem master ID address */ +#define CONTROL_ID 72 /* control master ID address */ +#define CONSOLE_ID 73 /* console master ID address */ +#define OS_BASE_ID 74 /* base OS master ID address */ +#define APP_BASE_ID 80 /* base App master ID address */ +#define CONTROL_FRAME_LEN 32 /* PTI control frame maximum size */ +#define USER_COPY_SIZE 8192 /* 8Kb buffer for user space copy */ +#define APERTURE_14 0x3800000 /* offset to first OS write addr */ +#define APERTURE_LEN 0x400000 /* address length */ + +struct pti_tty { + struct pti_masterchannel *mc; +}; + +struct pti_dev { + struct tty_port port[PTITTY_MINOR_NUM]; + unsigned long pti_addr; + unsigned long aperture_base; + void __iomem *pti_ioaddr; + u8 ia_app[MAX_APP_IDS]; + u8 ia_os[MAX_OS_IDS]; + u8 ia_modem[MAX_MODEM_IDS]; +}; + +/* + * This protects access to ia_app, ia_os, and ia_modem, + * which keeps track of channels allocated in + * an aperture write id. + */ +static DEFINE_MUTEX(alloclock); + +static const struct pci_device_id pci_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)}, + {0} +}; + +static struct tty_driver *pti_tty_driver; +static struct pti_dev *drv_data; + +static unsigned int pti_console_channel; +static unsigned int pti_control_channel; + +/** + * pti_write_to_aperture()- The private write function to PTI HW. + * + * @mc: The 'aperture'. It's part of a write address that holds + * a master and channel ID. + * @buf: Data being written to the HW that will ultimately be seen + * in a debugging tool (Fido, Lauterbach). + * @len: Size of buffer. + * + * Since each aperture is specified by a unique + * master/channel ID, no two processes will be writing + * to the same aperture at the same time so no lock is required. The + * PTI-Output agent will send these out in the order that they arrived, and + * thus, it will intermix these messages. The debug tool can then later + * regroup the appropriate message segments together reconstituting each + * message. + */ +static void pti_write_to_aperture(struct pti_masterchannel *mc, + u8 *buf, + int len) +{ + int dwordcnt; + int final; + int i; + u32 ptiword; + u32 __iomem *aperture; + u8 *p = buf; + + /* + * calculate the aperture offset from the base using the master and + * channel id's. 
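	 * Each master ID selects a 32KB window (master << 15) in the
	 * aperture and each channel a 256 byte sub-window (channel << 8)
	 * within it; e.g. master 74, channel 1 writes at offset
	 * (74 << 15) + (1 << 8) = 0x250100.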
+ */ + aperture = drv_data->pti_ioaddr + (mc->master << 15) + + (mc->channel << 8); + + dwordcnt = len >> 2; + final = len - (dwordcnt << 2); /* final = trailing bytes */ + if (final == 0 && dwordcnt != 0) { /* always need a final dword */ + final += 4; + dwordcnt--; + } + + for (i = 0; i < dwordcnt; i++) { + ptiword = be32_to_cpu(*(u32 *)p); + p += 4; + iowrite32(ptiword, aperture); + } + + aperture += PTI_LASTDWORD_DTS; /* adding DTS signals that is EOM */ + + ptiword = 0; + for (i = 0; i < final; i++) + ptiword |= *p++ << (24-(8*i)); + + iowrite32(ptiword, aperture); + return; +} + +/** + * pti_control_frame_built_and_sent()- control frame build and send function. + * + * @mc: The master / channel structure on which the function + * built a control frame. + * @thread_name: The thread name associated with the master / channel or + * 'NULL' if using the 'current' global variable. + * + * To be able to post process the PTI contents on host side, a control frame + * is added before sending any PTI content. So the host side knows on + * each PTI frame the name of the thread using a dedicated master / channel. + * The thread name is retrieved from 'current' global variable if 'thread_name' + * is 'NULL', else it is retrieved from 'thread_name' parameter. + * This function builds this frame and sends it to a master ID CONTROL_ID. + * The overhead is only 32 bytes since the driver only writes to HW + * in 32 byte chunks. + */ +static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, + const char *thread_name) +{ + /* + * Since we access the comm member in current's task_struct, we only + * need to be as large as what 'comm' in that structure is. + */ + char comm[TASK_COMM_LEN]; + struct pti_masterchannel mccontrol = {.master = CONTROL_ID, + .channel = 0}; + const char *thread_name_p; + const char *control_format = "%3d %3d %s"; + u8 control_frame[CONTROL_FRAME_LEN]; + + if (!thread_name) { + if (!in_interrupt()) + get_task_comm(comm, current); + else + strncpy(comm, "Interrupt", TASK_COMM_LEN); + + /* Absolutely ensure our buffer is zero terminated. */ + comm[TASK_COMM_LEN-1] = 0; + thread_name_p = comm; + } else { + thread_name_p = thread_name; + } + + mccontrol.channel = pti_control_channel; + pti_control_channel = (pti_control_channel + 1) & 0x7f; + + snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master, + mc->channel, thread_name_p); + pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame)); +} + +/** + * pti_write_full_frame_to_aperture()- high level function to + * write to PTI. + * + * @mc: The 'aperture'. It's part of a write address that holds + * a master and channel ID. + * @buf: Data being written to the HW that will ultimately be seen + * in a debugging tool (Fido, Lauterbach). + * @len: Size of buffer. + * + * All threads sending data (either console, user space application, ...) + * are calling the high level function to write to PTI meaning that it is + * possible to add a control frame before sending the content. + */ +static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc, + const unsigned char *buf, + int len) +{ + pti_control_frame_built_and_sent(mc, NULL); + pti_write_to_aperture(mc, (u8 *)buf, len); +} + +/** + * get_id()- Allocate a master and channel ID. + * + * @id_array: an array of bits representing what channel + * id's are allocated for writing. + * @max_ids: The max amount of available write IDs to use. + * @base_id: The starting SW channel ID, based on the Intel + * PTI arch. 
+ * @thread_name: The thread name associated with the master / channel or
+ *		 'NULL' if using the 'current' global variable.
+ *
+ * Returns:
+ *	pti_masterchannel struct with master, channel ID address
+ *	NULL for error
+ *
+ * Each bit in the arrays ia_app and ia_os corresponds to a master and
+ * channel id. The bit is one if the id is taken and 0 if free. For
+ * every master there are 128 channel id's.
+ */
+static struct pti_masterchannel *get_id(u8 *id_array,
+					int max_ids,
+					int base_id,
+					const char *thread_name)
+{
+	struct pti_masterchannel *mc;
+	int i, j, mask;
+
+	mc = kmalloc(sizeof(struct pti_masterchannel), GFP_KERNEL);
+	if (mc == NULL)
+		return NULL;
+
+	/* look for a byte with a free bit */
+	for (i = 0; i < max_ids; i++)
+		if (id_array[i] != 0xff)
+			break;
+	if (i == max_ids) {
+		kfree(mc);
+		return NULL;
+	}
+	/* find the bit in the 128 possible channel opportunities */
+	mask = 0x80;
+	for (j = 0; j < 8; j++) {
+		if ((id_array[i] & mask) == 0)
+			break;
+		mask >>= 1;
+	}
+
+	/* grab it */
+	id_array[i] |= mask;
+	mc->master = base_id;
+	mc->channel = ((i & 0xf)<<3) + j;
+	/* write new master Id / channel Id allocation to channel control */
+	pti_control_frame_built_and_sent(mc, thread_name);
+	return mc;
+}
+
+/*
+ * The following three functions:
+ * pti_request_masterchannel(), pti_release_masterchannel()
+ * and pti_writedata() are an API for other kernel drivers to
+ * access PTI.
+ */
+
+/**
+ * pti_request_masterchannel()- Kernel API function used to allocate
+ *				a master, channel ID address
+ *				to write to PTI HW.
+ *
+ * @type:		0- request Application master, channel aperture ID
+ *			   write address.
+ *			1- request OS master, channel aperture ID write
+ *			   address.
+ *			2- request Modem master, channel aperture ID
+ *			   write address.
+ *			Other values, error.
+ * @thread_name:	The thread name associated with the master / channel or
+ *			'NULL' if using the 'current' global variable.
+ *
+ * Returns:
+ *	pti_masterchannel struct
+ *	NULL for error
+ */
+struct pti_masterchannel *pti_request_masterchannel(u8 type,
+						    const char *thread_name)
+{
+	struct pti_masterchannel *mc;
+
+	mutex_lock(&alloclock);
+
+	switch (type) {
+
+	case 0:
+		mc = get_id(drv_data->ia_app, MAX_APP_IDS,
+			    APP_BASE_ID, thread_name);
+		break;
+
+	case 1:
+		mc = get_id(drv_data->ia_os, MAX_OS_IDS,
+			    OS_BASE_ID, thread_name);
+		break;
+
+	case 2:
+		mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS,
+			    MODEM_BASE_ID, thread_name);
+		break;
+	default:
+		mc = NULL;
+	}
+
+	mutex_unlock(&alloclock);
+	return mc;
+}
+EXPORT_SYMBOL_GPL(pti_request_masterchannel);
+
+/**
+ * pti_release_masterchannel()- Kernel API function used to release
+ *				a master, channel ID address
+ *				used to write to PTI HW.
+ *
+ * @mc:	master, channel aperture ID address to be released. This
+ *	will de-allocate the structure via kfree().
+ */
+void pti_release_masterchannel(struct pti_masterchannel *mc)
+{
+	u8 master, channel, i;
+
+	mutex_lock(&alloclock);
+
+	if (mc) {
+		master = mc->master;
+		channel = mc->channel;
+
+		if (master == APP_BASE_ID) {
+			i = channel >> 3;
+			drv_data->ia_app[i] &= ~(0x80>>(channel & 0x7));
+		} else if (master == OS_BASE_ID) {
+			i = channel >> 3;
+			drv_data->ia_os[i] &= ~(0x80>>(channel & 0x7));
+		} else {
+			i = channel >> 3;
+			drv_data->ia_modem[i] &= ~(0x80>>(channel & 0x7));
+		}
+
+		kfree(mc);
+	}
+
+	mutex_unlock(&alloclock);
+}
+EXPORT_SYMBOL_GPL(pti_release_masterchannel);
+
+/**
+ * pti_writedata()- Kernel API function used to write trace
+ *		    debugging data to PTI HW.
+ *
+ * @mc: Master, channel aperture ID address to write to.
+ *      Null value will return with no write occurring.
+ * @buf: Trace debugging data to write to the PTI HW.
+ *       Null value will return with no write occurring.
+ * @count: Size of buf. Value of 0 or a negative number will
+ *	   return with no write occurring.
+ */
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count)
+{
+	/*
+	 * since this function is exported, this is treated like an
+	 * API function, thus, all parameters should
+	 * be checked for validity.
+	 */
+	if ((mc != NULL) && (buf != NULL) && (count > 0))
+		pti_write_to_aperture(mc, buf, count);
+	return;
+}
+EXPORT_SYMBOL_GPL(pti_writedata);
+
+/*
+ * for the tty_driver_*() basic function descriptions, see tty_driver.h.
+ * The header comments here cover only the PTI-specific behavior.
+ */
+
+/**
+ * pti_tty_driver_open()- Open an Application master, channel aperture
+ * ID to the PTI device via tty device.
+ *
+ * @tty: tty interface.
+ * @filp: filp interface passed to tty_port_open() call.
+ *
+ * Returns:
+ *	int, 0 for success
+ *	otherwise, fail value
+ *
+ * The main purpose of using the tty device interface is for
+ * each tty port to have a unique PTI write aperture. In an
+ * example use case, ttyPTI0 gets syslogd and an APP aperture
+ * ID and ttyPTI1 is where the n_tracesink ldisc hooks to route
+ * modem messages into PTI. Modem trace data does not have to
+ * go to ttyPTI1, but ttyPTI0 and ttyPTI1 do need to be distinct
+ * master IDs. These messages go through the PTI HW and out of
+ * the handheld platform and to the Fido/Lauterbach device.
+ */
+static int pti_tty_driver_open(struct tty_struct *tty, struct file *filp)
+{
+	/*
+	 * we actually want to allocate a new channel per open, per
+	 * system arch. HW gives more than plenty channels for a single
+	 * system task to have its own channel to write trace data. This
+	 * also removes a locking requirement for the actual write
+	 * procedure.
+	 */
+	return tty_port_open(tty->port, tty, filp);
+}
+
+/**
+ * pti_tty_driver_close()- close tty device and release Application
+ * master, channel aperture ID to the PTI device via tty device.
+ *
+ * @tty: tty interface.
+ * @filp: filp interface passed to tty_port_close() call.
+ *
+ * The main purpose of using the tty device interface is to route
+ * syslog daemon messages to the PTI HW and out of the handheld platform
+ * and to the Fido/Lauterbach device.
+ */
+static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp)
+{
+	tty_port_close(tty->port, tty, filp);
+}
+
+/**
+ * pti_tty_install()- Used to set up specific master-channels
+ *		      to tty ports for organizational purposes when
+ *		      tracing viewed from debugging tools.
+ *
+ * @driver: tty driver information.
+ * @tty: tty struct containing pti information.
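+ *
+ * Minor PTITTY_MINOR_START is given an Application master/channel and
+ * any other minor a Modem master/channel, via the type 0 and type 2
+ * codes of pti_request_masterchannel() in the body below.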
+ *
+ * Returns:
+ *	0 for success
+ *	otherwise, error
+ */
+static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	int idx = tty->index;
+	struct pti_tty *pti_tty_data;
+	int ret = tty_standard_install(driver, tty);
+
+	if (ret == 0) {
+		pti_tty_data = kmalloc(sizeof(struct pti_tty), GFP_KERNEL);
+		if (pti_tty_data == NULL)
+			return -ENOMEM;
+
+		if (idx == PTITTY_MINOR_START)
+			pti_tty_data->mc = pti_request_masterchannel(0, NULL);
+		else
+			pti_tty_data->mc = pti_request_masterchannel(2, NULL);
+
+		if (pti_tty_data->mc == NULL) {
+			kfree(pti_tty_data);
+			return -ENXIO;
+		}
+		tty->driver_data = pti_tty_data;
+	}
+
+	return ret;
+}
+
+/**
+ * pti_tty_cleanup()- Used to de-allocate master-channel resources
+ *		      tied to tty's of this driver.
+ *
+ * @tty: tty struct containing pti information.
+ */
+static void pti_tty_cleanup(struct tty_struct *tty)
+{
+	struct pti_tty *pti_tty_data = tty->driver_data;
+	if (pti_tty_data == NULL)
+		return;
+	pti_release_masterchannel(pti_tty_data->mc);
+	kfree(pti_tty_data);
+	tty->driver_data = NULL;
+}
+
+/**
+ * pti_tty_driver_write()- Write trace debugging data through the tty
+ * interface to the PTI HW.
+ *
+ * @tty: tty struct whose driver_data holds the master, channel
+ *	 write ID.
+ * @buf: trace data to be written.
+ * @len: # of bytes to write.
+ *
+ * Returns:
+ *	int, # of bytes written
+ *	otherwise, error
+ */
+static int pti_tty_driver_write(struct tty_struct *tty,
+	const unsigned char *buf, int len)
+{
+	struct pti_tty *pti_tty_data = tty->driver_data;
+	if ((pti_tty_data != NULL) && (pti_tty_data->mc != NULL)) {
+		pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len);
+		return len;
+	}
+	/*
+	 * we can't write to the pti hardware if the private driver_data
+	 * and the mc address are not there.
+	 */
+	return -EFAULT;
+}
+
+/**
+ * pti_tty_write_room()- Always returns 2048.
+ *
+ * @tty: contains tty info of the pti driver.
+ */
+static int pti_tty_write_room(struct tty_struct *tty)
+{
+	return 2048;
+}
+
+/**
+ * pti_char_open()- Open an Application master, channel aperture
+ * ID to the PTI device. Part of the misc device implementation.
+ *
+ * @inode: not used.
+ * @filp: Output- will have a masterchannel struct set containing
+ *	  the allocated application PTI aperture write address.
+ *
+ * Returns:
+ *	int, 0 for success
+ *	otherwise, a fail value
+ */
+static int pti_char_open(struct inode *inode, struct file *filp)
+{
+	struct pti_masterchannel *mc;
+
+	/*
+	 * We really do want to fail immediately if
+	 * pti_request_masterchannel() fails,
+	 * before assigning the value to filp->private_data.
+	 * Slightly easier to debug if this driver needs debugging.
+	 */
+	mc = pti_request_masterchannel(0, NULL);
+	if (mc == NULL)
+		return -ENOMEM;
+	filp->private_data = mc;
+	return 0;
+}
+
+/**
+ * pti_char_release()- Close a char channel to the PTI device. Part
+ * of the misc device implementation.
+ *
+ * @inode: Not used in this implementation.
+ * @filp: Contains private_data that contains the master, channel
+ *	  ID to be released by the PTI device.
+ *
+ * Returns:
+ *	always 0
+ */
+static int pti_char_release(struct inode *inode, struct file *filp)
+{
+	pti_release_masterchannel(filp->private_data);
+	filp->private_data = NULL;
+	return 0;
+}
+
+/**
+ * pti_char_write()- Write trace debugging data through the char
+ * interface to the PTI HW. Part of the misc device implementation.
+ *
+ * @filp: Contains private data which is used to obtain
+ *        master, channel write ID.
+ * @data: trace data to be written.
+ * @len: # of bytes to write.
+ * @ppose: Not used in this function implementation.
+ *
+ * Returns:
+ *	int, # of bytes written
+ *	otherwise, error value
+ *
+ * Notes: From side discussions with Alan Cox and experimenting
+ * with PTI debug HW like Nokia's Fido box and Lauterbach
+ * devices, 8192 byte write buffer used by USER_COPY_SIZE was
+ * deemed an appropriate size for this type of usage with
+ * debugging HW.
+ */
+static ssize_t pti_char_write(struct file *filp, const char __user *data,
+			      size_t len, loff_t *ppose)
+{
+	struct pti_masterchannel *mc;
+	void *kbuf;
+	const char __user *tmp;
+	size_t size = USER_COPY_SIZE;
+	size_t n = 0;
+
+	tmp = data;
+	mc = filp->private_data;
+
+	kbuf = kmalloc(size, GFP_KERNEL);
+	if (kbuf == NULL)  {
+		pr_err("%s(%d): buf allocation failed\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	do {
+		if (len - n > USER_COPY_SIZE)
+			size = USER_COPY_SIZE;
+		else
+			size = len - n;
+
+		if (copy_from_user(kbuf, tmp, size)) {
+			kfree(kbuf);
+			return n ? n : -EFAULT;
+		}
+
+		pti_write_to_aperture(mc, kbuf, size);
+		n += size;
+		tmp += size;
+
+	} while (len > n);
+
+	kfree(kbuf);
+	return len;
+}
+
+static const struct tty_operations pti_tty_driver_ops = {
+	.open		= pti_tty_driver_open,
+	.close		= pti_tty_driver_close,
+	.write		= pti_tty_driver_write,
+	.write_room	= pti_tty_write_room,
+	.install	= pti_tty_install,
+	.cleanup	= pti_tty_cleanup
+};
+
+static const struct file_operations pti_char_driver_ops = {
+	.owner		= THIS_MODULE,
+	.write		= pti_char_write,
+	.open		= pti_char_open,
+	.release	= pti_char_release,
+};
+
+static struct miscdevice pti_char_driver = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= CHARNAME,
+	.fops		= &pti_char_driver_ops
+};
+
+/**
+ * pti_console_write()- Write to the console that has been acquired.
+ *
+ * @c:   Not used in this implementation.
+ * @buf: Data to be written.
+ * @len: Length of buf.
+ */
+static void pti_console_write(struct console *c, const char *buf, unsigned len)
+{
+	static struct pti_masterchannel mc = {.master  = CONSOLE_ID,
+					      .channel = 0};
+
+	mc.channel = pti_console_channel;
+	pti_console_channel = (pti_console_channel + 1) & 0x7f;
+
+	pti_write_full_frame_to_aperture(&mc, buf, len);
+}
+
+/**
+ * pti_console_device()- Return the driver tty structure and set the
+ *			 associated index implementation.
+ *
+ * @c:     Console device of the driver.
+ * @index: index associated with c.
+ *
+ * Returns:
+ *	always value of pti_tty_driver structure when this function
+ *	is called.
+ */
+static struct tty_driver *pti_console_device(struct console *c, int *index)
+{
+	*index = c->index;
+	return pti_tty_driver;
+}
+
+/**
+ * pti_console_setup()- Initialize console variables used by the driver.
+ *
+ * @c:    Not used.
+ * @opts: Not used.
+ *
+ * Returns:
+ *	always 0.
+ */
+static int pti_console_setup(struct console *c, char *opts)
+{
+	pti_console_channel = 0;
+	pti_control_channel = 0;
+	return 0;
+}
+
+/*
+ * pti_console struct, used to capture OS printk()'s and shift
+ * out to the PTI device for debugging. This cannot be
+ * enabled upon boot because of the possibility of eating
+ * any serial console printk's (race condition discovered).
+ * The console should be enabled when the tty port is
+ * used for the first time. Since the primary purpose for
+ * the tty port is to hook up syslog to it, the tty port
+ * will be open for a really long time.
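+ *
+ * Console output lands on the dedicated CONSOLE_ID master, cycling
+ * through its 128 channels, which keeps printk traffic distinguishable
+ * from tty and char device traffic in the host debug tool.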
+ */
+static struct console pti_console = {
+ .name = TTYNAME,
+ .write = pti_console_write,
+ .device = pti_console_device,
+ .setup = pti_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = 0,
+};
+
+/**
+ * pti_port_activate()- Used to start/initialize any items upon
+ * the first opening of the tty port.
+ *
+ * @port: the tty port of the PTI device.
+ * @tty: the tty struct associated with this device.
+ *
+ * Returns:
+ * always returns 0
+ *
+ * Notes: The primary purpose of the PTI tty port 0 is to hook
+ * the syslog daemon to it; thus this port will be open for a
+ * very long time.
+ */
+static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ if (port->tty->index == PTITTY_MINOR_START)
+ console_start(&pti_console);
+ return 0;
+}
+
+/**
+ * pti_port_shutdown()- Used to stop/shutdown any items upon the
+ * last tty port close.
+ *
+ * @port: the tty port of the PTI device.
+ *
+ * Notes: The primary purpose of the PTI tty port 0 is to hook
+ * the syslog daemon to it; thus this port will be open for a
+ * very long time.
+ */
+static void pti_port_shutdown(struct tty_port *port)
+{
+ if (port->tty->index == PTITTY_MINOR_START)
+ console_stop(&pti_console);
+}
+
+static const struct tty_port_operations tty_port_ops = {
+ .activate = pti_port_activate,
+ .shutdown = pti_port_shutdown,
+};
+
+/*
+ * Note the _probe() call sets everything up and ties the char and tty
+ * interfaces to successful detection of the PTI device on the pci bus.
+ */
+
+/**
+ * pti_pci_probe()- Used to detect pti on the pci bus and set
+ * things up in the driver.
+ *
+ * @pdev: pci_dev struct values for pti.
+ * @ent: pci_device_id struct for pti driver.
+ *
+ * Returns:
+ * 0 for success
+ * otherwise, error
+ */
+static int pti_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned int a;
+ int retval = -EINVAL;
+ int pci_bar = 1;
+
+ dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
+ __func__, __LINE__, pdev->vendor, pdev->device);
+
+ retval = misc_register(&pti_char_driver);
+ if (retval) {
+ pr_err("%s(%d): CHAR registration of pti driver failed\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+ goto err;
+ }
+
+ retval = pci_enable_device(pdev);
+ if (retval != 0) {
+ dev_err(&pdev->dev,
+ "%s: pci_enable_device() returned error %d\n",
+ __func__, retval);
+ goto err_unreg_misc;
+ }
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (drv_data == NULL) {
+ retval = -ENOMEM;
+ dev_err(&pdev->dev,
+ "%s(%d): kzalloc() returned NULL memory.\n",
+ __func__, __LINE__);
+ goto err_disable_pci;
+ }
+ drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
+
+ retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ if (retval != 0) {
+ dev_err(&pdev->dev,
+ "%s(%d): pci_request_region() returned error %d\n",
+ __func__, __LINE__, retval);
+ goto err_free_dd;
+ }
+ drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
+ drv_data->pti_ioaddr =
+ ioremap_nocache((u32)drv_data->aperture_base,
+ APERTURE_LEN);
+ if (!drv_data->pti_ioaddr) {
+ retval = -ENOMEM;
+ goto err_rel_reg;
+ }
+
+ pci_set_drvdata(pdev, drv_data);
+
+ for (a = 0; a < PTITTY_MINOR_NUM; a++) {
+ struct tty_port *port = &drv_data->port[a];
+ tty_port_init(port);
+ port->ops = &tty_port_ops;
+
+ tty_port_register_device(port, pti_tty_driver, a, &pdev->dev);
+ }
+
+ register_console(&pti_console);
+
+ return 0;
+err_rel_reg:
+ pci_release_region(pdev, pci_bar);
+err_free_dd:
+ kfree(drv_data);
+err_disable_pci:
+ pci_disable_device(pdev);
+err_unreg_misc:
+ misc_deregister(&pti_char_driver);
+err:
+ return retval;
+}
+
+/**
+ * pti_pci_remove()- Driver exit method to remove PTI from
+ * PCI bus.
+ * @pdev: variable containing pci info of PTI.
+ */
+static void pti_pci_remove(struct pci_dev *pdev)
+{
+ struct pti_dev *drv_data = pci_get_drvdata(pdev);
+ unsigned int a;
+
+ unregister_console(&pti_console);
+
+ for (a = 0; a < PTITTY_MINOR_NUM; a++) {
+ tty_unregister_device(pti_tty_driver, a);
+ tty_port_destroy(&drv_data->port[a]);
+ }
+
+ iounmap(drv_data->pti_ioaddr);
+ kfree(drv_data);
+ pci_release_region(pdev, 1);
+ pci_disable_device(pdev);
+
+ misc_deregister(&pti_char_driver);
+}
+
+static struct pci_driver pti_pci_driver = {
+ .name = PCINAME,
+ .id_table = pci_ids,
+ .probe = pti_pci_probe,
+ .remove = pti_pci_remove,
+};
+
+/**
+ *
+ * pti_init()- Overall entry/init call to the pti driver.
+ * It starts the registration process with the kernel.
+ *
+ * Returns:
+ * int __init, 0 for success
+ * otherwise, an error value
+ *
+ */
+static int __init pti_init(void)
+{
+ int retval = -EINVAL;
+
+ /* First register module as tty device */
+
+ pti_tty_driver = alloc_tty_driver(PTITTY_MINOR_NUM);
+ if (pti_tty_driver == NULL) {
+ pr_err("%s(%d): Memory allocation failed for ptiTTY driver\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ pti_tty_driver->driver_name = DRIVERNAME;
+ pti_tty_driver->name = TTYNAME;
+ pti_tty_driver->major = 0;
+ pti_tty_driver->minor_start = PTITTY_MINOR_START;
+ pti_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ pti_tty_driver->subtype = SYSTEM_TYPE_SYSCONS;
+ pti_tty_driver->flags = TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV;
+ pti_tty_driver->init_termios = tty_std_termios;
+
+ tty_set_operations(pti_tty_driver, &pti_tty_driver_ops);
+
+ retval = tty_register_driver(pti_tty_driver);
+ if (retval) {
+ pr_err("%s(%d): TTY registration of pti driver failed\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+
+ goto put_tty;
+ }
+
+ retval = pci_register_driver(&pti_pci_driver);
+ if (retval) {
+ pr_err("%s(%d): PCI registration of pti driver failed\n",
+ __func__, __LINE__);
+ pr_err("%s(%d): Error value returned: %d\n",
+ __func__, __LINE__, retval);
+ goto unreg_tty;
+ }
+
+ return 0;
+unreg_tty:
+ tty_unregister_driver(pti_tty_driver);
+put_tty:
+ put_tty_driver(pti_tty_driver);
+ pti_tty_driver = NULL;
+ return retval;
+}
+
+/**
+ * pti_exit()- Unregisters this module as a tty and pci driver.
+ */ +static void __exit pti_exit(void) +{ + tty_unregister_driver(pti_tty_driver); + pci_unregister_driver(&pti_pci_driver); + put_tty_driver(pti_tty_driver); +} + +module_init(pti_init); +module_exit(pti_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ken Mills, Jay Freyensee"); +MODULE_DESCRIPTION("PTI Driver"); + diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile index 7c4c306dfa8..0003a1d56f7 100644 --- a/drivers/misc/sgi-gru/Makefile +++ b/drivers/misc/sgi-gru/Makefile @@ -1,6 +1,4 @@ -ifdef CONFIG_SGI_GRU_DEBUG - EXTRA_CFLAGS += -DDEBUG -endif +ccflags-$(CONFIG_SGI_GRU_DEBUG) := -DDEBUG obj-$(CONFIG_SGI_GRU) := gru.o gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o grukdump.o diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h index d95587cc794..04d5170ac14 100644 --- a/drivers/misc/sgi-gru/gru_instructions.h +++ b/drivers/misc/sgi-gru/gru_instructions.h @@ -40,6 +40,7 @@ extern void gru_wait_abort_proc(void *cb); *((volatile unsigned long *)(p)) = v; /* force st.rel */ \ } while (0) #elif defined(CONFIG_X86_64) +#include <asm/cacheflush.h> #define __flush_cache(p) clflush(p) #define gru_ordered_store_ulong(p, v) \ do { \ diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 38657cdaf54..f74fc0ca2ef 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -33,6 +33,7 @@ #include <linux/io.h> #include <linux/uaccess.h> #include <linux/security.h> +#include <linux/prefetch.h> #include <asm/pgtable.h> #include "gru.h" #include "grutables.h" @@ -875,8 +876,9 @@ int gru_set_context_option(unsigned long arg) switch (req.op) { case sco_blade_chiplet: /* Select blade/chiplet for GRU context */ - if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] || - req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) { + if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB || + req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || + (req.val1 >= 0 && !gru_base[req.val1])) { ret = -EINVAL; } else { gts->ts_user_blade_id = req.val1; diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index cb3b4d22847..104a05f6b73 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -6,7 +6,7 @@ * This file supports the user system call for file open, close, mmap, etc. * This also incudes the driver initialization code. * - * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2008-2014 Silicon Graphics, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,6 +58,11 @@ static int max_user_cbrs, max_user_dsr_bytes; static struct miscdevice gru_miscdev; +static int gru_supported(void) +{ + return is_uv_system() && + (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE); +} /* * gru_vma_close @@ -108,9 +113,8 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) return -EINVAL; - vma->vm_flags |= - (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP | - VM_RESERVED); + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED | + VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = PAGE_SHARED; vma->vm_ops = &gru_vm_ops; @@ -173,6 +177,7 @@ static long gru_get_config_info(unsigned long arg) nodesperblade = 2; else nodesperblade = 1; + memset(&info, 0, sizeof(info)); info.cpus = num_online_cpus(); info.nodes = num_online_nodes(); info.blades = info.nodes / nodesperblade; @@ -348,15 +353,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; -static void gru_noop(unsigned int irq) +static void gru_noop(struct irq_data *d) { } static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { [0 ... GRU_CHIPLETS_PER_BLADE - 1] { - .mask = gru_noop, - .unmask = gru_noop, - .ack = gru_noop + .irq_mask = gru_noop, + .irq_unmask = gru_noop, + .irq_ack = gru_noop } }; @@ -373,7 +378,7 @@ static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, if (gru_irq_count[chiplet] == 0) { gru_chip[chiplet].name = irq_name; - ret = set_irq_chip(irq, &gru_chip[chiplet]); + ret = irq_set_chip(irq, &gru_chip[chiplet]); if (ret) { printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", GRU_DRIVER_ID_STR, -ret); @@ -518,7 +523,7 @@ static int __init gru_init(void) { int ret; - if (!is_uv_system()) + if (!gru_supported()) return 0; #if defined CONFIG_IA64 @@ -573,7 +578,7 @@ exit0: static void __exit gru_exit(void) { - if (!is_uv_system()) + if (!gru_supported()) return; gru_teardown_tlb_irqs(); @@ -587,6 +592,7 @@ static const struct file_operations gru_fops = { .owner = THIS_MODULE, .unlocked_ioctl = gru_file_unlocked_ioctl, .mmap = gru_file_mmap, + .llseek = noop_llseek, }; static struct miscdevice gru_miscdev = { diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c index 9b2062d1732..a3700a56b8f 100644 --- a/drivers/misc/sgi-gru/grukdump.c +++ b/drivers/misc/sgi-gru/grukdump.c @@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum, ubuf += sizeof(hdr); ubufcch = ubuf; - if (gru_user_copy_handle(&ubuf, cch)) - goto fail; + if (gru_user_copy_handle(&ubuf, cch)) { + if (cch_locked) + unlock_cch_handle(cch); + return -EFAULT; + } if (cch_locked) ubufcch->delresp = 0; bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES; @@ -175,14 +178,10 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum, hdr.cbrcnt = cbrcnt; hdr.dsrcnt = dsrcnt; hdr.cch_locked = cch_locked; - if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr))) - ret = -EFAULT; - - return ret ? 
ret : bytes; + if (copy_to_user(uhdr, &hdr, sizeof(hdr))) + return -EFAULT; -fail: - unlock_cch_handle(cch); - return -EFAULT; + return bytes; } int gru_dump_chiplet_request(unsigned long arg) diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index 34749ee88df..913de07e577 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -31,6 +31,7 @@ #include <linux/interrupt.h> #include <linux/uaccess.h> #include <linux/delay.h> +#include <linux/export.h> #include <asm/io_apic.h> #include "gru.h" #include "grulib.h" @@ -229,7 +230,7 @@ again: bid = blade_id < 0 ? uv_numa_blade_id() : blade_id; bs = gru_base[bid]; - /* Handle the case where migration occured while waiting for the sema */ + /* Handle the case where migration occurred while waiting for the sema */ down_read(&bs->bs_kgts_sema); if (blade_id < 0 && bid != uv_numa_blade_id()) { up_read(&bs->bs_kgts_sema); diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index f8538bbd0bf..ae16c8cb4f3 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -28,6 +28,7 @@ #include <linux/device.h> #include <linux/list.h> #include <linux/err.h> +#include <linux/prefetch.h> #include <asm/uv/uv_hub.h> #include "gru.h" #include "grutables.h" diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 7768b87d995..4f763592239 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -160,15 +160,11 @@ static int options_show(struct seq_file *s, void *p) static ssize_t options_write(struct file *file, const char __user *userbuf, size_t count, loff_t *data) { - char buf[20]; + int ret; - if (count >= sizeof(buf)) - return -EINVAL; - if (copy_from_user(buf, userbuf, count)) - return -EFAULT; - buf[count] = '\0'; - if (strict_strtoul(buf, 0, &gru_options)) - return -EINVAL; + ret = kstrtoul_from_user(userbuf, count, 0, &gru_options); + if (ret) + return ret; return count; } @@ -324,7 +320,7 @@ static const struct file_operations gru_fops = { static struct proc_entry { char *name; - int mode; + umode_t mode; const struct file_operations *fops; struct proc_dir_entry *entry; } proc_files[] = { @@ -355,7 +351,7 @@ static void delete_proc_files(void) for (p = proc_files; p->name; p++) if (p->entry) remove_proc_entry(p->name, proc_gru); - remove_proc_entry("gru", proc_gru->parent); + proc_remove(proc_gru); } } diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 02a77b8b8ee..5c3ce245967 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -379,7 +379,7 @@ struct gru_thread_state { required for contest */ char ts_cch_req_slice;/* CCH packet slice */ char ts_blade; /* If >= 0, migrate context if - ref from diferent blade */ + ref from different blade */ char ts_force_cch_reload; char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each allocated CB */ @@ -516,8 +516,7 @@ struct gru_blade_state { /* Scan all active GRUs in a GRU bitmap */ #define for_each_gru_in_bitmap(gid, map) \ - for ((gid) = find_first_bit((map), GRU_MAX_GRUS); (gid) < GRU_MAX_GRUS;\ - (gid)++, (gid) = find_next_bit((map), GRU_MAX_GRUS, (gid))) + for_each_set_bit((gid), (map), GRU_MAX_GRUS) /* Scan all active GRUs on a specific blade */ #define for_each_gru_on_blade(gru, nid, i) \ @@ -536,23 +535,17 @@ struct gru_blade_state { /* Scan each CBR whose bit is set in a TFM (or copy of) */ #define for_each_cbr_in_tfm(i, map) \ - for ((i) = find_first_bit(map, 
GRU_NUM_CBE); \ - (i) < GRU_NUM_CBE; \ - (i)++, (i) = find_next_bit(map, GRU_NUM_CBE, i)) + for_each_set_bit((i), (map), GRU_NUM_CBE) /* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */ #define for_each_cbr_in_allocation_map(i, map, k) \ - for ((k) = find_first_bit(map, GRU_CBR_AU); (k) < GRU_CBR_AU; \ - (k) = find_next_bit(map, GRU_CBR_AU, (k) + 1)) \ + for_each_set_bit((k), (map), GRU_CBR_AU) \ for ((i) = (k)*GRU_CBR_AU_SIZE; \ (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++) /* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */ #define for_each_dsr_in_allocation_map(i, map, k) \ - for ((k) = find_first_bit((const unsigned long *)map, GRU_DSR_AU);\ - (k) < GRU_DSR_AU; \ - (k) = find_next_bit((const unsigned long *)map, \ - GRU_DSR_AU, (k) + 1)) \ + for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU) \ for ((i) = (k) * GRU_DSR_AU_CL; \ (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++) diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index 240a6d36166..2129274ef7a 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c @@ -280,11 +280,10 @@ static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm, const struct mmu_notifier_ops *ops) { struct mmu_notifier *mn, *gru_mn = NULL; - struct hlist_node *n; if (mm->mmu_notifier_mm) { rcu_read_lock(); - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) if (mn->ops == ops) { gru_mn = mn; diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 851b2f25ce0..c862cd4583c 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -25,7 +25,6 @@ #endif #if defined CONFIG_IA64 -#include <asm/system.h> #include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */ #define is_shub() ia64_platform_is("sn2") #endif diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 652593fc486..128d5615c80 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -828,6 +828,7 @@ enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *ch) { enum xp_retval ret; + DEFINE_WAIT(wait); if (ch->flags & XPC_C_DISCONNECTING) { DBUG_ON(ch->reason == xpInterrupted); @@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) } atomic_inc(&ch->n_on_msg_allocate_wq); - ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); + prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); + ret = schedule_timeout(1); + finish_wait(&ch->msg_allocate_wq, &wait); atomic_dec(&ch->n_on_msg_allocate_wq); if (ch->flags & XPC_C_DISCONNECTING) { diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 832ed4c88cf..82dc5748f87 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -44,6 +44,7 @@ */ #include <linux/module.h> +#include <linux/slab.h> #include <linux/sysctl.h> #include <linux/device.h> #include <linux/delay.h> @@ -52,6 +53,10 @@ #include <linux/kthread.h> #include "xpc.h" +#ifdef CONFIG_X86_64 +#include <asm/traps.h> +#endif + /* define two XPC debug device structures to be used with dev_dbg() et al */ struct device_driver xpc_dbg_name = { @@ -87,7 +92,7 @@ int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT; static int xpc_disengage_min_timelimit; /* = 0 */ static int xpc_disengage_max_timelimit = 120; -static ctl_table xpc_sys_xpc_hb_dir[] = { +static struct ctl_table xpc_sys_xpc_hb_dir[] = { { .procname = 
"hb_interval", .data = &xpc_hb_interval, @@ -106,7 +111,7 @@ static ctl_table xpc_sys_xpc_hb_dir[] = { .extra2 = &xpc_hb_check_max_interval}, {} }; -static ctl_table xpc_sys_xpc_dir[] = { +static struct ctl_table xpc_sys_xpc_dir[] = { { .procname = "hb", .mode = 0555, @@ -121,7 +126,7 @@ static ctl_table xpc_sys_xpc_dir[] = { .extra2 = &xpc_disengage_max_timelimit}, {} }; -static ctl_table xpc_sys_dir[] = { +static struct ctl_table xpc_sys_dir[] = { { .procname = "xpc", .mode = 0555, @@ -1078,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) return NOTIFY_DONE; } +/* Used to only allow one cpu to complete disconnect */ +static unsigned int xpc_die_disconnecting; + /* * Notify other partitions to deactivate from us by first disengaging from all * references to our memory. @@ -1091,6 +1099,9 @@ xpc_die_deactivate(void) long keep_waiting; long wait_to_print; + if (cmpxchg(&xpc_die_disconnecting, 0, 1)) + return; + /* keep xpc_hb_checker thread from doing anything (just in case) */ xpc_exiting = 1; @@ -1158,7 +1169,7 @@ xpc_die_deactivate(void) * about the lack of a heartbeat. */ static int -xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) +xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) { #ifdef CONFIG_IA64 /* !!! temporary kludge */ switch (event) { @@ -1190,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) break; } #else - xpc_die_deactivate(); + struct die_args *die_args = _die_args; + + switch (event) { + case DIE_TRAP: + if (die_args->trapnr == X86_TRAP_DF) + xpc_die_deactivate(); + + if (((die_args->trapnr == X86_TRAP_MF) || + (die_args->trapnr == X86_TRAP_XF)) && + !user_mode_vm(die_args->regs)) + xpc_die_deactivate(); + + break; + case DIE_INT3: + case DIE_DEBUG: + break; + case DIE_OOPS: + case DIE_GPF: + default: + xpc_die_deactivate(); + } #endif return NOTIFY_DONE; diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 9a6268c89fd..6956f7e7d43 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -17,6 +17,7 @@ #include <linux/device.h> #include <linux/hardirq.h> +#include <linux/slab.h> #include "xpc.h" #include <asm/uv/uv_hub.h> @@ -438,18 +439,23 @@ xpc_discovery(void) * nodes that can comprise an access protection grouping. The access * protection is in regards to memory, IOI and IPI. 
*/ - max_regions = 64; region_size = xp_region_size; - switch (region_size) { - case 128: - max_regions *= 2; - case 64: - max_regions *= 2; - case 32: - max_regions *= 2; - region_size = 16; - DBUG_ON(!is_shub2()); + if (is_uv()) + max_regions = 256; + else { + max_regions = 64; + + switch (region_size) { + case 128: + max_regions *= 2; + case 64: + max_regions *= 2; + case 32: + max_regions *= 2; + region_size = 16; + DBUG_ON(!is_shub2()); + } } for (region = 0; region < max_regions; region++) { diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 8b70e03f939..7d71c04fc93 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -14,6 +14,7 @@ */ #include <linux/delay.h> +#include <linux/slab.h> #include <asm/uncached.h> #include <asm/sn/mspec.h> #include <asm/sn/sn_sal.h> diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 8725d5e8ab0..95c894482fd 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -18,7 +18,10 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/cpu.h> +#include <linux/module.h> #include <linux/err.h> +#include <linux/slab.h> #include <asm/uv/uv_hub.h> #if defined CONFIG_X86_64 #include <asm/uv/bios.h> @@ -58,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv; XPC_NOTIFY_MSG_SIZE_UV) #define XPC_NOTIFY_IRQ_NAME "xpc_notify" +static int xpc_mq_node = -1; + static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; @@ -108,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) #if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, UV_AFFINITY_CPU); - if (mq->irq < 0) { - dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", - -mq->irq); + if (mq->irq < 0) return mq->irq; - } mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); @@ -237,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, - pg_order); + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, + pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); @@ -416,6 +419,7 @@ xpc_process_activate_IRQ_rcvd_uv(void) static void xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, struct xpc_activate_mq_msghdr_uv *msg_hdr, + int part_setup, int *wakeup_hb_checker) { unsigned long irq_flags; @@ -450,9 +454,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, if (msg->activate_gru_mq_desc_gpa != part_uv->activate_gru_mq_desc_gpa) { - spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + spin_lock(&part_uv->flags_lock); part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; - spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + spin_unlock(&part_uv->flags_lock); part_uv->activate_gru_mq_desc_gpa = msg->activate_gru_mq_desc_gpa; } @@ -480,6 +484,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_closerequest_uv, hdr); @@ -496,6 +503,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case 
XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { struct xpc_activate_mq_msg_chctl_closereply_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_closereply_uv, hdr); @@ -510,6 +520,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_openrequest_uv, hdr); @@ -527,6 +540,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { struct xpc_activate_mq_msg_chctl_openreply_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_openreply_uv, hdr); args = &part->remote_openclose_args[msg->ch_number]; @@ -544,6 +560,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: { struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_opencomplete_uv, hdr); spin_lock_irqsave(&part->chctl_lock, irq_flags); @@ -620,6 +639,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) part_referenced = xpc_part_ref(part); xpc_handle_activate_mq_msg_uv(part, msg_hdr, + part_referenced, &wakeup_hb_checker); if (part_referenced) xpc_part_deref(part); @@ -1713,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = { .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, }; +static int +xpc_init_mq_node(int nid) +{ + int cpu; + + get_online_cpus(); + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_activate_mq_uv = + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, + XPC_ACTIVATE_IRQ_NAME, + xpc_handle_activate_IRQ_uv); + if (!IS_ERR(xpc_activate_mq_uv)) + break; + } + if (IS_ERR(xpc_activate_mq_uv)) { + put_online_cpus(); + return PTR_ERR(xpc_activate_mq_uv); + } + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_notify_mq_uv = + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, + XPC_NOTIFY_IRQ_NAME, + xpc_handle_notify_IRQ_uv); + if (!IS_ERR(xpc_notify_mq_uv)) + break; + } + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + put_online_cpus(); + return PTR_ERR(xpc_notify_mq_uv); + } + + put_online_cpus(); + return 0; +} + int xpc_init_uv(void) { + int nid; + int ret = 0; + xpc_arch_ops = xpc_arch_ops_uv; if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { @@ -1724,21 +1785,21 @@ xpc_init_uv(void) return -E2BIG; } - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, - XPC_ACTIVATE_IRQ_NAME, - xpc_handle_activate_IRQ_uv); - if (IS_ERR(xpc_activate_mq_uv)) - return PTR_ERR(xpc_activate_mq_uv); + if (xpc_mq_node < 0) + for_each_online_node(nid) { + ret = xpc_init_mq_node(nid); - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, - XPC_NOTIFY_IRQ_NAME, - xpc_handle_notify_IRQ_uv); - if (IS_ERR(xpc_notify_mq_uv)) { - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); - return PTR_ERR(xpc_notify_mq_uv); - } + if (!ret) + break; + } + else + ret = xpc_init_mq_node(xpc_mq_node); - return 0; + if (ret < 0) + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n", + -ret); + + return ret; } void @@ -1747,3 +1808,6 @@ xpc_exit_uv(void) xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); } + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff 
--git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 16f0abda142..3fac67a5204 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -20,6 +20,7 @@ * */ +#include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -475,7 +476,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->data[0] == 0xff) { /* we are being asked to broadcast to all partitions */ - for_each_bit(dest_partid, xpnet_broadcast_partitions, + for_each_set_bit(dest_partid, xpnet_broadcast_partitions, xp_max_npartitions) { xpnet_send(skb, queued_msg, start_addr, end_addr, @@ -494,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } } + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + if (atomic_dec_return(&queued_msg->use_count) == 0) { dev_kfree_skb(skb); kfree(queued_msg); } - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - return NETDEV_TX_OK; } @@ -575,7 +576,7 @@ xpnet_init(void) * report an error if the data is not retrievable and the * packet will be dropped. */ - xpnet_device->features = NETIF_F_NO_CSUM; + xpnet_device->features = NETIF_F_HW_CSUM; result = register_netdev(xpnet_device); if (result != 0) { diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c new file mode 100644 index 00000000000..2e13614d41e --- /dev/null +++ b/drivers/misc/spear13xx_pcie_gadget.c @@ -0,0 +1,923 @@ +/* + * drivers/misc/spear13xx_pcie_gadget.c + * + * Copyright (C) 2010 ST Microelectronics + * Pratyush Anand<pratyush.anand@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pci_regs.h> +#include <linux/configfs.h> +#include <mach/pcie.h> +#include <mach/misc_regs.h> + +#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1) +/* In current implementation address translation is done using IN0 only. 
+ * So IN1 start address and IN0 end address has been kept same +*/ +#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1) +#define IN_IO_SIZE (20 * 1024 * 1024 - 1) +#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1) +#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1) +#define IN_MSG_SIZE (12 * 1024 * 1024 - 1) +/* Keep default BAR size as 4K*/ +/* AORAM would be mapped by default*/ +#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1) + +#define INT_TYPE_NO_INT 0 +#define INT_TYPE_INTX 1 +#define INT_TYPE_MSI 2 +struct spear_pcie_gadget_config { + void __iomem *base; + void __iomem *va_app_base; + void __iomem *va_dbi_base; + char int_type[10]; + ulong requested_msi; + ulong configured_msi; + ulong bar0_size; + ulong bar0_rw_offset; + void __iomem *va_bar0_address; +}; + +struct pcie_gadget_target { + struct configfs_subsystem subsys; + struct spear_pcie_gadget_config config; +}; + +struct pcie_gadget_target_attr { + struct configfs_attribute attr; + ssize_t (*show)(struct spear_pcie_gadget_config *config, + char *buf); + ssize_t (*store)(struct spear_pcie_gadget_config *config, + const char *buf, + size_t count); +}; + +static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg) +{ + /* Enable DBI access */ + writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID), + &app_reg->slv_armisc); + writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID), + &app_reg->slv_awmisc); + +} + +static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg) +{ + /* disable DBI access */ + writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID), + &app_reg->slv_armisc); + writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID), + &app_reg->slv_awmisc); + +} + +static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config, + int where, int size, u32 *val) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + ulong va_address; + + /* Enable DBI access */ + enable_dbi_access(app_reg); + + va_address = (ulong)config->va_dbi_base + (where & ~0x3); + + *val = readl(va_address); + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + /* Disable DBI access */ + disable_dbi_access(app_reg); +} + +static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config, + int where, int size, u32 val) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + ulong va_address; + + /* Enable DBI access */ + enable_dbi_access(app_reg); + + va_address = (ulong)config->va_dbi_base + (where & ~0x3); + + if (size == 4) + writel(val, va_address); + else if (size == 2) + writew(val, va_address + (where & 2)); + else if (size == 1) + writeb(val, va_address + (where & 3)); + + /* Disable DBI access */ + disable_dbi_access(app_reg); +} + +#define PCI_FIND_CAP_TTL 48 + +static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config, + u32 pos, int cap, int *ttl) +{ + u32 id; + + while ((*ttl)--) { + spear_dbi_read_reg(config, pos, 1, &pos); + if (pos < 0x40) + break; + pos &= ~3; + spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id); + if (id == 0xff) + break; + if (id == cap) + return pos; + pos += PCI_CAP_LIST_NEXT; + } + return 0; +} + +static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config, + u32 pos, int cap) +{ + int ttl = PCI_FIND_CAP_TTL; + + return pci_find_own_next_cap_ttl(config, pos, cap, &ttl); +} + +static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config, + u8 hdr_type) +{ + u32 status; + + spear_dbi_read_reg(config, 
PCI_STATUS, 2, &status); + if (!(status & PCI_STATUS_CAP_LIST)) + return 0; + + switch (hdr_type) { + case PCI_HEADER_TYPE_NORMAL: + case PCI_HEADER_TYPE_BRIDGE: + return PCI_CAPABILITY_LIST; + case PCI_HEADER_TYPE_CARDBUS: + return PCI_CB_CAPABILITY_LIST; + default: + return 0; + } + + return 0; +} + +/* + * Tell if a device supports a given PCI capability. + * Returns the address of the requested capability structure within the + * device's PCI configuration space or 0 in case the device does not + * support it. Possible values for @cap: + * + * %PCI_CAP_ID_PM Power Management + * %PCI_CAP_ID_AGP Accelerated Graphics Port + * %PCI_CAP_ID_VPD Vital Product Data + * %PCI_CAP_ID_SLOTID Slot Identification + * %PCI_CAP_ID_MSI Message Signalled Interrupts + * %PCI_CAP_ID_CHSWP CompactPCI HotSwap + * %PCI_CAP_ID_PCIX PCI-X + * %PCI_CAP_ID_EXP PCI Express + */ +static int pci_find_own_capability(struct spear_pcie_gadget_config *config, + int cap) +{ + u32 pos; + u32 hdr_type; + + spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type); + + pos = pci_find_own_cap_start(config, hdr_type); + if (pos) + pos = pci_find_own_next_cap(config, pos, cap); + + return pos; +} + +static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id) +{ + return 0; +} + +/* + * configfs interfaces show/store functions + */ +static ssize_t pcie_gadget_show_link( + struct spear_pcie_gadget_config *config, + char *buf) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + + if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID)) + return sprintf(buf, "UP"); + else + return sprintf(buf, "DOWN"); +} + +static ssize_t pcie_gadget_store_link( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + + if (sysfs_streq(buf, "UP")) + writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID), + &app_reg->app_ctrl_0); + else if (sysfs_streq(buf, "DOWN")) + writel(readl(&app_reg->app_ctrl_0) + & ~(1 << APP_LTSSM_ENABLE_ID), + &app_reg->app_ctrl_0); + else + return -EINVAL; + return count; +} + +static ssize_t pcie_gadget_show_int_type( + struct spear_pcie_gadget_config *config, + char *buf) +{ + return sprintf(buf, "%s", config->int_type); +} + +static ssize_t pcie_gadget_store_int_type( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + u32 cap, vec, flags; + ulong vector; + + if (sysfs_streq(buf, "INTA")) + spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1); + + else if (sysfs_streq(buf, "MSI")) { + vector = config->requested_msi; + vec = 0; + while (vector > 1) { + vector /= 2; + vec++; + } + spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0); + cap = pci_find_own_capability(config, PCI_CAP_ID_MSI); + spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags); + flags &= ~PCI_MSI_FLAGS_QMASK; + flags |= vec << 1; + spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags); + } else + return -EINVAL; + + strcpy(config->int_type, buf); + + return count; +} + +static ssize_t pcie_gadget_show_no_of_msi( + struct spear_pcie_gadget_config *config, + char *buf) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + u32 cap, vec, flags; + ulong vector; + + if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID)) + != (1 << CFG_MSI_EN_ID)) + vector = 0; + else { + cap = pci_find_own_capability(config, PCI_CAP_ID_MSI); + spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags); + flags &= ~PCI_MSI_FLAGS_QSIZE; + vec = flags >> 4; + vector = 1; + while (vec--) + vector *= 
2; + } + config->configured_msi = vector; + + return sprintf(buf, "%lu", vector); +} + +static ssize_t pcie_gadget_store_no_of_msi( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + int ret; + + ret = kstrtoul(buf, 0, &config->requested_msi); + if (ret) + return ret; + + if (config->requested_msi > 32) + config->requested_msi = 32; + + return count; +} + +static ssize_t pcie_gadget_store_inta( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + ulong en; + int ret; + + ret = kstrtoul(buf, 0, &en); + if (ret) + return ret; + + if (en) + writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID), + &app_reg->app_ctrl_0); + else + writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID), + &app_reg->app_ctrl_0); + + return count; +} + +static ssize_t pcie_gadget_store_send_msi( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + ulong vector; + u32 ven_msi; + int ret; + + ret = kstrtoul(buf, 0, &vector); + if (ret) + return ret; + + if (!config->configured_msi) + return -EINVAL; + + if (vector >= config->configured_msi) + return -EINVAL; + + ven_msi = readl(&app_reg->ven_msi_1); + ven_msi &= ~VEN_MSI_FUN_NUM_MASK; + ven_msi |= 0 << VEN_MSI_FUN_NUM_ID; + ven_msi &= ~VEN_MSI_TC_MASK; + ven_msi |= 0 << VEN_MSI_TC_ID; + ven_msi &= ~VEN_MSI_VECTOR_MASK; + ven_msi |= vector << VEN_MSI_VECTOR_ID; + + /* generating interrupt for msi vector */ + ven_msi |= VEN_MSI_REQ_EN; + writel(ven_msi, &app_reg->ven_msi_1); + udelay(1); + ven_msi &= ~VEN_MSI_REQ_EN; + writel(ven_msi, &app_reg->ven_msi_1); + + return count; +} + +static ssize_t pcie_gadget_show_vendor_id( + struct spear_pcie_gadget_config *config, + char *buf) +{ + u32 id; + + spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id); + + return sprintf(buf, "%x", id); +} + +static ssize_t pcie_gadget_store_vendor_id( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + ulong id; + int ret; + + ret = kstrtoul(buf, 0, &id); + if (ret) + return ret; + + spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id); + + return count; +} + +static ssize_t pcie_gadget_show_device_id( + struct spear_pcie_gadget_config *config, + char *buf) +{ + u32 id; + + spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id); + + return sprintf(buf, "%x", id); +} + +static ssize_t pcie_gadget_store_device_id( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + ulong id; + int ret; + + ret = kstrtoul(buf, 0, &id); + if (ret) + return ret; + + spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id); + + return count; +} + +static ssize_t pcie_gadget_show_bar0_size( + struct spear_pcie_gadget_config *config, + char *buf) +{ + return sprintf(buf, "%lx", config->bar0_size); +} + +static ssize_t pcie_gadget_store_bar0_size( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + ulong size; + u32 pos, pos1; + u32 no_of_bit = 0; + int ret; + + ret = kstrtoul(buf, 0, &size); + if (ret) + return ret; + + /* min bar size is 256 */ + if (size <= 0x100) + size = 0x100; + /* max bar size is 1MB*/ + else if (size >= 0x100000) + size = 0x100000; + else { + pos = 0; + pos1 = 0; + while (pos < 21) { + pos = find_next_bit((ulong *)&size, 21, pos); + if (pos != 21) + pos1 = pos + 1; + pos++; + no_of_bit++; + } + if (no_of_bit == 2) + pos1--; + + size = 1 << pos1; + } + config->bar0_size = size; + spear_dbi_write_reg(config, 
PCIE_BAR0_MASK_REG, 4, size - 1); + + return count; +} + +static ssize_t pcie_gadget_show_bar0_address( + struct spear_pcie_gadget_config *config, + char *buf) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + + u32 address = readl(&app_reg->pim0_mem_addr_start); + + return sprintf(buf, "%x", address); +} + +static ssize_t pcie_gadget_store_bar0_address( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + ulong address; + int ret; + + ret = kstrtoul(buf, 0, &address); + if (ret) + return ret; + + address &= ~(config->bar0_size - 1); + if (config->va_bar0_address) + iounmap(config->va_bar0_address); + config->va_bar0_address = ioremap(address, config->bar0_size); + if (!config->va_bar0_address) + return -ENOMEM; + + writel(address, &app_reg->pim0_mem_addr_start); + + return count; +} + +static ssize_t pcie_gadget_show_bar0_rw_offset( + struct spear_pcie_gadget_config *config, + char *buf) +{ + return sprintf(buf, "%lx", config->bar0_rw_offset); +} + +static ssize_t pcie_gadget_store_bar0_rw_offset( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + ulong offset; + int ret; + + ret = kstrtoul(buf, 0, &offset); + if (ret) + return ret; + + if (offset % 4) + return -EINVAL; + + config->bar0_rw_offset = offset; + + return count; +} + +static ssize_t pcie_gadget_show_bar0_data( + struct spear_pcie_gadget_config *config, + char *buf) +{ + ulong data; + + if (!config->va_bar0_address) + return -ENOMEM; + + data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset); + + return sprintf(buf, "%lx", data); +} + +static ssize_t pcie_gadget_store_bar0_data( + struct spear_pcie_gadget_config *config, + const char *buf, size_t count) +{ + ulong data; + int ret; + + ret = kstrtoul(buf, 0, &data); + if (ret) + return ret; + + if (!config->va_bar0_address) + return -ENOMEM; + + writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset); + + return count; +} + +/* + * Attribute definitions. 
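+ *
+ * Each PCIE_GADGET_TARGET_ATTR_RO/_WO/_RW macro below wires a configfs
+ * attribute to the pcie_gadget_show_<name>/pcie_gadget_store_<name>
+ * handlers of the same name.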
+ */ + +#define PCIE_GADGET_TARGET_ATTR_RO(_name) \ +static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \ + __CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL) + +#define PCIE_GADGET_TARGET_ATTR_WO(_name) \ +static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \ + __CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name) + +#define PCIE_GADGET_TARGET_ATTR_RW(_name) \ +static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \ + __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \ + pcie_gadget_store_##_name) +PCIE_GADGET_TARGET_ATTR_RW(link); +PCIE_GADGET_TARGET_ATTR_RW(int_type); +PCIE_GADGET_TARGET_ATTR_RW(no_of_msi); +PCIE_GADGET_TARGET_ATTR_WO(inta); +PCIE_GADGET_TARGET_ATTR_WO(send_msi); +PCIE_GADGET_TARGET_ATTR_RW(vendor_id); +PCIE_GADGET_TARGET_ATTR_RW(device_id); +PCIE_GADGET_TARGET_ATTR_RW(bar0_size); +PCIE_GADGET_TARGET_ATTR_RW(bar0_address); +PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset); +PCIE_GADGET_TARGET_ATTR_RW(bar0_data); + +static struct configfs_attribute *pcie_gadget_target_attrs[] = { + &pcie_gadget_target_link.attr, + &pcie_gadget_target_int_type.attr, + &pcie_gadget_target_no_of_msi.attr, + &pcie_gadget_target_inta.attr, + &pcie_gadget_target_send_msi.attr, + &pcie_gadget_target_vendor_id.attr, + &pcie_gadget_target_device_id.attr, + &pcie_gadget_target_bar0_size.attr, + &pcie_gadget_target_bar0_address.attr, + &pcie_gadget_target_bar0_rw_offset.attr, + &pcie_gadget_target_bar0_data.attr, + NULL, +}; + +static struct pcie_gadget_target *to_target(struct config_item *item) +{ + return item ? + container_of(to_configfs_subsystem(to_config_group(item)), + struct pcie_gadget_target, subsys) : NULL; +} + +/* + * Item operations and type for pcie_gadget_target. 
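+ *
+ * The show/store dispatchers below recover the pcie_gadget_target_attr
+ * wrapper from the generic configfs_attribute and forward the call to
+ * the per-attribute handler, if one is set.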
+ */ + +static ssize_t pcie_gadget_target_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *buf) +{ + ssize_t ret = -EINVAL; + struct pcie_gadget_target *target = to_target(item); + struct pcie_gadget_target_attr *t_attr = + container_of(attr, struct pcie_gadget_target_attr, attr); + + if (t_attr->show) + ret = t_attr->show(&target->config, buf); + return ret; +} + +static ssize_t pcie_gadget_target_attr_store(struct config_item *item, + struct configfs_attribute *attr, + const char *buf, + size_t count) +{ + ssize_t ret = -EINVAL; + struct pcie_gadget_target *target = to_target(item); + struct pcie_gadget_target_attr *t_attr = + container_of(attr, struct pcie_gadget_target_attr, attr); + + if (t_attr->store) + ret = t_attr->store(&target->config, buf, count); + return ret; +} + +static struct configfs_item_operations pcie_gadget_target_item_ops = { + .show_attribute = pcie_gadget_target_attr_show, + .store_attribute = pcie_gadget_target_attr_store, +}; + +static struct config_item_type pcie_gadget_target_type = { + .ct_attrs = pcie_gadget_target_attrs, + .ct_item_ops = &pcie_gadget_target_item_ops, + .ct_owner = THIS_MODULE, +}; + +static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config) +{ + struct pcie_app_reg __iomem *app_reg = config->va_app_base; + + /*setup registers for outbound translation */ + + writel(config->base, &app_reg->in0_mem_addr_start); + writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE, + &app_reg->in0_mem_addr_limit); + writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start); + writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE, + &app_reg->in1_mem_addr_limit); + writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start); + writel(app_reg->in_io_addr_start + IN_IO_SIZE, + &app_reg->in_io_addr_limit); + writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start); + writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE, + &app_reg->in_cfg0_addr_limit); + writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start); + writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE, + &app_reg->in_cfg1_addr_limit); + writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start); + writel(app_reg->in_msg_addr_start + IN_MSG_SIZE, + &app_reg->in_msg_addr_limit); + + writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start); + writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start); + writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start); + + /*setup registers for inbound translation */ + + /* Keep AORAM mapped at BAR0 as default */ + config->bar0_size = INBOUND_ADDR_MASK + 1; + spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK); + spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC); + config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE, + config->bar0_size); + + writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start); + writel(0, &app_reg->pim1_mem_addr_start); + writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit); + + writel(0x0, &app_reg->pim_io_addr_start); + writel(0x0, &app_reg->pim_io_addr_start); + writel(0x0, &app_reg->pim_rom_addr_start); + + writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID) + | ((u32)1 << REG_TRANSLATION_ENABLE), + &app_reg->app_ctrl_0); + /* disable all rx interrupts */ + writel(0, &app_reg->int_mask); + + /* Select INTA as default*/ + spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1); +} + +static int spear_pcie_gadget_probe(struct platform_device *pdev) +{ + struct resource *res0, *res1; + 
unsigned int status = 0;
+ int irq;
+ struct clk *clk;
+ static struct pcie_gadget_target *target;
+ struct spear_pcie_gadget_config *config;
+ struct config_item *cg_item;
+ struct configfs_subsystem *subsys;
+
+ /* get resource for application registers */
+
+ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res0) {
+ dev_err(&pdev->dev, "no resource defined\n");
+ return -EBUSY;
+ }
+ if (!request_mem_region(res0->start, resource_size(res0),
+ pdev->name)) {
+ dev_err(&pdev->dev, "pcie gadget region already claimed\n");
+ return -EBUSY;
+ }
+ /* get resource for dbi registers */
+
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res1) {
+ dev_err(&pdev->dev, "no resource defined\n");
+ goto err_rel_res0;
+ }
+ if (!request_mem_region(res1->start, resource_size(res1),
+ pdev->name)) {
+ dev_err(&pdev->dev, "pcie gadget region already claimed\n");
+ goto err_rel_res0;
+ }
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target) {
+ dev_err(&pdev->dev, "out of memory\n");
+ status = -ENOMEM;
+ goto err_rel_res;
+ }
+
+ cg_item = &target->subsys.su_group.cg_item;
+ sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
+ cg_item->ci_type = &pcie_gadget_target_type;
+ config = &target->config;
+ config->va_app_base = (void __iomem *)ioremap(res0->start,
+ resource_size(res0));
+ if (!config->va_app_base) {
+ dev_err(&pdev->dev, "ioremap fail\n");
+ status = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ config->base = (void __iomem *)res1->start;
+
+ config->va_dbi_base = (void __iomem *)ioremap(res1->start,
+ resource_size(res1));
+ if (!config->va_dbi_base) {
+ dev_err(&pdev->dev, "ioremap fail\n");
+ status = -ENOMEM;
+ goto err_iounmap_app;
+ }
+
+ platform_set_drvdata(pdev, target);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no update irq?\n");
+ status = irq;
+ goto err_iounmap;
+ }
+
+ status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
+ if (status) {
+ dev_err(&pdev->dev,
+ "pcie gadget interrupt IRQ%d already claimed\n", irq);
+ goto err_iounmap;
+ }
+
+ /* Register configfs hooks */
+ subsys = &target->subsys;
+ config_group_init(&subsys->su_group);
+ mutex_init(&subsys->su_mutex);
+ status = configfs_register_subsystem(subsys);
+ if (status)
+ goto err_irq;
+
+ /*
+ * init basic pcie application registers.
+ * Do not enable the clock if this is PCIE0. Ideally, all controllers
+ * should have been independent of each other with respect to clocks.
+ * But PCIE1 and PCIE2 depend on PCIE0, so the PCIE0 clk is provided
+ * during board init.
+ */
+ if (pdev->id == 1) {
+ /*
+ * Ideally, the CFG clock should also have been enabled here.
+ * But currently it is done during the board init routine.
+ */
+ clk = clk_get_sys("pcie1", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s:couldn't get clk for pcie1\n", __func__);
+ status = PTR_ERR(clk);
+ goto err_irq;
+ }
+ status = clk_enable(clk);
+ if (status) {
+ pr_err("%s:couldn't enable clk for pcie1\n", __func__);
+ goto err_irq;
+ }
+ } else if (pdev->id == 2) {
+ /*
+ * Ideally, the CFG clock should also have been enabled here.
+ * But currently it is done during the board init routine.
+ */
+ clk = clk_get_sys("pcie2", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s:couldn't get clk for pcie2\n", __func__);
+ status = PTR_ERR(clk);
+ goto err_irq;
+ }
+ status = clk_enable(clk);
+ if (status) {
+ pr_err("%s:couldn't enable clk for pcie2\n", __func__);
+ goto err_irq;
+ }
+ }
+ spear13xx_pcie_device_init(config);
+
+ return 0;
+err_irq:
+ free_irq(irq, NULL);
+err_iounmap:
+ iounmap(config->va_dbi_base);
+err_iounmap_app:
+ iounmap(config->va_app_base);
+err_kzalloc:
+ kfree(target);
+err_rel_res:
+ release_mem_region(res1->start, resource_size(res1));
+err_rel_res0:
+ release_mem_region(res0->start, resource_size(res0));
+ return status;
+}
+
+static int spear_pcie_gadget_remove(struct platform_device *pdev)
+{
+ struct resource *res0, *res1;
+ static struct pcie_gadget_target *target;
+ struct spear_pcie_gadget_config *config;
+ int irq;
+
+ res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq = platform_get_irq(pdev, 0);
+ target = platform_get_drvdata(pdev);
+ config = &target->config;
+
+ free_irq(irq, NULL);
+ iounmap(config->va_dbi_base);
+ iounmap(config->va_app_base);
+ release_mem_region(res1->start, resource_size(res1));
+ release_mem_region(res0->start, resource_size(res0));
+ configfs_unregister_subsystem(&target->subsys);
+ kfree(target);
+
+ return 0;
+}
+
+static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
+{
+}
+
+static struct platform_driver spear_pcie_gadget_driver = {
+ .probe = spear_pcie_gadget_probe,
+ .remove = spear_pcie_gadget_remove,
+ .shutdown = spear_pcie_gadget_shutdown,
+ .driver = {
+ .name = "pcie-gadget-spear",
+ .bus = &platform_bus_type
+ },
+};
+
+module_platform_driver(spear_pcie_gadget_driver);
+
+MODULE_ALIAS("platform:pcie-gadget-spear");
+MODULE_AUTHOR("Pratyush Anand");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
new file mode 100644
index 00000000000..21181fa243d
--- /dev/null
+++ b/drivers/misc/sram.c
@@ -0,0 +1,228 @@
+/*
+ * Generic on-chip SRAM allocation driver
+ *
+ * Copyright (C) 2012 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/list.h> +#include <linux/list_sort.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/genalloc.h> + +#define SRAM_GRANULARITY 32 + +struct sram_dev { + struct gen_pool *pool; + struct clk *clk; +}; + +struct sram_reserve { + struct list_head list; + u32 start; + u32 size; +}; + +static int sram_reserve_cmp(void *priv, struct list_head *a, + struct list_head *b) +{ + struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); + struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); + + return ra->start - rb->start; +} + +static int sram_probe(struct platform_device *pdev) +{ + void __iomem *virt_base; + struct sram_dev *sram; + struct resource *res; + struct device_node *np = pdev->dev.of_node, *child; + unsigned long size, cur_start, cur_size; + struct sram_reserve *rblocks, *block; + struct list_head reserve_list; + unsigned int nblocks; + int ret; + + INIT_LIST_HEAD(&reserve_list); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + virt_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(virt_base)) + return PTR_ERR(virt_base); + + size = resource_size(res); + + sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL); + if (!sram) + return -ENOMEM; + + sram->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(sram->clk)) + sram->clk = NULL; + else + clk_prepare_enable(sram->clk); + + sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1); + if (!sram->pool) + return -ENOMEM; + + /* + * We need an additional block to mark the end of the memory region + * after the reserved blocks from the dt are processed. + */ + nblocks = (np) ? 
of_get_available_child_count(np) + 1 : 1; + rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL); + if (!rblocks) { + ret = -ENOMEM; + goto err_alloc; + } + + block = &rblocks[0]; + for_each_available_child_of_node(np, child) { + struct resource child_res; + + ret = of_address_to_resource(child, 0, &child_res); + if (ret < 0) { + dev_err(&pdev->dev, + "could not get address for node %s\n", + child->full_name); + goto err_chunks; + } + + if (child_res.start < res->start || child_res.end > res->end) { + dev_err(&pdev->dev, + "reserved block %s outside the sram area\n", + child->full_name); + ret = -EINVAL; + goto err_chunks; + } + + block->start = child_res.start - res->start; + block->size = resource_size(&child_res); + list_add_tail(&block->list, &reserve_list); + + dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n", + block->start, + block->start + block->size); + + block++; + } + + /* the last chunk marks the end of the region */ + rblocks[nblocks - 1].start = size; + rblocks[nblocks - 1].size = 0; + list_add_tail(&rblocks[nblocks - 1].list, &reserve_list); + + list_sort(NULL, &reserve_list, sram_reserve_cmp); + + cur_start = 0; + + list_for_each_entry(block, &reserve_list, list) { + /* can only happen if sections overlap */ + if (block->start < cur_start) { + dev_err(&pdev->dev, + "block at 0x%x starts after current offset 0x%lx\n", + block->start, cur_start); + ret = -EINVAL; + goto err_chunks; + } + + /* current start is in a reserved block, so continue after it */ + if (block->start == cur_start) { + cur_start = block->start + block->size; + continue; + } + + /* + * allocate the space between the current starting + * address and the following reserved block, or the + * end of the region. + */ + cur_size = block->start - cur_start; + + dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n", + cur_start, cur_start + cur_size); + ret = gen_pool_add_virt(sram->pool, + (unsigned long)virt_base + cur_start, + res->start + cur_start, cur_size, -1); + if (ret < 0) + goto err_chunks; + + /* next allocation after this reserved block */ + cur_start = block->start + block->size; + } + + kfree(rblocks); + + platform_set_drvdata(pdev, sram); + + dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base); + + return 0; + +err_chunks: + kfree(rblocks); +err_alloc: + if (sram->clk) + clk_disable_unprepare(sram->clk); + return ret; +} + +static int sram_remove(struct platform_device *pdev) +{ + struct sram_dev *sram = platform_get_drvdata(pdev); + + if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) + dev_dbg(&pdev->dev, "removed while SRAM allocated\n"); + + if (sram->clk) + clk_disable_unprepare(sram->clk); + + return 0; +} + +#ifdef CONFIG_OF +static struct of_device_id sram_dt_ids[] = { + { .compatible = "mmio-sram" }, + {} +}; +#endif + +static struct platform_driver sram_driver = { + .driver = { + .name = "sram", + .of_match_table = of_match_ptr(sram_dt_ids), + }, + .probe = sram_probe, + .remove = sram_remove, +}; + +static int __init sram_init(void) +{ + return platform_driver_register(&sram_driver); +} + +postcore_initcall(sram_init); diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig new file mode 100644 index 00000000000..f34dcc51473 --- /dev/null +++ b/drivers/misc/ti-st/Kconfig @@ -0,0 +1,17 @@ +# +# TI's shared transport line discipline and the protocol +# drivers (BT, FM and GPS) +# +menu "Texas Instruments shared transport line discipline" +config TI_ST + tristate "Shared transport core driver" + depends on NET && GPIOLIB && TTY + select 
FW_LOADER + help + This enables the shared transport core driver for TI + BT / FM and GPS combo chips. This enables protocol drivers + to register themselves with core and send data, the responses + are returned to relevant protocol drivers based on their + packet types. + +endmenu diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile new file mode 100644 index 00000000000..78d7ebb1474 --- /dev/null +++ b/drivers/misc/ti-st/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for TI's shared transport line discipline +# and its protocol drivers (BT, FM, GPS) +# +obj-$(CONFIG_TI_ST) += st_drv.o +st_drv-objs := st_core.o st_kim.o st_ll.o diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c new file mode 100644 index 00000000000..1972d57aadb --- /dev/null +++ b/drivers/misc/ti-st/st_core.c @@ -0,0 +1,898 @@ +/* + * Shared Transport Line discipline driver Core + * This hooks up ST KIM driver and ST LL driver + * Copyright (C) 2009-2010 Texas Instruments + * Author: Pavan Savoy <pavan_savoy@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define pr_fmt(fmt) "(stc): " fmt +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/tty.h> + +#include <linux/seq_file.h> +#include <linux/skbuff.h> + +#include <linux/ti_wilink_st.h> + +extern void st_kim_recv(void *, const unsigned char *, long); +void st_int_recv(void *, const unsigned char *, long); +/* function pointer pointing to either, + * st_kim_recv during registration to receive fw download responses + * st_int_recv after registration to receive proto stack responses + */ +static void (*st_recv) (void *, const unsigned char *, long); + +/********************************************************************/ +static void add_channel_to_table(struct st_data_s *st_gdata, + struct st_proto_s *new_proto) +{ + pr_info("%s: id %d\n", __func__, new_proto->chnl_id); + /* list now has the channel id as index itself */ + st_gdata->list[new_proto->chnl_id] = new_proto; + st_gdata->is_registered[new_proto->chnl_id] = true; +} + +static void remove_channel_from_table(struct st_data_s *st_gdata, + struct st_proto_s *proto) +{ + pr_info("%s: id %d\n", __func__, proto->chnl_id); +/* st_gdata->list[proto->chnl_id] = NULL; */ + st_gdata->is_registered[proto->chnl_id] = false; +} + +/* + * called from KIM during firmware download. + * + * This is a wrapper function to tty->ops->write_room. + * It returns number of free space available in + * uart tx buffer. 
+ */ +int st_get_uart_wr_room(struct st_data_s *st_gdata) +{ + struct tty_struct *tty; + if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) { + pr_err("tty unavailable to perform write"); + return -1; + } + tty = st_gdata->tty; + return tty->ops->write_room(tty); +} + +/* can be called from + * -- KIM (during fw download) + * -- ST Core (during st_write) + * + * This is the internal write function - a wrapper + * to tty->ops->write + */ +int st_int_write(struct st_data_s *st_gdata, + const unsigned char *data, int count) +{ + struct tty_struct *tty; + if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) { + pr_err("tty unavailable to perform write"); + return -EINVAL; + } + tty = st_gdata->tty; +#ifdef VERBOSE + print_hex_dump(KERN_DEBUG, "<out<", DUMP_PREFIX_NONE, + 16, 1, data, count, 0); +#endif + return tty->ops->write(tty, data, count); +} + +/* + * push the skb received to relevant + * protocol stacks + */ +static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata) +{ + pr_debug(" %s(prot:%d) ", __func__, chnl_id); + + if (unlikely + (st_gdata == NULL || st_gdata->rx_skb == NULL + || st_gdata->is_registered[chnl_id] == false)) { + pr_err("chnl_id %d not registered, no data to send?", + chnl_id); + if (st_gdata != NULL) + kfree_skb(st_gdata->rx_skb); + return; + } + /* this cannot fail + * this shouldn't take long + * - should be just skb_queue_tail for the + * protocol stack driver + */ + if (likely(st_gdata->list[chnl_id]->recv != NULL)) { + if (unlikely + (st_gdata->list[chnl_id]->recv + (st_gdata->list[chnl_id]->priv_data, st_gdata->rx_skb) + != 0)) { + pr_err(" proto stack %d's ->recv failed", chnl_id); + kfree_skb(st_gdata->rx_skb); + return; + } + } else { + pr_err(" proto stack %d's ->recv null", chnl_id); + kfree_skb(st_gdata->rx_skb); + } + return; +} + +/** + * st_reg_complete - + * to call registration complete callbacks + * of all protocol stack drivers + * This function is being called with spin lock held, protocol drivers are + * only expected to complete their waits and do nothing more than that. + */ +static void st_reg_complete(struct st_data_s *st_gdata, char err) +{ + unsigned char i = 0; + pr_info(" %s ", __func__); + for (i = 0; i < ST_MAX_CHANNELS; i++) { + if (likely(st_gdata != NULL && + st_gdata->is_registered[i] == true && + st_gdata->list[i]->reg_complete_cb != NULL)) { + st_gdata->list[i]->reg_complete_cb + (st_gdata->list[i]->priv_data, err); + pr_info("protocol %d's cb sent %d\n", i, err); + if (err) { /* cleanup registered protocol */ + st_gdata->protos_registered--; + st_gdata->is_registered[i] = false; + } + } + } +} + +static inline int st_check_data_len(struct st_data_s *st_gdata, + unsigned char chnl_id, int len) +{ + int room = skb_tailroom(st_gdata->rx_skb); + + pr_debug("len %d room %d", len, room); + + if (!len) { + /* Received packet has only packet header and + * has zero length payload. So, ask ST CORE to + * forward the packet to protocol driver (BT/FM/GPS) + */ + st_send_frame(chnl_id, st_gdata); + + } else if (len > room) { + /* Received packet's payload length is larger. + * We can't accommodate it in created skb. + */ + pr_err("Data length is too large len %d room %d", len, + room); + kfree_skb(st_gdata->rx_skb); + } else { + /* Packet header has non-zero payload length and + * we have enough space in created skb.
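[Editorial aside, not part of the patch: st_send_frame() above hands the skb to the client's ->recv() hook and frees it only when that hook returns non-zero, so on success the protocol driver owns the buffer. A minimal sketch of a conforming hook — all demo_* names are hypothetical:]

static long demo_bt_recv(void *priv_data, struct sk_buff *skb)
{
	struct demo_bt_dev *bdev = priv_data;	/* set via st_proto_s.priv_data */

	/* take ownership: queue for deferred processing, do not free here */
	skb_queue_tail(&bdev->rx_q, skb);
	schedule_work(&bdev->rx_work);
	return 0;	/* non-zero tells ST core to kfree_skb() the buffer */
}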
Let's read + payload data */ + st_gdata->rx_state = ST_W4_DATA; + st_gdata->rx_count = len; + return len; + } + + /* Change ST state to continue to process next + * packet */ + st_gdata->rx_state = ST_W4_PACKET_TYPE; + st_gdata->rx_skb = NULL; + st_gdata->rx_count = 0; + st_gdata->rx_chnl = 0; + + return 0; +} + +/** + * st_wakeup_ack - internal function for action when wake-up ack + * received + */ +static inline void st_wakeup_ack(struct st_data_s *st_gdata, + unsigned char cmd) +{ + struct sk_buff *waiting_skb; + unsigned long flags = 0; + + spin_lock_irqsave(&st_gdata->lock, flags); + /* de-Q from waitQ and Q in txQ now that the + * chip is awake + */ + while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq))) + skb_queue_tail(&st_gdata->txq, waiting_skb); + + /* state forwarded to ST LL */ + st_ll_sleep_state(st_gdata, (unsigned long)cmd); + spin_unlock_irqrestore(&st_gdata->lock, flags); + + /* wake up to send the recently copied skbs from waitQ */ + st_tx_wakeup(st_gdata); +} + +/** + * st_int_recv - ST's internal receive function. + * Decodes received RAW data and forwards to corresponding + * client drivers (Bluetooth, FM, GPS, etc.). + * This can receive various types of packets: + * HCI-Events, ACL, SCO, 4 types of HCI-LL PM packets, + * CH-8 packets from FM, CH-9 packets from GPS cores. + */ +void st_int_recv(void *disc_data, + const unsigned char *data, long count) +{ + char *ptr; + struct st_proto_s *proto; + unsigned short payload_len = 0; + int len = 0; + unsigned char type = 0; + unsigned char *plen; + struct st_data_s *st_gdata = (struct st_data_s *)disc_data; + unsigned long flags; + + ptr = (char *)data; + /* tty_receive sent null? */ + if (unlikely(ptr == NULL) || (st_gdata == NULL)) { + pr_err(" received null from TTY "); + return; + } + + pr_debug("count %ld rx_state %ld " + "rx_count %ld", count, st_gdata->rx_state, + st_gdata->rx_count); + + spin_lock_irqsave(&st_gdata->lock, flags); + /* Decode received bytes here */ + while (count) { + if (st_gdata->rx_count) { + len = min_t(unsigned int, st_gdata->rx_count, count); + memcpy(skb_put(st_gdata->rx_skb, len), ptr, len); + st_gdata->rx_count -= len; + count -= len; + ptr += len; + + if (st_gdata->rx_count) + continue; + + /* Check ST RX state machine, where are we? */ + switch (st_gdata->rx_state) { + /* Waiting for complete packet?
*/ + case ST_W4_DATA: + pr_debug("Complete pkt received"); + /* Ask ST CORE to forward + * the packet to protocol driver */ + st_send_frame(st_gdata->rx_chnl, st_gdata); + + st_gdata->rx_state = ST_W4_PACKET_TYPE; + st_gdata->rx_skb = NULL; + continue; + /* parse the header to know details */ + case ST_W4_HEADER: + proto = st_gdata->list[st_gdata->rx_chnl]; + plen = + &st_gdata->rx_skb->data + [proto->offset_len_in_hdr]; + pr_debug("plen pointing to %x\n", *plen); + if (proto->len_size == 1)/* 1 byte len field */ + payload_len = *(unsigned char *)plen; + else if (proto->len_size == 2) + payload_len = + __le16_to_cpu(*(unsigned short *)plen); + else + pr_info("%s: invalid length " + "for id %d\n", + __func__, proto->chnl_id); + st_check_data_len(st_gdata, proto->chnl_id, + payload_len); + pr_debug("off %d, pay len %d\n", + proto->offset_len_in_hdr, payload_len); + continue; + } /* end of switch rx_state */ + } + + /* end of if rx_count */ + /* Check first byte of packet and identify module + * owner (BT/FM/GPS) */ + switch (*ptr) { + case LL_SLEEP_IND: + case LL_SLEEP_ACK: + case LL_WAKE_UP_IND: + pr_debug("PM packet"); + /* this takes appropriate action based on + * sleep state received -- + */ + st_ll_sleep_state(st_gdata, *ptr); + /* if WAKEUP_IND collides copy from waitq to txq + * and assume chip awake + */ + spin_unlock_irqrestore(&st_gdata->lock, flags); + if (st_ll_getstate(st_gdata) == ST_LL_AWAKE) + st_wakeup_ack(st_gdata, LL_WAKE_UP_ACK); + spin_lock_irqsave(&st_gdata->lock, flags); + + ptr++; + count--; + continue; + case LL_WAKE_UP_ACK: + pr_debug("PM packet"); + + spin_unlock_irqrestore(&st_gdata->lock, flags); + /* wake up ack received */ + st_wakeup_ack(st_gdata, *ptr); + spin_lock_irqsave(&st_gdata->lock, flags); + + ptr++; + count--; + continue; + /* Unknown packet? */ + default: + type = *ptr; + if (st_gdata->list[type] == NULL) { + pr_err("chip/interface misbehavior: dropping" + " frame starting with 0x%02x", type); + goto done; + } + st_gdata->rx_skb = alloc_skb( + st_gdata->list[type]->max_frame_size, + GFP_ATOMIC); + if (st_gdata->rx_skb == NULL) { + pr_err("out of memory: dropping\n"); + goto done; + } + + skb_reserve(st_gdata->rx_skb, + st_gdata->list[type]->reserve); + /* next 2 required for BT only */ + st_gdata->rx_skb->cb[0] = type; /*pkt_type*/ + st_gdata->rx_skb->cb[1] = 0; /*incoming*/ + st_gdata->rx_chnl = *ptr; + st_gdata->rx_state = ST_W4_HEADER; + st_gdata->rx_count = st_gdata->list[type]->hdr_len; + pr_debug("rx_count %ld\n", st_gdata->rx_count); + } + ptr++; + count--; + } +done: + spin_unlock_irqrestore(&st_gdata->lock, flags); + pr_debug("done %s", __func__); + return; +} + +/** + * st_int_dequeue - internal de-Q function. + * If the previous data set was not written + * completely, return that skb which has the pending data. + * In normal cases, return top of txq. + */ +static struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata) +{ + struct sk_buff *returning_skb; + + pr_debug("%s", __func__); + if (st_gdata->tx_skb != NULL) { + returning_skb = st_gdata->tx_skb; + st_gdata->tx_skb = NULL; + return returning_skb; + } + return skb_dequeue(&st_gdata->txq); +} + +/** + * st_int_enqueue - internal Q-ing function. + * Will either Q the skb to txq or the tx_waitq + * depending on the ST LL state. + * If the chip is asleep, then Q it onto waitq and + * wakeup the chip. + * txq and waitq need protection since the other contexts + * may be sending data, waking up chip.
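[Editorial aside: the framing fields that drive the ST_W4_HEADER arm above come from the registering driver's st_proto_s. A sketch of a GPS-like channel descriptor — values illustrative, callbacks hypothetical:]

static struct st_proto_s demo_gps_proto = {
	.chnl_id           = 0x09,	/* first byte of every CH-9 packet */
	.hdr_len           = 3,		/* bytes gathered in ST_W4_HEADER */
	.offset_len_in_hdr = 1,		/* where the length field sits */
	.len_size          = 2,		/* 2-byte little-endian length */
	.max_frame_size    = 1024,	/* alloc_skb() size for this channel */
	.reserve           = 8,		/* headroom skb_reserve()d in rx_skb */
	.recv              = demo_gps_recv,	/* hypothetical, see earlier sketch */
	.reg_complete_cb   = demo_gps_reg_cb,	/* hypothetical */
};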
+ */ +static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb) +{ + unsigned long flags = 0; + + pr_debug("%s", __func__); + spin_lock_irqsave(&st_gdata->lock, flags); + + switch (st_ll_getstate(st_gdata)) { + case ST_LL_AWAKE: + pr_debug("ST LL is AWAKE, sending normally"); + skb_queue_tail(&st_gdata->txq, skb); + break; + case ST_LL_ASLEEP_TO_AWAKE: + skb_queue_tail(&st_gdata->tx_waitq, skb); + break; + case ST_LL_AWAKE_TO_ASLEEP: + pr_err("ST LL is illegal state(%ld)," + "purging received skb.", st_ll_getstate(st_gdata)); + kfree_skb(skb); + break; + case ST_LL_ASLEEP: + skb_queue_tail(&st_gdata->tx_waitq, skb); + st_ll_wakeup(st_gdata); + break; + default: + pr_err("ST LL is illegal state(%ld)," + "purging received skb.", st_ll_getstate(st_gdata)); + kfree_skb(skb); + break; + } + + spin_unlock_irqrestore(&st_gdata->lock, flags); + pr_debug("done %s", __func__); + return; +} + +/* + * internal wakeup function + * called from either + * - TTY layer when write's finished + * - st_write (in context of the protocol stack) + */ +void st_tx_wakeup(struct st_data_s *st_data) +{ + struct sk_buff *skb; + unsigned long flags; /* for irq save flags */ + pr_debug("%s", __func__); + /* check for sending & set flag sending here */ + if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) { + pr_debug("ST already sending"); + /* keep sending */ + set_bit(ST_TX_WAKEUP, &st_data->tx_state); + return; + /* TX_WAKEUP will be checked in another + * context + */ + } + do { /* come back if st_tx_wakeup is set */ + /* woke-up to write */ + clear_bit(ST_TX_WAKEUP, &st_data->tx_state); + while ((skb = st_int_dequeue(st_data))) { + int len; + spin_lock_irqsave(&st_data->lock, flags); + /* enable wake-up from TTY */ + set_bit(TTY_DO_WRITE_WAKEUP, &st_data->tty->flags); + len = st_int_write(st_data, skb->data, skb->len); + skb_pull(skb, len); + /* if skb->len = len as expected, skb->len=0 */ + if (skb->len) { + /* would be the next skb to be sent */ + st_data->tx_skb = skb; + spin_unlock_irqrestore(&st_data->lock, flags); + break; + } + kfree_skb(skb); + spin_unlock_irqrestore(&st_data->lock, flags); + } + /* if wake-up is set in another context- restart sending */ + } while (test_bit(ST_TX_WAKEUP, &st_data->tx_state)); + + /* clear flag sending */ + clear_bit(ST_TX_SENDING, &st_data->tx_state); +} + +/********************************************************************/ +/* functions called from ST KIM +*/ +void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf) +{ + seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n", + st_gdata->protos_registered, + st_gdata->is_registered[0x04] == true ? 'R' : 'U', + st_gdata->is_registered[0x08] == true ? 'R' : 'U', + st_gdata->is_registered[0x09] == true ? 
'R' : 'U'); +} + +/********************************************************************/ +/* + * functions called from protocol stack drivers + * to be EXPORT-ed + */ +long st_register(struct st_proto_s *new_proto) +{ + struct st_data_s *st_gdata; + long err = 0; + unsigned long flags = 0; + + st_kim_ref(&st_gdata, 0); + if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL + || new_proto->reg_complete_cb == NULL) { + pr_err("gdata/new_proto/recv or reg_complete_cb not ready"); + return -EINVAL; + } + + if (new_proto->chnl_id >= ST_MAX_CHANNELS) { + pr_err("chnl_id %d not supported", new_proto->chnl_id); + return -EPROTONOSUPPORT; + } + + if (st_gdata->is_registered[new_proto->chnl_id] == true) { + pr_err("chnl_id %d already registered", new_proto->chnl_id); + return -EALREADY; + } + + /* can be from process context only */ + spin_lock_irqsave(&st_gdata->lock, flags); + + if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) { + pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->chnl_id); + /* fw download in progress */ + + add_channel_to_table(st_gdata, new_proto); + st_gdata->protos_registered++; + new_proto->write = st_write; + + set_bit(ST_REG_PENDING, &st_gdata->st_state); + spin_unlock_irqrestore(&st_gdata->lock, flags); + return -EINPROGRESS; + } else if (st_gdata->protos_registered == ST_EMPTY) { + pr_info(" chnl_id list empty :%d ", new_proto->chnl_id); + set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); + st_recv = st_kim_recv; + + /* enable the ST LL - to set default chip state */ + st_ll_enable(st_gdata); + + /* release lock previously held - re-locked below */ + spin_unlock_irqrestore(&st_gdata->lock, flags); + + /* this may take a while to complete + * since it involves BT fw download + */ + err = st_kim_start(st_gdata->kim_data); + if (err != 0) { + clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); + if ((st_gdata->protos_registered != ST_EMPTY) && + (test_bit(ST_REG_PENDING, &st_gdata->st_state))) { + pr_err(" KIM failure complete callback "); + spin_lock_irqsave(&st_gdata->lock, flags); + st_reg_complete(st_gdata, err); + spin_unlock_irqrestore(&st_gdata->lock, flags); + clear_bit(ST_REG_PENDING, &st_gdata->st_state); + } + return -EINVAL; + } + + spin_lock_irqsave(&st_gdata->lock, flags); + + clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state); + st_recv = st_int_recv; + + /* this is where all pending registration + * are signalled to be complete by calling callback functions + */ + if ((st_gdata->protos_registered != ST_EMPTY) && + (test_bit(ST_REG_PENDING, &st_gdata->st_state))) { + pr_debug(" call reg complete callback "); + st_reg_complete(st_gdata, 0); + } + clear_bit(ST_REG_PENDING, &st_gdata->st_state); + + /* check for already registered once more, + * since the above check is old + */ + if (st_gdata->is_registered[new_proto->chnl_id] == true) { + pr_err(" proto %d already registered ", + new_proto->chnl_id); + spin_unlock_irqrestore(&st_gdata->lock, flags); + return -EALREADY; + } + + add_channel_to_table(st_gdata, new_proto); + st_gdata->protos_registered++; + new_proto->write = st_write; + spin_unlock_irqrestore(&st_gdata->lock, flags); + return err; + } + /* if fw is already downloaded & new stack registers protocol */ + else { + add_channel_to_table(st_gdata, new_proto); + st_gdata->protos_registered++; + new_proto->write = st_write; + + /* lock already held before entering else */ + spin_unlock_irqrestore(&st_gdata->lock, flags); + return err; + } + pr_debug("done %s(%d) ", __func__, new_proto->chnl_id); +} +EXPORT_SYMBOL_GPL(st_register); + +/* 
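[Editorial aside: since st_register() returns -EINPROGRESS while a firmware download is still running, a client must park on its reg_complete_cb before using the write hook that st_register() fills in. A sketch of the caller side, demo_* names hypothetical:]

static DECLARE_COMPLETION(demo_reg_done);
static char demo_reg_result;

static void demo_reg_cb(void *priv_data, char data)
{
	demo_reg_result = data;		/* 0 on success */
	complete(&demo_reg_done);	/* called with ST core's lock held:
					 * do nothing heavier than this */
}

static int demo_attach(struct st_proto_s *proto)
{
	long err = st_register(proto);

	if (err == -EINPROGRESS) {	/* fw download in flight: wait */
		wait_for_completion(&demo_reg_done);
		err = demo_reg_result;
	}
	if (err)
		return -EIO;
	/* proto->write is now valid; skbs go out via proto->write(skb) */
	return 0;
}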
to unregister a protocol - + * to be called from protocol stack driver + */ +long st_unregister(struct st_proto_s *proto) +{ + long err = 0; + unsigned long flags = 0; + struct st_data_s *st_gdata; + + pr_debug("%s: %d ", __func__, proto->chnl_id); + + st_kim_ref(&st_gdata, 0); + if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) { + pr_err(" chnl_id %d not supported", proto->chnl_id); + return -EPROTONOSUPPORT; + } + + spin_lock_irqsave(&st_gdata->lock, flags); + + if (st_gdata->is_registered[proto->chnl_id] == false) { + pr_err(" chnl_id %d not registered", proto->chnl_id); + spin_unlock_irqrestore(&st_gdata->lock, flags); + return -EPROTONOSUPPORT; + } + + st_gdata->protos_registered--; + remove_channel_from_table(st_gdata, proto); + spin_unlock_irqrestore(&st_gdata->lock, flags); + + /* paranoid check */ + if (st_gdata->protos_registered < ST_EMPTY) + st_gdata->protos_registered = ST_EMPTY; + + if ((st_gdata->protos_registered == ST_EMPTY) && + (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) { + pr_info(" all chnl_ids unregistered "); + + /* stop traffic on tty */ + if (st_gdata->tty) { + tty_ldisc_flush(st_gdata->tty); + stop_tty(st_gdata->tty); + } + + /* all chnl_ids now unregistered */ + st_kim_stop(st_gdata->kim_data); + /* disable ST LL */ + st_ll_disable(st_gdata); + } + return err; +} + +/* + * called in protocol stack drivers + * via the write function pointer + */ +long st_write(struct sk_buff *skb) +{ + struct st_data_s *st_gdata; + long len; + + st_kim_ref(&st_gdata, 0); + if (unlikely(skb == NULL || st_gdata == NULL + || st_gdata->tty == NULL)) { + pr_err("data/tty unavailable to perform write"); + return -EINVAL; + } + + pr_debug("%d to be written", skb->len); + len = skb->len; + + /* st_ll to decide where to enqueue the skb */ + st_int_enqueue(st_gdata, skb); + /* wake up */ + st_tx_wakeup(st_gdata); + + /* return number of bytes written */ + return len; +} + +/* for protocols making use of shared transport */ +EXPORT_SYMBOL_GPL(st_unregister); + +/********************************************************************/ +/* + * functions called from TTY layer + */ +static int st_tty_open(struct tty_struct *tty) +{ + int err = 0; + struct st_data_s *st_gdata; + pr_info("%s ", __func__); + + st_kim_ref(&st_gdata, 0); + st_gdata->tty = tty; + tty->disc_data = st_gdata; + + /* don't do an wakeup for now */ + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + + /* mem already allocated + */ + tty->receive_room = 65536; + /* Flush any pending characters in the driver and discipline. */ + tty_ldisc_flush(tty); + tty_driver_flush_buffer(tty); + /* + * signal to UIM via KIM that - + * installation of N_TI_WL ldisc is complete + */ + st_kim_complete(st_gdata->kim_data); + pr_debug("done %s", __func__); + return err; +} + +static void st_tty_close(struct tty_struct *tty) +{ + unsigned char i = ST_MAX_CHANNELS; + unsigned long flags = 0; + struct st_data_s *st_gdata = tty->disc_data; + + pr_info("%s ", __func__); + + /* TODO: + * if a protocol has been registered & line discipline + * un-installed for some reason - what should be done ? 
+ */ + spin_lock_irqsave(&st_gdata->lock, flags); + for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { + if (st_gdata->is_registered[i] == true) + pr_err("%d not un-registered", i); + st_gdata->list[i] = NULL; + st_gdata->is_registered[i] = false; + } + st_gdata->protos_registered = 0; + spin_unlock_irqrestore(&st_gdata->lock, flags); + /* + * signal to UIM via KIM that - + * N_TI_WL ldisc is un-installed + */ + st_kim_complete(st_gdata->kim_data); + st_gdata->tty = NULL; + /* Flush any pending characters in the driver and discipline. */ + tty_ldisc_flush(tty); + tty_driver_flush_buffer(tty); + + spin_lock_irqsave(&st_gdata->lock, flags); + /* empty out txq and tx_waitq */ + skb_queue_purge(&st_gdata->txq); + skb_queue_purge(&st_gdata->tx_waitq); + /* reset the TTY Rx states of ST */ + st_gdata->rx_count = 0; + st_gdata->rx_state = ST_W4_PACKET_TYPE; + kfree_skb(st_gdata->rx_skb); + st_gdata->rx_skb = NULL; + spin_unlock_irqrestore(&st_gdata->lock, flags); + + pr_debug("%s: done ", __func__); +} + +static void st_tty_receive(struct tty_struct *tty, const unsigned char *data, + char *tty_flags, int count) +{ +#ifdef VERBOSE + print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE, + 16, 1, data, count, 0); +#endif + + /* + * if fw download is in progress then route incoming data + * to KIM for validation + */ + st_recv(tty->disc_data, data, count); + pr_debug("done %s", __func__); +} + +/* wake-up function called in from the TTY layer + * inside the internal wakeup function will be called + */ +static void st_tty_wakeup(struct tty_struct *tty) +{ + struct st_data_s *st_gdata = tty->disc_data; + pr_debug("%s ", __func__); + /* don't do an wakeup for now */ + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + + /* call our internal wakeup */ + st_tx_wakeup((void *)st_gdata); +} + +static void st_tty_flush_buffer(struct tty_struct *tty) +{ + struct st_data_s *st_gdata = tty->disc_data; + pr_debug("%s ", __func__); + + kfree_skb(st_gdata->tx_skb); + st_gdata->tx_skb = NULL; + + tty_driver_flush_buffer(tty); + return; +} + +static struct tty_ldisc_ops st_ldisc_ops = { + .magic = TTY_LDISC_MAGIC, + .name = "n_st", + .open = st_tty_open, + .close = st_tty_close, + .receive_buf = st_tty_receive, + .write_wakeup = st_tty_wakeup, + .flush_buffer = st_tty_flush_buffer, + .owner = THIS_MODULE +}; + +/********************************************************************/ +int st_core_init(struct st_data_s **core_data) +{ + struct st_data_s *st_gdata; + long err; + + err = tty_register_ldisc(N_TI_WL, &st_ldisc_ops); + if (err) { + pr_err("error registering %d line discipline %ld", + N_TI_WL, err); + return err; + } + pr_debug("registered n_shared line discipline"); + + st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL); + if (!st_gdata) { + pr_err("memory allocation failed"); + err = tty_unregister_ldisc(N_TI_WL); + if (err) + pr_err("unable to un-register ldisc %ld", err); + err = -ENOMEM; + return err; + } + + /* Initialize ST TxQ and Tx waitQ queue head. All BT/FM/GPS module skb's + * will be pushed in this queue for actual transmission. 
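[Editorial aside: st_tty_open() above runs when the user-space UIM daemon installs this line discipline on the UART it manages. The usual sequence on the daemon side — a sketch; the device path and the N_TI_WL number are platform assumptions:]

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

int demo_install_ldisc(const char *uart)	/* e.g. "/dev/ttyS1" */
{
	int ldisc = 22;			/* N_TI_WL; assumed value */
	int fd = open(uart, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	/* raw-mode/baud-rate termios setup omitted for brevity */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {	/* triggers st_tty_open() */
		close(fd);
		return -1;
	}
	return fd;	/* keep it open; closing uninstalls the ldisc */
}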
+ */ + skb_queue_head_init(&st_gdata->txq); + skb_queue_head_init(&st_gdata->tx_waitq); + + /* Locking used in st_int_enqueue() to avoid multiple execution */ + spin_lock_init(&st_gdata->lock); + + err = st_ll_init(st_gdata); + if (err) { + pr_err("error during st_ll initialization(%ld)", err); + kfree(st_gdata); + err = tty_unregister_ldisc(N_TI_WL); + if (err) + pr_err("unable to un-register ldisc"); + return err; + } + *core_data = st_gdata; + return 0; +} + +void st_core_exit(struct st_data_s *st_gdata) +{ + long err; + /* internal module cleanup */ + err = st_ll_deinit(st_gdata); + if (err) + pr_err("error during deinit of ST LL %ld", err); + + if (st_gdata != NULL) { + /* Free ST Tx Qs and skbs */ + skb_queue_purge(&st_gdata->txq); + skb_queue_purge(&st_gdata->tx_waitq); + kfree_skb(st_gdata->rx_skb); + kfree_skb(st_gdata->tx_skb); + /* TTY ldisc cleanup */ + err = tty_unregister_ldisc(N_TI_WL); + if (err) + pr_err("unable to un-register ldisc %ld", err); + /* free the global data pointer */ + kfree(st_gdata); + } +} + + diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c new file mode 100644 index 00000000000..9d3dbb28734 --- /dev/null +++ b/drivers/misc/ti-st/st_kim.c @@ -0,0 +1,869 @@ +/* + * Shared Transport Line discipline driver Core + * Init Manager module responsible for GPIO control + * and firmware download + * Copyright (C) 2009-2010 Texas Instruments + * Author: Pavan Savoy <pavan_savoy@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define pr_fmt(fmt) "(stk) :" fmt +#include <linux/platform_device.h> +#include <linux/jiffies.h> +#include <linux/firmware.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/gpio.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/sched.h> +#include <linux/sysfs.h> +#include <linux/tty.h> + +#include <linux/skbuff.h> +#include <linux/ti_wilink_st.h> +#include <linux/module.h> + + +#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ +static struct platform_device *st_kim_devices[MAX_ST_DEVICES]; + +/**********************************************************************/ +/* internal functions */ + +/** + * st_get_plat_device - + * function which returns the reference to the platform device + * requested by id. As of now only 1 such device exists (id=0) + * the context requesting for reference can get the id to be + * requested by a. The protocol driver which is registering or + * b. the tty device which is opened. 
*/ +static struct platform_device *st_get_plat_device(int id) +{ + return st_kim_devices[id]; +} + +/** + * validate_firmware_response - + * function to return whether the firmware response was proper; + * in case of error, don't complete, so that the wait for a proper + * response times out + */ +static void validate_firmware_response(struct kim_data_s *kim_gdata) +{ + struct sk_buff *skb = kim_gdata->rx_skb; + if (!skb) + return; + + /* these magic numbers are the positions in the response buffer which + * allow us to distinguish whether the response is for the read + * version info command + */ + if (skb->data[2] == 0x01 && skb->data[3] == 0x01 && + skb->data[4] == 0x10 && skb->data[5] == 0x00) { + /* fw version response */ + memcpy(kim_gdata->resp_buffer, + kim_gdata->rx_skb->data, + kim_gdata->rx_skb->len); + complete_all(&kim_gdata->kim_rcvd); + kim_gdata->rx_state = ST_W4_PACKET_TYPE; + kim_gdata->rx_skb = NULL; + kim_gdata->rx_count = 0; + } else if (unlikely(skb->data[5] != 0)) { + pr_err("no proper response during fw download"); + pr_err("data6 %x", skb->data[5]); + kfree_skb(skb); + return; /* keep waiting for the proper response */ + } + /* because of all the script being downloaded */ + complete_all(&kim_gdata->kim_rcvd); + kfree_skb(skb); +} + +/* check for data len received inside kim_int_recv + * most often hit the last case to update state to waiting for data + */ +static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len) +{ + register int room = skb_tailroom(kim_gdata->rx_skb); + + pr_debug("len %d room %d", len, room); + + if (!len) { + validate_firmware_response(kim_gdata); + } else if (len > room) { + /* Received packet's payload length is larger. + * We can't accommodate it in created skb. + */ + pr_err("Data length is too large len %d room %d", len, + room); + kfree_skb(kim_gdata->rx_skb); + } else { + /* Packet header has non-zero payload length and + * we have enough space in created skb. Let's read + * payload data */ + kim_gdata->rx_state = ST_W4_DATA; + kim_gdata->rx_count = len; + return len; + } + + /* Change ST LL state to continue to process next + * packet */ + kim_gdata->rx_state = ST_W4_PACKET_TYPE; + kim_gdata->rx_skb = NULL; + kim_gdata->rx_count = 0; + + return 0; +} + +/** + * kim_int_recv - receive function called during firmware download + * firmware download responses on different UART drivers + * have been observed to arrive in bursts split across + * several tty_receive calls, hence the logic below + */ +static void kim_int_recv(struct kim_data_s *kim_gdata, + const unsigned char *data, long count) +{ + const unsigned char *ptr; + int len = 0, type = 0; + unsigned char *plen; + + pr_debug("%s", __func__); + /* Decode received bytes here */ + ptr = data; + if (unlikely(ptr == NULL)) { + pr_err(" received null from TTY "); + return; + } + + while (count) { + if (kim_gdata->rx_count) { + len = min_t(unsigned int, kim_gdata->rx_count, count); + memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len); + kim_gdata->rx_count -= len; + count -= len; + ptr += len; + + if (kim_gdata->rx_count) + continue; + + /* Check ST RX state machine, where are we? */ + switch (kim_gdata->rx_state) { + /* Waiting for complete packet? */ + case ST_W4_DATA: + pr_debug("Complete pkt received"); + validate_firmware_response(kim_gdata); + kim_gdata->rx_state = ST_W4_PACKET_TYPE; + kim_gdata->rx_skb = NULL; + continue; + /* Waiting for Bluetooth event header?
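[Editorial aside, a reading aid rather than part of the patch: the magic offsets checked in validate_firmware_response() above line up with the standard HCI Command Complete event layout once kim_int_recv() has stripped the leading 0x04 packet-type byte:]

/*
 * rx_skb->data[0] = 0x0e   event code: Command Complete
 * rx_skb->data[1] = plen   parameter total length
 * rx_skb->data[2] = 0x01   Num_HCI_Command_Packets
 * rx_skb->data[3] = 0x01   opcode LSB \  0x1001 ==
 * rx_skb->data[4] = 0x10   opcode MSB /  Read_Local_Version_Information
 * rx_skb->data[5] = 0x00   status, 0 == success
 */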
*/ + case ST_W4_HEADER: + plen = + (unsigned char *)&kim_gdata->rx_skb->data[1]; + pr_debug("event hdr: plen 0x%02x\n", *plen); + kim_check_data_len(kim_gdata, *plen); + continue; + } /* end of switch */ + } /* end of if rx_state */ + switch (*ptr) { + /* Bluetooth event packet? */ + case 0x04: + kim_gdata->rx_state = ST_W4_HEADER; + kim_gdata->rx_count = 2; + type = *ptr; + break; + default: + pr_info("unknown packet"); + ptr++; + count--; + continue; + } + ptr++; + count--; + kim_gdata->rx_skb = + alloc_skb(1024+8, GFP_ATOMIC); + if (!kim_gdata->rx_skb) { + pr_err("can't allocate mem for new packet"); + kim_gdata->rx_state = ST_W4_PACKET_TYPE; + kim_gdata->rx_count = 0; + return; + } + skb_reserve(kim_gdata->rx_skb, 8); + kim_gdata->rx_skb->cb[0] = 4; + kim_gdata->rx_skb->cb[1] = 0; + + } + return; +} + +static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name) +{ + unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0; + const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 }; + + pr_debug("%s", __func__); + + reinit_completion(&kim_gdata->kim_rcvd); + if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) { + pr_err("kim: couldn't write 4 bytes"); + return -EIO; + } + + if (!wait_for_completion_interruptible_timeout( + &kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) { + pr_err(" waiting for ver info - timed out "); + return -ETIMEDOUT; + } + reinit_completion(&kim_gdata->kim_rcvd); + /* the positions 12 & 13 in the response buffer provide the + * chip, major & minor version numbers + */ + + version = + MAKEWORD(kim_gdata->resp_buffer[12], + kim_gdata->resp_buffer[13]); + chip = (version & 0x7C00) >> 10; + min_ver = (version & 0x007F); + maj_ver = (version & 0x0380) >> 7; + + if (version & 0x8000) + maj_ver |= 0x0008; + + sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver); + + /* to be accessed later via sysfs entry */ + kim_gdata->version.full = version; + kim_gdata->version.chip = chip; + kim_gdata->version.maj_ver = maj_ver; + kim_gdata->version.min_ver = min_ver; + + pr_info("%s", bts_scr_name); + return 0; +} + +static void skip_change_remote_baud(unsigned char **ptr, long *len) +{ + unsigned char *nxt_action, *cur_action; + cur_action = *ptr; + + nxt_action = cur_action + sizeof(struct bts_action) + + ((struct bts_action *) cur_action)->size; + + if (((struct bts_action *) nxt_action)->type != ACTION_WAIT_EVENT) { + pr_err("invalid action after change remote baud command"); + } else { + *ptr = *ptr + sizeof(struct bts_action) + + ((struct bts_action *)cur_action)->size; + *len = *len - (sizeof(struct bts_action) + + ((struct bts_action *)cur_action)->size); + /* warn user on not commenting these in firmware */ + pr_warn("skipping the wait event of change remote baud"); + } +} + +/** + * download_firmware - + * internal function which parses through the .bts firmware + * script file and interprets only SEND and DELAY actions as of now + */ +static long download_firmware(struct kim_data_s *kim_gdata) +{ + long err = 0; + long len = 0; + unsigned char *ptr = NULL; + unsigned char *action_ptr = NULL; + unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name?
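[Editorial aside: a worked decode of the version word assembled in read_local_version() above, under the same bit layout — the input value is hypothetical:]

/*
 * version = 0x1E1F (MAKEWORD of response bytes 12 and 13)
 * chip    = (0x1E1F & 0x7C00) >> 10 = 7
 * maj_ver = (0x1E1F & 0x0380) >> 7  = 4   (bit 15 clear, so no |= 8)
 * min_ver =  0x1E1F & 0x007F        = 31
 * -> request_firmware() will be asked for "TIInit_7.4.31.bts"
 */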
*/ + int wr_room_space; + int cmd_size; + unsigned long timeout; + + err = read_local_version(kim_gdata, bts_scr_name); + if (err != 0) { + pr_err("kim: failed to read local ver"); + return err; + } + err = + request_firmware(&kim_gdata->fw_entry, bts_scr_name, + &kim_gdata->kim_pdev->dev); + if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) || + (kim_gdata->fw_entry->size == 0))) { + pr_err(" request_firmware failed (errno %ld) for %s", err, + bts_scr_name); + return -EINVAL; + } + ptr = (void *)kim_gdata->fw_entry->data; + len = kim_gdata->fw_entry->size; + /* skip the bts_header, i.e. the magic number and + * version + */ + ptr += sizeof(struct bts_header); + len -= sizeof(struct bts_header); + + while (len > 0 && ptr) { + pr_debug(" action size %d, type %d ", + ((struct bts_action *)ptr)->size, + ((struct bts_action *)ptr)->type); + + switch (((struct bts_action *)ptr)->type) { + case ACTION_SEND_COMMAND: /* action send */ + pr_debug("S"); + action_ptr = &(((struct bts_action *)ptr)->data[0]); + if (unlikely + (((struct hci_command *)action_ptr)->opcode == + 0xFF36)) { + /* ignore remote change + * baud rate HCI VS command */ + pr_warn("change remote baud" + " rate command in firmware"); + skip_change_remote_baud(&ptr, &len); + break; + } + /* + * Make sure we have enough free space in uart + * tx buffer to write current firmware command + */ + cmd_size = ((struct bts_action *)ptr)->size; + timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME); + do { + wr_room_space = + st_get_uart_wr_room(kim_gdata->core_data); + if (wr_room_space < 0) { + pr_err("Unable to get free " + "space info from uart tx buffer"); + release_firmware(kim_gdata->fw_entry); + return wr_room_space; + } + mdelay(1); /* wait 1ms before checking room */ + } while ((wr_room_space < cmd_size) && + time_before(jiffies, timeout)); + + /* Timeout happened? */ + if (time_after_eq(jiffies, timeout)) { + pr_err("Timeout while waiting for " + "free space in uart tx buffer"); + release_firmware(kim_gdata->fw_entry); + return -ETIMEDOUT; + } + /* reinit completion before sending for the + * relevant wait + */ + reinit_completion(&kim_gdata->kim_rcvd); + + /* + * Free space found in uart buffer, call st_int_write + * to send current firmware command to the uart tx + * buffer.
+ */ + err = st_int_write(kim_gdata->core_data, + ((struct bts_action_send *)action_ptr)->data, + ((struct bts_action *)ptr)->size); + if (unlikely(err < 0)) { + release_firmware(kim_gdata->fw_entry); + return err; + } + /* + * Check number of bytes written to the uart tx buffer + * and requested command write size + */ + if (err != cmd_size) { + pr_err("Number of bytes written to uart " + "tx buffer are not matching with " + "requested cmd write size"); + release_firmware(kim_gdata->fw_entry); + return -EIO; + } + break; + case ACTION_WAIT_EVENT: /* wait */ + pr_debug("W"); + if (!wait_for_completion_interruptible_timeout( + &kim_gdata->kim_rcvd, + msecs_to_jiffies(CMD_RESP_TIME))) { + pr_err("response timeout during fw download "); + /* timed out */ + release_firmware(kim_gdata->fw_entry); + return -ETIMEDOUT; + } + reinit_completion(&kim_gdata->kim_rcvd); + break; + case ACTION_DELAY: /* sleep */ + pr_info("sleep command in scr"); + action_ptr = &(((struct bts_action *)ptr)->data[0]); + mdelay(((struct bts_action_delay *)action_ptr)->msec); + break; + } + len = + len - (sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size); + ptr = + ptr + sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size; + } + /* fw download complete */ + release_firmware(kim_gdata->fw_entry); + return 0; +} + +/**********************************************************************/ +/* functions called from ST core */ +/* called from ST Core, when REG_IN_PROGRESS (registration in progress) + * can be because of + * 1. response to read local version + * 2. during send/recv's of firmware download + */ +void st_kim_recv(void *disc_data, const unsigned char *data, long count) +{ + struct st_data_s *st_gdata = (struct st_data_s *)disc_data; + struct kim_data_s *kim_gdata = st_gdata->kim_data; + + /* proceed to gather all data and distinguish read fw version response + * from other fw responses when data gathering is complete + */ + kim_int_recv(kim_gdata, data, count); + return; +} + +/* to signal completion of line discipline installation + * called from ST Core, upon tty_open + */ +void st_kim_complete(void *kim_data) +{ + struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; + complete(&kim_gdata->ldisc_installed); +} + +/** + * st_kim_start - called from ST Core upon 1st registration + * This involves toggling the chip enable gpio, reading + * the firmware version from chip, forming the fw file name + * based on the chip version, requesting the fw, parsing it + * and perform download(send/recv). 
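[Editorial aside: the loop above treats the .bts script as a fixed header followed by a flat run of actions. The structures it casts through are defined in include/linux/ti_wilink_st.h; summarized from how the parser uses them — a sketch, field widths assumed:]

/*
 * struct bts_header { magic, version, ... }   skipped once, up front
 * struct bts_action { u16 type; u16 size; u8 data[size]; }   repeated
 *
 * per action type:
 *   ACTION_SEND_COMMAND  data[] is a raw HCI command -> st_int_write()
 *   ACTION_WAIT_EVENT    block on kim_rcvd until the response arrives
 *   ACTION_DELAY         struct bts_action_delay { msec } -> mdelay()
 */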
+ */ +long st_kim_start(void *kim_data) +{ + long err = 0; + long retry = POR_RETRY_COUNT; + struct ti_st_plat_data *pdata; + struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; + + pr_info(" %s", __func__); + pdata = kim_gdata->kim_pdev->dev.platform_data; + + do { + /* platform specific enabling code here */ + if (pdata->chip_enable) + pdata->chip_enable(kim_gdata); + + /* Configure BT nShutdown to HIGH state */ + gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); + mdelay(5); /* FIXME: a proper toggle */ + gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); + mdelay(100); + /* re-initialize the completion */ + reinit_completion(&kim_gdata->ldisc_installed); + /* send notification to UIM */ + kim_gdata->ldisc_install = 1; + pr_info("ldisc_install = 1"); + sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, + NULL, "install"); + /* wait for ldisc to be installed */ + err = wait_for_completion_interruptible_timeout( + &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); + if (!err) { + /* ldisc installation timeout, + * flush uart, power cycle BT_EN */ + pr_err("ldisc installation timeout"); + err = st_kim_stop(kim_gdata); + continue; + } else { + /* ldisc installed now */ + pr_info("line discipline installed"); + err = download_firmware(kim_gdata); + if (err != 0) { + /* ldisc installed but fw download failed, + * flush uart & power cycle BT_EN */ + pr_err("download firmware failed"); + err = st_kim_stop(kim_gdata); + continue; + } else { /* on success don't retry */ + break; + } + } + } while (retry--); + return err; +} + +/** + * st_kim_stop - stop communication with chip. + * This can be called from ST Core/KIM, on the- + * (a) last un-register when chip need not be powered there-after, + * (b) upon failure to either install ldisc or download firmware. + * The function is responsible to (a) notify UIM about un-installation, + * (b) flush UART if the ldisc was installed. + * (c) reset BT_EN - pull down nshutdown at the end. + * (d) invoke platform's chip disabling routine. + */ +long st_kim_stop(void *kim_data) +{ + long err = 0; + struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; + struct ti_st_plat_data *pdata = + kim_gdata->kim_pdev->dev.platform_data; + struct tty_struct *tty = kim_gdata->core_data->tty; + + reinit_completion(&kim_gdata->ldisc_installed); + + if (tty) { /* can be called before ldisc is installed */ + /* Flush any pending characters in the driver and discipline. 
*/ + tty_ldisc_flush(tty); + tty_driver_flush_buffer(tty); + } + + /* send uninstall notification to UIM */ + pr_info("ldisc_install = 0"); + kim_gdata->ldisc_install = 0; + sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); + + /* wait for ldisc to be un-installed */ + err = wait_for_completion_interruptible_timeout( + &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); + if (!err) { /* timeout */ + pr_err(" timed out waiting for ldisc to be un-installed"); + err = -ETIMEDOUT; + } + + /* By default configure BT nShutdown to LOW state */ + gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); + mdelay(1); + gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); + mdelay(1); + gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); + + /* platform specific disable */ + if (pdata->chip_disable) + pdata->chip_disable(kim_gdata); + return err; +} + +/**********************************************************************/ +/* functions called from subsystems */ +/* called when debugfs entry is read from */ + +static int show_version(struct seq_file *s, void *unused) +{ + struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; + seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full, + kim_gdata->version.chip, kim_gdata->version.maj_ver, + kim_gdata->version.min_ver); + return 0; +} + +static int show_list(struct seq_file *s, void *unused) +{ + struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; + kim_st_list_protocols(kim_gdata->core_data, s); + return 0; +} + +static ssize_t show_install(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", kim_data->ldisc_install); +} + +#ifdef DEBUG +static ssize_t store_dev_name(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + pr_debug("storing dev name >%s<", buf); + strncpy(kim_data->dev_name, buf, count); + pr_debug("stored dev name >%s<", kim_data->dev_name); + return count; +} + +static ssize_t store_baud_rate(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + pr_debug("storing baud rate >%s<", buf); + sscanf(buf, "%ld", &kim_data->baud_rate); + pr_debug("stored baud rate >%ld<", kim_data->baud_rate); + return count; +} +#endif /* if DEBUG */ + +static ssize_t show_dev_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", kim_data->dev_name); +} + +static ssize_t show_baud_rate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%ld\n", kim_data->baud_rate); +} + +static ssize_t show_flow_cntrl(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kim_data_s *kim_data = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", kim_data->flow_cntrl); +} + +/* structures specific for sysfs entries */ +static struct kobj_attribute ldisc_install = +__ATTR(install, 0444, (void *)show_install, NULL); + +static struct kobj_attribute uart_dev_name = +#ifdef DEBUG /* TODO: move this to debug-fs if possible */ +__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name); +#else +__ATTR(dev_name, 0444, (void *)show_dev_name, NULL); +#endif + +static struct kobj_attribute uart_baud_rate = +#ifdef DEBUG /* TODO: move to debugfs */ +__ATTR(baud_rate, 
0644, (void *)show_baud_rate, (void *)store_baud_rate); +#else +__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL); +#endif + +static struct kobj_attribute uart_flow_cntrl = +__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL); + +static struct attribute *uim_attrs[] = { + &ldisc_install.attr, + &uart_dev_name.attr, + &uart_baud_rate.attr, + &uart_flow_cntrl.attr, + NULL, +}; + +static struct attribute_group uim_attr_grp = { + .attrs = uim_attrs, +}; + +/** + * st_kim_ref - reference the core's data + * This references the per-ST platform device in the arch/xx/ + * board-xx.c file. + * This would enable multiple such platform devices to exist + * on a given platform + */ +void st_kim_ref(struct st_data_s **core_data, int id) +{ + struct platform_device *pdev; + struct kim_data_s *kim_gdata; + /* get kim_gdata reference from platform device */ + pdev = st_get_plat_device(id); + if (!pdev) { + *core_data = NULL; + return; + } + kim_gdata = platform_get_drvdata(pdev); + *core_data = kim_gdata->core_data; +} + +static int kim_version_open(struct inode *i, struct file *f) +{ + return single_open(f, show_version, i->i_private); +} + +static int kim_list_open(struct inode *i, struct file *f) +{ + return single_open(f, show_list, i->i_private); +} + +static const struct file_operations version_debugfs_fops = { + /* version info */ + .open = kim_version_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +static const struct file_operations list_debugfs_fops = { + /* protocols info */ + .open = kim_list_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/**********************************************************************/ +/* functions called from platform device driver subsystem + * need to have a relevant platform device entry in the platform's + * board-*.c file + */ + +static struct dentry *kim_debugfs_dir; +static int kim_probe(struct platform_device *pdev) +{ + struct kim_data_s *kim_gdata; + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + int err; + + if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) { + /* multiple devices could exist */ + st_kim_devices[pdev->id] = pdev; + } else { + /* platform's sure about existence of 1 device */ + st_kim_devices[0] = pdev; + } + + kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); + if (!kim_gdata) { + pr_err("no mem to allocate"); + return -ENOMEM; + } + platform_set_drvdata(pdev, kim_gdata); + + err = st_core_init(&kim_gdata->core_data); + if (err != 0) { + pr_err(" ST core init failed"); + err = -EIO; + goto err_core_init; + } + /* refer to itself */ + kim_gdata->core_data->kim_data = kim_gdata; + + /* Claim the chip enable nShutdown gpio from the system */ + kim_gdata->nshutdown = pdata->nshutdown_gpio; + err = gpio_request(kim_gdata->nshutdown, "kim"); + if (unlikely(err)) { + pr_err(" gpio %ld request failed ", kim_gdata->nshutdown); + return err; + } + + /* Configure nShutdown GPIO as output=0 */ + err = gpio_direction_output(kim_gdata->nshutdown, 0); + if (unlikely(err)) { + pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown); + return err; + } + /* get reference of pdev for request_firmware + */ + kim_gdata->kim_pdev = pdev; + init_completion(&kim_gdata->kim_rcvd); + init_completion(&kim_gdata->ldisc_installed); + + err = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp); + if (err) { + pr_err("failed to create sysfs entries"); + goto err_sysfs_group; + } + + /* copying platform data */ + strncpy(kim_gdata->dev_name, pdata->dev_name, 
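[Editorial aside: sysfs_notify() on the "install" attribute, as used in st_kim_start()/st_kim_stop() above, is the kernel-to-UIM signal; the daemon poll()s the attribute for POLLPRI and re-reads it. A user-space sketch — the sysfs path is an assumption depending on how the "kim" platform device is named:]

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int demo_wait_install(void)
{
	char buf[4];
	struct pollfd pfd;
	int fd = open("/sys/devices/platform/kim/install", O_RDONLY);

	if (fd < 0)
		return -1;
	read(fd, buf, sizeof(buf));	/* prime: poll reports the next change */
	pfd.fd = fd;
	pfd.events = POLLPRI | POLLERR;
	poll(&pfd, 1, -1);		/* woken by sysfs_notify() */
	lseek(fd, 0, SEEK_SET);
	read(fd, buf, sizeof(buf));	/* '1' -> install, '0' -> uninstall */
	close(fd);
	return buf[0] == '1';
}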
UART_DEV_NAME_LEN); + kim_gdata->flow_cntrl = pdata->flow_cntrl; + kim_gdata->baud_rate = pdata->baud_rate; + pr_info("sysfs entries created\n"); + + kim_debugfs_dir = debugfs_create_dir("ti-st", NULL); + if (IS_ERR(kim_debugfs_dir)) { + pr_err(" debugfs entries creation failed "); + err = -EIO; + goto err_debugfs_dir; + } + + debugfs_create_file("version", S_IRUGO, kim_debugfs_dir, + kim_gdata, &version_debugfs_fops); + debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir, + kim_gdata, &list_debugfs_fops); + pr_info(" debugfs entries created "); + return 0; + +err_debugfs_dir: + sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); + +err_sysfs_group: + st_core_exit(kim_gdata->core_data); + +err_core_init: + kfree(kim_gdata); + + return err; +} + +static int kim_remove(struct platform_device *pdev) +{ + /* free the GPIOs requested */ + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + struct kim_data_s *kim_gdata; + + kim_gdata = platform_get_drvdata(pdev); + + /* Free the Bluetooth/FM/GPIO + * nShutdown gpio from the system + */ + gpio_free(pdata->nshutdown_gpio); + pr_info("nshutdown GPIO Freed"); + + debugfs_remove_recursive(kim_debugfs_dir); + sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); + pr_info("sysfs entries removed"); + + kim_gdata->kim_pdev = NULL; + st_core_exit(kim_gdata->core_data); + + kfree(kim_gdata); + kim_gdata = NULL; + return 0; +} + +static int kim_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + + if (pdata->suspend) + return pdata->suspend(pdev, state); + + return -EOPNOTSUPP; +} + +static int kim_resume(struct platform_device *pdev) +{ + struct ti_st_plat_data *pdata = pdev->dev.platform_data; + + if (pdata->resume) + return pdata->resume(pdev); + + return -EOPNOTSUPP; +} + +/**********************************************************************/ +/* entry point for ST KIM module, called in from ST Core */ +static struct platform_driver kim_platform_driver = { + .probe = kim_probe, + .remove = kim_remove, + .suspend = kim_suspend, + .resume = kim_resume, + .driver = { + .name = "kim", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(kim_platform_driver); + +MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>"); +MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips "); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c new file mode 100644 index 00000000000..93b4d67cc4a --- /dev/null +++ b/drivers/misc/ti-st/st_ll.c @@ -0,0 +1,169 @@ +/* + * Shared Transport driver + * HCI-LL module responsible for TI proprietary HCI_LL protocol + * Copyright (C) 2009-2010 Texas Instruments + * Author: Pavan Savoy <pavan_savoy@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define pr_fmt(fmt) "(stll) :" fmt +#include <linux/skbuff.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/ti_wilink_st.h> + +/**********************************************************************/ +/* internal functions */ +static void send_ll_cmd(struct st_data_s *st_data, + unsigned char cmd) +{ + + pr_debug("%s: writing %x", __func__, cmd); + st_int_write(st_data, &cmd, 1); + return; +} + +static void ll_device_want_to_sleep(struct st_data_s *st_data) +{ + struct kim_data_s *kim_data; + struct ti_st_plat_data *pdata; + + pr_debug("%s", __func__); + /* sanity check */ + if (st_data->ll_state != ST_LL_AWAKE) + pr_err("ERR hcill: ST_LL_GO_TO_SLEEP_IND" + "in state %ld", st_data->ll_state); + + send_ll_cmd(st_data, LL_SLEEP_ACK); + /* update state */ + st_data->ll_state = ST_LL_ASLEEP; + + /* communicate to platform about chip asleep */ + kim_data = st_data->kim_data; + pdata = kim_data->kim_pdev->dev.platform_data; + if (pdata->chip_asleep) + pdata->chip_asleep(NULL); +} + +static void ll_device_want_to_wakeup(struct st_data_s *st_data) +{ + struct kim_data_s *kim_data; + struct ti_st_plat_data *pdata; + + /* diff actions in diff states */ + switch (st_data->ll_state) { + case ST_LL_ASLEEP: + send_ll_cmd(st_data, LL_WAKE_UP_ACK); /* send wake_ack */ + break; + case ST_LL_ASLEEP_TO_AWAKE: + /* duplicate wake_ind */ + pr_err("duplicate wake_ind while waiting for Wake ack"); + break; + case ST_LL_AWAKE: + /* duplicate wake_ind */ + pr_err("duplicate wake_ind already AWAKE"); + break; + case ST_LL_AWAKE_TO_ASLEEP: + /* duplicate wake_ind */ + pr_err("duplicate wake_ind"); + break; + } + /* update state */ + st_data->ll_state = ST_LL_AWAKE; + + /* communicate to platform about chip wakeup */ + kim_data = st_data->kim_data; + pdata = kim_data->kim_pdev->dev.platform_data; + if (pdata->chip_awake) + pdata->chip_awake(NULL); +} + +/**********************************************************************/ +/* functions invoked by ST Core */ + +/* called when ST Core wants to + * enable ST LL */ +void st_ll_enable(struct st_data_s *ll) +{ + ll->ll_state = ST_LL_AWAKE; +} + +/* called when ST Core /local module wants to + * disable ST LL */ +void st_ll_disable(struct st_data_s *ll) +{ + ll->ll_state = ST_LL_INVALID; +} + +/* called when ST Core wants to update the state */ +void st_ll_wakeup(struct st_data_s *ll) +{ + if (likely(ll->ll_state != ST_LL_AWAKE)) { + send_ll_cmd(ll, LL_WAKE_UP_IND); /* WAKE_IND */ + ll->ll_state = ST_LL_ASLEEP_TO_AWAKE; + } else { + /* don't send the duplicate wake_indication */ + pr_err(" Chip already AWAKE "); + } +} + +/* called when ST Core wants the state */ +unsigned long st_ll_getstate(struct st_data_s *ll) +{ + pr_debug(" returning state %ld", ll->ll_state); + return ll->ll_state; +} + +/* called from ST Core, when a PM related packet arrives */ +unsigned long st_ll_sleep_state(struct st_data_s *st_data, + unsigned char cmd) +{ + switch (cmd) { + case LL_SLEEP_IND: /* sleep ind */ + pr_debug("sleep indication recvd"); + ll_device_want_to_sleep(st_data); + break; + case LL_SLEEP_ACK: /* sleep ack */ + pr_err("sleep ack rcvd: host shouldn't"); + break; + case LL_WAKE_UP_IND: /* wake ind */ + pr_debug("wake indication recvd"); + ll_device_want_to_wakeup(st_data); + break; + case LL_WAKE_UP_ACK: /* wake ack */ + 
pr_debug("wake ack rcvd"); + st_data->ll_state = ST_LL_AWAKE; + break; + default: + pr_err(" unknown input/state "); + return -EINVAL; + } + return 0; +} + +/* Called from ST CORE to initialize ST LL */ +long st_ll_init(struct st_data_s *ll) +{ + /* set state to invalid */ + ll->ll_state = ST_LL_INVALID; + return 0; +} + +/* Called from ST CORE to de-initialize ST LL */ +long st_ll_deinit(struct st_data_s *ll) +{ + return 0; +} diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c index d3f229a3a77..cb0289b44a1 100644 --- a/drivers/misc/ti_dac7512.c +++ b/drivers/misc/ti_dac7512.c @@ -20,11 +20,8 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/spi/spi.h> - -#define DAC7512_DRV_NAME "dac7512" -#define DRIVER_VERSION "1.0" +#include <linux/of.h> static ssize_t dac7512_store_val(struct device *dev, struct device_attribute *attr, @@ -33,9 +30,11 @@ static ssize_t dac7512_store_val(struct device *dev, struct spi_device *spi = to_spi_device(dev); unsigned char tmp[2]; unsigned long val; + int ret; - if (strict_strtoul(buf, 10, &val) < 0) - return -EINVAL; + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; tmp[0] = val >> 8; tmp[1] = val & 0xff; @@ -54,7 +53,7 @@ static const struct attribute_group dac7512_attr_group = { .attrs = dac7512_attributes, }; -static int __devinit dac7512_probe(struct spi_device *spi) +static int dac7512_probe(struct spi_device *spi) { int ret; @@ -67,35 +66,39 @@ static int __devinit dac7512_probe(struct spi_device *spi) return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group); } -static int __devexit dac7512_remove(struct spi_device *spi) +static int dac7512_remove(struct spi_device *spi) { sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group); return 0; } +static const struct spi_device_id dac7512_id_table[] = { + { "dac7512", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, dac7512_id_table); + +#ifdef CONFIG_OF +static const struct of_device_id dac7512_of_match[] = { + { .compatible = "ti,dac7512", }, + { } +}; +MODULE_DEVICE_TABLE(of, dac7512_of_match); +#endif + static struct spi_driver dac7512_driver = { .driver = { - .name = DAC7512_DRV_NAME, + .name = "dac7512", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(dac7512_of_match), }, .probe = dac7512_probe, - .remove = __devexit_p(dac7512_remove), + .remove = dac7512_remove, + .id_table = dac7512_id_table, }; -static int __init dac7512_init(void) -{ - return spi_register_driver(&dac7512_driver); -} - -static void __exit dac7512_exit(void) -{ - spi_unregister_driver(&dac7512_driver); -} +module_spi_driver(dac7512_driver); MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("DAC7512 16-bit DAC"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRIVER_VERSION); - -module_init(dac7512_init); -module_exit(dac7512_exit); diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index a6ef18259da..a606c8901e1 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -11,6 +11,7 @@ #include <linux/tifm.h> #include <linux/dma-mapping.h> +#include <linux/module.h> #define DRIVER_NAME "tifm_7xx1" #define DRIVER_VERSION "0.8" @@ -355,8 +356,10 @@ static int tifm_7xx1_probe(struct pci_dev *dev, pci_set_drvdata(dev, fm); fm->addr = pci_ioremap_bar(dev, 0); - if (!fm->addr) + if (!fm->addr) { + rc = -ENODEV; goto err_out_free; + } rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm); if (rc) @@ -377,7 +380,6 @@ err_out_irq: err_out_unmap: iounmap(fm->addr); err_out_free: - pci_set_drvdata(dev, NULL); tifm_free_adapter(fm); 
err_out_int: pci_intx(dev, 0); @@ -404,8 +406,6 @@ static void tifm_7xx1_remove(struct pci_dev *dev) for (cnt = 0; cnt < fm->num_sockets; cnt++) tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt)); - pci_set_drvdata(dev, NULL); - iounmap(fm->addr); pci_intx(dev, 0); pci_release_regions(dev); @@ -433,21 +433,9 @@ static struct pci_driver tifm_7xx1_driver = { .resume = tifm_7xx1_resume, }; -static int __init tifm_7xx1_init(void) -{ - return pci_register_driver(&tifm_7xx1_driver); -} - -static void __exit tifm_7xx1_exit(void) -{ - pci_unregister_driver(&tifm_7xx1_driver); -} - +module_pci_driver(tifm_7xx1_driver); MODULE_AUTHOR("Alex Dubov"); MODULE_DESCRIPTION("TI FlashMedia host driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, tifm_7xx1_pci_tbl); MODULE_VERSION(DRIVER_VERSION); - -module_init(tifm_7xx1_init); -module_exit(tifm_7xx1_exit); diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 98bcba521da..a511b2a713b 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -10,8 +10,10 @@ */ #include <linux/tifm.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/idr.h> +#include <linux/module.h> #define DRIVER_NAME "tifm_core" #define DRIVER_VERSION "0.8" @@ -143,15 +145,17 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr, struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev); return sprintf(buf, "%x", sock->type); } +static DEVICE_ATTR_RO(type); -static struct device_attribute tifm_dev_attrs[] = { - __ATTR(type, S_IRUGO, type_show, NULL), - __ATTR_NULL +static struct attribute *tifm_dev_attrs[] = { + &dev_attr_type.attr, + NULL, }; +ATTRIBUTE_GROUPS(tifm_dev); static struct bus_type tifm_bus_type = { .name = "tifm", - .dev_attrs = tifm_dev_attrs, + .dev_groups = tifm_dev_groups, .match = tifm_bus_match, .uevent = tifm_uevent, .probe = tifm_device_probe, @@ -194,13 +198,14 @@ int tifm_add_adapter(struct tifm_adapter *fm) { int rc; - if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL)) - return -ENOMEM; - + idr_preload(GFP_KERNEL); spin_lock(&tifm_adapter_lock); - rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id); + rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT); + if (rc >= 0) + fm->id = rc; spin_unlock(&tifm_adapter_lock); - if (rc) + idr_preload_end(); + if (rc < 0) return rc; dev_set_name(&fm->dev, "tifm%u", fm->id); @@ -328,7 +333,7 @@ static int __init tifm_init(void) { int rc; - workqueue = create_freezeable_workqueue("tifm"); + workqueue = create_freezable_workqueue("tifm"); if (!workqueue) return -ENOMEM; diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c new file mode 100644 index 00000000000..b00335652e5 --- /dev/null +++ b/drivers/misc/tsl2550.c @@ -0,0 +1,462 @@ +/* + * tsl2550.c - Linux kernel modules for ambient light sensor + * + * Copyright (C) 2007 Rodolfo Giometti <giometti@linux.it> + * Copyright (C) 2007 Eurotech S.p.A. <info@eurotech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
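+
+/*
+ * The tifm_core idr conversion further up follows the standard
+ * idr_preload()/idr_alloc() pattern; in general form (a sketch, with a
+ * caller-owned 'idr' and 'lock'):
+ *
+ *	idr_preload(GFP_KERNEL);	// preallocate outside the spinlock
+ *	spin_lock(&lock);
+ *	id = idr_alloc(&idr, ptr, 0, 0, GFP_NOWAIT); // 0, 0: any free id
+ *	spin_unlock(&lock);
+ *	idr_preload_end();
+ *	if (id < 0)
+ *		return id;		// -ENOMEM or -ENOSPC
+ */
+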
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+
+#define TSL2550_DRV_NAME	"tsl2550"
+#define DRIVER_VERSION		"1.2"
+
+/*
+ * Defines
+ */
+
+#define TSL2550_POWER_DOWN		0x00
+#define TSL2550_POWER_UP		0x03
+#define TSL2550_STANDARD_RANGE		0x18
+#define TSL2550_EXTENDED_RANGE		0x1d
+#define TSL2550_READ_ADC0		0x43
+#define TSL2550_READ_ADC1		0x83
+
+/*
+ * Structs
+ */
+
+struct tsl2550_data {
+	struct i2c_client *client;
+	struct mutex update_lock;
+
+	unsigned int power_state:1;
+	unsigned int operating_mode:1;
+};
+
+/*
+ * Global data
+ */
+
+static const u8 TSL2550_MODE_RANGE[2] = {
+	TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE,
+};
+
+/*
+ * Management functions
+ */
+
+static int tsl2550_set_operating_mode(struct i2c_client *client, int mode)
+{
+	struct tsl2550_data *data = i2c_get_clientdata(client);
+
+	int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]);
+
+	data->operating_mode = mode;
+
+	return ret;
+}
+
+static int tsl2550_set_power_state(struct i2c_client *client, int state)
+{
+	struct tsl2550_data *data = i2c_get_clientdata(client);
+	int ret;
+
+	if (state == 0)
+		ret = i2c_smbus_write_byte(client, TSL2550_POWER_DOWN);
+	else {
+		ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP);
+
+		/* On power up, also restore the operating mode */
+		tsl2550_set_operating_mode(client, data->operating_mode);
+	}
+
+	data->power_state = state;
+
+	return ret;
+}
+
+static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, cmd);
+	if (ret < 0)
+		return ret;
+	if (!(ret & 0x80))
+		return -EAGAIN;
+	return ret & 0x7f;	/* remove the "valid" bit */
+}
+
+/*
+ * LUX calculation
+ */
+
+#define TSL2550_MAX_LUX		1846
+
+static const u8 ratio_lut[] = {
+	100, 100, 100, 100, 100, 100, 100, 100,
+	100, 100, 100, 100, 100, 100, 99, 99,
+	99, 99, 99, 99, 99, 99, 99, 99,
+	99, 99, 99, 98, 98, 98, 98, 98,
+	98, 98, 97, 97, 97, 97, 97, 96,
+	96, 96, 96, 95, 95, 95, 94, 94,
+	93, 93, 93, 92, 92, 91, 91, 90,
+	89, 89, 88, 87, 87, 86, 85, 84,
+	83, 82, 81, 80, 79, 78, 77, 75,
+	74, 73, 71, 69, 68, 66, 64, 62,
+	60, 58, 56, 54, 52, 49, 47, 44,
+	42, 41, 40, 40, 39, 39, 38, 38,
+	37, 37, 37, 36, 36, 36, 35, 35,
+	35, 35, 34, 34, 34, 34, 33, 33,
+	33, 33, 32, 32, 32, 32, 32, 31,
+	31, 31, 31, 31, 30, 30, 30, 30,
+	30,
+};
+
+static const u16 count_lut[] = {
+	0, 1, 2, 3, 4, 5, 6, 7,
+	8, 9, 10, 11, 12, 13, 14, 15,
+	16, 18, 20, 22, 24, 26, 28, 30,
+	32, 34, 36, 38, 40, 42, 44, 46,
+	49, 53, 57, 61, 65, 69, 73, 77,
+	81, 85, 89, 93, 97, 101, 105, 109,
+	115, 123, 131, 139, 147, 155, 163, 171,
+	179, 187, 195, 203, 211, 219, 227, 235,
+	247, 263, 279, 295, 311, 327, 343, 359,
+	375, 391, 407, 423, 439, 455, 471, 487,
+	511, 543, 575, 607, 639, 671, 703, 735,
+	767, 799, 831, 863, 895, 927, 959, 991,
+	1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487,
+	1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999,
+	2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991,
+	3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015,
+};
+
+/*
+ * This function is described in the Taos TSL2550 Designer's Notebook,
+ * pages 2 and 3.
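+ *
+ * A worked example of the calculation below (indices chosen purely for
+ * illustration): ch0 = 24 and ch1 = 16 map through count_lut to c0 = 32
+ * and c1 = 16; the ratio index is r = c1 * 128 / c0 = 64, and
+ * ratio_lut[64] = 83, so lux = (32 - 16) * 83 / 256 = 5 with integer
+ * arithmetic throughout.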
+ */ +static int tsl2550_calculate_lux(u8 ch0, u8 ch1) +{ + unsigned int lux; + + /* Look up count from channel values */ + u16 c0 = count_lut[ch0]; + u16 c1 = count_lut[ch1]; + + /* + * Calculate ratio. + * Note: the "128" is a scaling factor + */ + u8 r = 128; + + /* Avoid division by 0 and count 1 cannot be greater than count 0 */ + if (c1 <= c0) + if (c0) { + r = c1 * 128 / c0; + + /* Calculate LUX */ + lux = ((c0 - c1) * ratio_lut[r]) / 256; + } else + lux = 0; + else + return -EAGAIN; + + /* LUX range check */ + return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; +} + +/* + * SysFS support + */ + +static ssize_t tsl2550_show_power_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); + + return sprintf(buf, "%u\n", data->power_state); +} + +static ssize_t tsl2550_store_power_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tsl2550_data *data = i2c_get_clientdata(client); + unsigned long val = simple_strtoul(buf, NULL, 10); + int ret; + + if (val > 1) + return -EINVAL; + + mutex_lock(&data->update_lock); + ret = tsl2550_set_power_state(client, val); + mutex_unlock(&data->update_lock); + + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, + tsl2550_show_power_state, tsl2550_store_power_state); + +static ssize_t tsl2550_show_operating_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); + + return sprintf(buf, "%u\n", data->operating_mode); +} + +static ssize_t tsl2550_store_operating_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tsl2550_data *data = i2c_get_clientdata(client); + unsigned long val = simple_strtoul(buf, NULL, 10); + int ret; + + if (val > 1) + return -EINVAL; + + if (data->power_state == 0) + return -EBUSY; + + mutex_lock(&data->update_lock); + ret = tsl2550_set_operating_mode(client, val); + mutex_unlock(&data->update_lock); + + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO, + tsl2550_show_operating_mode, tsl2550_store_operating_mode); + +static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf) +{ + struct tsl2550_data *data = i2c_get_clientdata(client); + u8 ch0, ch1; + int ret; + + ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0); + if (ret < 0) + return ret; + ch0 = ret; + + ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1); + if (ret < 0) + return ret; + ch1 = ret; + + /* Do the job */ + ret = tsl2550_calculate_lux(ch0, ch1); + if (ret < 0) + return ret; + if (data->operating_mode == 1) + ret *= 5; + + return sprintf(buf, "%d\n", ret); +} + +static ssize_t tsl2550_show_lux1_input(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tsl2550_data *data = i2c_get_clientdata(client); + int ret; + + /* No LUX data if not operational */ + if (!data->power_state) + return -EBUSY; + + mutex_lock(&data->update_lock); + ret = __tsl2550_show_lux(client, buf); + mutex_unlock(&data->update_lock); + + return ret; +} + +static DEVICE_ATTR(lux1_input, S_IRUGO, + tsl2550_show_lux1_input, NULL); + +static struct attribute *tsl2550_attributes[] = { + &dev_attr_power_state.attr, + 
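+	/*
+	 * From userspace, the attributes gathered in this array show up in
+	 * the I2C client's sysfs directory; a typical session might look
+	 * like this (device path illustrative, 0x39 being the chip's fixed
+	 * slave address):
+	 *
+	 *	# echo 1 > /sys/bus/i2c/devices/0-0039/power_state
+	 *	# cat /sys/bus/i2c/devices/0-0039/lux1_input
+	 */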
&dev_attr_operating_mode.attr, + &dev_attr_lux1_input.attr, + NULL +}; + +static const struct attribute_group tsl2550_attr_group = { + .attrs = tsl2550_attributes, +}; + +/* + * Initialization function + */ + +static int tsl2550_init_client(struct i2c_client *client) +{ + struct tsl2550_data *data = i2c_get_clientdata(client); + int err; + + /* + * Probe the chip. To do so we try to power up the device and then to + * read back the 0x03 code + */ + err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP); + if (err < 0) + return err; + if (err != TSL2550_POWER_UP) + return -ENODEV; + data->power_state = 1; + + /* Set the default operating mode */ + err = i2c_smbus_write_byte(client, + TSL2550_MODE_RANGE[data->operating_mode]); + if (err < 0) + return err; + + return 0; +} + +/* + * I2C init/probing/exit functions + */ + +static struct i2c_driver tsl2550_driver; +static int tsl2550_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct tsl2550_data *data; + int *opmode, err = 0; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE + | I2C_FUNC_SMBUS_READ_BYTE_DATA)) { + err = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + data->client = client; + i2c_set_clientdata(client, data); + + /* Check platform data */ + opmode = client->dev.platform_data; + if (opmode) { + if (*opmode < 0 || *opmode > 1) { + dev_err(&client->dev, "invalid operating_mode (%d)\n", + *opmode); + err = -EINVAL; + goto exit_kfree; + } + data->operating_mode = *opmode; + } else + data->operating_mode = 0; /* default mode is standard */ + dev_info(&client->dev, "%s operating mode\n", + data->operating_mode ? "extended" : "standard"); + + mutex_init(&data->update_lock); + + /* Initialize the TSL2550 chip */ + err = tsl2550_init_client(client); + if (err) + goto exit_kfree; + + /* Register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group); + if (err) + goto exit_kfree; + + dev_info(&client->dev, "support ver. 
%s enabled\n", DRIVER_VERSION);
+
+	return 0;
+
+exit_kfree:
+	kfree(data);
+exit:
+	return err;
+}
+
+static int tsl2550_remove(struct i2c_client *client)
+{
+	sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
+
+	/* Power down the device */
+	tsl2550_set_power_state(client, 0);
+
+	kfree(i2c_get_clientdata(client));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int tsl2550_suspend(struct device *dev)
+{
+	return tsl2550_set_power_state(to_i2c_client(dev), 0);
+}
+
+static int tsl2550_resume(struct device *dev)
+{
+	return tsl2550_set_power_state(to_i2c_client(dev), 1);
+}
+
+static SIMPLE_DEV_PM_OPS(tsl2550_pm_ops, tsl2550_suspend, tsl2550_resume);
+#define TSL2550_PM_OPS (&tsl2550_pm_ops)
+
+#else
+
+#define TSL2550_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct i2c_device_id tsl2550_id[] = {
+	{ "tsl2550", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tsl2550_id);
+
+static struct i2c_driver tsl2550_driver = {
+	.driver = {
+		.name	= TSL2550_DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= TSL2550_PM_OPS,
+	},
+	.probe	= tsl2550_probe,
+	.remove	= tsl2550_remove,
+	.id_table = tsl2550_id,
+};
+
+module_i2c_driver(tsl2550_driver);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("TSL2550 ambient light sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
new file mode 100644
index 00000000000..3250fc1df0a
--- /dev/null
+++ b/drivers/misc/vexpress-syscfg.c
@@ -0,0 +1,328 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/vexpress.h>
+
+
+#define SYS_CFGDATA		0x0
+
+#define SYS_CFGCTRL		0x4
+#define SYS_CFGCTRL_START	(1 << 31)
+#define SYS_CFGCTRL_WRITE	(1 << 30)
+#define SYS_CFGCTRL_DCC(n)	(((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n)	(((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n)	(((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n)	(((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n)	(((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT		0x8
+#define SYS_CFGSTAT_ERR		(1 << 1)
+#define SYS_CFGSTAT_COMPLETE	(1 << 0)
+
+
+struct vexpress_syscfg {
+	struct device *dev;
+	void __iomem *base;
+	struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+	struct list_head list;
+	struct vexpress_syscfg *syscfg;
+	struct regmap *regmap;
+	int num_templates;
+	u32 template[0]; /* Keep it last! */
+};
+
+
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+		int index, bool write, u32 *data)
+{
+	struct vexpress_syscfg *syscfg = func->syscfg;
+	u32 command, status;
+	int tries;
+	long timeout;
+
+	/* valid indices are 0 .. num_templates - 1 */
+	if (WARN_ON(index >= func->num_templates))
+		return -EINVAL;
+
+	command = readl(syscfg->base + SYS_CFGCTRL);
+	if (WARN_ON(command & SYS_CFGCTRL_START))
+		return -EBUSY;
+
+	command = func->template[index];
+	command |= SYS_CFGCTRL_START;
+	command |= write ?
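+	/*
+	 * Schematic layout of the command word being assembled here, from
+	 * the SYS_CFGCTRL_* definitions above (field start bits):
+	 *
+	 *	bit 31      30      26    20     16     12         0
+	 *	    START | WRITE | DCC | FUNC | SITE | POSITION | DEVICE
+	 */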
SYS_CFGCTRL_WRITE : 0; + + /* Use a canary for reads */ + if (!write) + *data = 0xdeadbeef; + + dev_dbg(syscfg->dev, "func %p, command %x, data %x\n", + func, command, *data); + writel(*data, syscfg->base + SYS_CFGDATA); + writel(0, syscfg->base + SYS_CFGSTAT); + writel(command, syscfg->base + SYS_CFGCTRL); + mb(); + + /* The operation can take ages... Go to sleep, 100us initially */ + tries = 100; + timeout = 100; + do { + if (!irqs_disabled()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(timeout)); + if (signal_pending(current)) + return -EINTR; + } else { + udelay(timeout); + } + + status = readl(syscfg->base + SYS_CFGSTAT); + if (status & SYS_CFGSTAT_ERR) + return -EFAULT; + + if (timeout > 20) + timeout -= 20; + } while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); + if (WARN_ON_ONCE(!tries)) + return -ETIMEDOUT; + + if (!write) { + *data = readl(syscfg->base + SYS_CFGDATA); + dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data); + } + + return 0; +} + +static int vexpress_syscfg_read(void *context, unsigned int index, + unsigned int *val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, false, val); +} + +static int vexpress_syscfg_write(void *context, unsigned int index, + unsigned int val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, true, &val); +} + +struct regmap_config vexpress_syscfg_regmap_config = { + .lock = vexpress_config_lock, + .unlock = vexpress_config_unlock, + .reg_bits = 32, + .val_bits = 32, + .reg_read = vexpress_syscfg_read, + .reg_write = vexpress_syscfg_write, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + + +static struct regmap *vexpress_syscfg_regmap_init(struct device *dev, + void *context) +{ + struct platform_device *pdev = to_platform_device(dev); + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func; + struct property *prop; + const __be32 *val = NULL; + __be32 energy_quirk[4]; + int num; + u32 site, position, dcc; + int i; + + if (dev->of_node) { + int err = vexpress_config_get_topo(dev->of_node, &site, + &position, &dcc); + + if (err) + return ERR_PTR(err); + + prop = of_find_property(dev->of_node, + "arm,vexpress-sysreg,func", NULL); + if (!prop) + return ERR_PTR(-EINVAL); + + num = prop->length / sizeof(u32) / 2; + val = prop->value; + } else { + if (pdev->num_resources != 1 || + pdev->resource[0].flags != IORESOURCE_BUS) + return ERR_PTR(-EFAULT); + + site = pdev->resource[0].start; + if (site == VEXPRESS_SITE_MASTER) + site = vexpress_config_get_master(); + position = 0; + dcc = 0; + num = 1; + } + + /* + * "arm,vexpress-energy" function used to be described + * by its first device only, now it requires both + */ + if (num == 1 && of_device_is_compatible(dev->of_node, + "arm,vexpress-energy")) { + num = 2; + energy_quirk[0] = *val; + energy_quirk[2] = *val++; + energy_quirk[1] = *val; + energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); + val = energy_quirk; + } + + func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, + GFP_KERNEL); + if (!func) + return ERR_PTR(-ENOMEM); + + func->syscfg = syscfg; + func->num_templates = num; + + for (i = 0; i < num; i++) { + u32 function, device; + + if (dev->of_node) { + function = be32_to_cpup(val++); + device = be32_to_cpup(val++); + } else { + function = pdev->resource[0].end; + device = pdev->id; + } + + dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", + func, site, position, dcc, + function, 
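+		/*
+		 * Consumers never assemble these templates themselves; they
+		 * obtain a regmap through the config bridge and use plain
+		 * regmap accessors. A minimal consumer sketch (assuming the
+		 * devm_regmap_init_vexpress_config() helper declared in
+		 * <linux/vexpress.h>):
+		 *
+		 *	map = devm_regmap_init_vexpress_config(&pdev->dev);
+		 *	if (IS_ERR(map))
+		 *		return PTR_ERR(map);
+		 *	regmap_write(map, 0, val); // index 0: first template
+		 */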
device);
+
+		func->template[i] = SYS_CFGCTRL_DCC(dcc);
+		func->template[i] |= SYS_CFGCTRL_SITE(site);
+		func->template[i] |= SYS_CFGCTRL_POSITION(position);
+		func->template[i] |= SYS_CFGCTRL_FUNC(function);
+		func->template[i] |= SYS_CFGCTRL_DEVICE(device);
+	}
+
+	vexpress_syscfg_regmap_config.max_register = num - 1;
+
+	func->regmap = regmap_init(dev, NULL, func,
+			&vexpress_syscfg_regmap_config);
+
+	if (IS_ERR(func->regmap)) {
+		void *err = func->regmap;
+
+		kfree(func);
+		return err;
+	}
+
+	list_add(&func->list, &syscfg->funcs);
+
+	return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
+{
+	struct vexpress_syscfg *syscfg = context;
+	struct vexpress_syscfg_func *func, *tmp;
+
+	regmap_exit(regmap);
+
+	list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+		if (func->regmap == regmap) {
+			/* unlink this entry, not the list head */
+			list_del(&func->list);
+			kfree(func);
+			break;
+		}
+	}
+}
+
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+	.regmap_init = vexpress_syscfg_regmap_init,
+	.regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+/* Non-DT hack, to be gone... */
+static struct device *vexpress_syscfg_bridge;
+
+int vexpress_syscfg_device_register(struct platform_device *pdev)
+{
+	pdev->dev.parent = vexpress_syscfg_bridge;
+
+	return platform_device_register(pdev);
+}
+
+
+int vexpress_syscfg_probe(struct platform_device *pdev)
+{
+	struct vexpress_syscfg *syscfg;
+	struct resource *res;
+	struct device *bridge;
+
+	syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+	if (!syscfg)
+		return -ENOMEM;
+	syscfg->dev = &pdev->dev;
+	INIT_LIST_HEAD(&syscfg->funcs);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+			resource_size(res), pdev->name))
+		return -EBUSY;
+
+	syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!syscfg->base)
+		return -EFAULT;
+
+	/* Must use dev.parent (MFD), as that's where DT phandle points at... */
+	bridge = vexpress_config_bridge_register(pdev->dev.parent,
+			&vexpress_syscfg_bridge_ops, syscfg);
+	if (IS_ERR(bridge))
+		return PTR_ERR(bridge);
+
+	/* Non-DT case */
+	if (!pdev->dev.of_node)
+		vexpress_syscfg_bridge = bridge;
+
+	return 0;
+}
+
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+	{ "vexpress-syscfg", },
+	{},
+};
+
+static struct platform_driver vexpress_syscfg_driver = {
+	.driver.name = "vexpress-syscfg",
+	.id_table = vexpress_syscfg_id_table,
+	.probe = vexpress_syscfg_probe,
+};
+
+static int __init vexpress_syscfg_init(void)
+{
+	return platform_driver_register(&vexpress_syscfg_driver);
+}
+core_initcall(vexpress_syscfg_init);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
new file mode 100644
index 00000000000..19161749218
--- /dev/null
+++ b/drivers/misc/vmw_balloon.c
@@ -0,0 +1,839 @@
+/*
+ * VMware Balloon driver.
+ *
+ * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Xavier Deguillard <xdeguillard@vmware.com>
+ *                Philip Moltmann <moltmann@vmware.com>
+ */
+
+/*
+ * This is the VMware physical memory management driver for Linux. The
+ * driver acts like a "balloon" that can be inflated to reclaim physical
+ * pages by reserving them in the guest and invalidating them in the
+ * monitor, freeing up the underlying machine pages so they can be
+ * allocated to other guests. The balloon can also be deflated to allow
+ * the guest to use more physical memory. Higher level policies can
+ * control the sizes of balloons in VMs in order to manage physical
+ * memory resources.
+ */
+
+//#define DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/hypervisor.h>
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
+MODULE_VERSION("1.2.1.3-k");
+MODULE_ALIAS("dmi:*:svnVMware*:*");
+MODULE_ALIAS("vmware_vmmemctl");
+MODULE_LICENSE("GPL");
+
+/*
+ * Various constants controlling the rate of inflating/deflating the
+ * balloon, measured in pages.
+ */
+
+/*
+ * Rate of allocating memory when there is no memory pressure
+ * (driver performs non-sleeping allocations).
+ */
+#define VMW_BALLOON_NOSLEEP_ALLOC_MAX	16384U
+
+/*
+ * Rates of memory allocation when guest experiences memory pressure
+ * (driver performs sleeping allocations).
+ */
+#define VMW_BALLOON_RATE_ALLOC_MIN	512U
+#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
+#define VMW_BALLOON_RATE_ALLOC_INC	16U
+
+/*
+ * Rates for releasing pages while deflating balloon.
+ */
+#define VMW_BALLOON_RATE_FREE_MIN	512U
+#define VMW_BALLOON_RATE_FREE_MAX	16384U
+#define VMW_BALLOON_RATE_FREE_INC	16U
+
+/*
+ * When guest is under memory pressure, use a reduced page allocation
+ * rate for the next several cycles.
+ */
+#define VMW_BALLOON_SLOW_CYCLES		4
+
+/*
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
+ * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * __GFP_NOWARN to suppress page allocation failure warnings.
+ */
+#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)
+
+/*
+ * Use GFP_HIGHUSER when executing in a separate kernel thread
+ * context and allocation can sleep. This is less stressful to
+ * the guest memory system, since it allows the thread to block
+ * while memory is reclaimed, and won't take pages from emergency
+ * low-memory pools.
+ */
+#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)
+
+/* Maximum number of page allocations without yielding processor */
+#define VMW_BALLOON_YIELD_THRESHOLD	1024
+
+/* Maximum number of refused pages we accumulate during inflation cycle */
+#define VMW_BALLOON_MAX_REFUSED		16
+
+/*
+ * Hypervisor communication port definitions.
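+ *
+ * Commands travel over VMware's "backdoor" I/O port: the magic value is
+ * loaded into %eax, the command number into %ecx, the port into %dx and
+ * the data argument into %ebx, then a 32-bit "inl" is executed; the
+ * hypervisor returns the status in %eax and the result in %ebx (see
+ * VMWARE_BALLOON_CMD() below).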
+ */ +#define VMW_BALLOON_HV_PORT 0x5670 +#define VMW_BALLOON_HV_MAGIC 0x456c6d6f +#define VMW_BALLOON_PROTOCOL_VERSION 2 +#define VMW_BALLOON_GUEST_ID 1 /* Linux */ + +#define VMW_BALLOON_CMD_START 0 +#define VMW_BALLOON_CMD_GET_TARGET 1 +#define VMW_BALLOON_CMD_LOCK 2 +#define VMW_BALLOON_CMD_UNLOCK 3 +#define VMW_BALLOON_CMD_GUEST_ID 4 + +/* error codes */ +#define VMW_BALLOON_SUCCESS 0 +#define VMW_BALLOON_FAILURE -1 +#define VMW_BALLOON_ERROR_CMD_INVALID 1 +#define VMW_BALLOON_ERROR_PPN_INVALID 2 +#define VMW_BALLOON_ERROR_PPN_LOCKED 3 +#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4 +#define VMW_BALLOON_ERROR_PPN_PINNED 5 +#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6 +#define VMW_BALLOON_ERROR_RESET 7 +#define VMW_BALLOON_ERROR_BUSY 8 + +#define VMWARE_BALLOON_CMD(cmd, data, result) \ +({ \ + unsigned long __stat, __dummy1, __dummy2; \ + __asm__ __volatile__ ("inl %%dx" : \ + "=a"(__stat), \ + "=c"(__dummy1), \ + "=d"(__dummy2), \ + "=b"(result) : \ + "0"(VMW_BALLOON_HV_MAGIC), \ + "1"(VMW_BALLOON_CMD_##cmd), \ + "2"(VMW_BALLOON_HV_PORT), \ + "3"(data) : \ + "memory"); \ + result &= -1UL; \ + __stat & -1UL; \ +}) + +#ifdef CONFIG_DEBUG_FS +struct vmballoon_stats { + unsigned int timer; + + /* allocation statistics */ + unsigned int alloc; + unsigned int alloc_fail; + unsigned int sleep_alloc; + unsigned int sleep_alloc_fail; + unsigned int refused_alloc; + unsigned int refused_free; + unsigned int free; + + /* monitor operations */ + unsigned int lock; + unsigned int lock_fail; + unsigned int unlock; + unsigned int unlock_fail; + unsigned int target; + unsigned int target_fail; + unsigned int start; + unsigned int start_fail; + unsigned int guest_type; + unsigned int guest_type_fail; +}; + +#define STATS_INC(stat) (stat)++ +#else +#define STATS_INC(stat) +#endif + +struct vmballoon { + + /* list of reserved physical pages */ + struct list_head pages; + + /* transient list of non-balloonable pages */ + struct list_head refused_pages; + unsigned int n_refused_pages; + + /* balloon size in pages */ + unsigned int size; + unsigned int target; + + /* reset flag */ + bool reset_required; + + /* adjustment rates (pages per second) */ + unsigned int rate_alloc; + unsigned int rate_free; + + /* slowdown page allocations for next few cycles */ + unsigned int slow_allocation_cycles; + +#ifdef CONFIG_DEBUG_FS + /* statistics */ + struct vmballoon_stats stats; + + /* debugfs file exporting statistics */ + struct dentry *dbg_entry; +#endif + + struct sysinfo sysinfo; + + struct delayed_work dwork; +}; + +static struct vmballoon balloon; + +/* + * Send "start" command to the host, communicating supported version + * of the protocol. + */ +static bool vmballoon_send_start(struct vmballoon *b) +{ + unsigned long status, dummy; + + STATS_INC(b->stats.start); + + status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy); + if (status == VMW_BALLOON_SUCCESS) + return true; + + pr_debug("%s - failed, hv returns %ld\n", __func__, status); + STATS_INC(b->stats.start_fail); + return false; +} + +static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) +{ + switch (status) { + case VMW_BALLOON_SUCCESS: + return true; + + case VMW_BALLOON_ERROR_RESET: + b->reset_required = true; + /* fall through */ + + default: + return false; + } +} + +/* + * Communicate guest type to the host so that it can adjust ballooning + * algorithm to the one most appropriate for the guest. This command + * is normally issued after sending "start" command and is part of + * standard reset sequence. 
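+ *
+ * Together with vmballoon_send_start() above, this forms the reset
+ * handshake: START renegotiates the protocol version, then GUEST_ID
+ * re-announces VMW_BALLOON_GUEST_ID; vmballoon_reset() further down
+ * runs the two back to back after popping the balloon.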
+ */ +static bool vmballoon_send_guest_id(struct vmballoon *b) +{ + unsigned long status, dummy; + + status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy); + + STATS_INC(b->stats.guest_type); + + if (vmballoon_check_status(b, status)) + return true; + + pr_debug("%s - failed, hv returns %ld\n", __func__, status); + STATS_INC(b->stats.guest_type_fail); + return false; +} + +/* + * Retrieve desired balloon size from the host. + */ +static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) +{ + unsigned long status; + unsigned long target; + unsigned long limit; + u32 limit32; + + /* + * si_meminfo() is cheap. Moreover, we want to provide dynamic + * max balloon size later. So let us call si_meminfo() every + * iteration. + */ + si_meminfo(&b->sysinfo); + limit = b->sysinfo.totalram; + + /* Ensure limit fits in 32-bits */ + limit32 = (u32)limit; + if (limit != limit32) + return false; + + /* update stats */ + STATS_INC(b->stats.target); + + status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target); + if (vmballoon_check_status(b, status)) { + *new_target = target; + return true; + } + + pr_debug("%s - failed, hv returns %ld\n", __func__, status); + STATS_INC(b->stats.target_fail); + return false; +} + +/* + * Notify the host about allocated page so that host can use it without + * fear that guest will need it. Host may reject some pages, we need to + * check the return value and maybe submit a different page. + */ +static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, + unsigned int *hv_status) +{ + unsigned long status, dummy; + u32 pfn32; + + pfn32 = (u32)pfn; + if (pfn32 != pfn) + return -1; + + STATS_INC(b->stats.lock); + + *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); + if (vmballoon_check_status(b, status)) + return 0; + + pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); + STATS_INC(b->stats.lock_fail); + return 1; +} + +/* + * Notify the host that guest intends to release given page back into + * the pool of available (to the guest) pages. + */ +static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) +{ + unsigned long status, dummy; + u32 pfn32; + + pfn32 = (u32)pfn; + if (pfn32 != pfn) + return false; + + STATS_INC(b->stats.unlock); + + status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy); + if (vmballoon_check_status(b, status)) + return true; + + pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); + STATS_INC(b->stats.unlock_fail); + return false; +} + +/* + * Quickly release all pages allocated for the balloon. This function is + * called when host decides to "reset" balloon for one reason or another. + * Unlike normal "deflate" we do not (shall not) notify host of the pages + * being released. + */ +static void vmballoon_pop(struct vmballoon *b) +{ + struct page *page, *next; + unsigned int count = 0; + + list_for_each_entry_safe(page, next, &b->pages, lru) { + list_del(&page->lru); + __free_page(page); + STATS_INC(b->stats.free); + b->size--; + + if (++count >= b->rate_free) { + count = 0; + cond_resched(); + } + } +} + +/* + * Perform standard reset sequence by popping the balloon (in case it + * is not empty) and then restarting protocol. This operation normally + * happens when host responds with VMW_BALLOON_ERROR_RESET to a command. 
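+ *
+ * For reference, the per-page protocol during normal operation is
+ * (PPN = physical page number, which must fit in 32 bits):
+ *
+ *	inflate: alloc_page() -> LOCK(ppn)     (page handed to hypervisor)
+ *	deflate: UNLOCK(ppn)  -> __free_page() (page returned to the guest)
+ *
+ * A reset deliberately skips the UNLOCK step, as vmballoon_pop()
+ * frees the pages without notifying the host.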
+ */ +static void vmballoon_reset(struct vmballoon *b) +{ + /* free all pages, skipping monitor unlock */ + vmballoon_pop(b); + + if (vmballoon_send_start(b)) { + b->reset_required = false; + if (!vmballoon_send_guest_id(b)) + pr_err("failed to send guest ID to the host\n"); + } +} + +/* + * Allocate (or reserve) a page for the balloon and notify the host. If host + * refuses the page put it on "refuse" list and allocate another one until host + * is satisfied. "Refused" pages are released at the end of inflation cycle + * (when we allocate b->rate_alloc pages). + */ +static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) +{ + struct page *page; + gfp_t flags; + unsigned int hv_status; + int locked; + flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP; + + do { + if (!can_sleep) + STATS_INC(b->stats.alloc); + else + STATS_INC(b->stats.sleep_alloc); + + page = alloc_page(flags); + if (!page) { + if (!can_sleep) + STATS_INC(b->stats.alloc_fail); + else + STATS_INC(b->stats.sleep_alloc_fail); + return -ENOMEM; + } + + /* inform monitor */ + locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status); + if (locked > 0) { + STATS_INC(b->stats.refused_alloc); + + if (hv_status == VMW_BALLOON_ERROR_RESET || + hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { + __free_page(page); + return -EIO; + } + + /* + * Place page on the list of non-balloonable pages + * and retry allocation, unless we already accumulated + * too many of them, in which case take a breather. + */ + list_add(&page->lru, &b->refused_pages); + if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED) + return -EIO; + } + } while (locked != 0); + + /* track allocated page */ + list_add(&page->lru, &b->pages); + + /* update balloon size */ + b->size++; + + return 0; +} + +/* + * Release the page allocated for the balloon. Note that we first notify + * the host so it can make sure the page will be available for the guest + * to use, if needed. + */ +static int vmballoon_release_page(struct vmballoon *b, struct page *page) +{ + if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) + return -EIO; + + list_del(&page->lru); + + /* deallocate page */ + __free_page(page); + STATS_INC(b->stats.free); + + /* update balloon size */ + b->size--; + + return 0; +} + +/* + * Release pages that were allocated while attempting to inflate the + * balloon but were refused by the host for one reason or another. + */ +static void vmballoon_release_refused_pages(struct vmballoon *b) +{ + struct page *page, *next; + + list_for_each_entry_safe(page, next, &b->refused_pages, lru) { + list_del(&page->lru); + __free_page(page); + STATS_INC(b->stats.refused_free); + } + + b->n_refused_pages = 0; +} + +/* + * Inflate the balloon towards its target size. Note that we try to limit + * the rate of allocation to make sure we are not choking the rest of the + * system. + */ +static void vmballoon_inflate(struct vmballoon *b) +{ + unsigned int goal; + unsigned int rate; + unsigned int i; + unsigned int allocations = 0; + int error = 0; + bool alloc_can_sleep = false; + + pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); + + /* + * First try NOSLEEP page allocations to inflate balloon. + * + * If we do not throttle nosleep allocations, we can drain all + * free pages in the guest quickly (if the balloon target is high). + * As a side-effect, draining free pages helps to inform (force) + * the guest to start swapping if balloon target is not met yet, + * which is a desired behavior. 
However, balloon driver can consume + * all available CPU cycles if too many pages are allocated in a + * second. Therefore, we throttle nosleep allocations even when + * the guest is not under memory pressure. OTOH, if we have already + * predicted that the guest is under memory pressure, then we + * slowdown page allocations considerably. + */ + + goal = b->target - b->size; + /* + * Start with no sleep allocation rate which may be higher + * than sleeping allocation rate. + */ + rate = b->slow_allocation_cycles ? + b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX; + + pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n", + __func__, goal, rate, b->rate_alloc); + + for (i = 0; i < goal; i++) { + + error = vmballoon_reserve_page(b, alloc_can_sleep); + if (error) { + if (error != -ENOMEM) { + /* + * Not a page allocation failure, stop this + * cycle. Maybe we'll get new target from + * the host soon. + */ + break; + } + + if (alloc_can_sleep) { + /* + * CANSLEEP page allocation failed, so guest + * is under severe memory pressure. Quickly + * decrease allocation rate. + */ + b->rate_alloc = max(b->rate_alloc / 2, + VMW_BALLOON_RATE_ALLOC_MIN); + break; + } + + /* + * NOSLEEP page allocation failed, so the guest is + * under memory pressure. Let us slow down page + * allocations for next few cycles so that the guest + * gets out of memory pressure. Also, if we already + * allocated b->rate_alloc pages, let's pause, + * otherwise switch to sleeping allocations. + */ + b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; + + if (i >= b->rate_alloc) + break; + + alloc_can_sleep = true; + /* Lower rate for sleeping allocations. */ + rate = b->rate_alloc; + } + + if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) { + cond_resched(); + allocations = 0; + } + + if (i >= rate) { + /* We allocated enough pages, let's take a break. */ + break; + } + } + + /* + * We reached our goal without failures so try increasing + * allocation rate. + */ + if (error == 0 && i >= b->rate_alloc) { + unsigned int mult = i / b->rate_alloc; + + b->rate_alloc = + min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, + VMW_BALLOON_RATE_ALLOC_MAX); + } + + vmballoon_release_refused_pages(b); +} + +/* + * Decrease the size of the balloon allowing guest to use more memory. + */ +static void vmballoon_deflate(struct vmballoon *b) +{ + struct page *page, *next; + unsigned int i = 0; + unsigned int goal; + int error; + + pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); + + /* limit deallocation rate */ + goal = min(b->size - b->target, b->rate_free); + + pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free); + + /* free pages to reach target */ + list_for_each_entry_safe(page, next, &b->pages, lru) { + error = vmballoon_release_page(b, page); + if (error) { + /* quickly decrease rate in case of error */ + b->rate_free = max(b->rate_free / 2, + VMW_BALLOON_RATE_FREE_MIN); + return; + } + + if (++i >= goal) + break; + } + + /* slowly increase rate if there were no errors */ + b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC, + VMW_BALLOON_RATE_FREE_MAX); +} + +/* + * Balloon work function: reset protocol, if needed, get the new size and + * adjust balloon as needed. Repeat in 1 sec. 
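+ *
+ * The allocation rate adapts between cycles: a fully successful sleeping
+ * cycle raises rate_alloc by VMW_BALLOON_RATE_ALLOC_INC per completed
+ * multiple (e.g. rate_alloc = 512 and i = 1024 allocated pages give
+ * mult = 2, so the new rate is min(512 + 2 * 16, 2048) = 544), while a
+ * sleeping-allocation failure halves it, bounded below by
+ * VMW_BALLOON_RATE_ALLOC_MIN.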
+ */ +static void vmballoon_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); + unsigned int target; + + STATS_INC(b->stats.timer); + + if (b->reset_required) + vmballoon_reset(b); + + if (b->slow_allocation_cycles > 0) + b->slow_allocation_cycles--; + + if (vmballoon_send_get_target(b, &target)) { + /* update target, adjust size */ + b->target = target; + + if (b->size < target) + vmballoon_inflate(b); + else if (b->size > target) + vmballoon_deflate(b); + } + + /* + * We are using a freezable workqueue so that balloon operations are + * stopped while the system transitions to/from sleep/hibernation. + */ + queue_delayed_work(system_freezable_wq, + dwork, round_jiffies_relative(HZ)); +} + +/* + * DEBUGFS Interface + */ +#ifdef CONFIG_DEBUG_FS + +static int vmballoon_debug_show(struct seq_file *f, void *offset) +{ + struct vmballoon *b = f->private; + struct vmballoon_stats *stats = &b->stats; + + /* format size info */ + seq_printf(f, + "target: %8d pages\n" + "current: %8d pages\n", + b->target, b->size); + + /* format rate info */ + seq_printf(f, + "rateNoSleepAlloc: %8d pages/sec\n" + "rateSleepAlloc: %8d pages/sec\n" + "rateFree: %8d pages/sec\n", + VMW_BALLOON_NOSLEEP_ALLOC_MAX, + b->rate_alloc, b->rate_free); + + seq_printf(f, + "\n" + "timer: %8u\n" + "start: %8u (%4u failed)\n" + "guestType: %8u (%4u failed)\n" + "lock: %8u (%4u failed)\n" + "unlock: %8u (%4u failed)\n" + "target: %8u (%4u failed)\n" + "primNoSleepAlloc: %8u (%4u failed)\n" + "primCanSleepAlloc: %8u (%4u failed)\n" + "primFree: %8u\n" + "errAlloc: %8u\n" + "errFree: %8u\n", + stats->timer, + stats->start, stats->start_fail, + stats->guest_type, stats->guest_type_fail, + stats->lock, stats->lock_fail, + stats->unlock, stats->unlock_fail, + stats->target, stats->target_fail, + stats->alloc, stats->alloc_fail, + stats->sleep_alloc, stats->sleep_alloc_fail, + stats->free, + stats->refused_alloc, stats->refused_free); + + return 0; +} + +static int vmballoon_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, vmballoon_debug_show, inode->i_private); +} + +static const struct file_operations vmballoon_debug_fops = { + .owner = THIS_MODULE, + .open = vmballoon_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init vmballoon_debugfs_init(struct vmballoon *b) +{ + int error; + + b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, + &vmballoon_debug_fops); + if (IS_ERR(b->dbg_entry)) { + error = PTR_ERR(b->dbg_entry); + pr_err("failed to create debugfs entry, error: %d\n", error); + return error; + } + + return 0; +} + +static void __exit vmballoon_debugfs_exit(struct vmballoon *b) +{ + debugfs_remove(b->dbg_entry); +} + +#else + +static inline int vmballoon_debugfs_init(struct vmballoon *b) +{ + return 0; +} + +static inline void vmballoon_debugfs_exit(struct vmballoon *b) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +static int __init vmballoon_init(void) +{ + int error; + + /* + * Check if we are running on VMware's hypervisor and bail out + * if we are not. + */ + if (x86_hyper != &x86_hyper_vmware) + return -ENODEV; + + INIT_LIST_HEAD(&balloon.pages); + INIT_LIST_HEAD(&balloon.refused_pages); + + /* initialize rates */ + balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX; + balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX; + + INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); + + /* + * Start balloon. 
+ */ + if (!vmballoon_send_start(&balloon)) { + pr_err("failed to send start command to the host\n"); + return -EIO; + } + + if (!vmballoon_send_guest_id(&balloon)) { + pr_err("failed to send guest ID to the host\n"); + return -EIO; + } + + error = vmballoon_debugfs_init(&balloon); + if (error) + return error; + + queue_delayed_work(system_freezable_wq, &balloon.dwork, 0); + + return 0; +} +module_init(vmballoon_init); + +static void __exit vmballoon_exit(void) +{ + cancel_delayed_work_sync(&balloon.dwork); + + vmballoon_debugfs_exit(&balloon); + + /* + * Deallocate all reserved memory, and reset connection with monitor. + * Reset connection before deallocating memory to avoid potential for + * additional spurious resets from guest touching deallocated pages. + */ + vmballoon_send_start(&balloon); + vmballoon_pop(&balloon); +} +module_exit(vmballoon_exit); diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig new file mode 100644 index 00000000000..39c2ecadb27 --- /dev/null +++ b/drivers/misc/vmw_vmci/Kconfig @@ -0,0 +1,16 @@ +# +# VMware VMCI device +# + +config VMWARE_VMCI + tristate "VMware VMCI Driver" + depends on X86 && PCI + help + This is VMware's Virtual Machine Communication Interface. It enables + high-speed communication between host and guest in a virtual + environment via the VMCI virtual device. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called vmw_vmci. diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile new file mode 100644 index 00000000000..4da9893c394 --- /dev/null +++ b/drivers/misc/vmw_vmci/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o +vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \ + vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \ + vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c new file mode 100644 index 00000000000..f866a4baecb --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -0,0 +1,1214 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "vmci_queue_pair.h" +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" + +/* + * List of current VMCI contexts. Contexts can be added by + * vmci_ctx_create() and removed via vmci_ctx_destroy(). + * These, along with context lookup, are protected by the + * list structure's lock. 
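+ *
+ * Readers walk the list under rcu_read_lock() (see vmci_ctx_get() and
+ * vmci_ctx_exists() below); only writers take ctx_list.lock, and
+ * removal waits out a grace period with synchronize_rcu() before the
+ * context can be freed.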
+ */ +static struct { + struct list_head head; + spinlock_t lock; /* Spinlock for context list operations */ +} ctx_list = { + .head = LIST_HEAD_INIT(ctx_list.head), + .lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock), +}; + +/* Used by contexts that did not set up notify flag pointers */ +static bool ctx_dummy_notify; + +static void ctx_signal_notify(struct vmci_ctx *context) +{ + *context->notify = true; +} + +static void ctx_clear_notify(struct vmci_ctx *context) +{ + *context->notify = false; +} + +/* + * If nothing requires the attention of the guest, clears both + * notify flag and call. + */ +static void ctx_clear_notify_call(struct vmci_ctx *context) +{ + if (context->pending_datagrams == 0 && + vmci_handle_arr_get_size(context->pending_doorbell_array) == 0) + ctx_clear_notify(context); +} + +/* + * Sets the context's notify flag iff datagrams are pending for this + * context. Called from vmci_setup_notify(). + */ +void vmci_ctx_check_signal_notify(struct vmci_ctx *context) +{ + spin_lock(&context->lock); + if (context->pending_datagrams) + ctx_signal_notify(context); + spin_unlock(&context->lock); +} + +/* + * Allocates and initializes a VMCI context. + */ +struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags, + uintptr_t event_hnd, + int user_version, + const struct cred *cred) +{ + struct vmci_ctx *context; + int error; + + if (cid == VMCI_INVALID_ID) { + pr_devel("Invalid context ID for VMCI context\n"); + error = -EINVAL; + goto err_out; + } + + if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) { + pr_devel("Invalid flag (flags=0x%x) for VMCI context\n", + priv_flags); + error = -EINVAL; + goto err_out; + } + + if (user_version == 0) { + pr_devel("Invalid suer_version %d\n", user_version); + error = -EINVAL; + goto err_out; + } + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) { + pr_warn("Failed to allocate memory for VMCI context\n"); + error = -EINVAL; + goto err_out; + } + + kref_init(&context->kref); + spin_lock_init(&context->lock); + INIT_LIST_HEAD(&context->list_item); + INIT_LIST_HEAD(&context->datagram_queue); + INIT_LIST_HEAD(&context->notifier_list); + + /* Initialize host-specific VMCI context. */ + init_waitqueue_head(&context->host_context.wait_queue); + + context->queue_pair_array = vmci_handle_arr_create(0); + if (!context->queue_pair_array) { + error = -ENOMEM; + goto err_free_ctx; + } + + context->doorbell_array = vmci_handle_arr_create(0); + if (!context->doorbell_array) { + error = -ENOMEM; + goto err_free_qp_array; + } + + context->pending_doorbell_array = vmci_handle_arr_create(0); + if (!context->pending_doorbell_array) { + error = -ENOMEM; + goto err_free_db_array; + } + + context->user_version = user_version; + + context->priv_flags = priv_flags; + + if (cred) + context->cred = get_cred(cred); + + context->notify = &ctx_dummy_notify; + context->notify_page = NULL; + + /* + * If we collide with an existing context we generate a new + * and use it instead. The VMX will determine if regeneration + * is okay. Since there isn't 4B - 16 VMs running on a given + * host, the below loop will terminate. + */ + spin_lock(&ctx_list.lock); + + while (vmci_ctx_exists(cid)) { + /* We reserve the lowest 16 ids for fixed contexts. 
*/ + cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1; + if (cid == VMCI_INVALID_ID) + cid = VMCI_RESERVED_CID_LIMIT; + } + context->cid = cid; + + list_add_tail_rcu(&context->list_item, &ctx_list.head); + spin_unlock(&ctx_list.lock); + + return context; + + err_free_db_array: + vmci_handle_arr_destroy(context->doorbell_array); + err_free_qp_array: + vmci_handle_arr_destroy(context->queue_pair_array); + err_free_ctx: + kfree(context); + err_out: + return ERR_PTR(error); +} + +/* + * Destroy VMCI context. + */ +void vmci_ctx_destroy(struct vmci_ctx *context) +{ + spin_lock(&ctx_list.lock); + list_del_rcu(&context->list_item); + spin_unlock(&ctx_list.lock); + synchronize_rcu(); + + vmci_ctx_put(context); +} + +/* + * Fire notification for all contexts interested in given cid. + */ +static int ctx_fire_notification(u32 context_id, u32 priv_flags) +{ + u32 i, array_size; + struct vmci_ctx *sub_ctx; + struct vmci_handle_arr *subscriber_array; + struct vmci_handle context_handle = + vmci_make_handle(context_id, VMCI_EVENT_HANDLER); + + /* + * We create an array to hold the subscribers we find when + * scanning through all contexts. + */ + subscriber_array = vmci_handle_arr_create(0); + if (subscriber_array == NULL) + return VMCI_ERROR_NO_MEM; + + /* + * Scan all contexts to find who is interested in being + * notified about given contextID. + */ + rcu_read_lock(); + list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) { + struct vmci_handle_list *node; + + /* + * We only deliver notifications of the removal of + * contexts, if the two contexts are allowed to + * interact. + */ + if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags)) + continue; + + list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) { + if (!vmci_handle_is_equal(node->handle, context_handle)) + continue; + + vmci_handle_arr_append_entry(&subscriber_array, + vmci_make_handle(sub_ctx->cid, + VMCI_EVENT_HANDLER)); + } + } + rcu_read_unlock(); + + /* Fire event to all subscribers. */ + array_size = vmci_handle_arr_get_size(subscriber_array); + for (i = 0; i < array_size; i++) { + int result; + struct vmci_event_ctx ev; + + ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED; + ev.payload.context_id = context_id; + + result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, + &ev.msg.hdr, false); + if (result < VMCI_SUCCESS) { + pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n", + ev.msg.event_data.event, + ev.msg.hdr.dst.context); + /* We continue to enqueue on next subscriber. */ + } + } + vmci_handle_arr_destroy(subscriber_array); + + return VMCI_SUCCESS; +} + +/* + * Returns the current number of pending datagrams. The call may + * also serve as a synchronization point for the datagram queue, + * as no enqueue operations can occur concurrently. + */ +int vmci_ctx_pending_datagrams(u32 cid, u32 *pending) +{ + struct vmci_ctx *context; + + context = vmci_ctx_get(cid); + if (context == NULL) + return VMCI_ERROR_INVALID_ARGS; + + spin_lock(&context->lock); + if (pending) + *pending = context->pending_datagrams; + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return VMCI_SUCCESS; +} + +/* + * Queues a VMCI datagram for the appropriate target VM context. 
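+ *
+ * The sequence below is: validate the datagram size, look up the target
+ * context, allocate a queue entry, enforce the per-context queue-size
+ * limits, then append the entry, raise the notify flag and wake any
+ * waiter; on success the size of the queued datagram is returned.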
+ */ +int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg) +{ + struct vmci_datagram_queue_entry *dq_entry; + struct vmci_ctx *context; + struct vmci_handle dg_src; + size_t vmci_dg_size; + + vmci_dg_size = VMCI_DG_SIZE(dg); + if (vmci_dg_size > VMCI_MAX_DG_SIZE) { + pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size); + return VMCI_ERROR_INVALID_ARGS; + } + + /* Get the target VM's VMCI context. */ + context = vmci_ctx_get(cid); + if (!context) { + pr_devel("Invalid context (ID=0x%x)\n", cid); + return VMCI_ERROR_INVALID_ARGS; + } + + /* Allocate guest call entry and add it to the target VM's queue. */ + dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL); + if (dq_entry == NULL) { + pr_warn("Failed to allocate memory for datagram\n"); + vmci_ctx_put(context); + return VMCI_ERROR_NO_MEM; + } + dq_entry->dg = dg; + dq_entry->dg_size = vmci_dg_size; + dg_src = dg->src; + INIT_LIST_HEAD(&dq_entry->list_item); + + spin_lock(&context->lock); + + /* + * We put a higher limit on datagrams from the hypervisor. If + * the pending datagram is not from hypervisor, then we check + * if enqueueing it would exceed the + * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If + * the pending datagram is from hypervisor, we allow it to be + * queued at the destination side provided we don't reach the + * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit. + */ + if (context->datagram_queue_size + vmci_dg_size >= + VMCI_MAX_DATAGRAM_QUEUE_SIZE && + (!vmci_handle_is_equal(dg_src, + vmci_make_handle + (VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID)) || + context->datagram_queue_size + vmci_dg_size >= + VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) { + spin_unlock(&context->lock); + vmci_ctx_put(context); + kfree(dq_entry); + pr_devel("Context (ID=0x%x) receive queue is full\n", cid); + return VMCI_ERROR_NO_RESOURCES; + } + + list_add(&dq_entry->list_item, &context->datagram_queue); + context->pending_datagrams++; + context->datagram_queue_size += vmci_dg_size; + ctx_signal_notify(context); + wake_up(&context->host_context.wait_queue); + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return vmci_dg_size; +} + +/* + * Verifies whether a context with the specified context ID exists. + * FIXME: utility is dubious as no decisions can be reliably made + * using this data as context can appear and disappear at any time. + */ +bool vmci_ctx_exists(u32 cid) +{ + struct vmci_ctx *context; + bool exists = false; + + rcu_read_lock(); + + list_for_each_entry_rcu(context, &ctx_list.head, list_item) { + if (context->cid == cid) { + exists = true; + break; + } + } + + rcu_read_unlock(); + return exists; +} + +/* + * Retrieves VMCI context corresponding to the given cid. + */ +struct vmci_ctx *vmci_ctx_get(u32 cid) +{ + struct vmci_ctx *c, *context = NULL; + + if (cid == VMCI_INVALID_ID) + return NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(c, &ctx_list.head, list_item) { + if (c->cid == cid) { + /* + * The context owner drops its own reference to the + * context only after removing it from the list and + * waiting for RCU grace period to expire. This + * means that we are not about to increase the + * reference count of something that is in the + * process of being destroyed. + */ + context = c; + kref_get(&context->kref); + break; + } + } + rcu_read_unlock(); + + return context; +} + +/* + * Deallocates all parts of a context data structure. This + * function doesn't lock the context, because it assumes that + * the caller was holding the last reference to context. 
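+ *
+ * Reference-count lifecycle, for orientation: vmci_ctx_create() starts
+ * the count at one via kref_init(), vmci_ctx_get() takes an extra
+ * reference under rcu_read_lock(), and vmci_ctx_put() drops one; this
+ * function runs when the count reaches zero.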
+ */ +static void ctx_free_ctx(struct kref *kref) +{ + struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref); + struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp; + struct vmci_handle temp_handle; + struct vmci_handle_list *notifier, *tmp; + + /* + * Fire event to all contexts interested in knowing this + * context is dying. + */ + ctx_fire_notification(context->cid, context->priv_flags); + + /* + * Cleanup all queue pair resources attached to context. If + * the VM dies without cleaning up, this code will make sure + * that no resources are leaked. + */ + temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0); + while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) { + if (vmci_qp_broker_detach(temp_handle, + context) < VMCI_SUCCESS) { + /* + * When vmci_qp_broker_detach() succeeds it + * removes the handle from the array. If + * detach fails, we must remove the handle + * ourselves. + */ + vmci_handle_arr_remove_entry(context->queue_pair_array, + temp_handle); + } + temp_handle = + vmci_handle_arr_get_entry(context->queue_pair_array, 0); + } + + /* + * It is fine to destroy this without locking the callQueue, as + * this is the only thread having a reference to the context. + */ + list_for_each_entry_safe(dq_entry, dq_entry_tmp, + &context->datagram_queue, list_item) { + WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg)); + list_del(&dq_entry->list_item); + kfree(dq_entry->dg); + kfree(dq_entry); + } + + list_for_each_entry_safe(notifier, tmp, + &context->notifier_list, node) { + list_del(¬ifier->node); + kfree(notifier); + } + + vmci_handle_arr_destroy(context->queue_pair_array); + vmci_handle_arr_destroy(context->doorbell_array); + vmci_handle_arr_destroy(context->pending_doorbell_array); + vmci_ctx_unset_notify(context); + if (context->cred) + put_cred(context->cred); + kfree(context); +} + +/* + * Drops reference to VMCI context. If this is the last reference to + * the context it will be deallocated. A context is created with + * a reference count of one, and on destroy, it is removed from + * the context list before its reference count is decremented. Thus, + * if we reach zero, we are sure that nobody else are about to increment + * it (they need the entry in the context list for that), and so there + * is no need for locking. + */ +void vmci_ctx_put(struct vmci_ctx *context) +{ + kref_put(&context->kref, ctx_free_ctx); +} + +/* + * Dequeues the next datagram and returns it to caller. + * The caller passes in a pointer to the max size datagram + * it can handle and the datagram is only unqueued if the + * size is less than max_size. If larger max_size is set to + * the size of the datagram to give the caller a chance to + * set up a larger buffer for the guestcall. + */ +int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, + size_t *max_size, + struct vmci_datagram **dg) +{ + struct vmci_datagram_queue_entry *dq_entry; + struct list_head *list_item; + int rv; + + /* Dequeue the next datagram entry. */ + spin_lock(&context->lock); + if (context->pending_datagrams == 0) { + ctx_clear_notify_call(context); + spin_unlock(&context->lock); + pr_devel("No datagrams pending\n"); + return VMCI_ERROR_NO_MORE_DATAGRAMS; + } + + list_item = context->datagram_queue.next; + + dq_entry = + list_entry(list_item, struct vmci_datagram_queue_entry, list_item); + + /* Check size of caller's buffer. 
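+	 * If the next datagram is larger than *max_size, the call fails
+	 * with VMCI_ERROR_NO_MEM and reports the required size back
+	 * through *max_size, so the caller can arrange a larger buffer
+	 * and retry.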
+
+/*
+ * Dequeues the next datagram and returns it to caller.
+ * The caller passes in a pointer to the max size datagram
+ * it can handle and the datagram is only unqueued if the
+ * size is less than max_size. If larger, max_size is set to
+ * the size of the datagram to give the caller a chance to
+ * set up a larger buffer for the guestcall.
+ */
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+			      size_t *max_size,
+			      struct vmci_datagram **dg)
+{
+	struct vmci_datagram_queue_entry *dq_entry;
+	struct list_head *list_item;
+	int rv;
+
+	/* Dequeue the next datagram entry. */
+	spin_lock(&context->lock);
+	if (context->pending_datagrams == 0) {
+		ctx_clear_notify_call(context);
+		spin_unlock(&context->lock);
+		pr_devel("No datagrams pending\n");
+		return VMCI_ERROR_NO_MORE_DATAGRAMS;
+	}
+
+	list_item = context->datagram_queue.next;
+
+	dq_entry =
+	    list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
+
+	/* Check size of caller's buffer. */
+	if (*max_size < dq_entry->dg_size) {
+		*max_size = dq_entry->dg_size;
+		spin_unlock(&context->lock);
+		pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
+			 (u32) *max_size);
+		return VMCI_ERROR_NO_MEM;
+	}
+
+	list_del(list_item);
+	context->pending_datagrams--;
+	context->datagram_queue_size -= dq_entry->dg_size;
+	if (context->pending_datagrams == 0) {
+		ctx_clear_notify_call(context);
+		rv = VMCI_SUCCESS;
+	} else {
+		/*
+		 * Return the size of the next datagram.
+		 */
+		struct vmci_datagram_queue_entry *next_entry;
+
+		list_item = context->datagram_queue.next;
+		next_entry =
+		    list_entry(list_item, struct vmci_datagram_queue_entry,
+			       list_item);
+
+		/*
+		 * The following size_t -> int truncation is fine as
+		 * the maximum size of a (routable) datagram is 68KB.
+		 */
+		rv = (int)next_entry->dg_size;
+	}
+	spin_unlock(&context->lock);
+
+	/* Caller must free datagram. */
+	*dg = dq_entry->dg;
+	dq_entry->dg = NULL;
+	kfree(dq_entry);
+
+	return rv;
+}
+
+/*
+ * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
+ * page mapped/locked by vmci_setup_notify().
+ */
+void vmci_ctx_unset_notify(struct vmci_ctx *context)
+{
+	struct page *notify_page;
+
+	spin_lock(&context->lock);
+
+	notify_page = context->notify_page;
+	context->notify = &ctx_dummy_notify;
+	context->notify_page = NULL;
+
+	spin_unlock(&context->lock);
+
+	if (notify_page) {
+		kunmap(notify_page);
+		put_page(notify_page);
+	}
+}
+
+/*
+ * Adds remote_cid to the list of contexts the current context wants
+ * notifications from/about.
+ */
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+{
+	struct vmci_ctx *context;
+	struct vmci_handle_list *notifier, *n;
+	int result;
+	bool exists = false;
+
+	context = vmci_ctx_get(context_id);
+	if (!context)
+		return VMCI_ERROR_NOT_FOUND;
+
+	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
+		pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
+			 context_id, remote_cid);
+		result = VMCI_ERROR_DST_UNREACHABLE;
+		goto out;
+	}
+
+	if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+		result = VMCI_ERROR_NO_ACCESS;
+		goto out;
+	}
+
+	notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
+	if (!notifier) {
+		result = VMCI_ERROR_NO_MEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&notifier->node);
+	notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+	spin_lock(&context->lock);
+
+	list_for_each_entry(n, &context->notifier_list, node) {
+		if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+			exists = true;
+			break;
+		}
+	}
+
+	if (exists) {
+		kfree(notifier);
+		result = VMCI_ERROR_ALREADY_EXISTS;
+	} else {
+		list_add_tail_rcu(&notifier->node, &context->notifier_list);
+		context->n_notifiers++;
+		result = VMCI_SUCCESS;
+	}
+
+	spin_unlock(&context->lock);
+
+ out:
+	vmci_ctx_put(context);
+	return result;
+}
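/*
 * [Editor's sketch, not part of the patch.] The add/remove pair is symmetric,
 * and idempotence is signalled by VMCI_ERROR_ALREADY_EXISTS. A hypothetical
 * caller subscribing a VM context to notifications about the host context:
 */
static int example_subscribe_to_host(u32 my_cid)
{
	int rc = vmci_ctx_add_notification(my_cid, VMCI_HOST_CONTEXT_ID);

	if (rc != VMCI_SUCCESS && rc != VMCI_ERROR_ALREADY_EXISTS)
		return rc;

	/* ... later, drop the subscription again ... */
	return vmci_ctx_remove_notification(my_cid, VMCI_HOST_CONTEXT_ID);
}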
+
+/*
+ * Remove remote_cid from current context's list of contexts it is
+ * interested in getting notifications from/about.
+ */
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
+{
+	struct vmci_ctx *context;
+	struct vmci_handle_list *notifier, *tmp;
+	struct vmci_handle handle;
+	bool found = false;
+
+	context = vmci_ctx_get(context_id);
+	if (!context)
+		return VMCI_ERROR_NOT_FOUND;
+
+	handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+	spin_lock(&context->lock);
+	list_for_each_entry_safe(notifier, tmp,
+				 &context->notifier_list, node) {
+		if (vmci_handle_is_equal(notifier->handle, handle)) {
+			list_del_rcu(&notifier->node);
+			context->n_notifiers--;
+			found = true;
+			break;
+		}
+	}
+	spin_unlock(&context->lock);
+
+	if (found) {
+		synchronize_rcu();
+		kfree(notifier);
+	}
+
+	vmci_ctx_put(context);
+
+	return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+}
+
+static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
+					u32 *buf_size, void **pbuf)
+{
+	u32 *notifiers;
+	size_t data_size;
+	struct vmci_handle_list *entry;
+	int i = 0;
+
+	if (context->n_notifiers == 0) {
+		*buf_size = 0;
+		*pbuf = NULL;
+		return VMCI_SUCCESS;
+	}
+
+	data_size = context->n_notifiers * sizeof(*notifiers);
+	if (*buf_size < data_size) {
+		*buf_size = data_size;
+		return VMCI_ERROR_MORE_DATA;
+	}
+
+	notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
+	if (!notifiers)
+		return VMCI_ERROR_NO_MEM;
+
+	list_for_each_entry(entry, &context->notifier_list, node)
+		notifiers[i++] = entry->handle.context;
+
+	*buf_size = data_size;
+	*pbuf = notifiers;
+	return VMCI_SUCCESS;
+}
+
+static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+					u32 *buf_size, void **pbuf)
+{
+	struct dbell_cpt_state *dbells;
+	size_t n_doorbells;
+	int i;
+
+	n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+	if (n_doorbells > 0) {
+		size_t data_size = n_doorbells * sizeof(*dbells);
+		if (*buf_size < data_size) {
+			*buf_size = data_size;
+			return VMCI_ERROR_MORE_DATA;
+		}
+
+		dbells = kmalloc(data_size, GFP_ATOMIC);
+		if (!dbells)
+			return VMCI_ERROR_NO_MEM;
+
+		for (i = 0; i < n_doorbells; i++)
+			dbells[i].handle = vmci_handle_arr_get_entry(
+						context->doorbell_array, i);
+
+		*buf_size = data_size;
+		*pbuf = dbells;
+	} else {
+		*buf_size = 0;
+		*pbuf = NULL;
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Get current context's checkpoint state of given type.
+ */
+int vmci_ctx_get_chkpt_state(u32 context_id,
+			     u32 cpt_type,
+			     u32 *buf_size,
+			     void **pbuf)
+{
+	struct vmci_ctx *context;
+	int result;
+
+	context = vmci_ctx_get(context_id);
+	if (!context)
+		return VMCI_ERROR_NOT_FOUND;
+
+	spin_lock(&context->lock);
+
+	switch (cpt_type) {
+	case VMCI_NOTIFICATION_CPT_STATE:
+		result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
+		break;
+
+	case VMCI_WELLKNOWN_CPT_STATE:
+		/*
+		 * For compatibility with VMX'en with VM to VM communication, we
+		 * always return zero wellknown handles.
+		 */
+
+		*buf_size = 0;
+		*pbuf = NULL;
+		result = VMCI_SUCCESS;
+		break;
+
+	case VMCI_DOORBELL_CPT_STATE:
+		result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
+		break;
+
+	default:
+		pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+		result = VMCI_ERROR_INVALID_ARGS;
+		break;
+	}
+
+	spin_unlock(&context->lock);
+	vmci_ctx_put(context);
+
+	return result;
+}
+
+/*
+ * Set current context's checkpoint state of given type.
+ */ +int vmci_ctx_set_chkpt_state(u32 context_id, + u32 cpt_type, + u32 buf_size, + void *cpt_buf) +{ + u32 i; + u32 current_id; + int result = VMCI_SUCCESS; + u32 num_ids = buf_size / sizeof(u32); + + if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) { + /* + * We would end up here if VMX with VM to VM communication + * attempts to restore a checkpoint with wellknown handles. + */ + pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n"); + return VMCI_ERROR_OBSOLETE; + } + + if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) { + pr_devel("Invalid cpt state (type=%d)\n", cpt_type); + return VMCI_ERROR_INVALID_ARGS; + } + + for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) { + current_id = ((u32 *)cpt_buf)[i]; + result = vmci_ctx_add_notification(context_id, current_id); + if (result != VMCI_SUCCESS) + break; + } + if (result != VMCI_SUCCESS) + pr_devel("Failed to set cpt state (type=%d) (error=%d)\n", + cpt_type, result); + + return result; +} + +/* + * Retrieves the specified context's pending notifications in the + * form of a handle array. The handle arrays returned are the + * actual data - not a copy and should not be modified by the + * caller. They must be released using + * vmci_ctx_rcv_notifications_release. + */ +int vmci_ctx_rcv_notifications_get(u32 context_id, + struct vmci_handle_arr **db_handle_array, + struct vmci_handle_arr **qp_handle_array) +{ + struct vmci_ctx *context; + int result = VMCI_SUCCESS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + + *db_handle_array = context->pending_doorbell_array; + context->pending_doorbell_array = vmci_handle_arr_create(0); + if (!context->pending_doorbell_array) { + context->pending_doorbell_array = *db_handle_array; + *db_handle_array = NULL; + result = VMCI_ERROR_NO_MEM; + } + *qp_handle_array = NULL; + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Releases handle arrays with pending notifications previously + * retrieved using vmci_ctx_rcv_notifications_get. If the + * notifications were not successfully handed over to the guest, + * success must be false. + */ +void vmci_ctx_rcv_notifications_release(u32 context_id, + struct vmci_handle_arr *db_handle_array, + struct vmci_handle_arr *qp_handle_array, + bool success) +{ + struct vmci_ctx *context = vmci_ctx_get(context_id); + + spin_lock(&context->lock); + if (!success) { + struct vmci_handle handle; + + /* + * New notifications may have been added while we were not + * holding the context lock, so we transfer any new pending + * doorbell notifications to the old array, and reinstate the + * old array. + */ + + handle = vmci_handle_arr_remove_tail( + context->pending_doorbell_array); + while (!vmci_handle_is_invalid(handle)) { + if (!vmci_handle_arr_has_entry(db_handle_array, + handle)) { + vmci_handle_arr_append_entry( + &db_handle_array, handle); + } + handle = vmci_handle_arr_remove_tail( + context->pending_doorbell_array); + } + vmci_handle_arr_destroy(context->pending_doorbell_array); + context->pending_doorbell_array = db_handle_array; + db_handle_array = NULL; + } else { + ctx_clear_notify_call(context); + } + spin_unlock(&context->lock); + vmci_ctx_put(context); + + if (db_handle_array) + vmci_handle_arr_destroy(db_handle_array); + + if (qp_handle_array) + vmci_handle_arr_destroy(qp_handle_array); +} + +/* + * Registers that a new doorbell handle has been allocated by the + * context. 
Only doorbell handles registered can be notified. + */ +int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle) +{ + struct vmci_ctx *context; + int result; + + if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) { + vmci_handle_arr_append_entry(&context->doorbell_array, handle); + result = VMCI_SUCCESS; + } else { + result = VMCI_ERROR_DUPLICATE_ENTRY; + } + + spin_unlock(&context->lock); + vmci_ctx_put(context); + + return result; +} + +/* + * Unregisters a doorbell handle that was previously registered + * with vmci_ctx_dbell_create. + */ +int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle) +{ + struct vmci_ctx *context; + struct vmci_handle removed_handle; + + if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + removed_handle = + vmci_handle_arr_remove_entry(context->doorbell_array, handle); + vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle); + spin_unlock(&context->lock); + + vmci_ctx_put(context); + + return vmci_handle_is_invalid(removed_handle) ? + VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; +} + +/* + * Unregisters all doorbell handles that were previously + * registered with vmci_ctx_dbell_create. + */ +int vmci_ctx_dbell_destroy_all(u32 context_id) +{ + struct vmci_ctx *context; + struct vmci_handle handle; + + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + context = vmci_ctx_get(context_id); + if (context == NULL) + return VMCI_ERROR_NOT_FOUND; + + spin_lock(&context->lock); + do { + struct vmci_handle_arr *arr = context->doorbell_array; + handle = vmci_handle_arr_remove_tail(arr); + } while (!vmci_handle_is_invalid(handle)); + do { + struct vmci_handle_arr *arr = context->pending_doorbell_array; + handle = vmci_handle_arr_remove_tail(arr); + } while (!vmci_handle_is_invalid(handle)); + spin_unlock(&context->lock); + + vmci_ctx_put(context); + + return VMCI_SUCCESS; +} + +/* + * Registers a notification of a doorbell handle initiated by the + * specified source context. The notification of doorbells are + * subject to the same isolation rules as datagram delivery. To + * allow host side senders of notifications a finer granularity + * of sender rights than those assigned to the sending context + * itself, the host context is required to specify a different + * set of privilege flags that will override the privileges of + * the source context. + */ +int vmci_ctx_notify_dbell(u32 src_cid, + struct vmci_handle handle, + u32 src_priv_flags) +{ + struct vmci_ctx *dst_context; + int result; + + if (vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + /* Get the target VM's VMCI context. 
*/ + dst_context = vmci_ctx_get(handle.context); + if (!dst_context) { + pr_devel("Invalid context (ID=0x%x)\n", handle.context); + return VMCI_ERROR_NOT_FOUND; + } + + if (src_cid != handle.context) { + u32 dst_priv_flags; + + if (VMCI_CONTEXT_IS_VM(src_cid) && + VMCI_CONTEXT_IS_VM(handle.context)) { + pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n", + src_cid, handle.context); + result = VMCI_ERROR_DST_UNREACHABLE; + goto out; + } + + result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags); + if (result < VMCI_SUCCESS) { + pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n", + handle.context, handle.resource); + goto out; + } + + if (src_cid != VMCI_HOST_CONTEXT_ID || + src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) { + src_priv_flags = vmci_context_get_priv_flags(src_cid); + } + + if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) { + result = VMCI_ERROR_NO_ACCESS; + goto out; + } + } + + if (handle.context == VMCI_HOST_CONTEXT_ID) { + result = vmci_dbell_host_context_notify(src_cid, handle); + } else { + spin_lock(&dst_context->lock); + + if (!vmci_handle_arr_has_entry(dst_context->doorbell_array, + handle)) { + result = VMCI_ERROR_NOT_FOUND; + } else { + if (!vmci_handle_arr_has_entry( + dst_context->pending_doorbell_array, + handle)) { + vmci_handle_arr_append_entry( + &dst_context->pending_doorbell_array, + handle); + + ctx_signal_notify(dst_context); + wake_up(&dst_context->host_context.wait_queue); + + } + result = VMCI_SUCCESS; + } + spin_unlock(&dst_context->lock); + } + + out: + vmci_ctx_put(dst_context); + + return result; +} + +bool vmci_ctx_supports_host_qp(struct vmci_ctx *context) +{ + return context && context->user_version >= VMCI_VERSION_HOSTQP; +} + +/* + * Registers that a new queue pair handle has been allocated by + * the context. + */ +int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle) +{ + int result; + + if (context == NULL || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) { + vmci_handle_arr_append_entry(&context->queue_pair_array, + handle); + result = VMCI_SUCCESS; + } else { + result = VMCI_ERROR_DUPLICATE_ENTRY; + } + + return result; +} + +/* + * Unregisters a queue pair handle that was previously registered + * with vmci_ctx_qp_create. + */ +int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle) +{ + struct vmci_handle hndl; + + if (context == NULL || vmci_handle_is_invalid(handle)) + return VMCI_ERROR_INVALID_ARGS; + + hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle); + + return vmci_handle_is_invalid(hndl) ? + VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; +} + +/* + * Determines whether a given queue pair handle is registered + * with the given context. + */ +bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle) +{ + if (context == NULL || vmci_handle_is_invalid(handle)) + return false; + + return vmci_handle_arr_has_entry(context->queue_pair_array, handle); +} + +/* + * vmci_context_get_priv_flags() - Retrieve privilege flags. + * @context_id: The context ID of the VMCI context. + * + * Retrieves privilege flags of the given VMCI context ID. 
+ */
+u32 vmci_context_get_priv_flags(u32 context_id)
+{
+	if (vmci_host_code_active()) {
+		u32 flags;
+		struct vmci_ctx *context;
+
+		context = vmci_ctx_get(context_id);
+		if (!context)
+			return VMCI_LEAST_PRIVILEGE_FLAGS;
+
+		flags = context->priv_flags;
+		vmci_ctx_put(context);
+		return flags;
+	}
+	return VMCI_NO_PRIVILEGE_FLAGS;
+}
+EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
+
+/*
+ * vmci_is_context_owner() - Determines if user is the context owner
+ * @context_id: The context ID of the VMCI context.
+ * @uid: The host user id (real kernel value).
+ *
+ * Determines whether a given UID is the owner of given VMCI context.
+ */
+bool vmci_is_context_owner(u32 context_id, kuid_t uid)
+{
+	bool is_owner = false;
+
+	if (vmci_host_code_active()) {
+		struct vmci_ctx *context = vmci_ctx_get(context_id);
+		if (context) {
+			if (context->cred)
+				is_owner = uid_eq(context->cred->uid, uid);
+			vmci_ctx_put(context);
+		}
+	}
+
+	return is_owner;
+}
+EXPORT_SYMBOL_GPL(vmci_is_context_owner);
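/*
 * [Editor's sketch, not part of the patch.] A typical way the two exported
 * helpers above combine in a host-side permission check; the function name
 * and the -EPERM mapping are illustrative assumptions.
 */
static int example_may_use_context(u32 cid)
{
	if ((vmci_context_get_priv_flags(cid) & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !vmci_is_context_owner(cid, current_euid()))
		return -EPERM;

	return 0;
}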
diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h
new file mode 100644
index 00000000000..24a88e68a1e
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.h
@@ -0,0 +1,182 @@
+/*
+ * VMware VMCI driver (vmciContext.h)
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_CONTEXT_H_
+#define _VMCI_CONTEXT_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_datagram.h"
+
+/* Used to determine what checkpoint state to get and set. */
+enum {
+	VMCI_NOTIFICATION_CPT_STATE = 1,
+	VMCI_WELLKNOWN_CPT_STATE = 2,
+	VMCI_DG_OUT_STATE = 3,
+	VMCI_DG_IN_STATE = 4,
+	VMCI_DG_IN_SIZE_STATE = 5,
+	VMCI_DOORBELL_CPT_STATE = 6,
+};
+
+/* Host specific struct used for signalling */
+struct vmci_host {
+	wait_queue_head_t wait_queue;
+};
+
+struct vmci_handle_list {
+	struct list_head node;
+	struct vmci_handle handle;
+};
+
+struct vmci_ctx {
+	struct list_head list_item;	/* For global VMCI list. */
+	u32 cid;
+	struct kref kref;
+	struct list_head datagram_queue;	/* Head of per VM queue. */
+	u32 pending_datagrams;
+	size_t datagram_queue_size;	/* Size of datagram queue in bytes. */
+
+	/*
+	 * Version of the code that created
+	 * this context; e.g., VMX.
+	 */
+	int user_version;
+	spinlock_t lock;	/* Locks the datagram queue and handle arrays. */
+
+	/*
+	 * Queue pairs this context is attached to. The array of
+	 * handles for queue pairs is accessed
+	 * from the code for QP API, and there
+	 * it is protected by the QP lock. It
+	 * is also accessed from the context
+	 * clean up path, which does not
+	 * require a lock. VMCILock is not
+	 * used to protect the QP array field.
+	 */
+	struct vmci_handle_arr *queue_pair_array;
+
+	/* Doorbells created by context. */
+	struct vmci_handle_arr *doorbell_array;
+
+	/* Doorbells pending for context. */
+	struct vmci_handle_arr *pending_doorbell_array;
+
+	/* Contexts current context is subscribing to. */
+	struct list_head notifier_list;
+	unsigned int n_notifiers;
+
+	struct vmci_host host_context;
+	u32 priv_flags;
+
+	const struct cred *cred;
+	bool *notify;	/* Notify flag pointer - hosted only. */
+	struct page *notify_page;	/* Page backing the notify UVA. */
+};
+
+/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
+struct vmci_ctx_info {
+	u32 remote_cid;
+	int result;
+};
+
+/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
+struct vmci_ctx_chkpt_buf_info {
+	u64 cpt_buf;
+	u32 cpt_type;
+	u32 buf_size;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * VMCINotificationReceiveInfo: Used to receive pending notifications
+ * for doorbells and queue pairs.
+ */
+struct vmci_ctx_notify_recv_info {
+	u64 db_handle_buf_uva;
+	u64 db_handle_buf_size;
+	u64 qp_handle_buf_uva;
+	u64 qp_handle_buf_size;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * Utility function that checks whether two entities are allowed
+ * to interact. If one of them is restricted, the other one must
+ * be trusted.
+ */
+static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
+{
+	return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+		!(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
+	       ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+		!(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
+}
+
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
+				 uintptr_t event_hnd, int version,
+				 const struct cred *cred);
+void vmci_ctx_destroy(struct vmci_ctx *context);
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+			      size_t *max_size, struct vmci_datagram **dg);
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
+struct vmci_ctx *vmci_ctx_get(u32 cid);
+void vmci_ctx_put(struct vmci_ctx *context);
+bool vmci_ctx_exists(u32 cid);
+
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
+			     u32 *num_cids, void **cpt_buf_ptr);
+int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
+			     u32 num_cids, void *cpt_buf);
+
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
+
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
+void vmci_ctx_unset_notify(struct vmci_ctx *context);
+
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy_all(u32 context_id);
+int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
+			  u32 src_priv_flags);
+
+int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
+				   **db_handle_array, struct vmci_handle_arr
+				   **qp_handle_array);
+void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
+					*db_handle_array, struct vmci_handle_arr
+					*qp_handle_array, bool success);
+
+static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
+{
+	if (!context)
+		return VMCI_INVALID_ID;
+	return context->cid;
+}
+
+#endif /* _VMCI_CONTEXT_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644
index 00000000000..f3cdd904fe4
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -0,0 +1,502 @@
+/*
+ * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/bug.h> + +#include "vmci_datagram.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" +#include "vmci_route.h" + +/* + * struct datagram_entry describes the datagram entity. It is used for datagram + * entities created only on the host. + */ +struct datagram_entry { + struct vmci_resource resource; + u32 flags; + bool run_delayed; + vmci_datagram_recv_cb recv_cb; + void *client_data; + u32 priv_flags; +}; + +struct delayed_datagram_info { + struct datagram_entry *entry; + struct work_struct work; + bool in_dg_host_queue; + /* msg and msg_payload must be together. */ + struct vmci_datagram msg; + u8 msg_payload[]; +}; + +/* Number of in-flight host->host datagrams */ +static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0); + +/* + * Create a datagram entry given a handle pointer. + */ +static int dg_create_handle(u32 resource_id, + u32 flags, + u32 priv_flags, + vmci_datagram_recv_cb recv_cb, + void *client_data, struct vmci_handle *out_handle) +{ + int result; + u32 context_id; + struct vmci_handle handle; + struct datagram_entry *entry; + + if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) + return VMCI_ERROR_INVALID_ARGS; + + if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) { + context_id = VMCI_INVALID_ID; + } else { + context_id = vmci_get_context_id(); + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_NO_RESOURCES; + } + + handle = vmci_make_handle(context_id, resource_id); + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + pr_warn("Failed allocating memory for datagram entry\n"); + return VMCI_ERROR_NO_MEM; + } + + entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false; + entry->flags = flags; + entry->recv_cb = recv_cb; + entry->client_data = client_data; + entry->priv_flags = priv_flags; + + /* Make datagram resource live. */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_DATAGRAM, + handle); + if (result != VMCI_SUCCESS) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n", + handle.context, handle.resource, result); + kfree(entry); + return result; + } + + *out_handle = vmci_resource_handle(&entry->resource); + return VMCI_SUCCESS; +} + +/* + * Internal utility function with the same purpose as + * vmci_datagram_get_priv_flags that also takes a context_id. 
+ */ +static int vmci_datagram_get_priv_flags(u32 context_id, + struct vmci_handle handle, + u32 *priv_flags) +{ + if (context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + if (context_id == VMCI_HOST_CONTEXT_ID) { + struct datagram_entry *src_entry; + struct vmci_resource *resource; + + resource = vmci_resource_by_handle(handle, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) + return VMCI_ERROR_INVALID_ARGS; + + src_entry = container_of(resource, struct datagram_entry, + resource); + *priv_flags = src_entry->priv_flags; + vmci_resource_put(resource); + } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID) + *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS; + else + *priv_flags = vmci_context_get_priv_flags(context_id); + + return VMCI_SUCCESS; +} + +/* + * Calls the specified callback in a delayed context. + */ +static void dg_delayed_dispatch(struct work_struct *work) +{ + struct delayed_datagram_info *dg_info = + container_of(work, struct delayed_datagram_info, work); + + dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg); + + vmci_resource_put(&dg_info->entry->resource); + + if (dg_info->in_dg_host_queue) + atomic_dec(&delayed_dg_host_queue_size); + + kfree(dg_info); +} + +/* + * Dispatch datagram as a host, to the host, or other vm context. This + * function cannot dispatch to hypervisor context handlers. This should + * have been handled before we get here by vmci_datagram_dispatch. + * Returns number of bytes sent on success, error code otherwise. + */ +static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg) +{ + int retval; + size_t dg_size; + u32 src_priv_flags; + + dg_size = VMCI_DG_SIZE(dg); + + /* Host cannot send to the hypervisor. */ + if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) + return VMCI_ERROR_DST_UNREACHABLE; + + /* Check that source handle matches sending context. */ + if (dg->src.context != context_id) { + pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n", + context_id, dg->src.context, dg->src.resource); + return VMCI_ERROR_NO_ACCESS; + } + + /* Get hold of privileges of sending endpoint. */ + retval = vmci_datagram_get_priv_flags(context_id, dg->src, + &src_priv_flags); + if (retval != VMCI_SUCCESS) { + pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n", + dg->src.context, dg->src.resource); + return retval; + } + + /* Determine if we should route to host or guest destination. */ + if (dg->dst.context == VMCI_HOST_CONTEXT_ID) { + /* Route to host datagram entry. */ + struct datagram_entry *dst_entry; + struct vmci_resource *resource; + + if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && + dg->dst.resource == VMCI_EVENT_HANDLER) { + return vmci_event_dispatch(dg); + } + + resource = vmci_resource_by_handle(dg->dst, + VMCI_RESOURCE_TYPE_DATAGRAM); + if (!resource) { + pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n", + dg->dst.context, dg->dst.resource); + return VMCI_ERROR_INVALID_RESOURCE; + } + dst_entry = container_of(resource, struct datagram_entry, + resource); + if (vmci_deny_interaction(src_priv_flags, + dst_entry->priv_flags)) { + vmci_resource_put(resource); + return VMCI_ERROR_NO_ACCESS; + } + + /* + * If a VMCI datagram destined for the host is also sent by the + * host, we always run it delayed. This ensures that no locks + * are held when the datagram callback runs. 
+		 */
+		if (dst_entry->run_delayed ||
+		    dg->src.context == VMCI_HOST_CONTEXT_ID) {
+			struct delayed_datagram_info *dg_info;
+
+			if (atomic_add_return(1, &delayed_dg_host_queue_size)
+			    == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
+				atomic_dec(&delayed_dg_host_queue_size);
+				vmci_resource_put(resource);
+				return VMCI_ERROR_NO_MEM;
+			}
+
+			dg_info = kmalloc(sizeof(*dg_info) +
+				    (size_t) dg->payload_size, GFP_ATOMIC);
+			if (!dg_info) {
+				atomic_dec(&delayed_dg_host_queue_size);
+				vmci_resource_put(resource);
+				return VMCI_ERROR_NO_MEM;
+			}
+
+			dg_info->in_dg_host_queue = true;
+			dg_info->entry = dst_entry;
+			memcpy(&dg_info->msg, dg, dg_size);
+
+			INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+			schedule_work(&dg_info->work);
+			retval = VMCI_SUCCESS;
+
+		} else {
+			retval = dst_entry->recv_cb(dst_entry->client_data, dg);
+			vmci_resource_put(resource);
+			if (retval < VMCI_SUCCESS)
+				return retval;
+		}
+	} else {
+		/* Route to destination VM context. */
+		struct vmci_datagram *new_dg;
+
+		if (context_id != dg->dst.context) {
+			if (vmci_deny_interaction(src_priv_flags,
+						  vmci_context_get_priv_flags
+						  (dg->dst.context))) {
+				return VMCI_ERROR_NO_ACCESS;
+			} else if (VMCI_CONTEXT_IS_VM(context_id)) {
+				/*
+				 * If the sending context is a VM, it
+				 * cannot reach another VM.
+				 */
+
+				pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
+					 context_id, dg->dst.context);
+				return VMCI_ERROR_DST_UNREACHABLE;
+			}
+		}
+
+		/* We make a copy to enqueue. */
+		new_dg = kmalloc(dg_size, GFP_KERNEL);
+		if (new_dg == NULL)
+			return VMCI_ERROR_NO_MEM;
+
+		memcpy(new_dg, dg, dg_size);
+		retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
+		if (retval < VMCI_SUCCESS) {
+			kfree(new_dg);
+			return retval;
+		}
+	}
+
+	/*
+	 * We currently truncate the size to signed 32 bits. This doesn't
+	 * matter for this handler as it only supports 4KB messages.
+	 */
+	return (int)dg_size;
+}
+
+/*
+ * Dispatch datagram as a guest, down through the VMX and potentially to
+ * the host.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_guest(struct vmci_datagram *dg)
+{
+	int retval;
+	struct vmci_resource *resource;
+
+	resource = vmci_resource_by_handle(dg->src,
+					   VMCI_RESOURCE_TYPE_DATAGRAM);
+	if (!resource)
+		return VMCI_ERROR_NO_HANDLE;
+
+	retval = vmci_send_datagram(dg);
+	vmci_resource_put(resource);
+	return retval;
+}
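/*
 * [Editor's sketch, not part of the patch.] Both dispatch paths use the VMCI
 * return convention: a negative VMCI_ERROR_* code on failure, otherwise the
 * number of bytes handed off. A hypothetical caller therefore checks against
 * VMCI_SUCCESS rather than for a non-zero value:
 */
static int example_dispatch(struct vmci_datagram *dg)
{
	int result = vmci_datagram_dispatch(VMCI_INVALID_ID, dg, false);

	if (result < VMCI_SUCCESS)
		return result;		/* error code */

	/* result is the size of the routed datagram in bytes */
	return 0;
}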
+
+/*
+ * Dispatch datagram. This will determine the routing for the datagram
+ * and dispatch it accordingly.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+int vmci_datagram_dispatch(u32 context_id,
+			   struct vmci_datagram *dg, bool from_guest)
+{
+	int retval;
+	enum vmci_route route;
+
+	BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
+
+	if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
+		pr_devel("Payload (size=%llu bytes) too big to send\n",
+			 (unsigned long long)dg->payload_size);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
+	if (retval < VMCI_SUCCESS) {
+		pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
+			 dg->src.context, dg->dst.context, retval);
+		return retval;
+	}
+
+	if (VMCI_ROUTE_AS_HOST == route) {
+		if (VMCI_INVALID_ID == context_id)
+			context_id = VMCI_HOST_CONTEXT_ID;
+		return dg_dispatch_as_host(context_id, dg);
+	}
+
+	if (VMCI_ROUTE_AS_GUEST == route)
+		return dg_dispatch_as_guest(dg);
+
+	pr_warn("Unknown route (%d) for datagram\n", route);
+	return VMCI_ERROR_DST_UNREACHABLE;
+}
+
+/*
+ * Invoke the handler for the given datagram. This is intended to be
+ * called only when acting as a guest and receiving a datagram from the
+ * virtual device.
+ */
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+{
+	struct vmci_resource *resource;
+	struct datagram_entry *dst_entry;
+
+	resource = vmci_resource_by_handle(dg->dst,
+					   VMCI_RESOURCE_TYPE_DATAGRAM);
+	if (!resource) {
+		pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
+			 dg->dst.context, dg->dst.resource);
+		return VMCI_ERROR_NO_HANDLE;
+	}
+
+	dst_entry = container_of(resource, struct datagram_entry, resource);
+	if (dst_entry->run_delayed) {
+		struct delayed_datagram_info *dg_info;
+
+		dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
+				  GFP_ATOMIC);
+		if (!dg_info) {
+			vmci_resource_put(resource);
+			return VMCI_ERROR_NO_MEM;
+		}
+
+		dg_info->in_dg_host_queue = false;
+		dg_info->entry = dst_entry;
+		memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+
+		INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+		schedule_work(&dg_info->work);
+	} else {
+		dst_entry->recv_cb(dst_entry->client_data, dg);
+		vmci_resource_put(resource);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
+ * @resource_id: The resource ID.
+ * @flags: Datagram Flags.
+ * @priv_flags: Privilege Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Pointer for a datagram_entry struct
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to it.
+ */
+int vmci_datagram_create_handle_priv(u32 resource_id,
+				     u32 flags,
+				     u32 priv_flags,
+				     vmci_datagram_recv_cb recv_cb,
+				     void *client_data,
+				     struct vmci_handle *out_handle)
+{
+	if (out_handle == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (recv_cb == NULL) {
+		pr_devel("Client callback needed when creating datagram\n");
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
+				client_data, out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
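/*
 * [Editor's sketch, not part of the patch.] A minimal host-side endpoint
 * built on the API above; the resource ID, the empty callback body, and the
 * choice of a delayed callback are illustrative assumptions.
 */
static int example_recv_cb(void *client_data, struct vmci_datagram *dg)
{
	/* invoked for every datagram routed to the handle below */
	return VMCI_SUCCESS;
}

static int example_open_endpoint(struct vmci_handle *out_handle)
{
	return vmci_datagram_create_handle_priv(42 /* assumed resource id */,
						VMCI_FLAG_DG_DELAYED_CB,
						VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
						example_recv_cb, NULL,
						out_handle);
}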
+
+/*
+ * vmci_datagram_create_handle() - Create host context datagram endpoint
+ * @resource_id: Resource ID.
+ * @flags: Datagram Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Pointer for a datagram_entry struct
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to
+ * it. Same as vmci_datagram_create_handle_priv without the privilege
+ * flags argument.
+ */
+int vmci_datagram_create_handle(u32 resource_id,
+				u32 flags,
+				vmci_datagram_recv_cb recv_cb,
+				void *client_data,
+				struct vmci_handle *out_handle)
+{
+	return vmci_datagram_create_handle_priv(
+		resource_id, flags,
+		VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+		recv_cb, client_data,
+		out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
+
+/*
+ * vmci_datagram_destroy_handle() - Destroys datagram handle
+ * @handle: vmci_handle to be destroyed and reaped.
+ *
+ * Use this function to destroy any datagram handles created by
+ * vmci_datagram_create_handle{,Priv} functions.
+ */
+int vmci_datagram_destroy_handle(struct vmci_handle handle)
+{
+	struct datagram_entry *entry;
+	struct vmci_resource *resource;
+
+	resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
+	if (!resource) {
+		pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	entry = container_of(resource, struct datagram_entry, resource);
+
+	vmci_resource_put(&entry->resource);
+	vmci_resource_remove(&entry->resource);
+	kfree(entry);
+
+	return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
+
+/*
+ * vmci_datagram_send() - Send a datagram
+ * @msg: The datagram to send.
+ *
+ * Sends the provided datagram on its merry way.
+ */
+int vmci_datagram_send(struct vmci_datagram *msg)
+{
+	if (msg == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_send);
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h
new file mode 100644
index 00000000000..eb4aab7f64e
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DATAGRAM_H_
+#define _VMCI_DATAGRAM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vmci_context.h"
+
+#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
+
+/*
+ * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
+ * datagram queues. It is allocated in non-paged memory, as the
+ * content is accessed while holding a spinlock. The pending datagram
+ * itself may be allocated from paged memory. We shadow the size of
+ * the datagram in the non-paged queue entry as this size is used
+ * while holding the same spinlock as above.
+ */
+struct vmci_datagram_queue_entry {
+	struct list_head list_item;	/* For queuing. */
+	size_t dg_size;	/* Size of datagram. */
+	struct vmci_datagram *dg;	/* Pending datagram. */
+};
+
+/* VMCIDatagramSendRecvInfo */
+struct vmci_datagram_snd_rcv_info {
+	u64 addr;
+	u32 len;
+	s32 result;
+};
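/*
 * [Editor's sketch, not part of the patch.] Sending with vmci_datagram_send()
 * relies on the same contiguous header-plus-payload layout used by struct
 * delayed_datagram_info earlier in this patch; the destination handle and the
 * eight-byte payload here are illustrative assumptions.
 */
struct example_msg {
	struct vmci_datagram hdr;	/* must come first */
	u8 payload[8];
};

static int example_send_to(struct vmci_handle dst)
{
	struct example_msg msg = { };

	msg.hdr.dst = dst;
	msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	msg.hdr.payload_size = sizeof(msg.payload);

	return vmci_datagram_send(&msg.hdr);	/* bytes sent or VMCI_ERROR_* */
}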
+
+/* Datagram API for non-public use. */
+int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
+			   bool from_guest);
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
+
+#endif /* _VMCI_DATAGRAM_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
new file mode 100644
index 00000000000..a8cee33ae8d
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -0,0 +1,601 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/completion.h>
+#include <linux/hash.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+
+#define VMCI_DOORBELL_INDEX_BITS	6
+#define VMCI_DOORBELL_INDEX_TABLE_SIZE	(1 << VMCI_DOORBELL_INDEX_BITS)
+#define VMCI_DOORBELL_HASH(_idx)	hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
+
+/*
+ * DoorbellEntry describes a doorbell notification handle allocated by the
+ * host.
+ */
+struct dbell_entry {
+	struct vmci_resource resource;
+	struct hlist_node node;
+	struct work_struct work;
+	vmci_callback notify_cb;
+	void *client_data;
+	u32 idx;
+	u32 priv_flags;
+	bool run_delayed;
+	atomic_t active;	/* Only used by guest personality */
+};
+
+/* The VMCI index table keeps track of currently registered doorbells. */
+struct dbell_index_table {
+	spinlock_t lock;	/* Index table lock */
+	struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
+};
+
+static struct dbell_index_table vmci_doorbell_it = {
+	.lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
+};
+
+/*
+ * The max_notify_idx is one larger than the currently known bitmap index in
+ * use, and is used to determine how much of the bitmap needs to be scanned.
+ */
+static u32 max_notify_idx;
+
+/*
+ * The notify_idx_count is used for determining whether there are free entries
+ * within the bitmap (if notify_idx_count + 1 < max_notify_idx).
+ */
+static u32 notify_idx_count;
+
+/*
+ * The last_notify_idx_reserved is used to track the last index handed out - in
+ * the case where multiple handles share a notification index, we hand out
+ * indexes round robin based on last_notify_idx_reserved.
+ */
+static u32 last_notify_idx_reserved;
+
+/* This is a one entry cache used by the index allocation. */
+static u32 last_notify_idx_released = PAGE_SIZE;
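/*
 * [Editor's note, not part of the patch.] PAGE_SIZE doubles as the "empty"
 * sentinel for the one-entry cache above: valid indices run from 0 to
 * PAGE_SIZE - 1 (one notification flag byte per byte of the shared bitmap
 * page), so PAGE_SIZE itself can never be a released index. A hypothetical
 * reader making the convention explicit:
 */
static bool example_release_cache_is_empty(void)
{
	/* callers hold vmci_doorbell_it.lock */
	return last_notify_idx_released >= PAGE_SIZE;
}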
+
+
+/*
+ * Utility function that retrieves the privilege flags associated
+ * with a given doorbell handle. For guest endpoints, the
+ * privileges are determined by the context ID, but for host
+ * endpoints privileges are associated with the complete
+ * handle. Hypervisor endpoints are not yet supported.
+ */
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
+{
+	if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (handle.context == VMCI_HOST_CONTEXT_ID) {
+		struct dbell_entry *entry;
+		struct vmci_resource *resource;
+
+		resource = vmci_resource_by_handle(handle,
+						   VMCI_RESOURCE_TYPE_DOORBELL);
+		if (!resource)
+			return VMCI_ERROR_NOT_FOUND;
+
+		entry = container_of(resource, struct dbell_entry, resource);
+		*priv_flags = entry->priv_flags;
+		vmci_resource_put(resource);
+	} else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
+		/*
+		 * Hypervisor endpoints for notifications are not
+		 * supported (yet).
+		 */
+		return VMCI_ERROR_INVALID_ARGS;
+	} else {
+		*priv_flags = vmci_context_get_priv_flags(handle.context);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Find doorbell entry by bitmap index.
+ */
+static struct dbell_entry *dbell_index_table_find(u32 idx)
+{
+	u32 bucket = VMCI_DOORBELL_HASH(idx);
+	struct dbell_entry *dbell;
+
+	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
+			     node) {
+		if (idx == dbell->idx)
+			return dbell;
+	}
+
+	return NULL;
+}
+
+/*
+ * Add the given entry to the index table. This will take a reference to the
+ * entry's resource so that the entry is not deleted before it is removed from
+ * the table.
+ */
+static void dbell_index_table_add(struct dbell_entry *entry)
+{
+	u32 bucket;
+	u32 new_notify_idx;
+
+	vmci_resource_get(&entry->resource);
+
+	spin_lock_bh(&vmci_doorbell_it.lock);
+
+	/*
+	 * Below we try to allocate an index in the notification
+	 * bitmap with "not too much" sharing between resources. If we
+	 * use less than the full bitmap, we either add to the end if
+	 * there are no unused flags within the currently used area,
+	 * or we search for unused ones. If we use the full bitmap, we
+	 * allocate the index round robin.
+	 */
+	if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
+		if (last_notify_idx_released < max_notify_idx &&
+		    !dbell_index_table_find(last_notify_idx_released)) {
+			new_notify_idx = last_notify_idx_released;
+			last_notify_idx_released = PAGE_SIZE;
+		} else {
+			bool reused = false;
+			new_notify_idx = last_notify_idx_reserved;
+			if (notify_idx_count + 1 < max_notify_idx) {
+				do {
+					if (!dbell_index_table_find
+					    (new_notify_idx)) {
+						reused = true;
+						break;
+					}
+					new_notify_idx = (new_notify_idx + 1) %
+					    max_notify_idx;
+				} while (new_notify_idx !=
+					 last_notify_idx_released);
+			}
+			if (!reused) {
+				new_notify_idx = max_notify_idx;
+				max_notify_idx++;
+			}
+		}
+	} else {
+		new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
+	}
+
+	last_notify_idx_reserved = new_notify_idx;
+	notify_idx_count++;
+
+	entry->idx = new_notify_idx;
+	bucket = VMCI_DOORBELL_HASH(entry->idx);
+	hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
+
+	spin_unlock_bh(&vmci_doorbell_it.lock);
+}
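/*
 * [Editor's note, not part of the patch.] A worked example of the policy
 * above: with max_notify_idx == 4 and index 2 freshly released, the next
 * dbell_index_table_add() hands out 2 again straight from the one-entry
 * cache. If the cache is empty, the code scans forward from
 * last_notify_idx_reserved for an index with no table entry, grows
 * max_notify_idx only when none is free, and once all PAGE_SIZE indices are
 * in use it shares them round robin - which is why the firing path later in
 * this file must run every entry on a bucket whose index matches.
 */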
+
+/*
+ * Remove the given entry from the index table. This will release() the
+ * entry's resource.
+ */
+static void dbell_index_table_remove(struct dbell_entry *entry)
+{
+	spin_lock_bh(&vmci_doorbell_it.lock);
+
+	hlist_del_init(&entry->node);
+
+	notify_idx_count--;
+	if (entry->idx == max_notify_idx - 1) {
+		/*
+		 * If we delete an entry with the maximum known
+		 * notification index, we take the opportunity to
+		 * prune the current max. As there might be other
+		 * unused indices immediately below, we lower the
+		 * maximum until we hit an index in use.
+		 */
+		while (max_notify_idx > 0 &&
+		       !dbell_index_table_find(max_notify_idx - 1))
+			max_notify_idx--;
+	}
+
+	last_notify_idx_released = entry->idx;
+
+	spin_unlock_bh(&vmci_doorbell_it.lock);
+
+	vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Creates a link between the given doorbell handle and the given
+ * index in the bitmap in the device backend. A notification state
+ * is created in hypervisor.
+ */
+static int dbell_link(struct vmci_handle handle, u32 notify_idx)
+{
+	struct vmci_doorbell_link_msg link_msg;
+
+	link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					    VMCI_DOORBELL_LINK);
+	link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
+	link_msg.handle = handle;
+	link_msg.notify_idx = notify_idx;
+
+	return vmci_send_datagram(&link_msg.hdr);
+}
+
+/*
+ * Unlinks the given doorbell handle from an index in the bitmap in
+ * the device backend. The notification state is destroyed in hypervisor.
+ */
+static int dbell_unlink(struct vmci_handle handle)
+{
+	struct vmci_doorbell_unlink_msg unlink_msg;
+
+	unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					      VMCI_DOORBELL_UNLINK);
+	unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
+	unlink_msg.handle = handle;
+
+	return vmci_send_datagram(&unlink_msg.hdr);
+}
+
+/*
+ * Notify another guest or the host. We send a datagram down to the
+ * host via the hypervisor with the notification info.
+ */
+static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
+{
+	struct vmci_doorbell_notify_msg notify_msg;
+
+	notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					      VMCI_DOORBELL_NOTIFY);
+	notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
+	notify_msg.handle = handle;
+
+	return vmci_send_datagram(&notify_msg.hdr);
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dbell_delayed_dispatch(struct work_struct *work)
+{
+	struct dbell_entry *entry = container_of(work,
+						 struct dbell_entry, work);
+
+	entry->notify_cb(entry->client_data);
+	vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Dispatches a doorbell notification to the host context.
+ */
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+{
+	struct dbell_entry *entry;
+	struct vmci_resource *resource;
+
+	if (vmci_handle_is_invalid(handle)) {
+		pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	resource = vmci_resource_by_handle(handle,
+					   VMCI_RESOURCE_TYPE_DOORBELL);
+	if (!resource) {
+		pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	entry = container_of(resource, struct dbell_entry, resource);
+	if (entry->run_delayed) {
+		schedule_work(&entry->work);
+	} else {
+		entry->notify_cb(entry->client_data);
+		vmci_resource_put(resource);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Register the notification bitmap with the host.
+ */
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+{
+	int result;
+	struct vmci_notify_bm_set_msg bitmap_set_msg;
+
+	bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+						  VMCI_SET_NOTIFY_BITMAP);
+	bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
+	    VMCI_DG_HEADERSIZE;
+	bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+
+	result = vmci_send_datagram(&bitmap_set_msg.hdr);
+	if (result != VMCI_SUCCESS) {
+		pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+			 bitmap_ppn, result);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Executes or schedules the handlers for a given notify index.
+ */
+static void dbell_fire_entries(u32 notify_idx)
+{
+	u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
+	struct dbell_entry *dbell;
+
+	spin_lock_bh(&vmci_doorbell_it.lock);
+
+	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
+		if (dbell->idx == notify_idx &&
+		    atomic_read(&dbell->active) == 1) {
+			if (dbell->run_delayed) {
+				vmci_resource_get(&dbell->resource);
+				schedule_work(&dbell->work);
+			} else {
+				dbell->notify_cb(dbell->client_data);
+			}
+		}
+	}
+
+	spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Scans the notification bitmap, collects pending notifications,
+ * resets the bitmap and invokes appropriate callbacks.
+ */
+void vmci_dbell_scan_notification_entries(u8 *bitmap)
+{
+	u32 idx;
+
+	for (idx = 0; idx < max_notify_idx; idx++) {
+		if (bitmap[idx] & 0x1) {
+			bitmap[idx] &= ~1;
+			dbell_fire_entries(idx);
+		}
+	}
+}
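/*
 * [Editor's sketch, not part of the patch.] On the guest side the scanner
 * above is handed the shared bitmap page from the device interrupt path,
 * which lives outside this hunk; a hypothetical call site:
 */
static void example_handle_bell_irq(u8 *notification_bitmap)
{
	/* clears each set flag byte and fires the matching doorbells */
	vmci_dbell_scan_notification_entries(notification_bitmap);
}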
+
+/*
+ * vmci_doorbell_create() - Creates a doorbell
+ * @handle: A handle used to track the resource. Can be invalid.
+ * @flags: Flag that determines context of callback.
+ * @priv_flags: Privileges flags.
+ * @notify_cb: The callback to be invoked when the doorbell fires.
+ * @client_data: A parameter to be passed to the callback.
+ *
+ * Creates a doorbell with the given callback. If the handle is
+ * VMCI_INVALID_HANDLE, a free handle will be assigned, if
+ * possible. The callback can be run immediately (potentially with
+ * locks held - the default) or delayed (in a kernel thread) by
+ * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
+ * is selected, a given callback may not be run if the kernel is
+ * unable to allocate memory for the delayed execution (highly
+ * unlikely).
+ */
+int vmci_doorbell_create(struct vmci_handle *handle,
+			 u32 flags,
+			 u32 priv_flags,
+			 vmci_callback notify_cb, void *client_data)
+{
+	struct dbell_entry *entry;
+	struct vmci_handle new_handle;
+	int result;
+
+	if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
+	    priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL) {
+		pr_warn("Failed allocating memory for datagram entry\n");
+		return VMCI_ERROR_NO_MEM;
+	}
+
+	if (vmci_handle_is_invalid(*handle)) {
+		u32 context_id = vmci_get_context_id();
+
+		/* Let resource code allocate a free ID for us */
+		new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+	} else {
+		bool valid_context = false;
+
+		/*
+		 * Validate the handle. We must do both of the checks below
+		 * because we can be acting as both a host and a guest at the
+		 * same time. We always allow the host context ID, since the
+		 * host functionality is in practice always there with the
+		 * unified driver.
+		 */
+		if (handle->context == VMCI_HOST_CONTEXT_ID ||
+		    (vmci_guest_code_active() &&
+		     vmci_get_context_id() == handle->context)) {
+			valid_context = true;
+		}
+
+		if (!valid_context || handle->resource == VMCI_INVALID_ID) {
+			pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
+				 handle->context, handle->resource);
+			result = VMCI_ERROR_INVALID_ARGS;
+			goto free_mem;
+		}
+
+		new_handle = *handle;
+	}
+
+	entry->idx = 0;
+	INIT_HLIST_NODE(&entry->node);
+	entry->priv_flags = priv_flags;
+	INIT_WORK(&entry->work, dbell_delayed_dispatch);
+	entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
+	entry->notify_cb = notify_cb;
+	entry->client_data = client_data;
+	atomic_set(&entry->active, 0);
+
+	result = vmci_resource_add(&entry->resource,
+				   VMCI_RESOURCE_TYPE_DOORBELL,
+				   new_handle);
+	if (result != VMCI_SUCCESS) {
+		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+			new_handle.context, new_handle.resource, result);
+		goto free_mem;
+	}
+
+	new_handle = vmci_resource_handle(&entry->resource);
+	if (vmci_guest_code_active()) {
+		dbell_index_table_add(entry);
+		result = dbell_link(new_handle, entry->idx);
+		if (VMCI_SUCCESS != result)
+			goto destroy_resource;
+
+		atomic_set(&entry->active, 1);
+	}
+
+	*handle = new_handle;
+
+	return result;
+
+ destroy_resource:
+	dbell_index_table_remove(entry);
+	vmci_resource_remove(&entry->resource);
+ free_mem:
+	kfree(entry);
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_create);
+
+/*
+ * vmci_doorbell_destroy() - Destroy a doorbell.
+ * @handle: The handle tracking the resource.
+ *
+ * Destroys a doorbell previously created with vmci_doorbell_create. This
+ * operation may block waiting for a callback to finish.
+ */
+int vmci_doorbell_destroy(struct vmci_handle handle)
+{
+	struct dbell_entry *entry;
+	struct vmci_resource *resource;
+
+	if (vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	resource = vmci_resource_by_handle(handle,
+					   VMCI_RESOURCE_TYPE_DOORBELL);
+	if (!resource) {
+		pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	entry = container_of(resource, struct dbell_entry, resource);
+
+	if (vmci_guest_code_active()) {
+		int result;
+
+		dbell_index_table_remove(entry);
+
+		result = dbell_unlink(handle);
+		if (VMCI_SUCCESS != result) {
+
+			/*
+			 * The only reason this should fail would be
+			 * an inconsistency between guest and
+			 * hypervisor state, where the guest believes
+			 * it has an active registration whereas the
+			 * hypervisor doesn't. One case where this may
+			 * happen is if a doorbell is unregistered
+			 * following a hibernation at a time where the
+			 * doorbell state hasn't been restored on the
+			 * hypervisor side yet. Since the handle has
+			 * now been removed in the guest, we just
+			 * print a warning and return success.
+			 */
+			pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
+				 handle.context, handle.resource, result);
+		}
+	}
+
+	/*
+	 * Now remove the resource from the table. It might still be in use
+	 * after this, in a callback or still on the delayed work queue.
+	 */
+	vmci_resource_put(&entry->resource);
+	vmci_resource_remove(&entry->resource);
+
+	kfree(entry);
+
+	return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
+
+/*
+ * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
+ * @dst: The handle identifying the doorbell resource
+ * @priv_flags: Privilege flags.
+ * + * Generates a notification on the doorbell identified by the + * handle. For host side generation of notifications, the caller + * can specify what the privilege of the calling side is. + */ +int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags) +{ + int retval; + enum vmci_route route; + struct vmci_handle src; + + if (vmci_handle_is_invalid(dst) || + (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)) + return VMCI_ERROR_INVALID_ARGS; + + src = VMCI_INVALID_HANDLE; + retval = vmci_route(&src, &dst, false, &route); + if (retval < VMCI_SUCCESS) + return retval; + + if (VMCI_ROUTE_AS_HOST == route) + return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID, + dst, priv_flags); + + if (VMCI_ROUTE_AS_GUEST == route) + return dbell_notify_as_guest(dst, priv_flags); + + pr_warn("Unknown route (%d) for doorbell\n", route); + return VMCI_ERROR_DST_UNREACHABLE; +} +EXPORT_SYMBOL_GPL(vmci_doorbell_notify); diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h new file mode 100644 index 00000000000..e4c0b17486a --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_doorbell.h @@ -0,0 +1,51 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef VMCI_DOORBELL_H +#define VMCI_DOORBELL_H + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_driver.h" + +/* + * VMCINotifyResourceInfo: Used to create and destroy doorbells, and + * generate a notification for a doorbell or queue pair. + */ +struct vmci_dbell_notify_resource_info { + struct vmci_handle handle; + u16 resource; + u16 action; + s32 result; +}; + +/* + * Structure used for checkpointing the doorbell mappings. It is + * written to the checkpoint as is, so changing this structure will + * break checkpoint compatibility. + */ +struct dbell_cpt_state { + struct vmci_handle handle; + u64 bitmap_idx; +}; + +int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle); +int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags); + +bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn); +void vmci_dbell_scan_notification_entries(u8 *bitmap); + +#endif /* VMCI_DOORBELL_H */ diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c new file mode 100644 index 00000000000..3dee7ae123e --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_driver.c @@ -0,0 +1,117 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+static bool vmci_disable_host;
+module_param_named(disable_host, vmci_disable_host, bool, 0);
+MODULE_PARM_DESC(disable_host,
+		 "Disable driver host personality (default=enabled)");
+
+static bool vmci_disable_guest;
+module_param_named(disable_guest, vmci_disable_guest, bool, 0);
+MODULE_PARM_DESC(disable_guest,
+		 "Disable driver guest personality (default=enabled)");
+
+static bool vmci_guest_personality_initialized;
+static bool vmci_host_personality_initialized;
+
+/*
+ * vmci_get_context_id() - Gets the current context ID.
+ *
+ * Returns the current context ID: the guest's VM context ID when the
+ * guest personality is active, VMCI_HOST_CONTEXT_ID when only the host
+ * personality is active, and VMCI_INVALID_ID when neither is available.
+ */
+u32 vmci_get_context_id(void)
+{
+	if (vmci_guest_code_active())
+		return vmci_get_vm_context_id();
+	else if (vmci_host_code_active())
+		return VMCI_HOST_CONTEXT_ID;
+
+	return VMCI_INVALID_ID;
+}
+EXPORT_SYMBOL_GPL(vmci_get_context_id);
+
+static int __init vmci_drv_init(void)
+{
+	int vmci_err;
+	int error;
+
+	vmci_err = vmci_event_init();
+	if (vmci_err < VMCI_SUCCESS) {
+		pr_err("Failed to initialize VMCIEvent (result=%d)\n",
+		       vmci_err);
+		return -EINVAL;
+	}
+
+	if (!vmci_disable_guest) {
+		error = vmci_guest_init();
+		if (error) {
+			pr_warn("Failed to initialize guest personality (err=%d)\n",
+				error);
+		} else {
+			vmci_guest_personality_initialized = true;
+			pr_info("Guest personality initialized and is %s\n",
+				vmci_guest_code_active() ?
+				"active" : "inactive");
+		}
+	}
+
+	if (!vmci_disable_host) {
+		error = vmci_host_init();
+		if (error) {
+			pr_warn("Unable to initialize host personality (err=%d)\n",
+				error);
+		} else {
+			vmci_host_personality_initialized = true;
+			pr_info("Initialized host personality\n");
+		}
+	}
+
+	if (!vmci_guest_personality_initialized &&
+	    !vmci_host_personality_initialized) {
+		vmci_event_exit();
+		return -ENODEV;
+	}
+
+	return 0;
+}
+module_init(vmci_drv_init);
+
+static void __exit vmci_drv_exit(void)
+{
+	if (vmci_guest_personality_initialized)
+		vmci_guest_exit();
+
+	if (vmci_host_personality_initialized)
+		vmci_host_exit();
+
+	vmci_event_exit();
+}
+module_exit(vmci_drv_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
+MODULE_VERSION("1.1.0.0-k");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
new file mode 100644
index 00000000000..cee9e977d31
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -0,0 +1,57 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */ + +#ifndef _VMCI_DRIVER_H_ +#define _VMCI_DRIVER_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/wait.h> + +#include "vmci_queue_pair.h" +#include "vmci_context.h" + +enum vmci_obj_type { + VMCIOBJ_VMX_VM = 10, + VMCIOBJ_CONTEXT, + VMCIOBJ_SOCKET, + VMCIOBJ_NOT_SET, +}; + +/* For storing VMCI structures in file handles. */ +struct vmci_obj { + void *ptr; + enum vmci_obj_type type; +}; + +/* + * Needed by other components of this module. It's okay to have one global + * instance of this because there can only ever be one VMCI device. Our + * virtual hardware enforces this. + */ +extern struct pci_dev *vmci_pdev; + +u32 vmci_get_context_id(void); +int vmci_send_datagram(struct vmci_datagram *dg); + +int vmci_host_init(void); +void vmci_host_exit(void); +bool vmci_host_code_active(void); + +int vmci_guest_init(void); +void vmci_guest_exit(void); +bool vmci_guest_code_active(void); +u32 vmci_get_vm_context_id(void); + +#endif /* _VMCI_DRIVER_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c new file mode 100644 index 00000000000..8449516d6ac --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -0,0 +1,224 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/slab.h> + +#include "vmci_driver.h" +#include "vmci_event.h" + +#define EVENT_MAGIC 0xEABE0000 +#define VMCI_EVENT_MAX_ATTEMPTS 10 + +struct vmci_subscription { + u32 id; + u32 event; + vmci_event_cb callback; + void *callback_data; + struct list_head node; /* on one of subscriber lists */ +}; + +static struct list_head subscriber_array[VMCI_EVENT_MAX]; +static DEFINE_MUTEX(subscriber_mutex); + +int __init vmci_event_init(void) +{ + int i; + + for (i = 0; i < VMCI_EVENT_MAX; i++) + INIT_LIST_HEAD(&subscriber_array[i]); + + return VMCI_SUCCESS; +} + +void vmci_event_exit(void) +{ + int e; + + /* We free all memory at exit. */ + for (e = 0; e < VMCI_EVENT_MAX; e++) { + struct vmci_subscription *cur, *p2; + list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) { + + /* + * We should never get here because all events + * should have been unregistered before we try + * to unload the driver module. + */ + pr_warn("Unexpected free events occurring\n"); + list_del(&cur->node); + kfree(cur); + } + } +} + +/* + * Find entry. Assumes subscriber_mutex is held. + */ +static struct vmci_subscription *event_find(u32 sub_id) +{ + int e; + + for (e = 0; e < VMCI_EVENT_MAX; e++) { + struct vmci_subscription *cur; + list_for_each_entry(cur, &subscriber_array[e], node) { + if (cur->id == sub_id) + return cur; + } + } + return NULL; +} + +/* + * Actually delivers the events to the subscribers. + * The callback function for each subscriber is invoked. 
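+ *
+ * Callbacks run inside an RCU read-side critical section and must not
+ * sleep. A minimal subscriber callback with the expected vmci_event_cb
+ * signature might look as follows ("my_event_cb" is an illustrative
+ * name, not part of this driver):
+ *
+ *	static void my_event_cb(u32 sub_id,
+ *				const struct vmci_event_data *ed,
+ *				void *client_data)
+ *	{
+ *		pr_info("VMCI event %u fired\n", ed->event);
+ *	}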
+ */
+static void event_deliver(struct vmci_event_msg *event_msg)
+{
+	struct vmci_subscription *cur;
+	struct list_head *subscriber_list;
+
+	rcu_read_lock();
+	subscriber_list = &subscriber_array[event_msg->event_data.event];
+	list_for_each_entry_rcu(cur, subscriber_list, node) {
+		cur->callback(cur->id, &event_msg->event_data,
+			      cur->callback_data);
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
+ * subscribers for given event.
+ */
+int vmci_event_dispatch(struct vmci_datagram *msg)
+{
+	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
+
+	if (msg->payload_size < sizeof(u32) ||
+	    msg->payload_size > sizeof(struct vmci_event_data_max))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
+		return VMCI_ERROR_EVENT_UNKNOWN;
+
+	event_deliver(event_msg);
+	return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_event_subscribe() - Subscribe to a given event.
+ * @event: The event to subscribe to.
+ * @callback: The callback to invoke upon the event.
+ * @callback_data: Data to pass to the callback.
+ * @new_subscription_id: ID used to track subscription. Used with
+ *		vmci_event_unsubscribe()
+ *
+ * Subscribes to the provided event. The callback specified will be
+ * fired from RCU critical section and therefore must not sleep.
+ */
+int vmci_event_subscribe(u32 event,
+			 vmci_event_cb callback,
+			 void *callback_data,
+			 u32 *new_subscription_id)
+{
+	struct vmci_subscription *sub;
+	int attempts;
+	int retval;
+	bool have_new_id = false;
+
+	if (!new_subscription_id) {
+		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	if (!VMCI_EVENT_VALID(event) || !callback) {
+		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
+			 __func__, event, callback, callback_data);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
+	if (!sub)
+		return VMCI_ERROR_NO_MEM;
+
+	sub->id = VMCI_EVENT_MAX;
+	sub->event = event;
+	sub->callback = callback;
+	sub->callback_data = callback_data;
+	INIT_LIST_HEAD(&sub->node);
+
+	mutex_lock(&subscriber_mutex);
+
+	/* Creation of a new event is always allowed. */
+	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
+		static u32 subscription_id;
+		/*
+		 * We try to get an id a couple of times before
+		 * claiming we are out of resources.
+		 */
+
+		/* Test for duplicate id. */
+		if (!event_find(++subscription_id)) {
+			sub->id = subscription_id;
+			have_new_id = true;
+			break;
+		}
+	}
+
+	if (have_new_id) {
+		list_add_rcu(&sub->node, &subscriber_array[event]);
+		retval = VMCI_SUCCESS;
+	} else {
+		retval = VMCI_ERROR_NO_RESOURCES;
+	}
+
+	mutex_unlock(&subscriber_mutex);
+
+	if (retval != VMCI_SUCCESS) {
+		/* Don't leak a subscription that was never registered. */
+		kfree(sub);
+		return retval;
+	}
+
+	*new_subscription_id = sub->id;
+	return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_event_subscribe);
+
+/*
+ * vmci_event_unsubscribe() - unsubscribe from an event.
+ * @sub_id: A subscription ID as provided by vmci_event_subscribe()
+ *
+ * Unsubscribe from given event. Removes the subscription from the
+ * list and frees it once any concurrent callbacks have finished.
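+ *
+ * A typical pairing with vmci_event_subscribe() (sketch only, error
+ * handling elided; "my_event_cb" and "my_data" are placeholders):
+ *
+ *	u32 sub_id = VMCI_INVALID_ID;
+ *
+ *	vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, my_event_cb,
+ *			     my_data, &sub_id);
+ *	...
+ *	vmci_event_unsubscribe(sub_id);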
+ */ +int vmci_event_unsubscribe(u32 sub_id) +{ + struct vmci_subscription *s; + + mutex_lock(&subscriber_mutex); + s = event_find(sub_id); + if (s) + list_del_rcu(&s->node); + mutex_unlock(&subscriber_mutex); + + if (!s) + return VMCI_ERROR_NOT_FOUND; + + synchronize_rcu(); + kfree(s); + + return VMCI_SUCCESS; +} +EXPORT_SYMBOL_GPL(vmci_event_unsubscribe); diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h new file mode 100644 index 00000000000..7df9b1c0a96 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_event.h @@ -0,0 +1,25 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef __VMCI_EVENT_H__ +#define __VMCI_EVENT_H__ + +#include <linux/vmw_vmci_api.h> + +int vmci_event_init(void); +void vmci_event_exit(void); +int vmci_event_dispatch(struct vmci_datagram *msg); + +#endif /*__VMCI_EVENT_H__ */ diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c new file mode 100644 index 00000000000..e0d5017785e --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -0,0 +1,772 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define PCI_VENDOR_ID_VMWARE		0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+static bool vmci_disable_msi;
+module_param_named(disable_msi, vmci_disable_msi, bool, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+static bool vmci_disable_msix;
+module_param_named(disable_msix, vmci_disable_msix, bool, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+static u32 ctx_update_sub_id = VMCI_INVALID_ID;
+static u32 vm_context_id = VMCI_INVALID_ID;
+
+struct vmci_guest_device {
+	struct device *dev;	/* PCI device we are attached to */
+	void __iomem *iobase;
+
+	unsigned int irq;
+	unsigned int intr_type;
+	bool exclusive_vectors;
+	struct msix_entry msix_entries[VMCI_MAX_INTRS];
+
+	struct tasklet_struct datagram_tasklet;
+	struct tasklet_struct bm_tasklet;
+
+	void *data_buffer;
+	void *notification_bitmap;
+	dma_addr_t notification_base;
+};
+
+/* vmci_dev singleton device and supporting data */
+struct pci_dev *vmci_pdev;
+static struct vmci_guest_device *vmci_dev_g;
+static DEFINE_SPINLOCK(vmci_dev_spinlock);
+
+static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
+
+bool vmci_guest_code_active(void)
+{
+	return atomic_read(&vmci_num_guest_devices) != 0;
+}
+
+u32 vmci_get_vm_context_id(void)
+{
+	if (vm_context_id == VMCI_INVALID_ID) {
+		struct vmci_datagram get_cid_msg;
+		get_cid_msg.dst =
+		    vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+				     VMCI_GET_CONTEXT_ID);
+		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
+		get_cid_msg.payload_size = 0;
+		vm_context_id = vmci_send_datagram(&get_cid_msg);
+	}
+	return vm_context_id;
+}
+
+/*
+ * VM to hypervisor call mechanism. We use the standard VMware naming
+ * convention since shared code is calling this function as well.
+ */
+int vmci_send_datagram(struct vmci_datagram *dg)
+{
+	unsigned long flags;
+	int result;
+
+	/* Check args. */
+	if (dg == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * Need to acquire spinlock on the device because the datagram
+	 * data may be spread over multiple pages and the monitor may
+	 * interleave device user rpc calls from multiple
+	 * VCPUs. Acquiring the spinlock precludes that
+	 * possibility. Disabling interrupts avoids incoming
+	 * datagrams during a "rep out" and possibly ending up back
+	 * in this function.
+	 */
+	spin_lock_irqsave(&vmci_dev_spinlock, flags);
+
+	if (vmci_dev_g) {
+		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
+			     dg, VMCI_DG_SIZE(dg));
+		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+	} else {
+		result = VMCI_ERROR_UNAVAILABLE;
+	}
+
+	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_send_datagram);
+
+/*
+ * Event callback: gets called with the new context ID when the guest's
+ * context ID is updated or the VM is resumed.
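+ *
+ * This callback is registered from vmci_guest_probe_device() below via
+ * vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, ...), which keeps the
+ * cached vm_context_id valid across events that change it.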
+ */
+static void vmci_guest_cid_update(u32 sub_id,
+				  const struct vmci_event_data *event_data,
+				  void *client_data)
+{
+	const struct vmci_event_payld_ctx *ev_payload =
+				vmci_event_data_const_payload(event_data);
+
+	if (sub_id != ctx_update_sub_id) {
+		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
+		return;
+	}
+
+	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
+		pr_devel("Invalid event data\n");
+		return;
+	}
+
+	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
+		 vm_context_id, ev_payload->context_id, event_data->event);
+
+	vm_context_id = ev_payload->context_id;
+}
+
+/*
+ * Verify that the host supports the hypercalls we need. If it does not,
+ * try to find fallback hypercalls and use those instead. Returns 0 if
+ * the required hypercalls (or fallback hypercalls) are supported by the
+ * host, an error code otherwise.
+ */
+static int vmci_check_host_caps(struct pci_dev *pdev)
+{
+	bool result;
+	struct vmci_resource_query_msg *msg;
+	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
+				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+	struct vmci_datagram *check_msg;
+
+	check_msg = kmalloc(msg_size, GFP_KERNEL);
+	if (!check_msg) {
+		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					  VMCI_RESOURCES_QUERY);
+	check_msg->src = VMCI_ANON_SRC_HANDLE;
+	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
+	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
+
+	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
+	msg->resources[0] = VMCI_GET_CONTEXT_ID;
+
+	/* Checks that hypercalls are supported */
+	result = vmci_send_datagram(check_msg) == 0x01;
+	kfree(check_msg);
+
+	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
+		__func__, result ? "PASSED" : "FAILED");
+
+	/* We need the vector. There are no fallbacks. */
+	return result ? 0 : -ENXIO;
+}
+
+/*
+ * Reads datagrams from the data in port and dispatches them. We
+ * always start reading datagrams into only the first page of the
+ * datagram buffer. If the datagrams don't fit into one page, we
+ * use the maximum datagram buffer size for the remainder of the
+ * invocation. This is a simple heuristic for not penalizing
+ * small datagrams.
+ *
+ * This function assumes that it has exclusive access to the data
+ * in port for the duration of the call.
+ */
+static void vmci_dispatch_dgs(unsigned long data)
+{
+	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
+	u8 *dg_in_buffer = vmci_dev->data_buffer;
+	struct vmci_datagram *dg;
+	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+	size_t current_dg_in_buffer_size = PAGE_SIZE;
+	size_t remaining_bytes;
+
+	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
+
+	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+		    vmci_dev->data_buffer, current_dg_in_buffer_size);
+	dg = (struct vmci_datagram *)dg_in_buffer;
+	remaining_bytes = current_dg_in_buffer_size;
+
+	while (dg->dst.resource != VMCI_INVALID_ID ||
+	       remaining_bytes > PAGE_SIZE) {
+		unsigned dg_in_size;
+
+		/*
+		 * When the input buffer spans multiple pages, a datagram can
+		 * start on any page boundary in the buffer.
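+		 *
+		 * One possible layout (illustrative only):
+		 *
+		 *   |<-------- current_dg_in_buffer_size -------->|
+		 *   [ dg | dg | dst == VMCI_INVALID_ID ...... | dg ]
+		 *                              ^ next page boundary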
+ */
+		if (dg->dst.resource == VMCI_INVALID_ID) {
+			dg = (struct vmci_datagram *)roundup(
+				(uintptr_t)dg + 1, PAGE_SIZE);
+			remaining_bytes =
+				(size_t)(dg_in_buffer +
+					 current_dg_in_buffer_size -
+					 (u8 *)dg);
+			continue;
+		}
+
+		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
+
+		if (dg_in_size <= dg_in_buffer_size) {
+			int result;
+
+			/*
+			 * If the remaining bytes in the datagram
+			 * buffer don't contain the complete
+			 * datagram, we first make sure we have enough
+			 * room for it and then we read the remainder
+			 * of the datagram and possibly any following
+			 * datagrams.
+			 */
+			if (dg_in_size > remaining_bytes) {
+				if (remaining_bytes !=
+				    current_dg_in_buffer_size) {
+
+					/*
+					 * We move the partial
+					 * datagram to the front and
+					 * read the remainder of the
+					 * datagram and possibly any
+					 * following datagrams into
+					 * the following bytes.
+					 */
+					memmove(dg_in_buffer, dg_in_buffer +
+						current_dg_in_buffer_size -
+						remaining_bytes,
+						remaining_bytes);
+					dg = (struct vmci_datagram *)
+					    dg_in_buffer;
+				}
+
+				if (current_dg_in_buffer_size !=
+				    dg_in_buffer_size)
+					current_dg_in_buffer_size =
+					    dg_in_buffer_size;
+
+				ioread8_rep(vmci_dev->iobase +
+						VMCI_DATA_IN_ADDR,
+					vmci_dev->data_buffer +
+						remaining_bytes,
+					current_dg_in_buffer_size -
+						remaining_bytes);
+			}
+
+			/*
+			 * We special case event datagrams from the
+			 * hypervisor.
+			 */
+			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+			    dg->dst.resource == VMCI_EVENT_HANDLER) {
+				result = vmci_event_dispatch(dg);
+			} else {
+				result = vmci_datagram_invoke_guest_handler(dg);
+			}
+			if (result < VMCI_SUCCESS)
+				dev_dbg(vmci_dev->dev,
+					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
+					dg->dst.resource, result);
+
+			/* On to the next datagram. */
+			dg = (struct vmci_datagram *)((u8 *)dg +
+						      dg_in_size);
+		} else {
+			size_t bytes_to_skip;
+
+			/*
+			 * Datagram doesn't fit in datagram buffer of maximal
+			 * size. We drop it.
+			 */
+			dev_dbg(vmci_dev->dev,
+				"Failed to receive datagram (size=%u bytes)\n",
+				dg_in_size);
+
+			bytes_to_skip = dg_in_size - remaining_bytes;
+			if (current_dg_in_buffer_size != dg_in_buffer_size)
+				current_dg_in_buffer_size = dg_in_buffer_size;
+
+			for (;;) {
+				ioread8_rep(vmci_dev->iobase +
+						VMCI_DATA_IN_ADDR,
+					vmci_dev->data_buffer,
+					current_dg_in_buffer_size);
+				if (bytes_to_skip <= current_dg_in_buffer_size)
+					break;
+
+				bytes_to_skip -= current_dg_in_buffer_size;
+			}
+			dg = (struct vmci_datagram *)(dg_in_buffer +
+						      bytes_to_skip);
+		}
+
+		remaining_bytes =
+		    (size_t) (dg_in_buffer + current_dg_in_buffer_size -
+			      (u8 *)dg);
+
+		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
+			/* Get the next batch of datagrams. */
+
+			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+				    vmci_dev->data_buffer,
+				    current_dg_in_buffer_size);
+			dg = (struct vmci_datagram *)dg_in_buffer;
+			remaining_bytes = current_dg_in_buffer_size;
+		}
+	}
+}
+
+/*
+ * Scans the notification bitmap for raised flags, clears them
+ * and handles the notifications.
+ */
+static void vmci_process_bitmap(unsigned long data)
+{
+	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
+
+	if (!dev->notification_bitmap) {
+		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
+		return;
+	}
+
+	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
+}
+
+/*
+ * Enable MSI-X. Try exclusive vectors first, then shared vectors.
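+ *
+ * With exclusive vectors, datagrams and the notification bitmap each
+ * get their own vector (msix_entries[0] and [1] below); with a single
+ * shared vector, vmci_interrupt() has to read the ICR to tell the two
+ * causes apart.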
+ */ +static int vmci_enable_msix(struct pci_dev *pdev, + struct vmci_guest_device *vmci_dev) +{ + int i; + int result; + + for (i = 0; i < VMCI_MAX_INTRS; ++i) { + vmci_dev->msix_entries[i].entry = i; + vmci_dev->msix_entries[i].vector = i; + } + + result = pci_enable_msix_exact(pdev, + vmci_dev->msix_entries, VMCI_MAX_INTRS); + if (result == 0) + vmci_dev->exclusive_vectors = true; + else if (result == -ENOSPC) + result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1); + + return result; +} + +/* + * Interrupt handler for legacy or MSI interrupt, or for first MSI-X + * interrupt (vector VMCI_INTR_DATAGRAM). + */ +static irqreturn_t vmci_interrupt(int irq, void *_dev) +{ + struct vmci_guest_device *dev = _dev; + + /* + * If we are using MSI-X with exclusive vectors then we simply schedule + * the datagram tasklet, since we know the interrupt was meant for us. + * Otherwise we must read the ICR to determine what to do. + */ + + if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) { + tasklet_schedule(&dev->datagram_tasklet); + } else { + unsigned int icr; + + /* Acknowledge interrupt and determine what needs doing. */ + icr = ioread32(dev->iobase + VMCI_ICR_ADDR); + if (icr == 0 || icr == ~0) + return IRQ_NONE; + + if (icr & VMCI_ICR_DATAGRAM) { + tasklet_schedule(&dev->datagram_tasklet); + icr &= ~VMCI_ICR_DATAGRAM; + } + + if (icr & VMCI_ICR_NOTIFICATION) { + tasklet_schedule(&dev->bm_tasklet); + icr &= ~VMCI_ICR_NOTIFICATION; + } + + if (icr != 0) + dev_warn(dev->dev, + "Ignoring unknown interrupt cause (%d)\n", + icr); + } + + return IRQ_HANDLED; +} + +/* + * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION, + * which is for the notification bitmap. Will only get called if we are + * using MSI-X with exclusive vectors. + */ +static irqreturn_t vmci_interrupt_bm(int irq, void *_dev) +{ + struct vmci_guest_device *dev = _dev; + + /* For MSI-X we can just assume it was meant for us. */ + tasklet_schedule(&dev->bm_tasklet); + + return IRQ_HANDLED; +} + +/* + * Most of the initialization at module load time is done here. 
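+ *
+ * In outline, the probe path below: enables the PCI device and maps
+ * BAR 0, allocates the datagram buffer, negotiates capabilities with
+ * the device, registers the notification bitmap when supported,
+ * verifies host capabilities, subscribes to VMCI_EVENT_CTX_ID_UPDATE,
+ * and finally wires up interrupts (MSI-X, then MSI, then legacy INTx).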
+ */ +static int vmci_guest_probe_device(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct vmci_guest_device *vmci_dev; + void __iomem *iobase; + unsigned int capabilities; + unsigned long cmd; + int vmci_err; + int error; + + dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n"); + + error = pcim_enable_device(pdev); + if (error) { + dev_err(&pdev->dev, + "Failed to enable VMCI device: %d\n", error); + return error; + } + + error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME); + if (error) { + dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); + return error; + } + + iobase = pcim_iomap_table(pdev)[0]; + + dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n", + (unsigned long)iobase, pdev->irq); + + vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL); + if (!vmci_dev) { + dev_err(&pdev->dev, + "Can't allocate memory for VMCI device\n"); + return -ENOMEM; + } + + vmci_dev->dev = &pdev->dev; + vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; + vmci_dev->exclusive_vectors = false; + vmci_dev->iobase = iobase; + + tasklet_init(&vmci_dev->datagram_tasklet, + vmci_dispatch_dgs, (unsigned long)vmci_dev); + tasklet_init(&vmci_dev->bm_tasklet, + vmci_process_bitmap, (unsigned long)vmci_dev); + + vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); + if (!vmci_dev->data_buffer) { + dev_err(&pdev->dev, + "Can't allocate memory for datagram buffer\n"); + return -ENOMEM; + } + + pci_set_master(pdev); /* To enable queue_pair functionality. */ + + /* + * Verify that the VMCI Device supports the capabilities that + * we need. If the device is missing capabilities that we would + * like to use, check for fallback capabilities and use those + * instead (so we can run a new VM on old hosts). Fail the load if + * a required capability is missing and there is no fallback. + * + * Right now, we need datagrams. There are no fallbacks. + */ + capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR); + if (!(capabilities & VMCI_CAPS_DATAGRAM)) { + dev_err(&pdev->dev, "Device does not support datagrams\n"); + error = -ENXIO; + goto err_free_data_buffer; + } + + /* + * If the hardware supports notifications, we will use that as + * well. + */ + if (capabilities & VMCI_CAPS_NOTIFICATIONS) { + vmci_dev->notification_bitmap = dma_alloc_coherent( + &pdev->dev, PAGE_SIZE, &vmci_dev->notification_base, + GFP_KERNEL); + if (!vmci_dev->notification_bitmap) { + dev_warn(&pdev->dev, + "Unable to allocate notification bitmap\n"); + } else { + memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); + capabilities |= VMCI_CAPS_NOTIFICATIONS; + } + } + + dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities); + + /* Let the host know which capabilities we intend to use. */ + iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR); + + /* Set up global device so that we can start sending datagrams */ + spin_lock_irq(&vmci_dev_spinlock); + vmci_dev_g = vmci_dev; + vmci_pdev = pdev; + spin_unlock_irq(&vmci_dev_spinlock); + + /* + * Register notification bitmap with device if that capability is + * used. + */ + if (capabilities & VMCI_CAPS_NOTIFICATIONS) { + unsigned long bitmap_ppn = + vmci_dev->notification_base >> PAGE_SHIFT; + if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { + dev_warn(&pdev->dev, + "VMCI device unable to register notification bitmap with PPN 0x%x\n", + (u32) bitmap_ppn); + error = -ENXIO; + goto err_remove_vmci_dev_g; + } + } + + /* Check host capabilities. 
*/ + error = vmci_check_host_caps(pdev); + if (error) + goto err_remove_bitmap; + + /* Enable device. */ + + /* + * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can + * update the internal context id when needed. + */ + vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, + vmci_guest_cid_update, NULL, + &ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to subscribe to event (type=%d): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, vmci_err); + + /* + * Enable interrupts. Try MSI-X first, then MSI, and then fallback on + * legacy interrupts. + */ + if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) { + vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX; + vmci_dev->irq = vmci_dev->msix_entries[0].vector; + } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) { + vmci_dev->intr_type = VMCI_INTR_TYPE_MSI; + vmci_dev->irq = pdev->irq; + } else { + vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; + vmci_dev->irq = pdev->irq; + } + + /* + * Request IRQ for legacy or MSI interrupts, or for first + * MSI-X vector. + */ + error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED, + KBUILD_MODNAME, vmci_dev); + if (error) { + dev_err(&pdev->dev, "Irq %u in use: %d\n", + vmci_dev->irq, error); + goto err_disable_msi; + } + + /* + * For MSI-X with exclusive vectors we need to request an + * interrupt for each vector so that we get a separate + * interrupt handler routine. This allows us to distinguish + * between the vectors. + */ + if (vmci_dev->exclusive_vectors) { + error = request_irq(vmci_dev->msix_entries[1].vector, + vmci_interrupt_bm, 0, KBUILD_MODNAME, + vmci_dev); + if (error) { + dev_err(&pdev->dev, + "Failed to allocate irq %u: %d\n", + vmci_dev->msix_entries[1].vector, error); + goto err_free_irq; + } + } + + dev_dbg(&pdev->dev, "Registered device\n"); + + atomic_inc(&vmci_num_guest_devices); + + /* Enable specific interrupt bits. */ + cmd = VMCI_IMR_DATAGRAM; + if (capabilities & VMCI_CAPS_NOTIFICATIONS) + cmd |= VMCI_IMR_NOTIFICATION; + iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); + + /* Enable interrupts. 
*/ + iowrite32(VMCI_CONTROL_INT_ENABLE, + vmci_dev->iobase + VMCI_CONTROL_ADDR); + + pci_set_drvdata(pdev, vmci_dev); + return 0; + +err_free_irq: + free_irq(vmci_dev->irq, vmci_dev); + tasklet_kill(&vmci_dev->datagram_tasklet); + tasklet_kill(&vmci_dev->bm_tasklet); + +err_disable_msi: + if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) + pci_disable_msix(pdev); + else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) + pci_disable_msi(pdev); + + vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); + +err_remove_bitmap: + if (vmci_dev->notification_bitmap) { + iowrite32(VMCI_CONTROL_RESET, + vmci_dev->iobase + VMCI_CONTROL_ADDR); + dma_free_coherent(&pdev->dev, PAGE_SIZE, + vmci_dev->notification_bitmap, + vmci_dev->notification_base); + } + +err_remove_vmci_dev_g: + spin_lock_irq(&vmci_dev_spinlock); + vmci_pdev = NULL; + vmci_dev_g = NULL; + spin_unlock_irq(&vmci_dev_spinlock); + +err_free_data_buffer: + vfree(vmci_dev->data_buffer); + + /* The rest are managed resources and will be freed by PCI core */ + return error; +} + +static void vmci_guest_remove_device(struct pci_dev *pdev) +{ + struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev); + int vmci_err; + + dev_dbg(&pdev->dev, "Removing device\n"); + + atomic_dec(&vmci_num_guest_devices); + + vmci_qp_guest_endpoints_exit(); + + vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); + if (vmci_err < VMCI_SUCCESS) + dev_warn(&pdev->dev, + "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n", + VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err); + + spin_lock_irq(&vmci_dev_spinlock); + vmci_dev_g = NULL; + vmci_pdev = NULL; + spin_unlock_irq(&vmci_dev_spinlock); + + dev_dbg(&pdev->dev, "Resetting vmci device\n"); + iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR); + + /* + * Free IRQ and then disable MSI/MSI-X as appropriate. For + * MSI-X, we might have multiple vectors, each with their own + * IRQ, which we must free too. + */ + free_irq(vmci_dev->irq, vmci_dev); + if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) { + if (vmci_dev->exclusive_vectors) + free_irq(vmci_dev->msix_entries[1].vector, vmci_dev); + pci_disable_msix(pdev); + } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) { + pci_disable_msi(pdev); + } + + tasklet_kill(&vmci_dev->datagram_tasklet); + tasklet_kill(&vmci_dev->bm_tasklet); + + if (vmci_dev->notification_bitmap) { + /* + * The device reset above cleared the bitmap state of the + * device, so we can safely free it here. 
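+		 * (Freeing this DMA memory while the device could still
+		 * write to it would be racy; the VMCI_CONTROL_RESET issued
+		 * above is what makes the free safe.)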
+ */ + + dma_free_coherent(&pdev->dev, PAGE_SIZE, + vmci_dev->notification_bitmap, + vmci_dev->notification_base); + } + + vfree(vmci_dev->data_buffer); + + /* The rest are managed resources and will be freed by PCI core */ +} + +static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = { + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, vmci_ids); + +static struct pci_driver vmci_guest_driver = { + .name = KBUILD_MODNAME, + .id_table = vmci_ids, + .probe = vmci_guest_probe_device, + .remove = vmci_guest_remove_device, +}; + +int __init vmci_guest_init(void) +{ + return pci_register_driver(&vmci_guest_driver); +} + +void __exit vmci_guest_exit(void) +{ + pci_unregister_driver(&vmci_guest_driver); +} diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c new file mode 100644 index 00000000000..344973a0fb0 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_handle_array.c @@ -0,0 +1,142 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/slab.h> +#include "vmci_handle_array.h" + +static size_t handle_arr_calc_size(size_t capacity) +{ + return sizeof(struct vmci_handle_arr) + + capacity * sizeof(struct vmci_handle); +} + +struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity) +{ + struct vmci_handle_arr *array; + + if (capacity == 0) + capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE; + + array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC); + if (!array) + return NULL; + + array->capacity = capacity; + array->size = 0; + + return array; +} + +void vmci_handle_arr_destroy(struct vmci_handle_arr *array) +{ + kfree(array); +} + +void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle) +{ + struct vmci_handle_arr *array = *array_ptr; + + if (unlikely(array->size >= array->capacity)) { + /* reallocate. */ + struct vmci_handle_arr *new_array; + size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT; + size_t new_size = handle_arr_calc_size(new_capacity); + + new_array = krealloc(array, new_size, GFP_ATOMIC); + if (!new_array) + return; + + new_array->capacity = new_capacity; + *array_ptr = array = new_array; + } + + array->entries[array->size] = handle; + array->size++; +} + +/* + * Handle that was removed, VMCI_INVALID_HANDLE if entry not found. + */ +struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, + struct vmci_handle entry_handle) +{ + struct vmci_handle handle = VMCI_INVALID_HANDLE; + size_t i; + + for (i = 0; i < array->size; i++) { + if (vmci_handle_is_equal(array->entries[i], entry_handle)) { + handle = array->entries[i]; + array->size--; + array->entries[i] = array->entries[array->size]; + array->entries[array->size] = VMCI_INVALID_HANDLE; + break; + } + } + + return handle; +} + +/* + * Handle that was removed, VMCI_INVALID_HANDLE if array was empty. 
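+ *
+ * A typical drain loop (sketch; release_handle() stands in for
+ * whatever per-entry cleanup the caller needs):
+ *
+ *	for (;;) {
+ *		struct vmci_handle h = vmci_handle_arr_remove_tail(arr);
+ *
+ *		if (vmci_handle_is_invalid(h))
+ *			break;
+ *		release_handle(h);
+ *	}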
+ */ +struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array) +{ + struct vmci_handle handle = VMCI_INVALID_HANDLE; + + if (array->size) { + array->size--; + handle = array->entries[array->size]; + array->entries[array->size] = VMCI_INVALID_HANDLE; + } + + return handle; +} + +/* + * Handle at given index, VMCI_INVALID_HANDLE if invalid index. + */ +struct vmci_handle +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index) +{ + if (unlikely(index >= array->size)) + return VMCI_INVALID_HANDLE; + + return array->entries[index]; +} + +bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, + struct vmci_handle entry_handle) +{ + size_t i; + + for (i = 0; i < array->size; i++) + if (vmci_handle_is_equal(array->entries[i], entry_handle)) + return true; + + return false; +} + +/* + * NULL if the array is empty. Otherwise, a pointer to the array + * of VMCI handles in the handle array. + */ +struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array) +{ + if (array->size) + return array->entries; + + return NULL; +} diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h new file mode 100644 index 00000000000..b5f3a7f98cf --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_handle_array.h @@ -0,0 +1,52 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _VMCI_HANDLE_ARRAY_H_ +#define _VMCI_HANDLE_ARRAY_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4 +#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */ + +struct vmci_handle_arr { + size_t capacity; + size_t size; + struct vmci_handle entries[]; +}; + +struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity); +void vmci_handle_arr_destroy(struct vmci_handle_arr *array); +void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle); +struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, + struct vmci_handle + entry_handle); +struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array); +struct vmci_handle +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index); +bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, + struct vmci_handle entry_handle); +struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array); + +static inline size_t vmci_handle_arr_get_size( + const struct vmci_handle_arr *array) +{ + return array->size; +} + + +#endif /* _VMCI_HANDLE_ARRAY_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c new file mode 100644 index 00000000000..1723a6e4f2e --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -0,0 +1,1039 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/moduleparam.h> +#include <linux/miscdevice.h> +#include <linux/interrupt.h> +#include <linux/highmem.h> +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <linux/pci.h> +#include <linux/smp.h> +#include <linux/fs.h> +#include <linux/io.h> + +#include "vmci_handle_array.h" +#include "vmci_queue_pair.h" +#include "vmci_datagram.h" +#include "vmci_doorbell.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" + +#define VMCI_UTIL_NUM_RESOURCES 1 + +enum { + VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0, + VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1, +}; + +enum { + VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0, + VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1, + VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2, +}; + +/* + * VMCI driver initialization. This block can also be used to + * pass initial group membership etc. + */ +struct vmci_init_blk { + u32 cid; + u32 flags; +}; + +/* VMCIqueue_pairAllocInfo_VMToVM */ +struct vmci_qp_alloc_info_vmvm { + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u64 produce_page_file; /* User VA. */ + u64 consume_page_file; /* User VA. */ + u64 produce_page_file_size; /* Size of the file name array. */ + u64 consume_page_file_size; /* Size of the file name array. */ + s32 result; + u32 _pad; +}; + +/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */ +struct vmci_set_notify_info { + u64 notify_uva; + s32 result; + u32 _pad; +}; + +/* + * Per-instance host state + */ +struct vmci_host_dev { + struct vmci_ctx *context; + int user_version; + enum vmci_obj_type ct_type; + struct mutex lock; /* Mutex lock for vmci context access */ +}; + +static struct vmci_ctx *host_context; +static bool vmci_host_device_initialized; +static atomic_t vmci_host_active_users = ATOMIC_INIT(0); + +/* + * Determines whether the VMCI host personality is + * available. Since the core functionality of the host driver is + * always present, all guests could possibly use the host + * personality. However, to minimize the deviation from the + * pre-unified driver state of affairs, we only consider the host + * device active if there is no active guest device or if there + * are VMX'en with active VMCI contexts using the host device. + */ +bool vmci_host_code_active(void) +{ + return vmci_host_device_initialized && + (!vmci_guest_code_active() || + atomic_read(&vmci_host_active_users) > 0); +} + +/* + * Called on open of /dev/vmci. 
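+ *
+ * Each open file descriptor gets its own vmci_host_dev instance; it
+ * only becomes a full VMCI context later, when user space issues
+ * IOCTL_VMCI_INIT_CONTEXT (see vmci_host_do_init_context() below).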
+ */
+static int vmci_host_open(struct inode *inode, struct file *filp)
+{
+	struct vmci_host_dev *vmci_host_dev;
+
+	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
+	if (vmci_host_dev == NULL)
+		return -ENOMEM;
+
+	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+	mutex_init(&vmci_host_dev->lock);
+	filp->private_data = vmci_host_dev;
+
+	return 0;
+}
+
+/*
+ * Called on close of /dev/vmci, most often when the process
+ * exits.
+ */
+static int vmci_host_close(struct inode *inode, struct file *filp)
+{
+	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+
+	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+		vmci_ctx_destroy(vmci_host_dev->context);
+		vmci_host_dev->context = NULL;
+
+		/*
+		 * The number of active contexts is used to track whether any
+		 * VMX'en are using the host personality. It is incremented when
+		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
+		 * ioctl.
+		 */
+		atomic_dec(&vmci_host_active_users);
+	}
+	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+
+	kfree(vmci_host_dev);
+	filp->private_data = NULL;
+	return 0;
+}
+
+/*
+ * This is used to wake up the VMX when a VMCI call arrives, or
+ * to wake up select() or poll() at the next clock tick.
+ */
+static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+{
+	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+	struct vmci_ctx *context = vmci_host_dev->context;
+	unsigned int mask = 0;
+
+	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+		/* Check for VMCI calls to this VM context. */
+		if (wait)
+			poll_wait(filp, &context->host_context.wait_queue,
+				  wait);
+
+		spin_lock(&context->lock);
+		if (context->pending_datagrams > 0 ||
+		    vmci_handle_arr_get_size(
+				context->pending_doorbell_array) > 0) {
+			mask = POLLIN;
+		}
+		spin_unlock(&context->lock);
+	}
+	return mask;
+}
+
+/*
+ * Copies the handles of a handle array into a user buffer, and
+ * returns the new length in *user_buf_size. If the copy to the
+ * user buffer fails, the function still returns VMCI_SUCCESS,
+ * but *retval != 0.
+ */
+static int drv_cp_harray_to_user(void __user *user_buf_uva,
+				 u64 *user_buf_size,
+				 struct vmci_handle_arr *handle_array,
+				 int *retval)
+{
+	u32 array_size = 0;
+	struct vmci_handle *handles;
+
+	if (handle_array)
+		array_size = vmci_handle_arr_get_size(handle_array);
+
+	if (array_size * sizeof(*handles) > *user_buf_size)
+		return VMCI_ERROR_MORE_DATA;
+
+	*user_buf_size = array_size * sizeof(*handles);
+	if (*user_buf_size)
+		*retval = copy_to_user(user_buf_uva,
+				       vmci_handle_arr_get_handles
+				       (handle_array), *user_buf_size);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Sets up a given context for notify to work. Maps the notify
+ * boolean in user VA into kernel space.
+ */
+static int vmci_host_setup_notify(struct vmci_ctx *context,
+				  unsigned long uva)
+{
+	struct page *page;
+	int retval;
+
+	if (context->notify_page) {
+		pr_devel("%s: Notify mechanism is already set up\n", __func__);
+		return VMCI_ERROR_DUPLICATE_ENTRY;
+	}
+
+	/*
+	 * We are using 'bool' internally, but let's be explicit
+	 * about the size.
+	 */
+	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
+	if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
+		return VMCI_ERROR_GENERIC;
+
+	/*
+	 * Lock physical page backing a given user VA. Note that we
+	 * must pass the page containing 'uva'; PAGE_ALIGN() would
+	 * round an unaligned address up past it.
+	 */
+	retval = get_user_pages_fast(uva & PAGE_MASK, 1, 1, &page);
+	if (retval != 1)
+		return VMCI_ERROR_GENERIC;
+
+	/*
+	 * Map the locked page and set up notify pointer.
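+	 *
+	 * kmap() returns a kernel virtual address for the pinned user
+	 * page; adding the page offset of 'uva' makes 'notify' point at
+	 * the very byte user space registered.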
+ */
+	context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
+	vmci_ctx_check_signal_notify(context);
+
+	return VMCI_SUCCESS;
+}
+
+static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
+				 unsigned int cmd, void __user *uptr)
+{
+	if (cmd == IOCTL_VMCI_VERSION2) {
+		int __user *vptr = uptr;
+		if (get_user(vmci_host_dev->user_version, vptr))
+			return -EFAULT;
+	}
+
+	/*
+	 * The basic logic here is:
+	 *
+	 * If the user sends in a version of 0 tell it our version.
+	 * If the user didn't send in a version, tell it our version.
+	 * If the user sent in an old version, tell it -its- version.
+	 * If the user sent in a newer version, tell it our version.
+	 *
+	 * The rationale behind telling the caller its version is that
+	 * Workstation 6.5 required the VMX and the VMCI kernel module
+	 * to be version sync'd. All new VMX users will be programmed to
+	 * handle the VMCI kernel module version.
+	 */
+
+	if (vmci_host_dev->user_version > 0 &&
+	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
+		return vmci_host_dev->user_version;
+	}
+
+	return VMCI_VERSION;
+}
+
+#define vmci_ioctl_err(fmt, ...)	\
+	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
+
+static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
+				     const char *ioctl_name,
+				     void __user *uptr)
+{
+	struct vmci_init_blk init_block;
+	const struct cred *cred;
+	int retval;
+
+	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
+		vmci_ioctl_err("error reading init block\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&vmci_host_dev->lock);
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
+		vmci_ioctl_err("received VMCI init on initialized handle\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+		vmci_ioctl_err("unsupported VMCI restriction flag\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	cred = get_current_cred();
+	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
+						 init_block.flags, 0,
+						 vmci_host_dev->user_version,
+						 cred);
+	put_cred(cred);
+	if (IS_ERR(vmci_host_dev->context)) {
+		retval = PTR_ERR(vmci_host_dev->context);
+		vmci_ioctl_err("error initializing context\n");
+		goto out;
+	}
+
+	/*
+	 * Copy the cid back to user level; we do this to allow the VMX
+	 * to enforce its policy on cid generation.
+ */ + init_block.cid = vmci_ctx_get_id(vmci_host_dev->context); + if (copy_to_user(uptr, &init_block, sizeof(init_block))) { + vmci_ctx_destroy(vmci_host_dev->context); + vmci_host_dev->context = NULL; + vmci_ioctl_err("error writing init block\n"); + retval = -EFAULT; + goto out; + } + + vmci_host_dev->ct_type = VMCIOBJ_CONTEXT; + atomic_inc(&vmci_host_active_users); + + retval = 0; + +out: + mutex_unlock(&vmci_host_dev->lock); + return retval; +} + +static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_datagram_snd_rcv_info send_info; + struct vmci_datagram *dg = NULL; + u32 cid; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&send_info, uptr, sizeof(send_info))) + return -EFAULT; + + if (send_info.len > VMCI_MAX_DG_SIZE) { + vmci_ioctl_err("datagram is too big (size=%d)\n", + send_info.len); + return -EINVAL; + } + + if (send_info.len < sizeof(*dg)) { + vmci_ioctl_err("datagram is too small (size=%d)\n", + send_info.len); + return -EINVAL; + } + + dg = kmalloc(send_info.len, GFP_KERNEL); + if (!dg) { + vmci_ioctl_err( + "cannot allocate memory to dispatch datagram\n"); + return -ENOMEM; + } + + if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr, + send_info.len)) { + vmci_ioctl_err("error getting datagram\n"); + kfree(dg); + return -EFAULT; + } + + pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n", + dg->dst.context, dg->dst.resource, + dg->src.context, dg->src.resource, + (unsigned long long)dg->payload_size); + + /* Get source context id. */ + cid = vmci_ctx_get_id(vmci_host_dev->context); + send_info.result = vmci_datagram_dispatch(cid, dg, true); + kfree(dg); + + return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0; +} + +static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_datagram_snd_rcv_info recv_info; + struct vmci_datagram *dg = NULL; + int retval; + size_t size; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&recv_info, uptr, sizeof(recv_info))) + return -EFAULT; + + size = recv_info.len; + recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context, + &size, &dg); + + if (recv_info.result >= VMCI_SUCCESS) { + void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr; + retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg)); + kfree(dg); + if (retval != 0) + return -EFAULT; + } + + return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? 
-EFAULT : 0; +} + +static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_handle handle; + int vmci_status; + int __user *retptr; + u32 cid; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { + struct vmci_qp_alloc_info_vmvm alloc_info; + struct vmci_qp_alloc_info_vmvm __user *info = uptr; + + if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) + return -EFAULT; + + handle = alloc_info.handle; + retptr = &info->result; + + vmci_status = vmci_qp_broker_alloc(alloc_info.handle, + alloc_info.peer, + alloc_info.flags, + VMCI_NO_PRIVILEGE_FLAGS, + alloc_info.produce_size, + alloc_info.consume_size, + NULL, + vmci_host_dev->context); + + if (vmci_status == VMCI_SUCCESS) + vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE; + } else { + struct vmci_qp_alloc_info alloc_info; + struct vmci_qp_alloc_info __user *info = uptr; + struct vmci_qp_page_store page_store; + + if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info))) + return -EFAULT; + + handle = alloc_info.handle; + retptr = &info->result; + + page_store.pages = alloc_info.ppn_va; + page_store.len = alloc_info.num_ppns; + + vmci_status = vmci_qp_broker_alloc(alloc_info.handle, + alloc_info.peer, + alloc_info.flags, + VMCI_NO_PRIVILEGE_FLAGS, + alloc_info.produce_size, + alloc_info.consume_size, + &page_store, + vmci_host_dev->context); + } + + if (put_user(vmci_status, retptr)) { + if (vmci_status >= VMCI_SUCCESS) { + vmci_status = vmci_qp_broker_detach(handle, + vmci_host_dev->context); + } + return -EFAULT; + } + + return 0; +} + +static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_qp_set_va_info set_va_info; + struct vmci_qp_set_va_info __user *info = uptr; + s32 result; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { + vmci_ioctl_err("is not allowed\n"); + return -EINVAL; + } + + if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info))) + return -EFAULT; + + if (set_va_info.va) { + /* + * VMX is passing down a new VA for the queue + * pair mapping. + */ + result = vmci_qp_broker_map(set_va_info.handle, + vmci_host_dev->context, + set_va_info.va); + } else { + /* + * The queue pair is about to be unmapped by + * the VMX. + */ + result = vmci_qp_broker_unmap(set_va_info.handle, + vmci_host_dev->context, 0); + } + + return put_user(result, &info->result) ? -EFAULT : 0; +} + +static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_qp_page_file_info page_file_info; + struct vmci_qp_page_file_info __user *info = uptr; + s32 result; + + if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP || + vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) { + vmci_ioctl_err("not supported on this VMX (version=%d)\n", + vmci_host_dev->user_version); + return -EINVAL; + } + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&page_file_info, uptr, sizeof(*info))) + return -EFAULT; + + /* + * Communicate success pre-emptively to the caller. 
Note that the
+	 * basic premise is that it is incumbent upon the caller not to look at
+	 * the info.result field until after the ioctl() returns. And then,
+	 * only if the ioctl() result indicates no error. We send up the
+	 * SUCCESS status before calling SetPageStore() because failing
+	 * to copy up the result code means unwinding the SetPageStore().
+	 *
+	 * It turns out the logic to unwind a SetPageStore() opens a can of
+	 * worms. For example, if a host had created the queue_pair and a
+	 * guest attaches and SetPageStore() is successful but writing success
+	 * fails, then ... the host has to be stopped from writing (anymore)
+	 * data into the queue_pair. That means an additional test in the
+	 * VMCI_Enqueue() code path. Ugh.
+	 */
+
+	if (put_user(VMCI_SUCCESS, &info->result)) {
+		/*
+		 * In this case, we can't write a result field of the
+		 * caller's info block. So, we don't even try to
+		 * SetPageStore().
+		 */
+		return -EFAULT;
+	}
+
+	result = vmci_qp_broker_set_page_store(page_file_info.handle,
+					       page_file_info.produce_va,
+					       page_file_info.consume_va,
+					       vmci_host_dev->context);
+	if (result < VMCI_SUCCESS) {
+		if (put_user(result, &info->result)) {
+			/*
+			 * Note that in this case the SetPageStore()
+			 * call failed but we were unable to
+			 * communicate that to the caller (because the
+			 * copy_to_user() call failed). So, if we
+			 * simply return an error (in this case
+			 * -EFAULT) then the caller will know that the
+			 * SetPageStore failed even though we couldn't
+			 * put the result code in the result field and
+			 * indicate exactly why it failed.
+			 *
+			 * That says nothing about the issue where we
+			 * were once able to write to the caller's info
+			 * memory and now can't. Something more
+			 * serious is probably going on than the fact
+			 * that SetPageStore() didn't work.
+			 */
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
+				  const char *ioctl_name,
+				  void __user *uptr)
+{
+	struct vmci_qp_dtch_info detach_info;
+	struct vmci_qp_dtch_info __user *info = uptr;
+	s32 result;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
+		return -EFAULT;
+
+	result = vmci_qp_broker_detach(detach_info.handle,
+				       vmci_host_dev->context);
+	if (result == VMCI_SUCCESS &&
+	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+		result = VMCI_SUCCESS_LAST_DETACH;
+	}
+
+	return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
+				       const char *ioctl_name,
+				       void __user *uptr)
+{
+	struct vmci_ctx_info ar_info;
+	struct vmci_ctx_info __user *info = uptr;
+	s32 result;
+	u32 cid;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+		return -EFAULT;
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
+
+	return put_user(result, &info->result) ?
-EFAULT : 0; +} + +static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_info ar_info; + struct vmci_ctx_info __user *info = uptr; + u32 cid; + int result; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&ar_info, uptr, sizeof(ar_info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + result = vmci_ctx_remove_notification(cid, + ar_info.remote_cid); + + return put_user(result, &info->result) ? -EFAULT : 0; +} + +static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_chkpt_buf_info get_info; + u32 cid; + void *cpt_buf; + int retval; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&get_info, uptr, sizeof(get_info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type, + &get_info.buf_size, &cpt_buf); + if (get_info.result == VMCI_SUCCESS && get_info.buf_size) { + void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf; + retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size); + kfree(cpt_buf); + + if (retval) + return -EFAULT; + } + + return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0; +} + +static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_chkpt_buf_info set_info; + u32 cid; + void *cpt_buf; + int retval; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&set_info, uptr, sizeof(set_info))) + return -EFAULT; + + cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL); + if (!cpt_buf) { + vmci_ioctl_err( + "cannot allocate memory to set cpt state (type=%d)\n", + set_info.cpt_type); + return -ENOMEM; + } + + if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf, + set_info.buf_size)) { + retval = -EFAULT; + goto out; + } + + cid = vmci_ctx_get_id(vmci_host_dev->context); + set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type, + set_info.buf_size, cpt_buf); + + retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0; + +out: + kfree(cpt_buf); + return retval; +} + +static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + u32 __user *u32ptr = uptr; + + return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0; +} + +static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_set_notify_info notify_info; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&notify_info, uptr, sizeof(notify_info))) + return -EFAULT; + + if (notify_info.notify_uva) { + notify_info.result = + vmci_host_setup_notify(vmci_host_dev->context, + notify_info.notify_uva); + } else { + vmci_ctx_unset_notify(vmci_host_dev->context); + notify_info.result = VMCI_SUCCESS; + } + + return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
+ -EFAULT : 0; +} + +static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_dbell_notify_resource_info info; + u32 cid; + + if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { + vmci_ioctl_err("invalid for current VMX versions\n"); + return -EINVAL; + } + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (copy_from_user(&info, uptr, sizeof(info))) + return -EFAULT; + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + switch (info.action) { + case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY: + if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) { + u32 flags = VMCI_NO_PRIVILEGE_FLAGS; + info.result = vmci_ctx_notify_dbell(cid, info.handle, + flags); + } else { + info.result = VMCI_ERROR_UNAVAILABLE; + } + break; + + case VMCI_NOTIFY_RESOURCE_ACTION_CREATE: + info.result = vmci_ctx_dbell_create(cid, info.handle); + break; + + case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY: + info.result = vmci_ctx_dbell_destroy(cid, info.handle); + break; + + default: + vmci_ioctl_err("got unknown action (action=%d)\n", + info.action); + info.result = VMCI_ERROR_INVALID_ARGS; + } + + return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0; +} + +static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev, + const char *ioctl_name, + void __user *uptr) +{ + struct vmci_ctx_notify_recv_info info; + struct vmci_handle_arr *db_handle_array; + struct vmci_handle_arr *qp_handle_array; + void __user *ubuf; + u32 cid; + int retval = 0; + + if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { + vmci_ioctl_err("only valid for contexts\n"); + return -EINVAL; + } + + if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { + vmci_ioctl_err("not supported for the current vmx version\n"); + return -EINVAL; + } + + if (copy_from_user(&info, uptr, sizeof(info))) + return -EFAULT; + + if ((info.db_handle_buf_size && !info.db_handle_buf_uva) || + (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) { + return -EINVAL; + } + + cid = vmci_ctx_get_id(vmci_host_dev->context); + + info.result = vmci_ctx_rcv_notifications_get(cid, + &db_handle_array, &qp_handle_array); + if (info.result != VMCI_SUCCESS) + return copy_to_user(uptr, &info, sizeof(info)) ? 
-EFAULT : 0; + + ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva; + info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size, + db_handle_array, &retval); + if (info.result == VMCI_SUCCESS && !retval) { + ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva; + info.result = drv_cp_harray_to_user(ubuf, + &info.qp_handle_buf_size, + qp_handle_array, &retval); + } + + if (!retval && copy_to_user(uptr, &info, sizeof(info))) + retval = -EFAULT; + + vmci_ctx_rcv_notifications_release(cid, + db_handle_array, qp_handle_array, + info.result == VMCI_SUCCESS && !retval); + + return retval; +} + +static long vmci_host_unlocked_ioctl(struct file *filp, + unsigned int iocmd, unsigned long ioarg) +{ +#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \ + char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \ + return vmci_host_do_ ## ioctl_fn( \ + vmci_host_dev, name, uptr); \ + } while (0) + + struct vmci_host_dev *vmci_host_dev = filp->private_data; + void __user *uptr = (void __user *)ioarg; + + switch (iocmd) { + case IOCTL_VMCI_INIT_CONTEXT: + VMCI_DO_IOCTL(INIT_CONTEXT, init_context); + case IOCTL_VMCI_DATAGRAM_SEND: + VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram); + case IOCTL_VMCI_DATAGRAM_RECEIVE: + VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram); + case IOCTL_VMCI_QUEUEPAIR_ALLOC: + VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair); + case IOCTL_VMCI_QUEUEPAIR_SETVA: + VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva); + case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE: + VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf); + case IOCTL_VMCI_QUEUEPAIR_DETACH: + VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach); + case IOCTL_VMCI_CTX_ADD_NOTIFICATION: + VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify); + case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION: + VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify); + case IOCTL_VMCI_CTX_GET_CPT_STATE: + VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state); + case IOCTL_VMCI_CTX_SET_CPT_STATE: + VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state); + case IOCTL_VMCI_GET_CONTEXT_ID: + VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id); + case IOCTL_VMCI_SET_NOTIFY: + VMCI_DO_IOCTL(SET_NOTIFY, set_notify); + case IOCTL_VMCI_NOTIFY_RESOURCE: + VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource); + case IOCTL_VMCI_NOTIFICATIONS_RECEIVE: + VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications); + + case IOCTL_VMCI_VERSION: + case IOCTL_VMCI_VERSION2: + return vmci_host_get_version(vmci_host_dev, iocmd, uptr); + + default: + pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd); + return -EINVAL; + } + +#undef VMCI_DO_IOCTL +} + +static const struct file_operations vmuser_fops = { + .owner = THIS_MODULE, + .open = vmci_host_open, + .release = vmci_host_close, + .poll = vmci_host_poll, + .unlocked_ioctl = vmci_host_unlocked_ioctl, + .compat_ioctl = vmci_host_unlocked_ioctl, +}; + +static struct miscdevice vmci_host_miscdev = { + .name = "vmci", + .minor = MISC_DYNAMIC_MINOR, + .fops = &vmuser_fops, +}; + +int __init vmci_host_init(void) +{ + int error; + + host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID, + VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS, + -1, VMCI_VERSION, NULL); + if (IS_ERR(host_context)) { + error = PTR_ERR(host_context); + pr_warn("Failed to initialize VMCIContext (error=%d)\n", + error); + return error; + } + + error = misc_register(&vmci_host_miscdev); + if (error) { + pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n", + vmci_host_miscdev.name, + MISC_MAJOR, vmci_host_miscdev.minor, + error); + pr_warn("Unable to initialize
host personality\n"); + vmci_ctx_destroy(host_context); + return error; + } + + pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n", + vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor); + + vmci_host_device_initialized = true; + return 0; +} + +void __exit vmci_host_exit(void) +{ + int error; + + vmci_host_device_initialized = false; + + error = misc_deregister(&vmci_host_miscdev); + if (error) + pr_warn("Error unregistering character device: %d\n", error); + + vmci_ctx_destroy(host_context); + vmci_qp_broker_exit(); + + pr_debug("VMCI host driver module unloaded\n"); +} diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c new file mode 100644 index 00000000000..1b7b303085d --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -0,0 +1,3339 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/vmw_vmci_api.h> +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pagemap.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uio.h> +#include <linux/wait.h> +#include <linux/vmalloc.h> + +#include "vmci_handle_array.h" +#include "vmci_queue_pair.h" +#include "vmci_datagram.h" +#include "vmci_resource.h" +#include "vmci_context.h" +#include "vmci_driver.h" +#include "vmci_event.h" +#include "vmci_route.h" + +/* + * In the following, we will distinguish between two kinds of VMX processes - + * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized + * VMCI page files in the VMX and support VM to VM communication and the + * newer ones that use the guest memory directly. We will in the following + * refer to the older VMX versions as old-style VMX'en, and the newer ones as + * new-style VMX'en. + * + * The state transition diagram is as follows (the VMCIQPB_ prefix has been + * removed for readability) - see below for more details on the transitions: + * + * -------------- NEW ------------- + * | | + * \_/ \_/ + * CREATED_NO_MEM <-----------------> CREATED_MEM + * | | | + * | o-----------------------o | + * | | | + * \_/ \_/ \_/ + * ATTACHED_NO_MEM <----------------> ATTACHED_MEM + * | | | + * | o----------------------o | + * | | | + * \_/ \_/ \_/ + * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM + * | | + * | | + * -------------> gone <------------- + * + * In more detail: when a VMCI queue pair is first created, it will be in the + * VMCIQPB_NEW state. It will then move into one of the following states: + * + * - VMCIQPB_CREATED_NO_MEM: this state indicates that either: + * + * - the create was performed by a host endpoint, in which case there is + * no backing memory yet. + * + * - the create was initiated by an old-style VMX, that uses + * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at + * a later point in time.
This state can be distinguished from the one + * above by the context ID of the creator. A host side is not allowed to + * attach until the page store has been set. + * + * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair + * is created by a VMX using the queue pair device backend that + * sets the UVAs of the queue pair immediately and stores the + * information for later attachers. At this point, it is ready for + * the host side to attach to it. + * + * Once the queue pair is in one of the created states (with the exception of + * the case mentioned for older VMX'en above), it is possible to attach to the + * queue pair. Again we have two new states possible: + * + * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following + * paths: + * + * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue + * pair, and attaches to a queue pair previously created by the host side. + * + * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair + * already created by a guest. + * + * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls + * vmci_qp_broker_set_page_store (see below). + * + * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the + * VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style VMX will + * bring the queue pair into this state. Once vmci_qp_broker_set_page_store + * is called to register the user memory, the VMCIQPB_ATTACHED_MEM state + * will be entered. + * + * From the attached queue pair, the queue pair can enter the shutdown states + * when either side of the queue pair detaches. If the guest side detaches + * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where + * the content of the queue pair will no longer be available. If the host + * side detaches first, the queue pair will either enter the + * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or + * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped + * (e.g., the host detaches while a guest is stunned). + * + * New-style VMX'en will also unmap guest memory, if the guest is + * quiesced, e.g., during a snapshot operation. In that case, the guest + * memory will no longer be available, and the queue pair will transition from + * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more, + * in which case the queue pair will transition from the *_NO_MEM state at that + * point back to the *_MEM state. Note that the *_NO_MEM state may have changed, + * since the peer may have either attached or detached in the meantime. The + * values are laid out such that ++ on a state will move from a *_NO_MEM to a + * *_MEM state, and vice versa. + */ + +/* + * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these + * types are passed around to enqueue and dequeue routines. Note that + * often the functions passed are simply wrappers around memcpy + * itself. + * + * Note: In order for the memcpy typedefs to be compatible with the VMKernel, + * there's an unused last parameter for the hosted side. In + * ESX, that parameter holds a buffer type. + */ +typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue, + u64 queue_offset, const void *src, + size_t src_offset, size_t size); +typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset, + const struct vmci_queue *queue, + u64 queue_offset, size_t size); + +/* The Kernel specific component of the struct vmci_queue structure. */ +struct vmci_queue_kern_if { + struct mutex __mutex; /* Protects the queue.
*/ + struct mutex *mutex; /* Shared by producer and consumer queues. */ + size_t num_pages; /* Number of pages incl. header. */ + bool host; /* Host or guest? */ + union { + struct { + dma_addr_t *pas; + void **vas; + } g; /* Used by the guest. */ + struct { + struct page **page; + struct page **header_page; + } h; /* Used by the host. */ + } u; +}; + +/* + * This structure is opaque to the clients. + */ +struct vmci_qp { + struct vmci_handle handle; + struct vmci_queue *produce_q; + struct vmci_queue *consume_q; + u64 produce_q_size; + u64 consume_q_size; + u32 peer; + u32 flags; + u32 priv_flags; + bool guest_endpoint; + unsigned int blocked; + unsigned int generation; + wait_queue_head_t event; +}; + +enum qp_broker_state { + VMCIQPB_NEW, + VMCIQPB_CREATED_NO_MEM, + VMCIQPB_CREATED_MEM, + VMCIQPB_ATTACHED_NO_MEM, + VMCIQPB_ATTACHED_MEM, + VMCIQPB_SHUTDOWN_NO_MEM, + VMCIQPB_SHUTDOWN_MEM, + VMCIQPB_GONE +}; + +#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \ + _qpb->state == VMCIQPB_ATTACHED_MEM || \ + _qpb->state == VMCIQPB_SHUTDOWN_MEM) + +/* + * In the queue pair broker, we always use the guest point of view for + * the produce and consume queue values and references, e.g., the + * produce queue size stored is the guests produce queue size. The + * host endpoint will need to swap these around. The only exception is + * the local queue pairs on the host, in which case the host endpoint + * that creates the queue pair will have the right orientation, and + * the attaching host endpoint will need to swap. + */ +struct qp_entry { + struct list_head list_item; + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u32 ref_count; +}; + +struct qp_broker_entry { + struct vmci_resource resource; + struct qp_entry qp; + u32 create_id; + u32 attach_id; + enum qp_broker_state state; + bool require_trusted_attach; + bool created_by_trusted; + bool vmci_page_files; /* Created by VMX using VMCI page files */ + struct vmci_queue *produce_q; + struct vmci_queue *consume_q; + struct vmci_queue_header saved_produce_q; + struct vmci_queue_header saved_consume_q; + vmci_event_release_cb wakeup_cb; + void *client_data; + void *local_mem; /* Kernel memory for local queue pair */ +}; + +struct qp_guest_endpoint { + struct vmci_resource resource; + struct qp_entry qp; + u64 num_ppns; + void *produce_q; + void *consume_q; + struct ppn_set ppn_set; +}; + +struct qp_list { + struct list_head head; + struct mutex mutex; /* Protect queue list. */ +}; + +static struct qp_list qp_broker_list = { + .head = LIST_HEAD_INIT(qp_broker_list.head), + .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex), +}; + +static struct qp_list qp_guest_endpoints = { + .head = LIST_HEAD_INIT(qp_guest_endpoints.head), + .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex), +}; + +#define INVALID_VMCI_GUEST_MEM_ID 0 +#define QPE_NUM_PAGES(_QPE) ((u32) \ + (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ + DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) + + +/* + * Frees kernel VA space for a given queue and its queue header, and + * frees physical data pages. + */ +static void qp_free_queue(void *q, u64 size) +{ + struct vmci_queue *queue = q; + + if (queue) { + u64 i; + + /* Given size does not include header, so add in a page here. 
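+ * As an illustrative check (assuming 4 KiB pages, which this file + * does not itself state): freeing a 65536-byte queue walks + * DIV_ROUND_UP(65536, 4096) + 1 = 17 coherent pages, the extra + * page being the queue header.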
*/ + for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) { + dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE, + queue->kernel_if->u.g.vas[i], + queue->kernel_if->u.g.pas[i]); + } + + vfree(queue); + } +} + +/* + * Allocates kernel queue pages of specified size with IOMMU mappings, + * plus space for the queue structure/kernel interface and the queue + * header. + */ +static void *qp_alloc_queue(u64 size, u32 flags) +{ + u64 i; + struct vmci_queue *queue; + const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); + const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); + const size_t queue_size = + sizeof(*queue) + sizeof(*queue->kernel_if) + + pas_size + vas_size; + + queue = vmalloc(queue_size); + if (!queue) + return NULL; + + queue->q_header = NULL; + queue->saved_header = NULL; + queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); + queue->kernel_if->mutex = NULL; + queue->kernel_if->num_pages = num_pages; + queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1); + queue->kernel_if->u.g.vas = + (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size); + queue->kernel_if->host = false; + + for (i = 0; i < num_pages; i++) { + queue->kernel_if->u.g.vas[i] = + dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE, + &queue->kernel_if->u.g.pas[i], + GFP_KERNEL); + if (!queue->kernel_if->u.g.vas[i]) { + /* Size excl. the header. */ + qp_free_queue(queue, i * PAGE_SIZE); + return NULL; + } + } + + /* Queue header is the first page. */ + queue->q_header = queue->kernel_if->u.g.vas[0]; + + return queue; +} + +/* + * Copies from a given buffer or iovector to a VMCI Queue. Uses + * kmap()/kunmap() to dynamically map/unmap required portions of the queue + * by traversing the offset -> page translation structure for the queue. + * Assumes that offset + size does not wrap around in the queue. + */ +static int __qp_memcpy_to_queue(struct vmci_queue *queue, + u64 queue_offset, + const void *src, + size_t size, + bool is_iovec) +{ + struct vmci_queue_kern_if *kernel_if = queue->kernel_if; + size_t bytes_copied = 0; + + while (bytes_copied < size) { + const u64 page_index = + (queue_offset + bytes_copied) / PAGE_SIZE; + const size_t page_offset = + (queue_offset + bytes_copied) & (PAGE_SIZE - 1); + void *va; + size_t to_copy; + + if (kernel_if->host) + va = kmap(kernel_if->u.h.page[page_index]); + else + va = kernel_if->u.g.vas[page_index + 1]; + /* Skip header. */ + + if (size - bytes_copied > PAGE_SIZE - page_offset) + /* Enough payload to fill up from this page. */ + to_copy = PAGE_SIZE - page_offset; + else + to_copy = size - bytes_copied; + + if (is_iovec) { + struct iovec *iov = (struct iovec *)src; + int err; + + /* The iovec will track bytes_copied internally. */ + err = memcpy_fromiovec((u8 *)va + page_offset, + iov, to_copy); + if (err != 0) { + if (kernel_if->host) + kunmap(kernel_if->u.h.page[page_index]); + return VMCI_ERROR_INVALID_ARGS; + } + } else { + memcpy((u8 *)va + page_offset, + (u8 *)src + bytes_copied, to_copy); + } + + bytes_copied += to_copy; + if (kernel_if->host) + kunmap(kernel_if->u.h.page[page_index]); + } + + return VMCI_SUCCESS; +} + +/* + * Copies to a given buffer or iovector from a VMCI Queue. Uses + * kmap()/kunmap() to dynamically map/unmap required portions of the queue + * by traversing the offset -> page translation structure for the queue. + * Assumes that offset + size does not wrap around in the queue. 
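+ * + * As a worked example (illustrative only, assuming 4 KiB pages): + * queue_offset 5000 gives page_index 5000 / 4096 = 1 and page_offset + * 5000 & 4095 = 904, so copying resumes 904 bytes into that page and + * at most 4096 - 904 = 3192 bytes are copied before moving on to the + * next page.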
+ */ +static int __qp_memcpy_from_queue(void *dest, + const struct vmci_queue *queue, + u64 queue_offset, + size_t size, + bool is_iovec) +{ + struct vmci_queue_kern_if *kernel_if = queue->kernel_if; + size_t bytes_copied = 0; + + while (bytes_copied < size) { + const u64 page_index = + (queue_offset + bytes_copied) / PAGE_SIZE; + const size_t page_offset = + (queue_offset + bytes_copied) & (PAGE_SIZE - 1); + void *va; + size_t to_copy; + + if (kernel_if->host) + va = kmap(kernel_if->u.h.page[page_index]); + else + va = kernel_if->u.g.vas[page_index + 1]; + /* Skip header. */ + + if (size - bytes_copied > PAGE_SIZE - page_offset) + /* Enough payload to fill up this page. */ + to_copy = PAGE_SIZE - page_offset; + else + to_copy = size - bytes_copied; + + if (is_iovec) { + struct iovec *iov = (struct iovec *)dest; + int err; + + /* The iovec will track bytes_copied internally. */ + err = memcpy_toiovec(iov, (u8 *)va + page_offset, + to_copy); + if (err != 0) { + if (kernel_if->host) + kunmap(kernel_if->u.h.page[page_index]); + return VMCI_ERROR_INVALID_ARGS; + } + } else { + memcpy((u8 *)dest + bytes_copied, + (u8 *)va + page_offset, to_copy); + } + + bytes_copied += to_copy; + if (kernel_if->host) + kunmap(kernel_if->u.h.page[page_index]); + } + + return VMCI_SUCCESS; +} + +/* + * Allocates two lists of PPNs --- one for the pages in the produce queue, + * and the other for the pages in the consume queue. Initializes the list + * of PPNs with the page frame numbers of the KVA for the two queues (and + * the queue headers). + */ +static int qp_alloc_ppn_set(void *prod_q, + u64 num_produce_pages, + void *cons_q, + u64 num_consume_pages, struct ppn_set *ppn_set) +{ + u32 *produce_ppns; + u32 *consume_ppns; + struct vmci_queue *produce_q = prod_q; + struct vmci_queue *consume_q = cons_q; + u64 i; + + if (!produce_q || !num_produce_pages || !consume_q || + !num_consume_pages || !ppn_set) + return VMCI_ERROR_INVALID_ARGS; + + if (ppn_set->initialized) + return VMCI_ERROR_ALREADY_EXISTS; + + produce_ppns = + kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL); + if (!produce_ppns) + return VMCI_ERROR_NO_MEM; + + consume_ppns = + kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL); + if (!consume_ppns) { + kfree(produce_ppns); + return VMCI_ERROR_NO_MEM; + } + + for (i = 0; i < num_produce_pages; i++) { + unsigned long pfn; + + produce_ppns[i] = + produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; + pfn = produce_ppns[i]; + + /* Fail allocation if PFN isn't supported by hypervisor. */ + if (sizeof(pfn) > sizeof(*produce_ppns) + && pfn != produce_ppns[i]) + goto ppn_error; + } + + for (i = 0; i < num_consume_pages; i++) { + unsigned long pfn; + + consume_ppns[i] = + consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; + pfn = consume_ppns[i]; + + /* Fail allocation if PFN isn't supported by hypervisor. */ + if (sizeof(pfn) > sizeof(*consume_ppns) + && pfn != consume_ppns[i]) + goto ppn_error; + } + + ppn_set->num_produce_pages = num_produce_pages; + ppn_set->num_consume_pages = num_consume_pages; + ppn_set->produce_ppns = produce_ppns; + ppn_set->consume_ppns = consume_ppns; + ppn_set->initialized = true; + return VMCI_SUCCESS; + + ppn_error: + kfree(produce_ppns); + kfree(consume_ppns); + return VMCI_ERROR_INVALID_ARGS; +} + +/* + * Frees the two lists of PPNs for a queue pair. + */ +static void qp_free_ppn_set(struct ppn_set *ppn_set) +{ + if (ppn_set->initialized) { + /* Do not call these functions on NULL inputs.
*/ + kfree(ppn_set->produce_ppns); + kfree(ppn_set->consume_ppns); + } + memset(ppn_set, 0, sizeof(*ppn_set)); +} + +/* + * Populates the list of PPNs in the hypercall structure with the PPNs + * of the produce queue and the consume queue. + */ +static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set) +{ + memcpy(call_buf, ppn_set->produce_ppns, + ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns)); + memcpy(call_buf + + ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns), + ppn_set->consume_ppns, + ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns)); + + return VMCI_SUCCESS; +} + +static int qp_memcpy_to_queue(struct vmci_queue *queue, + u64 queue_offset, + const void *src, size_t src_offset, size_t size) +{ + return __qp_memcpy_to_queue(queue, queue_offset, + (u8 *)src + src_offset, size, false); +} + +static int qp_memcpy_from_queue(void *dest, + size_t dest_offset, + const struct vmci_queue *queue, + u64 queue_offset, size_t size) +{ + return __qp_memcpy_from_queue((u8 *)dest + dest_offset, + queue, queue_offset, size, false); +} + +/* + * Copies from a given iovec to a VMCI Queue. + */ +static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, + u64 queue_offset, + const void *src, + size_t src_offset, size_t size) +{ + + /* + * We ignore src_offset because src is really a struct iovec * and will + * maintain offset internally. + */ + return __qp_memcpy_to_queue(queue, queue_offset, src, size, true); +} + +/* + * Copies to a given iovec from a VMCI Queue. + */ +static int qp_memcpy_from_queue_iov(void *dest, + size_t dest_offset, + const struct vmci_queue *queue, + u64 queue_offset, size_t size) +{ + /* + * We ignore dest_offset because dest is really a struct iovec * and + * will maintain offset internally. + */ + return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true); +} + +/* + * Allocates kernel VA space of specified size plus space for the queue + * and kernel interface. This is different from the guest queue allocator, + * because we do not allocate our own queue header/data pages here but + * share those of the guest. + */ +static struct vmci_queue *qp_host_alloc_queue(u64 size) +{ + struct vmci_queue *queue; + const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); + const size_t queue_page_size = + num_pages * sizeof(*queue->kernel_if->u.h.page); + + queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); + if (queue) { + queue->q_header = NULL; + queue->saved_header = NULL; + queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); + queue->kernel_if->host = true; + queue->kernel_if->mutex = NULL; + queue->kernel_if->num_pages = num_pages; + queue->kernel_if->u.h.header_page = + (struct page **)((u8 *)queue + queue_size); + queue->kernel_if->u.h.page = + &queue->kernel_if->u.h.header_page[1]; + } + + return queue; +} + +/* + * Frees kernel memory for a given queue (header plus translation + * structure). + */ +static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) +{ + kfree(queue); +} + +/* + * Initialize the mutex for the pair of queues. This mutex is used to + * protect the q_header and the buffer from changing out from under any + * users of either queue. Of course, it's only any good if the mutexes + * are actually acquired. Queue structure must lie on non-paged memory + * or we cannot guarantee access to the mutex.
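+ * + * Typical host-side usage with the helpers defined below (a sketch; + * either queue of the pair may be passed, since both share the one + * mutex): + * + * qp_acquire_queue_mutex(produce_q); + * ... access q_header and the queue pages ... + * qp_release_queue_mutex(produce_q);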
+ */ +static void qp_init_queue_mutex(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + /* + * Only the host queue has shared state - the guest queues do not + * need to synchronize access using a queue mutex. + */ + + if (produce_q->kernel_if->host) { + produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; + consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; + mutex_init(produce_q->kernel_if->mutex); + } +} + +/* + * Cleans up the mutex for the pair of queues. + */ +static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + if (produce_q->kernel_if->host) { + produce_q->kernel_if->mutex = NULL; + consume_q->kernel_if->mutex = NULL; + } +} + +/* + * Acquire the mutex for the queue. Note that the produce_q and + * the consume_q share a mutex. So, only one of the two needs to + * be passed in to this routine. Either will work just fine. + */ +static void qp_acquire_queue_mutex(struct vmci_queue *queue) +{ + if (queue->kernel_if->host) + mutex_lock(queue->kernel_if->mutex); +} + +/* + * Release the mutex for the queue. Note that the produce_q and + * the consume_q share a mutex. So, only one of the two needs to + * be passed in to this routine. Either will work just fine. + */ +static void qp_release_queue_mutex(struct vmci_queue *queue) +{ + if (queue->kernel_if->host) + mutex_unlock(queue->kernel_if->mutex); +} + +/* + * Helper function to release pages in the PageStoreAttachInfo + * previously obtained using get_user_pages. + */ +static void qp_release_pages(struct page **pages, + u64 num_pages, bool dirty) +{ + int i; + + for (i = 0; i < num_pages; i++) { + if (dirty) + set_page_dirty(pages[i]); + + page_cache_release(pages[i]); + pages[i] = NULL; + } +} + +/* + * Lock the user pages referenced by the {produce,consume}Buffer + * struct into memory and populate the {produce,consume}Pages + * arrays in the attach structure with them. + */ +static int qp_host_get_user_memory(u64 produce_uva, + u64 consume_uva, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + int retval; + int err = VMCI_SUCCESS; + + retval = get_user_pages_fast((uintptr_t) produce_uva, + produce_q->kernel_if->num_pages, 1, + produce_q->kernel_if->u.h.header_page); + if (retval < produce_q->kernel_if->num_pages) { + pr_warn("get_user_pages(produce) failed (retval=%d)", retval); + qp_release_pages(produce_q->kernel_if->u.h.header_page, + retval, false); + err = VMCI_ERROR_NO_MEM; + goto out; + } + + retval = get_user_pages_fast((uintptr_t) consume_uva, + consume_q->kernel_if->num_pages, 1, + consume_q->kernel_if->u.h.header_page); + if (retval < consume_q->kernel_if->num_pages) { + pr_warn("get_user_pages(consume) failed (retval=%d)", retval); + qp_release_pages(consume_q->kernel_if->u.h.header_page, + retval, false); + qp_release_pages(produce_q->kernel_if->u.h.header_page, + produce_q->kernel_if->num_pages, false); + err = VMCI_ERROR_NO_MEM; + } + + out: + return err; +} + +/* + * Registers the specification of the user pages used for backing a queue + * pair. Enough information to map in pages is stored in the OS specific + * part of the struct vmci_queue structure. + */ +static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + u64 produce_uva; + u64 consume_uva; + + /* + * The new style and the old style mapping differ only in + * that we get either a single UVA or two UVAs, so we split the + * single UVA range at the appropriate spot.
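+ * + * E.g. (illustrative): a produce queue of one data page plus its + * header page has num_pages == 2, so for a single-UVA (new style) + * page store the consume queue's pages start at + * page_store->pages + 2 * PAGE_SIZE, which is exactly the split + * computed below.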
+ */ + produce_uva = page_store->pages; + consume_uva = page_store->pages + + produce_q->kernel_if->num_pages * PAGE_SIZE; + return qp_host_get_user_memory(produce_uva, consume_uva, produce_q, + consume_q); +} + +/* + * Releases and removes the references to user pages stored in the attach + * struct. Pages are released from the page cache and may become + * swappable again. + */ +static void qp_host_unregister_user_memory(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + qp_release_pages(produce_q->kernel_if->u.h.header_page, + produce_q->kernel_if->num_pages, true); + memset(produce_q->kernel_if->u.h.header_page, 0, + sizeof(*produce_q->kernel_if->u.h.header_page) * + produce_q->kernel_if->num_pages); + qp_release_pages(consume_q->kernel_if->u.h.header_page, + consume_q->kernel_if->num_pages, true); + memset(consume_q->kernel_if->u.h.header_page, 0, + sizeof(*consume_q->kernel_if->u.h.header_page) * + consume_q->kernel_if->num_pages); +} + +/* + * Once qp_host_register_user_memory has been performed on a + * queue, the queue pair headers can be mapped into the + * kernel. Once mapped, they must be unmapped with + * qp_host_unmap_queues prior to calling + * qp_host_unregister_user_memory. + * Pages are pinned. + */ +static int qp_host_map_queues(struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + int result; + + if (!produce_q->q_header || !consume_q->q_header) { + struct page *headers[2]; + + if (produce_q->q_header != consume_q->q_header) + return VMCI_ERROR_QUEUEPAIR_MISMATCH; + + if (produce_q->kernel_if->u.h.header_page == NULL || + *produce_q->kernel_if->u.h.header_page == NULL) + return VMCI_ERROR_UNAVAILABLE; + + headers[0] = *produce_q->kernel_if->u.h.header_page; + headers[1] = *consume_q->kernel_if->u.h.header_page; + + produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); + if (produce_q->q_header != NULL) { + consume_q->q_header = + (struct vmci_queue_header *)((u8 *) + produce_q->q_header + + PAGE_SIZE); + result = VMCI_SUCCESS; + } else { + pr_warn("vmap failed\n"); + result = VMCI_ERROR_NO_MEM; + } + } else { + result = VMCI_SUCCESS; + } + + return result; +} + +/* + * Unmaps previously mapped queue pair headers from the kernel. + * Pages are unpinned. + */ +static int qp_host_unmap_queues(u32 gid, + struct vmci_queue *produce_q, + struct vmci_queue *consume_q) +{ + if (produce_q->q_header) { + if (produce_q->q_header < consume_q->q_header) + vunmap(produce_q->q_header); + else + vunmap(consume_q->q_header); + + produce_q->q_header = NULL; + consume_q->q_header = NULL; + } + + return VMCI_SUCCESS; +} + +/* + * Finds the entry in the list corresponding to a given handle. Assumes + * that the list is locked. + */ +static struct qp_entry *qp_list_find(struct qp_list *qp_list, + struct vmci_handle handle) +{ + struct qp_entry *entry; + + if (vmci_handle_is_invalid(handle)) + return NULL; + + list_for_each_entry(entry, &qp_list->head, list_item) { + if (vmci_handle_is_equal(entry->handle, handle)) + return entry; + } + + return NULL; +} + +/* + * Finds the entry in the list corresponding to a given handle. + */ +static struct qp_guest_endpoint * +qp_guest_handle_to_entry(struct vmci_handle handle) +{ + struct qp_guest_endpoint *entry; + struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); + + entry = qp ? container_of( + qp, struct qp_guest_endpoint, qp) : NULL; + return entry; +} + +/* + * Finds the entry in the list corresponding to a given handle. 
+ */ +static struct qp_broker_entry * +qp_broker_handle_to_entry(struct vmci_handle handle) +{ + struct qp_broker_entry *entry; + struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); + + entry = qp ? container_of( + qp, struct qp_broker_entry, qp) : NULL; + return entry; +} + +/* + * Dispatches a queue pair event message directly into the local event + * queue. + */ +static int qp_notify_peer_local(bool attach, struct vmci_handle handle) +{ + u32 context_id = vmci_get_context_id(); + struct vmci_event_qp ev; + + ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = + attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; + ev.payload.peer_id = context_id; + ev.payload.handle = handle; + + return vmci_event_dispatch(&ev.msg.hdr); +} + +/* + * Allocates and initializes a qp_guest_endpoint structure. + * Allocates a queue_pair rid (and handle) iff the given entry has + * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX + * are reserved handles. Assumes that the QP list mutex is held + * by the caller. + */ +static struct qp_guest_endpoint * +qp_guest_endpoint_create(struct vmci_handle handle, + u32 peer, + u32 flags, + u64 produce_size, + u64 consume_size, + void *produce_q, + void *consume_q) +{ + int result; + struct qp_guest_endpoint *entry; + /* One page each for the queue headers. */ + const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) + + DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2; + + if (vmci_handle_is_invalid(handle)) { + u32 context_id = vmci_get_context_id(); + + handle = vmci_make_handle(context_id, VMCI_INVALID_ID); + } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + entry->qp.peer = peer; + entry->qp.flags = flags; + entry->qp.produce_size = produce_size; + entry->qp.consume_size = consume_size; + entry->qp.ref_count = 0; + entry->num_ppns = num_ppns; + entry->produce_q = produce_q; + entry->consume_q = consume_q; + INIT_LIST_HEAD(&entry->qp.list_item); + + /* Add resource obj */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_QPAIR_GUEST, + handle); + entry->qp.handle = vmci_resource_handle(&entry->resource); + if ((result != VMCI_SUCCESS) || + qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", + handle.context, handle.resource, result); + kfree(entry); + entry = NULL; + } + } + return entry; +} + +/* + * Frees a qp_guest_endpoint structure. + */ +static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) +{ + qp_free_ppn_set(&entry->ppn_set); + qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); + qp_free_queue(entry->produce_q, entry->qp.produce_size); + qp_free_queue(entry->consume_q, entry->qp.consume_size); + /* Unlink from resource hash table and free callback */ + vmci_resource_remove(&entry->resource); + + kfree(entry); +} + +/* + * Helper to make a queue_pairAlloc hypercall when the driver is + * supporting a guest device. 
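+ * + * The datagram sent here is laid out as (a sketch of the code that + * follows, not a separate ABI statement): a struct vmci_qp_alloc_msg + * header followed by num_ppns u32 PPN entries, produce queue PPNs + * first and consume queue PPNs immediately after.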
+ */ +static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) +{ + struct vmci_qp_alloc_msg *alloc_msg; + size_t msg_size; + int result; + + if (!entry || entry->num_ppns <= 2) + return VMCI_ERROR_INVALID_ARGS; + + msg_size = sizeof(*alloc_msg) + + (size_t) entry->num_ppns * sizeof(u32); + alloc_msg = kmalloc(msg_size, GFP_KERNEL); + if (!alloc_msg) + return VMCI_ERROR_NO_MEM; + + alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_QUEUEPAIR_ALLOC); + alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE; + alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE; + alloc_msg->handle = entry->qp.handle; + alloc_msg->peer = entry->qp.peer; + alloc_msg->flags = entry->qp.flags; + alloc_msg->produce_size = entry->qp.produce_size; + alloc_msg->consume_size = entry->qp.consume_size; + alloc_msg->num_ppns = entry->num_ppns; + + result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg), + &entry->ppn_set); + if (result == VMCI_SUCCESS) + result = vmci_send_datagram(&alloc_msg->hdr); + + kfree(alloc_msg); + + return result; +} + +/* + * Helper to make a queue_pairDetach hypercall when the driver is + * supporting a guest device. + */ +static int qp_detatch_hypercall(struct vmci_handle handle) +{ + struct vmci_qp_detach_msg detach_msg; + + detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_QUEUEPAIR_DETACH); + detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE; + detach_msg.hdr.payload_size = sizeof(handle); + detach_msg.handle = handle; + + return vmci_send_datagram(&detach_msg.hdr); +} + +/* + * Adds the given entry to the list. Assumes that the list is locked. + */ +static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) +{ + if (entry) + list_add(&entry->list_item, &qp_list->head); +} + +/* + * Removes the given entry from the list. Assumes that the list is locked. + */ +static void qp_list_remove_entry(struct qp_list *qp_list, + struct qp_entry *entry) +{ + if (entry) + list_del(&entry->list_item); +} + +/* + * Helper for VMCI queue_pair detach interface. Frees the physical + * pages for the queue pair. + */ +static int qp_detatch_guest_work(struct vmci_handle handle) +{ + int result; + struct qp_guest_endpoint *entry; + u32 ref_count = ~0; /* To avoid compiler warning below */ + + mutex_lock(&qp_guest_endpoints.mutex); + + entry = qp_guest_handle_to_entry(handle); + if (!entry) { + mutex_unlock(&qp_guest_endpoints.mutex); + return VMCI_ERROR_NOT_FOUND; + } + + if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { + result = VMCI_SUCCESS; + + if (entry->qp.ref_count > 1) { + result = qp_notify_peer_local(false, handle); + /* + * We can fail to notify a local queuepair + * because we can't allocate. We still want + * to release the entry if that happens, so + * don't bail out yet. + */ + } + } else { + result = qp_detatch_hypercall(handle); + if (result < VMCI_SUCCESS) { + /* + * We failed to notify a non-local queuepair. + * That other queuepair might still be + * accessing the shared memory, so don't + * release the entry yet. It will get cleaned + * up by VMCIqueue_pair_Exit() if necessary + * (assuming we are going away, otherwise why + * did this fail?). + */ + + mutex_unlock(&qp_guest_endpoints.mutex); + return result; + } + } + + /* + * If we get here then we either failed to notify a local queuepair, or + * we succeeded in all cases. Release the entry if required. 
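+ * + * (Reference-count lifecycle, as inferred from this file: creation + * leaves ref_count at 1, a local attach raises it to 2, and the + * endpoint is only destroyed once a detach drops it back to 0.)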
+ */ + + entry->qp.ref_count--; + if (entry->qp.ref_count == 0) + qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); + + /* If we didn't remove the entry, this could change once we unlock. */ + if (entry) + ref_count = entry->qp.ref_count; + + mutex_unlock(&qp_guest_endpoints.mutex); + + if (ref_count == 0) + qp_guest_endpoint_destroy(entry); + + return result; +} + +/* + * This function handles the actual allocation of a VMCI queue + * pair guest endpoint. Allocates physical pages for the queue + * pair. It makes OS dependent calls through generic wrappers. + */ +static int qp_alloc_guest_work(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags) +{ + const u64 num_produce_pages = + DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1; + const u64 num_consume_pages = + DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1; + void *my_produce_q = NULL; + void *my_consume_q = NULL; + int result; + struct qp_guest_endpoint *queue_pair_entry = NULL; + + if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS) + return VMCI_ERROR_NO_ACCESS; + + mutex_lock(&qp_guest_endpoints.mutex); + + queue_pair_entry = qp_guest_handle_to_entry(*handle); + if (queue_pair_entry) { + if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { + /* Local attach case. */ + if (queue_pair_entry->qp.ref_count > 1) { + pr_devel("Error attempting to attach more than once\n"); + result = VMCI_ERROR_UNAVAILABLE; + goto error_keep_entry; + } + + if (queue_pair_entry->qp.produce_size != consume_size || + queue_pair_entry->qp.consume_size != + produce_size || + queue_pair_entry->qp.flags != + (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) { + pr_devel("Error mismatched queue pair in local attach\n"); + result = VMCI_ERROR_QUEUEPAIR_MISMATCH; + goto error_keep_entry; + } + + /* + * Do a local attach. We swap the consume and + * produce queues for the attacher and deliver + * an attach event. + */ + result = qp_notify_peer_local(true, *handle); + if (result < VMCI_SUCCESS) + goto error_keep_entry; + + my_produce_q = queue_pair_entry->consume_q; + my_consume_q = queue_pair_entry->produce_q; + goto out; + } + + result = VMCI_ERROR_ALREADY_EXISTS; + goto error_keep_entry; + } + + my_produce_q = qp_alloc_queue(produce_size, flags); + if (!my_produce_q) { + pr_warn("Error allocating pages for produce queue\n"); + result = VMCI_ERROR_NO_MEM; + goto error; + } + + my_consume_q = qp_alloc_queue(consume_size, flags); + if (!my_consume_q) { + pr_warn("Error allocating pages for consume queue\n"); + result = VMCI_ERROR_NO_MEM; + goto error; + } + + queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags, + produce_size, consume_size, + my_produce_q, my_consume_q); + if (!queue_pair_entry) { + pr_warn("Error allocating memory in %s\n", __func__); + result = VMCI_ERROR_NO_MEM; + goto error; + } + + result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q, + num_consume_pages, + &queue_pair_entry->ppn_set); + if (result < VMCI_SUCCESS) { + pr_warn("qp_alloc_ppn_set failed\n"); + goto error; + } + + /* + * It's only necessary to notify the host if this queue pair will be + * attached to from another context. + */ + if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { + /* Local create case. */ + u32 context_id = vmci_get_context_id(); + + /* + * Enforce similar checks on local queue pairs as we + * do for regular ones.
The handle's context must + * match the creator or attacher context id (here they + * are both the current context id) and the + * attach-only flag cannot exist during create. We + * also ensure specified peer is this context or an + * invalid one. + */ + if (queue_pair_entry->qp.handle.context != context_id || + (queue_pair_entry->qp.peer != VMCI_INVALID_ID && + queue_pair_entry->qp.peer != context_id)) { + result = VMCI_ERROR_NO_ACCESS; + goto error; + } + + if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { + result = VMCI_ERROR_NOT_FOUND; + goto error; + } + } else { + result = qp_alloc_hypercall(queue_pair_entry); + if (result < VMCI_SUCCESS) { + pr_warn("qp_alloc_hypercall result = %d\n", result); + goto error; + } + } + + qp_init_queue_mutex((struct vmci_queue *)my_produce_q, + (struct vmci_queue *)my_consume_q); + + qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); + + out: + queue_pair_entry->qp.ref_count++; + *handle = queue_pair_entry->qp.handle; + *produce_q = (struct vmci_queue *)my_produce_q; + *consume_q = (struct vmci_queue *)my_consume_q; + + /* + * We should initialize the queue pair header pages on a local + * queue pair create. For non-local queue pairs, the + * hypervisor initializes the header pages in the create step. + */ + if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && + queue_pair_entry->qp.ref_count == 1) { + vmci_q_header_init((*produce_q)->q_header, *handle); + vmci_q_header_init((*consume_q)->q_header, *handle); + } + + mutex_unlock(&qp_guest_endpoints.mutex); + + return VMCI_SUCCESS; + + error: + mutex_unlock(&qp_guest_endpoints.mutex); + if (queue_pair_entry) { + /* The queues will be freed inside the destroy routine. */ + qp_guest_endpoint_destroy(queue_pair_entry); + } else { + qp_free_queue(my_produce_q, produce_size); + qp_free_queue(my_consume_q, consume_size); + } + return result; + + error_keep_entry: + /* This path should only be used when an existing entry was found. */ + mutex_unlock(&qp_guest_endpoints.mutex); + return result; +} + +/* + * The first endpoint issuing a queue pair allocation will create the state + * of the queue pair in the queue pair broker. + * + * If the creator is a guest, it will associate a VMX virtual address range + * with the queue pair as specified by the page_store. For compatibility with + * older VMX'en that would use a separate step to set the VMX virtual + * address range, the virtual address range can be registered later using + * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be + * used. + * + * If the creator is the host, a page_store of NULL should be used as well, + * since the host is not able to supply a page store for the queue pair. + * + * For older VMX and host callers, the queue pair will be created in the + * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be + * created in the VMCIQPB_CREATED_MEM state. + */ +static int qp_broker_create(struct vmci_handle handle, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context, + vmci_event_release_cb wakeup_cb, + void *client_data, struct qp_broker_entry **ent) +{ + struct qp_broker_entry *entry = NULL; + const u32 context_id = vmci_ctx_get_id(context); + bool is_local = flags & VMCI_QPFLAG_LOCAL; + int result; + u64 guest_produce_size; + u64 guest_consume_size; + + /* Do not create if the caller asked not to.
*/ + if (flags & VMCI_QPFLAG_ATTACH_ONLY) + return VMCI_ERROR_NOT_FOUND; + + /* + * Creator's context ID should match handle's context ID or the creator + * must allow the context in handle's context ID as the "peer". + */ + if (handle.context != context_id && handle.context != peer) + return VMCI_ERROR_NO_ACCESS; + + if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer)) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * Creator's context ID for local queue pairs should match the + * peer, if a peer is specified. + */ + if (is_local && peer != VMCI_INVALID_ID && context_id != peer) + return VMCI_ERROR_NO_ACCESS; + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return VMCI_ERROR_NO_MEM; + + if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) { + /* + * The queue pair broker entry stores values from the guest + * point of view, so a creating host side endpoint should swap + * produce and consume values -- unless it is a local queue + * pair, in which case no swapping is necessary, since the local + * attacher will swap queues. + */ + + guest_produce_size = consume_size; + guest_consume_size = produce_size; + } else { + guest_produce_size = produce_size; + guest_consume_size = consume_size; + } + + entry->qp.handle = handle; + entry->qp.peer = peer; + entry->qp.flags = flags; + entry->qp.produce_size = guest_produce_size; + entry->qp.consume_size = guest_consume_size; + entry->qp.ref_count = 1; + entry->create_id = context_id; + entry->attach_id = VMCI_INVALID_ID; + entry->state = VMCIQPB_NEW; + entry->require_trusted_attach = + !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); + entry->created_by_trusted = + !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED); + entry->vmci_page_files = false; + entry->wakeup_cb = wakeup_cb; + entry->client_data = client_data; + entry->produce_q = qp_host_alloc_queue(guest_produce_size); + if (entry->produce_q == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + entry->consume_q = qp_host_alloc_queue(guest_consume_size); + if (entry->consume_q == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + + qp_init_queue_mutex(entry->produce_q, entry->consume_q); + + INIT_LIST_HEAD(&entry->qp.list_item); + + if (is_local) { + u8 *tmp; + + entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), + PAGE_SIZE, GFP_KERNEL); + if (entry->local_mem == NULL) { + result = VMCI_ERROR_NO_MEM; + goto error; + } + entry->state = VMCIQPB_CREATED_MEM; + entry->produce_q->q_header = entry->local_mem; + tmp = (u8 *)entry->local_mem + PAGE_SIZE * + (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); + entry->consume_q->q_header = (struct vmci_queue_header *)tmp; + } else if (page_store) { + /* + * The VMX already initialized the queue pair headers, so no + * need for the kernel side to do that. + */ + result = qp_host_register_user_memory(page_store, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + goto error; + + entry->state = VMCIQPB_CREATED_MEM; + } else { + /* + * A create without a page_store may be either a host + * side create (in which case we are waiting for the + * guest side to supply the memory) or an old style + * queue pair create (in which case we will expect a + * set page store call as the next step). 
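+ * + * Either way the pair waits in VMCIQPB_CREATED_NO_MEM and only + * leaves that state once backing memory arrives, via + * vmci_qp_broker_set_page_store() or via an attach that supplies + * a page store.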
+ */ + entry->state = VMCIQPB_CREATED_NO_MEM; + } + + qp_list_add_entry(&qp_broker_list, &entry->qp); + if (ent != NULL) + *ent = entry; + + /* Add to resource obj */ + result = vmci_resource_add(&entry->resource, + VMCI_RESOURCE_TYPE_QPAIR_HOST, + handle); + if (result != VMCI_SUCCESS) { + pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", + handle.context, handle.resource, result); + goto error; + } + + entry->qp.handle = vmci_resource_handle(&entry->resource); + if (is_local) { + vmci_q_header_init(entry->produce_q->q_header, + entry->qp.handle); + vmci_q_header_init(entry->consume_q->q_header, + entry->qp.handle); + } + + vmci_ctx_qp_create(context, entry->qp.handle); + + return VMCI_SUCCESS; + + error: + if (entry != NULL) { + qp_host_free_queue(entry->produce_q, guest_produce_size); + qp_host_free_queue(entry->consume_q, guest_consume_size); + kfree(entry); + } + + return result; +} + +/* + * Enqueues an event datagram to notify the peer VM attached to + * the given queue pair handle about attach/detach event by the + * given VM. Returns payload size of datagram enqueued on + * success, error code otherwise. + */ +static int qp_notify_peer(bool attach, + struct vmci_handle handle, + u32 my_id, + u32 peer_id) +{ + int rv; + struct vmci_event_qp ev; + + if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID || + peer_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + /* + * In vmci_ctx_enqueue_datagram() we enforce the upper limit on + * number of pending events from the hypervisor to a given VM + * otherwise a rogue VM could do an arbitrary number of attach + * and detach operations causing memory pressure in the host + * kernel. + */ + + ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); + ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_CONTEXT_RESOURCE_ID); + ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); + ev.msg.event_data.event = attach ? + VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; + ev.payload.handle = handle; + ev.payload.peer_id = my_id; + + rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, + &ev.msg.hdr, false); + if (rv < VMCI_SUCCESS) + pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n", + attach ? "ATTACH" : "DETACH", peer_id); + + return rv; +} + +/* + * The second endpoint issuing a queue pair allocation will attach to + * the queue pair registered with the queue pair broker. + * + * If the attacher is a guest, it will associate a VMX virtual address + * range with the queue pair as specified by the page_store. At this + * point, the already attached host endpoint may start using the queue + * pair, and an attach event is sent to it. For compatibility with + * older VMX'en that used a separate step to set the VMX virtual + * address range, the virtual address range can be registered later + * using vmci_qp_broker_set_page_store. In that case, a page_store of + * NULL should be used, and the attach event will be generated once + * the actual page store has been set. + * + * If the attacher is the host, a page_store of NULL should be used as + * well, since the page store information is already set by the guest. + * + * For new VMX and host callers, the queue pair will be moved to the + * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be + * moved to the VMCIQPB_ATTACHED_NO_MEM state.
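+ * + * Summarizing the outcomes described above: + * + * guest attach, page_store supplied -> VMCIQPB_ATTACHED_MEM + * guest attach, no page_store (old VMX) -> VMCIQPB_ATTACHED_NO_MEM + * host attach to a CREATED_MEM pair -> VMCIQPB_ATTACHED_MEM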
+ */
+static int qp_broker_attach(struct qp_broker_entry *entry,
+			    u32 peer,
+			    u32 flags,
+			    u32 priv_flags,
+			    u64 produce_size,
+			    u64 consume_size,
+			    struct vmci_qp_page_store *page_store,
+			    struct vmci_ctx *context,
+			    vmci_event_release_cb wakeup_cb,
+			    void *client_data,
+			    struct qp_broker_entry **ent)
+{
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool is_local = flags & VMCI_QPFLAG_LOCAL;
+	int result;
+
+	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+	    entry->state != VMCIQPB_CREATED_MEM)
+		return VMCI_ERROR_UNAVAILABLE;
+
+	if (is_local) {
+		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
+		    context_id != entry->create_id) {
+			return VMCI_ERROR_INVALID_ARGS;
+		}
+	} else if (context_id == entry->create_id ||
+		   context_id == entry->attach_id) {
+		return VMCI_ERROR_ALREADY_EXISTS;
+	}
+
+	if (VMCI_CONTEXT_IS_VM(context_id) &&
+	    VMCI_CONTEXT_IS_VM(entry->create_id))
+		return VMCI_ERROR_DST_UNREACHABLE;
+
+	/*
+	 * If we are attaching from a restricted context then the queuepair
+	 * must have been created by a trusted endpoint.
+	 */
+	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+	    !entry->created_by_trusted)
+		return VMCI_ERROR_NO_ACCESS;
+
+	/*
+	 * If we are attaching to a queuepair that was created by a restricted
+	 * context then we must be trusted.
+	 */
+	if (entry->require_trusted_attach &&
+	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
+		return VMCI_ERROR_NO_ACCESS;
+
+	/*
+	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
+	 * control check is not performed.
+	 */
+	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
+		return VMCI_ERROR_NO_ACCESS;
+
+	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
+		/*
+		 * Do not attach if the caller doesn't support Host Queue Pairs
+		 * and a host created this queue pair.
+		 */
+
+		if (!vmci_ctx_supports_host_qp(context))
+			return VMCI_ERROR_INVALID_RESOURCE;
+
+	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
+		struct vmci_ctx *create_context;
+		bool supports_host_qp;
+
+		/*
+		 * Do not attach a host to a user created queue pair if that
+		 * user doesn't support host queue pair end points.
+		 */
+
+		create_context = vmci_ctx_get(entry->create_id);
+		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
+		vmci_ctx_put(create_context);
+
+		if (!supports_host_qp)
+			return VMCI_ERROR_INVALID_RESOURCE;
+	}
+
+	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
+		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		/*
+		 * The queue pair broker entry stores values from the guest
+		 * point of view, so an attaching guest should match the values
+		 * stored in the entry.
+		 */
+
+		if (entry->qp.produce_size != produce_size ||
+		    entry->qp.consume_size != consume_size) {
+			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+		}
+	} else if (entry->qp.produce_size != consume_size ||
+		   entry->qp.consume_size != produce_size) {
+		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+	}
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		/*
+		 * If a guest attached to a queue pair, it will supply
+		 * the backing memory.  If this is a pre NOVMVM vmx,
+		 * the backing memory will be supplied by calling
+		 * vmci_qp_broker_set_page_store() following the
+		 * return of the vmci_qp_broker_alloc() call. If it is
+		 * a vmx of version NOVMVM or later, the page store
+		 * must be supplied as part of the
+		 * vmci_qp_broker_alloc call.  Under all circumstances,
+		 * the initially created queue pair must not have any
+		 * memory associated with it already.
+ */ + + if (entry->state != VMCIQPB_CREATED_NO_MEM) + return VMCI_ERROR_INVALID_ARGS; + + if (page_store != NULL) { + /* + * Patch up host state to point to guest + * supplied memory. The VMX already + * initialized the queue pair headers, so no + * need for the kernel side to do that. + */ + + result = qp_host_register_user_memory(page_store, + entry->produce_q, + entry->consume_q); + if (result < VMCI_SUCCESS) + return result; + + entry->state = VMCIQPB_ATTACHED_MEM; + } else { + entry->state = VMCIQPB_ATTACHED_NO_MEM; + } + } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { + /* + * The host side is attempting to attach to a queue + * pair that doesn't have any memory associated with + * it. This must be a pre NOVMVM vmx that hasn't set + * the page store information yet, or a quiesced VM. + */ + + return VMCI_ERROR_UNAVAILABLE; + } else { + /* The host side has successfully attached to a queue pair. */ + entry->state = VMCIQPB_ATTACHED_MEM; + } + + if (entry->state == VMCIQPB_ATTACHED_MEM) { + result = + qp_notify_peer(true, entry->qp.handle, context_id, + entry->create_id); + if (result < VMCI_SUCCESS) + pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", + entry->create_id, entry->qp.handle.context, + entry->qp.handle.resource); + } + + entry->attach_id = context_id; + entry->qp.ref_count++; + if (wakeup_cb) { + entry->wakeup_cb = wakeup_cb; + entry->client_data = client_data; + } + + /* + * When attaching to local queue pairs, the context already has + * an entry tracking the queue pair, so don't add another one. + */ + if (!is_local) + vmci_ctx_qp_create(context, entry->qp.handle); + + if (ent != NULL) + *ent = entry; + + return VMCI_SUCCESS; +} + +/* + * queue_pair_Alloc for use when setting up queue pair endpoints + * on the host. + */ +static int qp_broker_alloc(struct vmci_handle handle, + u32 peer, + u32 flags, + u32 priv_flags, + u64 produce_size, + u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context, + vmci_event_release_cb wakeup_cb, + void *client_data, + struct qp_broker_entry **ent, + bool *swap) +{ + const u32 context_id = vmci_ctx_get_id(context); + bool create; + struct qp_broker_entry *entry = NULL; + bool is_local = flags & VMCI_QPFLAG_LOCAL; + int result; + + if (vmci_handle_is_invalid(handle) || + (flags & ~VMCI_QP_ALL_FLAGS) || is_local || + !(produce_size || consume_size) || + !context || context_id == VMCI_INVALID_ID || + handle.context == VMCI_INVALID_ID) { + return VMCI_ERROR_INVALID_ARGS; + } + + if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store)) + return VMCI_ERROR_INVALID_ARGS; + + /* + * In the initial argument check, we ensure that non-vmkernel hosts + * are not allowed to create local queue pairs. 
+ */ + + mutex_lock(&qp_broker_list.mutex); + + if (!is_local && vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + mutex_unlock(&qp_broker_list.mutex); + return VMCI_ERROR_ALREADY_EXISTS; + } + + if (handle.resource != VMCI_INVALID_ID) + entry = qp_broker_handle_to_entry(handle); + + if (!entry) { + create = true; + result = + qp_broker_create(handle, peer, flags, priv_flags, + produce_size, consume_size, page_store, + context, wakeup_cb, client_data, ent); + } else { + create = false; + result = + qp_broker_attach(entry, peer, flags, priv_flags, + produce_size, consume_size, page_store, + context, wakeup_cb, client_data, ent); + } + + mutex_unlock(&qp_broker_list.mutex); + + if (swap) + *swap = (context_id == VMCI_HOST_CONTEXT_ID) && + !(create && is_local); + + return result; +} + +/* + * This function implements the kernel API for allocating a queue + * pair. + */ +static int qp_alloc_host_work(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags, + vmci_event_release_cb wakeup_cb, + void *client_data) +{ + struct vmci_handle new_handle; + struct vmci_ctx *context; + struct qp_broker_entry *entry; + int result; + bool swap; + + if (vmci_handle_is_invalid(*handle)) { + new_handle = vmci_make_handle( + VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID); + } else + new_handle = *handle; + + context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); + entry = NULL; + result = + qp_broker_alloc(new_handle, peer, flags, priv_flags, + produce_size, consume_size, NULL, context, + wakeup_cb, client_data, &entry, &swap); + if (result == VMCI_SUCCESS) { + if (swap) { + /* + * If this is a local queue pair, the attacher + * will swap around produce and consume + * queues. + */ + + *produce_q = entry->consume_q; + *consume_q = entry->produce_q; + } else { + *produce_q = entry->produce_q; + *consume_q = entry->consume_q; + } + + *handle = vmci_resource_handle(&entry->resource); + } else { + *handle = VMCI_INVALID_HANDLE; + pr_devel("queue pair broker failed to alloc (result=%d)\n", + result); + } + vmci_ctx_put(context); + return result; +} + +/* + * Allocates a VMCI queue_pair. Only checks validity of input + * arguments. The real work is done in the host or guest + * specific function. + */ +int vmci_qp_alloc(struct vmci_handle *handle, + struct vmci_queue **produce_q, + u64 produce_size, + struct vmci_queue **consume_q, + u64 consume_size, + u32 peer, + u32 flags, + u32 priv_flags, + bool guest_endpoint, + vmci_event_release_cb wakeup_cb, + void *client_data) +{ + if (!handle || !produce_q || !consume_q || + (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) + return VMCI_ERROR_INVALID_ARGS; + + if (guest_endpoint) { + return qp_alloc_guest_work(handle, produce_q, + produce_size, consume_q, + consume_size, peer, + flags, priv_flags); + } else { + return qp_alloc_host_work(handle, produce_q, + produce_size, consume_q, + consume_size, peer, flags, + priv_flags, wakeup_cb, client_data); + } +} + +/* + * This function implements the host kernel API for detaching from + * a queue pair. 
+ */
+static int qp_detatch_host_work(struct vmci_handle handle)
+{
+	int result;
+	struct vmci_ctx *context;
+
+	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+
+	result = vmci_qp_broker_detach(handle, context);
+
+	vmci_ctx_put(context);
+	return result;
+}
+
+/*
+ * Detaches from a VMCI queue_pair. Only checks validity of input argument.
+ * Real work is done in the host or guest specific function.
+ */
+static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
+{
+	if (vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (guest_endpoint)
+		return qp_detatch_guest_work(handle);
+	else
+		return qp_detatch_host_work(handle);
+}
+
+/*
+ * Returns the entry from the head of the list. Assumes that the list is
+ * locked.
+ */
+static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
+{
+	if (!list_empty(&qp_list->head)) {
+		struct qp_entry *entry =
+		    list_first_entry(&qp_list->head, struct qp_entry,
+				     list_item);
+		return entry;
+	}
+
+	return NULL;
+}
+
+void vmci_qp_broker_exit(void)
+{
+	struct qp_entry *entry;
+	struct qp_broker_entry *be;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	while ((entry = qp_list_get_head(&qp_broker_list))) {
+		be = (struct qp_broker_entry *)entry;
+
+		qp_list_remove_entry(&qp_broker_list, entry);
+		kfree(be);
+	}
+
+	mutex_unlock(&qp_broker_list.mutex);
+}
+
+/*
+ * Requests that a queue pair be allocated with the VMCI queue
+ * pair broker. Allocates a queue pair entry if one does not
+ * exist. Attaches to one if it exists, and retrieves the page
+ * files backing that queue_pair.  Assumes that the queue pair
+ * broker lock is held.
+ */
+int vmci_qp_broker_alloc(struct vmci_handle handle,
+			 u32 peer,
+			 u32 flags,
+			 u32 priv_flags,
+			 u64 produce_size,
+			 u64 consume_size,
+			 struct vmci_qp_page_store *page_store,
+			 struct vmci_ctx *context)
+{
+	return qp_broker_alloc(handle, peer, flags, priv_flags,
+			       produce_size, consume_size,
+			       page_store, context, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
+ * step to add the UVAs of the VMX mapping of the queue pair. This function
+ * provides backwards compatibility with such VMX'en, and takes care of
+ * registering the page store for a queue pair previously allocated by the
+ * VMX during create or attach. This function will move the queue pair state
+ * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM, or from
+ * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
+ * attached state with memory, the queue pair is ready to be used by the
+ * host peer, and an attached event will be generated.
+ *
+ * Assumes that the queue pair broker lock is held.
+ *
+ * This function is only used by the hosted platform, since there is no
+ * issue with backwards compatibility for vmkernel.
+ */
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+				  u64 produce_uva,
+				  u64 consume_uva,
+				  struct vmci_ctx *context)
+{
+	struct qp_broker_entry *entry;
+	int result;
+	const u32 context_id = vmci_ctx_get_id(context);
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * We only support guest to host queue pairs, so the VMX must
+	 * supply UVAs for the mapped page files.
+ */ + + if (produce_uva == 0 || consume_uva == 0) + return VMCI_ERROR_INVALID_ARGS; + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + /* + * If I'm the owner then I can set the page store. + * + * Or, if a host created the queue_pair and I'm the attached peer + * then I can set the page store. + */ + if (entry->create_id != context_id && + (entry->create_id != VMCI_HOST_CONTEXT_ID || + entry->attach_id != context_id)) { + result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; + goto out; + } + + if (entry->state != VMCIQPB_CREATED_NO_MEM && + entry->state != VMCIQPB_ATTACHED_NO_MEM) { + result = VMCI_ERROR_UNAVAILABLE; + goto out; + } + + result = qp_host_get_user_memory(produce_uva, consume_uva, + entry->produce_q, entry->consume_q); + if (result < VMCI_SUCCESS) + goto out; + + result = qp_host_map_queues(entry->produce_q, entry->consume_q); + if (result < VMCI_SUCCESS) { + qp_host_unregister_user_memory(entry->produce_q, + entry->consume_q); + goto out; + } + + if (entry->state == VMCIQPB_CREATED_NO_MEM) + entry->state = VMCIQPB_CREATED_MEM; + else + entry->state = VMCIQPB_ATTACHED_MEM; + + entry->vmci_page_files = true; + + if (entry->state == VMCIQPB_ATTACHED_MEM) { + result = + qp_notify_peer(true, handle, context_id, entry->create_id); + if (result < VMCI_SUCCESS) { + pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", + entry->create_id, entry->qp.handle.context, + entry->qp.handle.resource); + } + } + + result = VMCI_SUCCESS; + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Resets saved queue headers for the given QP broker + * entry. Should be used when guest memory becomes available + * again, or the guest detaches. + */ +static void qp_reset_saved_headers(struct qp_broker_entry *entry) +{ + entry->produce_q->saved_header = NULL; + entry->consume_q->saved_header = NULL; +} + +/* + * The main entry point for detaching from a queue pair registered with the + * queue pair broker. If more than one endpoint is attached to the queue + * pair, the first endpoint will mainly decrement a reference count and + * generate a notification to its peer. The last endpoint will clean up + * the queue pair state registered with the broker. + * + * When a guest endpoint detaches, it will unmap and unregister the guest + * memory backing the queue pair. If the host is still attached, it will + * no longer be able to access the queue pair content. + * + * If the queue pair is already in a state where there is no memory + * registered for the queue pair (any *_NO_MEM state), it will transition to + * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest + * endpoint is the first of two endpoints to detach. If the host endpoint is + * the first out of two to detach, the queue pair will move to the + * VMCIQPB_SHUTDOWN_MEM state. 
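+ *
+ * (Editorial summary of the code below: the first of the two
+ * endpoints to detach drops qp.ref_count to 1, notifies its peer and
+ * moves the entry to one of the VMCIQPB_SHUTDOWN_* states; the
+ * second detach drops it to 0, at which point the queues are freed
+ * and the entry is removed from the broker list and the resource
+ * table.)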
+ */
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
+{
+	struct qp_broker_entry *entry;
+	const u32 context_id = vmci_ctx_get_id(context);
+	u32 peer_id;
+	bool is_local = false;
+	int result;
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID) {
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!vmci_ctx_qp_exists(context, handle)) {
+		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	entry = qp_broker_handle_to_entry(handle);
+	if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	if (context_id != entry->create_id && context_id != entry->attach_id) {
+		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+		goto out;
+	}
+
+	if (context_id == entry->create_id) {
+		peer_id = entry->attach_id;
+		entry->create_id = VMCI_INVALID_ID;
+	} else {
+		peer_id = entry->create_id;
+		entry->attach_id = VMCI_INVALID_ID;
+	}
+	entry->qp.ref_count--;
+
+	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		bool headers_mapped;
+
+		/*
+		 * Pre NOVMVM vmx'en may detach from a queue pair
+		 * before setting the page store, and in that case
+		 * there is no user memory to detach from. Also, more
+		 * recent VMX'en may detach from a queue pair in the
+		 * quiesced state.
+		 */
+
+		qp_acquire_queue_mutex(entry->produce_q);
+		headers_mapped = entry->produce_q->q_header ||
+		    entry->consume_q->q_header;
+		if (QPBROKERSTATE_HAS_MEM(entry)) {
+			result =
+			    qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
+						 entry->produce_q,
+						 entry->consume_q);
+			if (result < VMCI_SUCCESS)
+				pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+					handle.context, handle.resource,
+					result);
+
+			/*
+			 * Memory registered via page files or a later
+			 * set_page_store call is unregistered the same
+			 * way, so a single call suffices.
+			 */
+			qp_host_unregister_user_memory(entry->produce_q,
+						       entry->consume_q);
+		}
+
+		if (!headers_mapped)
+			qp_reset_saved_headers(entry);
+
+		qp_release_queue_mutex(entry->produce_q);
+
+		if (!headers_mapped && entry->wakeup_cb)
+			entry->wakeup_cb(entry->client_data);
+
+	} else {
+		if (entry->wakeup_cb) {
+			entry->wakeup_cb = NULL;
+			entry->client_data = NULL;
+		}
+	}
+
+	if (entry->qp.ref_count == 0) {
+		qp_list_remove_entry(&qp_broker_list, &entry->qp);
+
+		if (is_local)
+			kfree(entry->local_mem);
+
+		qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+		qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
+		qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
+		/* Unlink from resource hash table and free callback */
+		vmci_resource_remove(&entry->resource);
+
+		kfree(entry);
+
+		vmci_ctx_qp_destroy(context, handle);
+	} else {
+		qp_notify_peer(false, handle, context_id, peer_id);
+		if (context_id == VMCI_HOST_CONTEXT_ID &&
+		    QPBROKERSTATE_HAS_MEM(entry)) {
+			entry->state = VMCIQPB_SHUTDOWN_MEM;
+		} else {
+			entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
+		}
+
+		if (!is_local)
+			vmci_ctx_qp_destroy(context, handle);
+
+	}
+	result = VMCI_SUCCESS;
+ out:
+	mutex_unlock(&qp_broker_list.mutex);
+	return result;
+}
+
+/*
+ * Establishes the necessary mappings for a queue pair given a
+ * reference to the queue pair guest memory.  This is usually
+ * called when a guest is unquiesced and the VMX is allowed to
+ * map guest memory once again.
+ */
+int vmci_qp_broker_map(struct vmci_handle handle,
+		       struct vmci_ctx *context,
+		       u64 guest_mem)
+{
+	struct qp_broker_entry *entry;
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool is_local = false;
+	int result;
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!vmci_ctx_qp_exists(context, handle)) {
+		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	entry = qp_broker_handle_to_entry(handle);
+	if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	if (context_id != entry->create_id && context_id != entry->attach_id) {
+		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+		goto out;
+	}
+
+	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+	result = VMCI_SUCCESS;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		struct vmci_qp_page_store page_store;
+
+		page_store.pages = guest_mem;
+		page_store.len = QPE_NUM_PAGES(entry->qp);
+
+		qp_acquire_queue_mutex(entry->produce_q);
+		qp_reset_saved_headers(entry);
+		result =
+		    qp_host_register_user_memory(&page_store,
+						 entry->produce_q,
+						 entry->consume_q);
+		qp_release_queue_mutex(entry->produce_q);
+		if (result == VMCI_SUCCESS) {
+			/* Move state from *_NO_MEM to *_MEM */
+
+			entry->state++;
+
+			if (entry->wakeup_cb)
+				entry->wakeup_cb(entry->client_data);
+		}
+	}
+
+ out:
+	mutex_unlock(&qp_broker_list.mutex);
+	return result;
+}
+
+/*
+ * Saves a snapshot of the queue headers for the given QP broker
+ * entry. Should be used when guest memory is unmapped.
+ * Results:
+ * VMCI_SUCCESS on success, appropriate error code if guest memory
+ * can't be accessed.
+ */
+static int qp_save_headers(struct qp_broker_entry *entry)
+{
+	int result;
+
+	if (entry->produce_q->saved_header != NULL &&
+	    entry->consume_q->saved_header != NULL) {
+		/*
+		 * If the headers have already been saved, we don't need to do
+		 * it again, and we don't want to map in the headers
+		 * unnecessarily.
+		 */
+
+		return VMCI_SUCCESS;
+	}
+
+	if (NULL == entry->produce_q->q_header ||
+	    NULL == entry->consume_q->q_header) {
+		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+		if (result < VMCI_SUCCESS)
+			return result;
+	}
+
+	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
+	       sizeof(entry->saved_produce_q));
+	entry->produce_q->saved_header = &entry->saved_produce_q;
+	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
+	       sizeof(entry->saved_consume_q));
+	entry->consume_q->saved_header = &entry->saved_consume_q;
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Removes all references to the guest memory of a given queue pair, and
+ * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
+ * called when a VM is being quiesced, where access to guest memory should
+ * be avoided.
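+ *
+ * (Editorial note: the *_NO_MEM -> *_MEM transition in
+ * vmci_qp_broker_map() is implemented as "entry->state++" and
+ * reversed below as "entry->state--", which relies on every
+ * VMCIQPB_*_NO_MEM enumerator being defined immediately before its
+ * VMCIQPB_*_MEM counterpart.)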
+ */ +int vmci_qp_broker_unmap(struct vmci_handle handle, + struct vmci_ctx *context, + u32 gid) +{ + struct qp_broker_entry *entry; + const u32 context_id = vmci_ctx_get_id(context); + bool is_local = false; + int result; + + if (vmci_handle_is_invalid(handle) || !context || + context_id == VMCI_INVALID_ID) + return VMCI_ERROR_INVALID_ARGS; + + mutex_lock(&qp_broker_list.mutex); + + if (!vmci_ctx_qp_exists(context, handle)) { + pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + entry = qp_broker_handle_to_entry(handle); + if (!entry) { + pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", + context_id, handle.context, handle.resource); + result = VMCI_ERROR_NOT_FOUND; + goto out; + } + + if (context_id != entry->create_id && context_id != entry->attach_id) { + result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; + goto out; + } + + is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; + + if (context_id != VMCI_HOST_CONTEXT_ID) { + qp_acquire_queue_mutex(entry->produce_q); + result = qp_save_headers(entry); + if (result < VMCI_SUCCESS) + pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", + handle.context, handle.resource, result); + + qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); + + /* + * On hosted, when we unmap queue pairs, the VMX will also + * unmap the guest memory, so we invalidate the previously + * registered memory. If the queue pair is mapped again at a + * later point in time, we will need to reregister the user + * memory with a possibly new user VA. + */ + qp_host_unregister_user_memory(entry->produce_q, + entry->consume_q); + + /* + * Move state from *_MEM to *_NO_MEM. + */ + entry->state--; + + qp_release_queue_mutex(entry->produce_q); + } + + result = VMCI_SUCCESS; + + out: + mutex_unlock(&qp_broker_list.mutex); + return result; +} + +/* + * Destroys all guest queue pair endpoints. If active guest queue + * pairs still exist, hypercalls to attempt detach from these + * queue pairs will be made. Any failure to detach is silently + * ignored. + */ +void vmci_qp_guest_endpoints_exit(void) +{ + struct qp_entry *entry; + struct qp_guest_endpoint *ep; + + mutex_lock(&qp_guest_endpoints.mutex); + + while ((entry = qp_list_get_head(&qp_guest_endpoints))) { + ep = (struct qp_guest_endpoint *)entry; + + /* Don't make a hypercall for local queue_pairs. */ + if (!(entry->flags & VMCI_QPFLAG_LOCAL)) + qp_detatch_hypercall(entry->handle); + + /* We cannot fail the exit, so let's reset ref_count. */ + entry->ref_count = 0; + qp_list_remove_entry(&qp_guest_endpoints, entry); + + qp_guest_endpoint_destroy(ep); + } + + mutex_unlock(&qp_guest_endpoints.mutex); +} + +/* + * Helper routine that will lock the queue pair before subsequent + * operations. + * Note: Non-blocking on the host side is currently only implemented in ESX. + * Since non-blocking isn't yet implemented on the host personality we + * have no reason to acquire a spin lock. So to avoid the use of an + * unnecessary lock only acquire the mutex if we can block. + */ +static void qp_lock(const struct vmci_qp *qpair) +{ + qp_acquire_queue_mutex(qpair->produce_q); +} + +/* + * Helper routine that unlocks the queue pair after calling + * qp_lock. 
+ */
+static void qp_unlock(const struct vmci_qp *qpair)
+{
+	qp_release_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * The queue headers may not be mapped at all times. If a queue is
+ * currently not mapped, an attempt will be made to map it.
+ */
+static int qp_map_queue_headers(struct vmci_queue *produce_q,
+				struct vmci_queue *consume_q)
+{
+	int result;
+
+	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
+		result = qp_host_map_queues(produce_q, consume_q);
+		if (result < VMCI_SUCCESS)
+			return (produce_q->saved_header &&
+				consume_q->saved_header) ?
+			    VMCI_ERROR_QUEUEPAIR_NOT_READY :
+			    VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Helper routine that will retrieve the produce and consume
+ * headers of a given queue pair. If the guest memory of the
+ * queue pair is currently not available, the saved queue headers
+ * will be returned, if these are available.
+ */
+static int qp_get_queue_headers(const struct vmci_qp *qpair,
+				struct vmci_queue_header **produce_q_header,
+				struct vmci_queue_header **consume_q_header)
+{
+	int result;
+
+	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
+	if (result == VMCI_SUCCESS) {
+		*produce_q_header = qpair->produce_q->q_header;
+		*consume_q_header = qpair->consume_q->q_header;
+	} else if (qpair->produce_q->saved_header &&
+		   qpair->consume_q->saved_header) {
+		*produce_q_header = qpair->produce_q->saved_header;
+		*consume_q_header = qpair->consume_q->saved_header;
+		result = VMCI_SUCCESS;
+	}
+
+	return result;
+}
+
+/*
+ * Callback from VMCI queue pair broker indicating that a queue
+ * pair that was previously not ready, now either is ready or
+ * gone forever.
+ */
+static int qp_wakeup_cb(void *client_data)
+{
+	struct vmci_qp *qpair = (struct vmci_qp *)client_data;
+
+	qp_lock(qpair);
+	while (qpair->blocked > 0) {
+		qpair->blocked--;
+		qpair->generation++;
+		wake_up(&qpair->event);
+	}
+	qp_unlock(qpair);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Makes the calling thread wait for the queue pair to become
+ * ready for host side access.  Returns true when thread is
+ * woken up after queue pair state change, false otherwise.
+ */
+static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
+{
+	unsigned int generation;
+
+	qpair->blocked++;
+	generation = qpair->generation;
+	qp_unlock(qpair);
+	wait_event(qpair->event, generation != qpair->generation);
+	qp_lock(qpair);
+
+	return true;
+}
+
+/*
+ * Enqueues a given buffer to the produce queue using the provided
+ * function. As many bytes as possible (space available in the queue)
+ * are enqueued. Assumes the queue->mutex has been acquired. Returns
+ * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
+ * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
+ * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
+ * an error occurred when accessing the buffer,
+ * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
+ * available. Otherwise, the number of bytes written to the queue is
+ * returned. Updates the tail pointer of the produce queue.
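+ *
+ * A worked example of the wrap-around case below (editorial, with
+ * made-up numbers): for produce_q_size = 4096, tail = 3584 and
+ * written = 1024, the first memcpy_to_queue() copies 512 bytes at
+ * offset 3584, the second copies the remaining 512 bytes at offset
+ * 0, and the producer tail then becomes (3584 + 1024) mod 4096 = 512.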
+ */
+static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
+				 struct vmci_queue *consume_q,
+				 const u64 produce_q_size,
+				 const void *buf,
+				 size_t buf_size,
+				 vmci_memcpy_to_queue_func memcpy_to_queue)
+{
+	s64 free_space;
+	u64 tail;
+	size_t written;
+	ssize_t result;
+
+	result = qp_map_queue_headers(produce_q, consume_q);
+	if (unlikely(result != VMCI_SUCCESS))
+		return result;
+
+	free_space = vmci_q_header_free_space(produce_q->q_header,
+					      consume_q->q_header,
+					      produce_q_size);
+	if (free_space == 0)
+		return VMCI_ERROR_QUEUEPAIR_NOSPACE;
+
+	if (free_space < VMCI_SUCCESS)
+		return (ssize_t) free_space;
+
+	written = (size_t) (free_space > buf_size ? buf_size : free_space);
+	tail = vmci_q_header_producer_tail(produce_q->q_header);
+	if (likely(tail + written < produce_q_size)) {
+		result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+	} else {
+		/* Tail pointer wraps around. */
+
+		const size_t tmp = (size_t) (produce_q_size - tail);
+
+		result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+		if (result >= VMCI_SUCCESS)
+			result = memcpy_to_queue(produce_q, 0, buf, tmp,
+						 written - tmp);
+	}
+
+	if (result < VMCI_SUCCESS)
+		return result;
+
+	vmci_q_header_add_producer_tail(produce_q->q_header, written,
+					produce_q_size);
+	return written;
+}
+
+/*
+ * Dequeues data (if available) from the given consume queue. Writes data
+ * to the user provided buffer using the provided function.
+ * Assumes the queue->mutex has been acquired.
+ * Results:
+ * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
+ * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
+ * (as defined by the queue size).
+ * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
+ * Otherwise the number of bytes dequeued is returned.
+ * Side effects:
+ * Updates the head pointer of the consume queue.
+ */
+static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
+				 struct vmci_queue *consume_q,
+				 const u64 consume_q_size,
+				 void *buf,
+				 size_t buf_size,
+				 vmci_memcpy_from_queue_func memcpy_from_queue,
+				 bool update_consumer)
+{
+	s64 buf_ready;
+	u64 head;
+	size_t read;
+	ssize_t result;
+
+	result = qp_map_queue_headers(produce_q, consume_q);
+	if (unlikely(result != VMCI_SUCCESS))
+		return result;
+
+	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
+					    produce_q->q_header,
+					    consume_q_size);
+	if (buf_ready == 0)
+		return VMCI_ERROR_QUEUEPAIR_NODATA;
+
+	if (buf_ready < VMCI_SUCCESS)
+		return (ssize_t) buf_ready;
+
+	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
+	head = vmci_q_header_consumer_head(produce_q->q_header);
+	if (likely(head + read < consume_q_size)) {
+		result = memcpy_from_queue(buf, 0, consume_q, head, read);
+	} else {
+		/* Head pointer wraps around. */
+
+		const size_t tmp = (size_t) (consume_q_size - head);
+
+		result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+		if (result >= VMCI_SUCCESS)
+			result = memcpy_from_queue(buf, tmp, consume_q, 0,
+						   read - tmp);
+
+	}
+
+	if (result < VMCI_SUCCESS)
+		return result;
+
+	if (update_consumer)
+		vmci_q_header_add_consumer_head(produce_q->q_header,
+						read, consume_q_size);
+
+	return read;
+}
+
+/*
+ * vmci_qpair_alloc() - Allocates a queue pair.
+ * @qpair: Pointer for the new vmci_qp struct.
+ * @handle: Handle to track the resource.
+ * @produce_qsize: Desired size of the producer queue.
+ * @consume_qsize: Desired size of the consumer queue.
+ * @peer: ContextID of the peer.
+ * @flags: VMCI flags.
+ * @priv_flags: VMCI privilege flags.
+ *
+ * This is the client interface for allocating the memory for a
+ * vmci_qp structure and then attaching to the underlying
+ * queue.  If an error occurs allocating the memory for the
+ * vmci_qp structure, no attempt is made to attach.  If an
+ * error occurs attaching, then the structure is freed.
+ */
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+		     struct vmci_handle *handle,
+		     u64 produce_qsize,
+		     u64 consume_qsize,
+		     u32 peer,
+		     u32 flags,
+		     u32 priv_flags)
+{
+	struct vmci_qp *my_qpair;
+	int retval;
+	struct vmci_handle src = VMCI_INVALID_HANDLE;
+	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
+	enum vmci_route route;
+	vmci_event_release_cb wakeup_cb;
+	void *client_data;
+
+	/*
+	 * Restrict the size of a queuepair.  The device already
+	 * enforces a limit on the total amount of memory that can be
+	 * allocated to queuepairs for a guest.  However, we try to
+	 * allocate this memory before we make the queuepair
+	 * allocation hypercall.  On Linux, we allocate each page
+	 * separately, which means rather than fail, the guest will
+	 * thrash while it tries to allocate, and will become
+	 * increasingly unresponsive to the point where it appears to
+	 * be hung.  So we place a limit on the size of an individual
+	 * queuepair here, and leave the device to enforce the
+	 * restriction on total queuepair memory.  (Note that this
+	 * doesn't prevent all cases; a user with only this much
+	 * physical memory could still get into trouble.)  The error
+	 * used by the device is NO_RESOURCES, so use that here too.
+	 */
+
+	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
+	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+		return VMCI_ERROR_NO_RESOURCES;
+
+	retval = vmci_route(&src, &dst, false, &route);
+	if (retval < VMCI_SUCCESS)
+		route = vmci_guest_code_active() ?
+		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
+
+	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+		pr_devel("NONBLOCK OR PINNED set\n");
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
+	if (!my_qpair)
+		return VMCI_ERROR_NO_MEM;
+
+	my_qpair->produce_q_size = produce_qsize;
+	my_qpair->consume_q_size = consume_qsize;
+	my_qpair->peer = peer;
+	my_qpair->flags = flags;
+	my_qpair->priv_flags = priv_flags;
+
+	wakeup_cb = NULL;
+	client_data = NULL;
+
+	if (VMCI_ROUTE_AS_HOST == route) {
+		my_qpair->guest_endpoint = false;
+		if (!(flags & VMCI_QPFLAG_LOCAL)) {
+			my_qpair->blocked = 0;
+			my_qpair->generation = 0;
+			init_waitqueue_head(&my_qpair->event);
+			wakeup_cb = qp_wakeup_cb;
+			client_data = (void *)my_qpair;
+		}
+	} else {
+		my_qpair->guest_endpoint = true;
+	}
+
+	retval = vmci_qp_alloc(handle,
+			       &my_qpair->produce_q,
+			       my_qpair->produce_q_size,
+			       &my_qpair->consume_q,
+			       my_qpair->consume_q_size,
+			       my_qpair->peer,
+			       my_qpair->flags,
+			       my_qpair->priv_flags,
+			       my_qpair->guest_endpoint,
+			       wakeup_cb, client_data);
+
+	if (retval < VMCI_SUCCESS) {
+		kfree(my_qpair);
+		return retval;
+	}
+
+	*qpair = my_qpair;
+	my_qpair->handle = *handle;
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
+
+/*
+ * vmci_qpair_detach() - Detaches the client from a queue pair.
+ * @qpair: Reference of a pointer to the qpair struct.
+ *
+ * This is the client interface for detaching from a VMCIQPair.
+ * Note that this routine will free the memory allocated for the
+ * vmci_qp structure too.
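+ *
+ * A minimal usage sketch (editorial, not part of this patch; error
+ * handling trimmed and "peer_cid" is a hypothetical peer context ID):
+ *
+ *	struct vmci_qp *qpair;
+ *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ *	int err;
+ *
+ *	err = vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
+ *			       0, VMCI_NO_PRIVILEGE_FLAGS);
+ *	if (err < VMCI_SUCCESS)
+ *		return err;
+ *	err = vmci_qpair_enqueue(qpair, "ping", 4, 0);
+ *	...
+ *	vmci_qpair_detach(&qpair);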
+ */
+int vmci_qpair_detach(struct vmci_qp **qpair)
+{
+	int result;
+	struct vmci_qp *old_qpair;
+
+	if (!qpair || !(*qpair))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	old_qpair = *qpair;
+	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
+
+	/*
+	 * The guest can fail to detach for a number of reasons, and
+	 * if it does so, it will cleanup the entry (if there is one).
+	 * The host can fail too, but it won't cleanup the entry
+	 * immediately, it will do that later when the context is
+	 * freed.  Either way, we need to release the qpair struct
+	 * here; there isn't much the caller can do, and we don't want
+	 * to leak.
+	 */
+
+	memset(old_qpair, 0, sizeof(*old_qpair));
+	old_qpair->handle = VMCI_INVALID_HANDLE;
+	old_qpair->peer = VMCI_INVALID_ID;
+	kfree(old_qpair);
+	*qpair = NULL;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_detach);
+
+/*
+ * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
+ * @qpair: Pointer to the queue pair struct.
+ * @producer_tail: Reference used for storing producer tail index.
+ * @consumer_head: Reference used for storing the consumer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the producer.
+ */
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+				   u64 *producer_tail,
+				   u64 *consumer_head)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	int result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
+					   producer_tail, consumer_head);
+	qp_unlock(qpair);
+
+	if (result == VMCI_SUCCESS &&
+	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
+	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
+		return VMCI_ERROR_INVALID_SIZE;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
+
+/*
+ * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
+ * @qpair: Pointer to the queue pair struct.
+ * @consumer_tail: Reference used for storing consumer tail index.
+ * @producer_head: Reference used for storing the producer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the consumer.
+ */
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+				   u64 *consumer_tail,
+				   u64 *producer_head)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	int result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
+					   consumer_tail, producer_head);
+	qp_unlock(qpair);
+
+	if (result == VMCI_SUCCESS &&
+	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
+	     (producer_head && *producer_head >= qpair->consume_q_size)))
+		return VMCI_ERROR_INVALID_SIZE;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
+
+/*
+ * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the producer, which is the common case.  Returns < 0 on error,
+ * otherwise the number of bytes that can be enqueued.
+ */
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	s64 result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		result = vmci_q_header_free_space(produce_q_header,
+						  consume_q_header,
+						  qpair->produce_q_size);
+	else
+		result = 0;
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
+
+/*
+ * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the consumer, which is not the common case.  Returns < 0 on error,
+ * otherwise the number of bytes that can be enqueued.
+ */
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	s64 result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		result = vmci_q_header_free_space(consume_q_header,
+						  produce_q_header,
+						  qpair->consume_q_size);
+	else
+		result = 0;
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
+
+/*
+ * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
+ * producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the producer, which is not the common case.  Returns < 0 on
+ * error, otherwise the number of bytes that can be read.
+ */
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	s64 result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		result = vmci_q_header_buf_ready(produce_q_header,
+						 consume_q_header,
+						 qpair->produce_q_size);
+	else
+		result = 0;
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
+
+/*
+ * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
+ * consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the consumer, which is the normal case.  Returns < 0 on
+ * error, otherwise the number of bytes that can be read.
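+ *
+ * Editorial sketch of the common pattern ("buf" and "qpair" are
+ * assumed to exist):
+ *
+ *	s64 ready = vmci_qpair_consume_buf_ready(qpair);
+ *	if (ready > 0)
+ *		vmci_qpair_dequeue(qpair, buf,
+ *				   min_t(size_t, ready, sizeof(buf)), 0);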
+ */ +s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) +{ + struct vmci_queue_header *produce_q_header; + struct vmci_queue_header *consume_q_header; + s64 result; + + if (!qpair) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + result = + qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); + if (result == VMCI_SUCCESS) + result = vmci_q_header_buf_ready(consume_q_header, + produce_q_header, + qpair->consume_q_size); + else + result = 0; + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); + +/* + * vmci_qpair_enqueue() - Throw data on the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer containing data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for enqueueing data into the queue. + * Returns number of bytes enqueued or < 0 on error. + */ +ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, + const void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_enqueue_locked(qpair->produce_q, + qpair->consume_q, + qpair->produce_q_size, + buf, buf_size, + qp_memcpy_to_queue); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); + +/* + * vmci_qpair_dequeue() - Get data from the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer for the data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for dequeueing data from the queue. + * Returns number of bytes dequeued or < 0 on error. + */ +ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, + void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + buf, buf_size, + qp_memcpy_from_queue, true); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); + +/* + * vmci_qpair_peek() - Peek at the data in the queue. + * @qpair: Pointer to the queue pair struct. + * @buf: Pointer to buffer for the data + * @buf_size: Length of buffer. + * @buf_type: Buffer type (Unused on Linux). + * + * This is the client interface for peeking into a queue. (I.e., + * copy data from the queue without updating the head pointer.) + * Returns number of bytes dequeued or < 0 on error. 
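+ *
+ * (Editorial note: this is implemented exactly like
+ * vmci_qpair_dequeue(), except that qp_dequeue_locked() is called
+ * with update_consumer set to false, so the consumer head is left
+ * untouched and the same data can be read again.)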
+ */ +ssize_t vmci_qpair_peek(struct vmci_qp *qpair, + void *buf, + size_t buf_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !buf) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + buf, buf_size, + qp_memcpy_from_queue, false); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_peek); + +/* + * vmci_qpair_enquev() - Throw data on the queue using iov. + * @qpair: Pointer to the queue pair struct. + * @iov: Pointer to buffer containing data + * @iov_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for enqueueing data into the queue. + * This function uses IO vectors to handle the work. Returns number + * of bytes enqueued or < 0 on error. + */ +ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, + void *iov, + size_t iov_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !iov) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_enqueue_locked(qpair->produce_q, + qpair->consume_q, + qpair->produce_q_size, + iov, iov_size, + qp_memcpy_to_queue_iov); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_enquev); + +/* + * vmci_qpair_dequev() - Get data from the queue using iov. + * @qpair: Pointer to the queue pair struct. + * @iov: Pointer to buffer for the data + * @iov_size: Length of buffer. + * @buf_type: Buffer type (Unused). + * + * This is the client interface for dequeueing data from the queue. + * This function uses IO vectors to handle the work. Returns number + * of bytes dequeued or < 0 on error. + */ +ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, + void *iov, + size_t iov_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !iov) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + iov, iov_size, + qp_memcpy_from_queue_iov, + true); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_dequev); + +/* + * vmci_qpair_peekv() - Peek at the data in the queue using iov. + * @qpair: Pointer to the queue pair struct. + * @iov: Pointer to buffer for the data + * @iov_size: Length of buffer. + * @buf_type: Buffer type (Unused on Linux). + * + * This is the client interface for peeking into a queue. (I.e., + * copy data from the queue without updating the head pointer.) + * This function uses IO vectors to handle the work. Returns number + * of bytes peeked or < 0 on error. 
+ */ +ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, + void *iov, + size_t iov_size, + int buf_type) +{ + ssize_t result; + + if (!qpair || !iov) + return VMCI_ERROR_INVALID_ARGS; + + qp_lock(qpair); + + do { + result = qp_dequeue_locked(qpair->produce_q, + qpair->consume_q, + qpair->consume_q_size, + iov, iov_size, + qp_memcpy_from_queue_iov, + false); + + if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && + !qp_wait_for_ready_queue(qpair)) + result = VMCI_ERROR_WOULD_BLOCK; + + } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); + + qp_unlock(qpair); + return result; +} +EXPORT_SYMBOL_GPL(vmci_qpair_peekv); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h new file mode 100644 index 00000000000..ed177f04ef2 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h @@ -0,0 +1,173 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#ifndef _VMCI_QUEUE_PAIR_H_ +#define _VMCI_QUEUE_PAIR_H_ + +#include <linux/vmw_vmci_defs.h> +#include <linux/types.h> + +#include "vmci_context.h" + +/* Callback needed for correctly waiting on events. */ +typedef int (*vmci_event_release_cb) (void *client_data); + +/* Guest device port I/O. */ +struct ppn_set { + u64 num_produce_pages; + u64 num_consume_pages; + u32 *produce_ppns; + u32 *consume_ppns; + bool initialized; +}; + +/* VMCIqueue_pairAllocInfo */ +struct vmci_qp_alloc_info { + struct vmci_handle handle; + u32 peer; + u32 flags; + u64 produce_size; + u64 consume_size; + u64 ppn_va; /* Start VA of queue pair PPNs. */ + u64 num_ppns; + s32 result; + u32 version; +}; + +/* VMCIqueue_pairSetVAInfo */ +struct vmci_qp_set_va_info { + struct vmci_handle handle; + u64 va; /* Start VA of queue pair PPNs. */ + u64 num_ppns; + u32 version; + s32 result; +}; + +/* + * For backwards compatibility, here is a version of the + * VMCIqueue_pairPageFileInfo before host support end-points was added. + * Note that the current version of that structure requires VMX to + * pass down the VA of the mapped file. Before host support was added + * there was nothing of the sort. So, when the driver sees the ioctl + * with a parameter that is the sizeof + * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version + * of VMX running can't attach to host end points because it doesn't + * provide the VA of the mapped files. + * + * The Linux driver doesn't get an indication of the size of the + * structure passed down from user space. So, to fix a long standing + * but unfiled bug, the _pad field has been renamed to version. + * Existing versions of VMX always initialize the PageFileInfo + * structure so that _pad, er, version is set to 0. + * + * A version value of 1 indicates that the size of the structure has + * been increased to include two UVA's: produce_uva and consume_uva. + * These UVA's are of the mmap()'d queue contents backing files. 
+ *
+ * In addition, if the VMX gets an error when sending down the
+ * VMCIqueue_pairPageFileInfo structure, it will try again with the
+ * _NoHostQP version of the file to see if an older VMCI kernel
+ * module is running.
+ */
+
+/* VMCIqueue_pairPageFileInfo */
+struct vmci_qp_page_file_info {
+	struct vmci_handle handle;
+	u64 produce_page_file;	/* User VA. */
+	u64 consume_page_file;	/* User VA. */
+	u64 produce_page_file_size;	/* Size of the file name array. */
+	u64 consume_page_file_size;	/* Size of the file name array. */
+	s32 result;
+	u32 version;	/* Was _pad. */
+	u64 produce_va;	/* User VA of the mapped file. */
+	u64 consume_va;	/* User VA of the mapped file. */
+};
+
+/* vmci queuepair detach info */
+struct vmci_qp_dtch_info {
+	struct vmci_handle handle;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * struct vmci_qp_page_store describes how the memory of a given queue pair
+ * is backed. When the queue pair is between the host and a guest, the
+ * page store consists of references to the guest pages. On vmkernel,
+ * this is a list of PPNs, and on hosted, it is a user VA where the
+ * queue pair is mapped into the VMX address space.
+ */
+struct vmci_qp_page_store {
+	/* Reference to pages backing the queue pair. */
+	u64 pages;
+	/* Length of pageList/virtual address range (in pages). */
+	u32 len;
+};
+
+/*
+ * This data type contains the information about a queue.
+ * There are two queues (hence, queue pairs) per transaction model between a
+ * pair of end points, A & B.  One queue is used by end point A to transmit
+ * commands and responses to B.  The other queue is used by B to transmit
+ * commands and responses.
+ *
+ * struct vmci_queue_kern_if is a per-OS defined Queue structure.  It contains
+ * either a direct pointer to the linear address of the buffer contents or a
+ * pointer to structures which help the OS locate those data pages.  See
+ * vmciKernelIf.c for each platform for its definition.
+ */
+struct vmci_queue {
+	struct vmci_queue_header *q_header;
+	struct vmci_queue_header *saved_header;
+	struct vmci_queue_kern_if *kernel_if;
+};
+
+/*
+ * Utility function that checks whether the fields of the page
+ * store contain valid values.
+ * Result:
+ * true if the page store is well-formed.  false otherwise.
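+ *
+ * (Editorial note: the minimum of two pages checked below presumably
+ * corresponds to one page per queue header, one for each of the two
+ * queues in a pair.)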
+ */ +static inline bool +VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store) +{ + return page_store->len >= 2; +} + +void vmci_qp_broker_exit(void); +int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer, + u32 flags, u32 priv_flags, + u64 produce_size, u64 consume_size, + struct vmci_qp_page_store *page_store, + struct vmci_ctx *context); +int vmci_qp_broker_set_page_store(struct vmci_handle handle, + u64 produce_uva, u64 consume_uva, + struct vmci_ctx *context); +int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context); + +void vmci_qp_guest_endpoints_exit(void); + +int vmci_qp_alloc(struct vmci_handle *handle, + struct vmci_queue **produce_q, u64 produce_size, + struct vmci_queue **consume_q, u64 consume_size, + u32 peer, u32 flags, u32 priv_flags, + bool guest_endpoint, vmci_event_release_cb wakeup_cb, + void *client_data); +int vmci_qp_broker_map(struct vmci_handle handle, + struct vmci_ctx *context, u64 guest_mem); +int vmci_qp_broker_unmap(struct vmci_handle handle, + struct vmci_ctx *context, u32 gid); + +#endif /* _VMCI_QUEUE_PAIR_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c new file mode 100644 index 00000000000..9a53a30de44 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -0,0 +1,227 @@ +/* + * VMware VMCI Driver + * + * Copyright (C) 2012 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include <linux/vmw_vmci_defs.h> +#include <linux/hash.h> +#include <linux/types.h> +#include <linux/rculist.h> + +#include "vmci_resource.h" +#include "vmci_driver.h" + + +#define VMCI_RESOURCE_HASH_BITS 7 +#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS) + +struct vmci_hash_table { + spinlock_t lock; + struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS]; +}; + +static struct vmci_hash_table vmci_resource_table = { + .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock), +}; + +static unsigned int vmci_resource_hash(struct vmci_handle handle) +{ + return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS); +} + +/* + * Gets a resource (if one exists) matching given handle from the hash table. + */ +static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle, + enum vmci_resource_type type) +{ + struct vmci_resource *r, *resource = NULL; + unsigned int idx = vmci_resource_hash(handle); + + rcu_read_lock(); + hlist_for_each_entry_rcu(r, + &vmci_resource_table.entries[idx], node) { + u32 cid = r->handle.context; + u32 rid = r->handle.resource; + + if (r->type == type && + rid == handle.resource && + (cid == handle.context || cid == VMCI_INVALID_ID)) { + resource = r; + break; + } + } + rcu_read_unlock(); + + return resource; +} + +/* + * Find an unused resource ID and return it. The first + * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from + * its value + 1. + * Returns VMCI resource id on success, VMCI_INVALID_ID on failure. 
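+ *
+ * (Editorial note: the static counter below is not separately
+ * locked; its caller in this file, vmci_resource_add(), already
+ * holds vmci_resource_table.lock.)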
+/*
+ * Find an unused resource ID and return it. The first
+ * VMCI_RESERVED_RESOURCE_ID_MAX IDs are reserved, so we start from
+ * that value + 1.
+ * Returns a VMCI resource ID on success, VMCI_INVALID_ID on failure.
+ */
+static u32 vmci_resource_find_id(u32 context_id,
+ enum vmci_resource_type resource_type)
+{
+ static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ u32 old_rid = resource_id;
+ u32 current_rid;
+
+ /*
+ * Generate a unique resource ID. Keep on trying until we wrap around
+ * in the RID space.
+ */
+ do {
+ struct vmci_handle handle;
+
+ current_rid = resource_id;
+ resource_id++;
+ if (unlikely(resource_id == VMCI_INVALID_ID)) {
+ /* Skip the reserved rids. */
+ resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ }
+
+ handle = vmci_make_handle(context_id, current_rid);
+ if (!vmci_resource_lookup(handle, resource_type))
+ return current_rid;
+ } while (resource_id != old_rid);
+
+ return VMCI_INVALID_ID;
+}
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle)
+{
+ unsigned int idx;
+ int result;
+
+ spin_lock(&vmci_resource_table.lock);
+
+ if (handle.resource == VMCI_INVALID_ID) {
+ handle.resource = vmci_resource_find_id(handle.context,
+ resource_type);
+ if (handle.resource == VMCI_INVALID_ID) {
+ result = VMCI_ERROR_NO_HANDLE;
+ goto out;
+ }
+ } else if (vmci_resource_lookup(handle, resource_type)) {
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ goto out;
+ }
+
+ resource->handle = handle;
+ resource->type = resource_type;
+ INIT_HLIST_NODE(&resource->node);
+ kref_init(&resource->kref);
+ init_completion(&resource->done);
+
+ idx = vmci_resource_hash(resource->handle);
+ hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
+
+ result = VMCI_SUCCESS;
+
+out:
+ spin_unlock(&vmci_resource_table.lock);
+ return result;
+}
+
+void vmci_resource_remove(struct vmci_resource *resource)
+{
+ struct vmci_handle handle = resource->handle;
+ unsigned int idx = vmci_resource_hash(handle);
+ struct vmci_resource *r;
+
+ /* Remove the resource from the hash table. */
+ spin_lock(&vmci_resource_table.lock);
+
+ hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
+ if (vmci_handle_is_equal(r->handle, resource->handle)) {
+ hlist_del_init_rcu(&r->node);
+ break;
+ }
+ }
+
+ spin_unlock(&vmci_resource_table.lock);
+ synchronize_rcu();
+
+ vmci_resource_put(resource);
+ wait_for_completion(&resource->done);
+}
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type)
+{
+ struct vmci_resource *r, *resource = NULL;
+
+ rcu_read_lock();
+
+ r = vmci_resource_lookup(resource_handle, resource_type);
+ if (r &&
+ (resource_type == r->type ||
+ resource_type == VMCI_RESOURCE_TYPE_ANY)) {
+ resource = vmci_resource_get(r);
+ }
+
+ rcu_read_unlock();
+
+ return resource;
+}
+
+/*
+ * Get a reference to the given resource.
+ */
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
+{
+ kref_get(&resource->kref);
+
+ return resource;
+}
+
+static void vmci_release_resource(struct kref *kref)
+{
+ struct vmci_resource *resource =
+ container_of(kref, struct vmci_resource, kref);
+
+ /* Verify the resource has been unlinked from the hash table. */
+ WARN_ON(!hlist_unhashed(&resource->node));
+
+ /* Signal that the container of this resource can now be destroyed. */
+ complete(&resource->done);
+}
+
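[Editor's note] Taken together, add/remove/put give callers a simple teardown contract: vmci_resource_remove() unlinks the entry, waits out RCU readers, drops the reference taken at add() time, and then blocks until the final vmci_resource_put() fires vmci_release_resource(). A sketch under that contract; struct my_object and example_destroy() are hypothetical.

#include <linux/slab.h>

/* Hypothetical object embedding a struct vmci_resource. */
struct my_object {
	struct vmci_resource resource;
	/* ... driver-specific state ... */
};

static void example_destroy(struct my_object *obj)
{
	/*
	 * Unlinks from the hash table, synchronizes RCU, drops the
	 * kref_init() reference, then waits on resource.done until
	 * vmci_release_resource() signals the last put.
	 */
	vmci_resource_remove(&obj->resource);

	kfree(obj);	/* no references can remain at this point */
}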
+/*
+ * The resource's release function is called when the last reference
+ * is dropped. At that point nobody else can increment the count again
+ * (the resource is gone from the hash table), so there's no need for
+ * locking here.
+ */
+int vmci_resource_put(struct vmci_resource *resource)
+{
+ /*
+ * We propagate the information back to the caller in case it wants
+ * to know whether the entry was freed.
+ */
+ return kref_put(&resource->kref, vmci_release_resource) ?
+ VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
+}
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
+{
+ return resource->handle;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h new file mode 100644 index 00000000000..9190cd298be --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_resource.h @@ -0,0 +1,59 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_RESOURCE_H_
+#define _VMCI_RESOURCE_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+
+enum vmci_resource_type {
+ VMCI_RESOURCE_TYPE_ANY,
+ VMCI_RESOURCE_TYPE_API,
+ VMCI_RESOURCE_TYPE_GROUP,
+ VMCI_RESOURCE_TYPE_DATAGRAM,
+ VMCI_RESOURCE_TYPE_DOORBELL,
+ VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+ VMCI_RESOURCE_TYPE_QPAIR_HOST
+};
+
+struct vmci_resource {
+ struct vmci_handle handle;
+ enum vmci_resource_type type;
+ struct hlist_node node;
+ struct kref kref;
+ struct completion done;
+};
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle);
+
+void vmci_resource_remove(struct vmci_resource *resource);
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type);
+
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
+int vmci_resource_put(struct vmci_resource *resource);
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
+
+#endif /* _VMCI_RESOURCE_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c new file mode 100644 index 00000000000..91090658b92 --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_route.c @@ -0,0 +1,226 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+/*
+ * Make a routing decision for the given source and destination handles.
+ * This will try to determine the route using the handles and the
+ * available devices. It will also set the source context if it is
+ * invalid.
+ */ +int vmci_route(struct vmci_handle *src, + const struct vmci_handle *dst, + bool from_guest, + enum vmci_route *route) +{ + bool has_host_device = vmci_host_code_active(); + bool has_guest_device = vmci_guest_code_active(); + + *route = VMCI_ROUTE_NONE; + + /* + * "from_guest" is only ever set to true by + * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent), + * which comes from the VMX, so we know it is coming from a + * guest. + * + * To avoid inconsistencies, test these once. We will test + * them again when we do the actual send to ensure that we do + * not touch a non-existent device. + */ + + /* Must have a valid destination context. */ + if (VMCI_INVALID_ID == dst->context) + return VMCI_ERROR_INVALID_ARGS; + + /* Anywhere to hypervisor. */ + if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { + + /* + * If this message already came from a guest then we + * cannot send it to the hypervisor. It must come + * from a local client. + */ + if (from_guest) + return VMCI_ERROR_DST_UNREACHABLE; + + /* + * We must be acting as a guest in order to send to + * the hypervisor. + */ + if (!has_guest_device) + return VMCI_ERROR_DEVICE_NOT_FOUND; + + /* And we cannot send if the source is the host context. */ + if (VMCI_HOST_CONTEXT_ID == src->context) + return VMCI_ERROR_INVALID_ARGS; + + /* + * If the client passed the ANON source handle then + * respect it (both context and resource are invalid). + * However, if they passed only an invalid context, + * then they probably mean ANY, in which case we + * should set the real context here before passing it + * down. + */ + if (VMCI_INVALID_ID == src->context && + VMCI_INVALID_ID != src->resource) + src->context = vmci_get_context_id(); + + /* Send from local client down to the hypervisor. */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; + } + + /* Anywhere to local client on host. */ + if (VMCI_HOST_CONTEXT_ID == dst->context) { + /* + * If it is not from a guest but we are acting as a + * guest, then we need to send it down to the host. + * Note that if we are also acting as a host then this + * will prevent us from sending from local client to + * local client, but we accept that restriction as a + * way to remove any ambiguity from the host context. + */ + if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { + /* + * If the hypervisor is the source, this is + * host local communication. The hypervisor + * may send vmci event datagrams to the host + * itself, but it will never send datagrams to + * an "outer host" through the guest device. + */ + + if (has_host_device) { + *route = VMCI_ROUTE_AS_HOST; + return VMCI_SUCCESS; + } else { + return VMCI_ERROR_DEVICE_NOT_FOUND; + } + } + + if (!from_guest && has_guest_device) { + /* If no source context then use the current. */ + if (VMCI_INVALID_ID == src->context) + src->context = vmci_get_context_id(); + + /* Send it from local client down to the host. */ + *route = VMCI_ROUTE_AS_GUEST; + return VMCI_SUCCESS; + } + + /* + * Otherwise we already received it from a guest and + * it is destined for a local client on this host, or + * it is from another local client on this host. We + * must be acting as a host to service it. + */ + if (!has_host_device) + return VMCI_ERROR_DEVICE_NOT_FOUND; + + if (VMCI_INVALID_ID == src->context) { + /* + * If it came from a guest then it must have a + * valid context. Otherwise we can use the + * host context. + */ + if (from_guest) + return VMCI_ERROR_INVALID_ARGS; + + src->context = VMCI_HOST_CONTEXT_ID; + } + + /* Route to local client. 
*/
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ }
+
+ /*
+ * If we are acting as a host then this might be destined for
+ * a guest.
+ */
+ if (has_host_device) {
+ /* It will have a context if it is meant for a guest. */
+ if (vmci_ctx_exists(dst->context)) {
+ if (VMCI_INVALID_ID == src->context) {
+ /*
+ * If it came from a guest then it
+ * must have a valid context.
+ * Otherwise we can use the host
+ * context.
+ */
+
+ if (from_guest)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src->context = VMCI_HOST_CONTEXT_ID;
+ } else if (VMCI_CONTEXT_IS_VM(src->context) &&
+ src->context != dst->context) {
+ /*
+ * VM to VM communication is not
+ * allowed. Since we catch all
+ * communication destined for the host
+ * above, this must be destined for a
+ * VM since there is a valid context.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+
+ /* Pass it up to the guest. */
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ } else if (!has_guest_device) {
+ /*
+ * The host is attempting to reach a CID
+ * without an active context, and we can't
+ * send it down, since we have no guest
+ * device.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+ }
+
+ /*
+ * We must be a guest trying to send to another guest, which means
+ * we need to send it down to the host. We do not filter out VM to
+ * VM communication here, since we want to be able to use the guest
+ * driver on older versions that do support VM to VM communication.
+ */
+ if (!has_guest_device) {
+ /*
+ * Ending up here means we have neither guest nor host
+ * device.
+ */
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+ }
+
+ /* If no source context then use the current context. */
+ if (VMCI_INVALID_ID == src->context)
+ src->context = vmci_get_context_id();
+
+ /*
+ * Send it from local client down to the host, which will
+ * route it to the other guest for us.
+ */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h new file mode 100644 index 00000000000..3b30e82419c --- /dev/null +++ b/drivers/misc/vmw_vmci/vmci_route.h @@ -0,0 +1,30 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_ROUTE_H_
+#define _VMCI_ROUTE_H_
+
+#include <linux/vmw_vmci_defs.h>
+
+enum vmci_route {
+ VMCI_ROUTE_NONE,
+ VMCI_ROUTE_AS_HOST,
+ VMCI_ROUTE_AS_GUEST,
+};
+
+int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
+ bool from_guest, enum vmci_route *route);
+
+#endif /* _VMCI_ROUTE_H_ */
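[Editor's note] The routing decision implemented in vmci_route.c above is consumed by the send paths elsewhere in this series. The sketch below shows how a datagram dispatcher might act on the chosen route; the two dispatch helpers and the dispatcher itself are placeholders, not functions from this patch.

#include <linux/vmw_vmci_defs.h>
#include "vmci_route.h"

/* Placeholder dispatch helpers, not part of the patch. */
static int example_deliver_as_host(struct vmci_datagram *dg);
static int example_send_as_guest(struct vmci_datagram *dg);

static int example_dg_dispatch(struct vmci_datagram *dg, bool from_guest)
{
	enum vmci_route route;
	int retval;

	/* May fill in dg->src.context if the caller left it invalid. */
	retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
	if (retval < VMCI_SUCCESS)
		return retval;

	switch (route) {
	case VMCI_ROUTE_AS_HOST:
		/* Deliver to a local context or up to a guest. */
		return example_deliver_as_host(dg);
	case VMCI_ROUTE_AS_GUEST:
		/* Hand down to the device; the host routes it on. */
		return example_send_as_guest(dg);
	case VMCI_ROUTE_NONE:
	default:
		return VMCI_ERROR_DST_UNREACHABLE;
	}
}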
