diff options
Diffstat (limited to 'drivers/misc/mei')
| -rw-r--r-- | drivers/misc/mei/Kconfig | 45 | ||||
| -rw-r--r-- | drivers/misc/mei/Makefile | 23 | ||||
| -rw-r--r-- | drivers/misc/mei/amthif.c | 745 | ||||
| -rw-r--r-- | drivers/misc/mei/bus.c | 548 | ||||
| -rw-r--r-- | drivers/misc/mei/client.c | 1067 | ||||
| -rw-r--r-- | drivers/misc/mei/client.h | 109 | ||||
| -rw-r--r-- | drivers/misc/mei/debugfs.c | 197 | ||||
| -rw-r--r-- | drivers/misc/mei/hbm.c | 879 | ||||
| -rw-r--r-- | drivers/misc/mei/hbm.h | 64 | ||||
| -rw-r--r-- | drivers/misc/mei/hw-me-regs.h | 185 | ||||
| -rw-r--r-- | drivers/misc/mei/hw-me.c | 885 | ||||
| -rw-r--r-- | drivers/misc/mei/hw-me.h | 56 | ||||
| -rw-r--r-- | drivers/misc/mei/hw-txe-regs.h | 294 | ||||
| -rw-r--r-- | drivers/misc/mei/hw-txe.c | 1190 | ||||
| -rw-r--r-- | drivers/misc/mei/hw-txe.h | 77 | ||||
| -rw-r--r-- | drivers/misc/mei/hw.h | 274 | ||||
| -rw-r--r-- | drivers/misc/mei/init.c | 395 | ||||
| -rw-r--r-- | drivers/misc/mei/interrupt.c | 659 | ||||
| -rw-r--r-- | drivers/misc/mei/main.c | 719 | ||||
| -rw-r--r-- | drivers/misc/mei/mei_dev.h | 752 | ||||
| -rw-r--r-- | drivers/misc/mei/nfc.c | 558 | ||||
| -rw-r--r-- | drivers/misc/mei/pci-me.c | 488 | ||||
| -rw-r--r-- | drivers/misc/mei/pci-txe.c | 436 | ||||
| -rw-r--r-- | drivers/misc/mei/wd.c | 401 | 
24 files changed, 11046 insertions, 0 deletions
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig new file mode 100644 index 00000000000..d23384dde73 --- /dev/null +++ b/drivers/misc/mei/Kconfig @@ -0,0 +1,45 @@ +config INTEL_MEI +	tristate "Intel Management Engine Interface" +	depends on X86 && PCI && WATCHDOG_CORE +	help +	  The Intel Management Engine (Intel ME) provides Manageability, +	  Security and Media services for system containing Intel chipsets. +	  if selected /dev/mei misc device will be created. + +	  For more information see +	  <http://software.intel.com/en-us/manageability/> + +config INTEL_MEI_ME +	tristate "ME Enabled Intel Chipsets" +	select INTEL_MEI +	depends on X86 && PCI && WATCHDOG_CORE +	help +	  MEI support for ME Enabled Intel chipsets. + +	  Supported Chipsets are: +	  7 Series Chipset Family +	  6 Series Chipset Family +	  5 Series Chipset Family +	  4 Series Chipset Family +	  Mobile 4 Series Chipset Family +	  ICH9 +	  82946GZ/GL +	  82G35 Express +	  82Q963/Q965 +	  82P965/G965 +	  Mobile PM965/GM965 +	  Mobile GME965/GLE960 +	  82Q35 Express +	  82G33/G31/P35/P31 Express +	  82Q33 Express +	  82X38/X48 Express + +config INTEL_MEI_TXE +	tristate "Intel Trusted Execution Environment with ME Interface" +	select INTEL_MEI +	depends on X86 && PCI && WATCHDOG_CORE +	help +	  MEI Support for Trusted Execution Environment device on Intel SoCs + +	  Supported SoCs: +	  Intel Bay Trail diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile new file mode 100644 index 00000000000..8ebc6cda137 --- /dev/null +++ b/drivers/misc/mei/Makefile @@ -0,0 +1,23 @@ +# +# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver +# Copyright (c) 2010-2014, Intel Corporation. 
+# +obj-$(CONFIG_INTEL_MEI) += mei.o +mei-objs := init.o +mei-objs += hbm.o +mei-objs += interrupt.o +mei-objs += client.o +mei-objs += main.o +mei-objs += amthif.o +mei-objs += wd.o +mei-objs += bus.o +mei-objs += nfc.o +mei-$(CONFIG_DEBUG_FS) += debugfs.o + +obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o +mei-me-objs := pci-me.o +mei-me-objs += hw-me.o + +obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o +mei-txe-objs := pci-txe.o +mei-txe-objs += hw-txe.o diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c new file mode 100644 index 00000000000..0d6234db00f --- /dev/null +++ b/drivers/misc/mei/amthif.c @@ -0,0 +1,745 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/aio.h> +#include <linux/pci.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/jiffies.h> +#include <linux/uaccess.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +const uuid_le mei_amthif_guid  = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, +					 0xac, 0xa8, 0x46, 0xe0, +					 0xff, 0x65, 0x81, 0x4c); + +/** + * mei_amthif_reset_params - initializes mei device iamthif + * + * @dev: the device structure + */ +void mei_amthif_reset_params(struct mei_device *dev) +{ +	/* reset iamthif parameters. */ +	dev->iamthif_current_cb = NULL; +	dev->iamthif_msg_buf_size = 0; +	dev->iamthif_msg_buf_index = 0; +	dev->iamthif_canceled = false; +	dev->iamthif_ioctl = false; +	dev->iamthif_state = MEI_IAMTHIF_IDLE; +	dev->iamthif_timer = 0; +	dev->iamthif_stall_timer = 0; +	dev->iamthif_open_count = 0; +} + +/** + * mei_amthif_host_init - mei initialization amthif client. 
+ * + * @dev: the device structure + * + */ +int mei_amthif_host_init(struct mei_device *dev) +{ +	struct mei_cl *cl = &dev->iamthif_cl; +	unsigned char *msg_buf; +	int ret, i; + +	dev->iamthif_state = MEI_IAMTHIF_IDLE; + +	mei_cl_init(cl, dev); + +	i = mei_me_cl_by_uuid(dev, &mei_amthif_guid); +	if (i < 0) { +		dev_info(&dev->pdev->dev, +			"amthif: failed to find the client %d\n", i); +		return -ENOTTY; +	} + +	cl->me_client_id = dev->me_clients[i].client_id; + +	/* Assign iamthif_mtu to the value received from ME  */ + +	dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length; +	dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n", +			dev->me_clients[i].props.max_msg_length); + +	kfree(dev->iamthif_msg_buf); +	dev->iamthif_msg_buf = NULL; + +	/* allocate storage for ME message buffer */ +	msg_buf = kcalloc(dev->iamthif_mtu, +			sizeof(unsigned char), GFP_KERNEL); +	if (!msg_buf) { +		dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n"); +		return -ENOMEM; +	} + +	dev->iamthif_msg_buf = msg_buf; + +	ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); + +	if (ret < 0) { +		dev_err(&dev->pdev->dev, +			"amthif: failed link client %d\n", ret); +		return ret; +	} + +	ret = mei_cl_connect(cl, NULL); + +	dev->iamthif_state = MEI_IAMTHIF_IDLE; + +	return ret; +} + +/** + * mei_amthif_find_read_list_entry - finds a amthilist entry for current file + * + * @dev: the device structure + * @file: pointer to file object + * + * returns   returned a list entry on success, NULL on failure. 
+ */ +struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, +						struct file *file) +{ +	struct mei_cl_cb *cb; + +	list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) { +		if (cb->cl && cb->cl == &dev->iamthif_cl && +			cb->file_object == file) +			return cb; +	} +	return NULL; +} + + +/** + * mei_amthif_read - read data from AMTHIF client + * + * @dev: the device structure + * @if_num:  minor number + * @file: pointer to file object + * @*ubuf: pointer to user data in user space + * @length: data length to read + * @offset: data read offset + * + * Locking: called under "dev->device_lock" lock + * + * returns + *  returned data length on success, + *  zero if no data to read, + *  negative on failure. + */ +int mei_amthif_read(struct mei_device *dev, struct file *file, +	       char __user *ubuf, size_t length, loff_t *offset) +{ +	int rets; +	int wait_ret; +	struct mei_cl_cb *cb = NULL; +	struct mei_cl *cl = file->private_data; +	unsigned long timeout; +	int i; + +	/* Only possible if we are in timeout */ +	if (!cl || cl != &dev->iamthif_cl) { +		dev_dbg(&dev->pdev->dev, "bad file ext.\n"); +		return -ETIME; +	} + +	i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); +	if (i < 0) { +		dev_dbg(&dev->pdev->dev, "amthif client not found.\n"); +		return -ENOTTY; +	} +	dev_dbg(&dev->pdev->dev, "checking amthif data\n"); +	cb = mei_amthif_find_read_list_entry(dev, file); + +	/* Check for if we can block or not*/ +	if (cb == NULL && file->f_flags & O_NONBLOCK) +		return -EAGAIN; + + +	dev_dbg(&dev->pdev->dev, "waiting for amthif data\n"); +	while (cb == NULL) { +		/* unlock the Mutex */ +		mutex_unlock(&dev->device_lock); + +		wait_ret = wait_event_interruptible(dev->iamthif_cl.wait, +			(cb = mei_amthif_find_read_list_entry(dev, file))); + +		/* Locking again the Mutex */ +		mutex_lock(&dev->device_lock); + +		if (wait_ret) +			return -ERESTARTSYS; + +		dev_dbg(&dev->pdev->dev, "woke up from sleep\n"); +	} + + +	
dev_dbg(&dev->pdev->dev, "Got amthif data\n"); +	dev->iamthif_timer = 0; + +	if (cb) { +		timeout = cb->read_time + +			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); +		dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n", +				timeout); + +		if  (time_after(jiffies, timeout)) { +			dev_dbg(&dev->pdev->dev, "amthif Time out\n"); +			/* 15 sec for the message has expired */ +			list_del(&cb->list); +			rets = -ETIME; +			goto free; +		} +	} +	/* if the whole message will fit remove it from the list */ +	if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) +		list_del(&cb->list); +	else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { +		/* end of the message has been reached */ +		list_del(&cb->list); +		rets = 0; +		goto free; +	} +		/* else means that not full buffer will be read and do not +		 * remove message from deletion list +		 */ + +	dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n", +	    cb->response_buffer.size); +	dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); + +	/* length is being truncated to PAGE_SIZE, however, +	 * the buf_idx may point beyond */ +	length = min_t(size_t, length, (cb->buf_idx - *offset)); + +	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { +		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); +		rets = -EFAULT; +	} else { +		rets = length; +		if ((*offset + length) < cb->buf_idx) { +			*offset += length; +			goto out; +		} +	} +free: +	dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n"); +	*offset = 0; +	mei_io_cb_free(cb); +out: +	return rets; +} + +/** + * mei_amthif_send_cmd - send amthif command to the ME + * + * @dev: the device structure + * @cb: mei call back struct + * + * returns 0 on success, <0 on failure. 
+ * + */ +static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) +{ +	struct mei_msg_hdr mei_hdr; +	int ret; + +	if (!dev || !cb) +		return -ENODEV; + +	dev_dbg(&dev->pdev->dev, "write data to amthif client.\n"); + +	dev->iamthif_state = MEI_IAMTHIF_WRITING; +	dev->iamthif_current_cb = cb; +	dev->iamthif_file_object = cb->file_object; +	dev->iamthif_canceled = false; +	dev->iamthif_ioctl = true; +	dev->iamthif_msg_buf_size = cb->request_buffer.size; +	memcpy(dev->iamthif_msg_buf, cb->request_buffer.data, +	       cb->request_buffer.size); + +	ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl); +	if (ret < 0) +		return ret; + +	if (ret && mei_hbuf_acquire(dev)) { +		ret = 0; +		if (cb->request_buffer.size > mei_hbuf_max_len(dev)) { +			mei_hdr.length = mei_hbuf_max_len(dev); +			mei_hdr.msg_complete = 0; +		} else { +			mei_hdr.length = cb->request_buffer.size; +			mei_hdr.msg_complete = 1; +		} + +		mei_hdr.host_addr = dev->iamthif_cl.host_client_id; +		mei_hdr.me_addr = dev->iamthif_cl.me_client_id; +		mei_hdr.reserved = 0; +		mei_hdr.internal = 0; +		dev->iamthif_msg_buf_index += mei_hdr.length; +		ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); +		if (ret) +			return ret; + +		if (mei_hdr.msg_complete) { +			if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl)) +				return -EIO; +			dev->iamthif_flow_control_pending = true; +			dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; +			dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n"); +			dev->iamthif_current_cb = cb; +			dev->iamthif_file_object = cb->file_object; +			list_add_tail(&cb->list, &dev->write_waiting_list.list); +		} else { +			dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n"); +			list_add_tail(&cb->list, &dev->write_list.list); +		} +	} else { +		list_add_tail(&cb->list, &dev->write_list.list); +	} +	return 0; +} + +/** + * mei_amthif_write - write amthif data to amthif client + * + * @dev: the device structure + * @cb: mei 
call back struct + * + * returns 0 on success, <0 on failure. + * + */ +int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) +{ +	int ret; + +	if (!dev || !cb) +		return -ENODEV; + +	ret = mei_io_cb_alloc_resp_buf(cb, dev->iamthif_mtu); +	if (ret) +		return ret; + +	cb->fop_type = MEI_FOP_WRITE; + +	if (!list_empty(&dev->amthif_cmd_list.list) || +	    dev->iamthif_state != MEI_IAMTHIF_IDLE) { +		dev_dbg(&dev->pdev->dev, +			"amthif state = %d\n", dev->iamthif_state); +		dev_dbg(&dev->pdev->dev, "AMTHIF: add cb to the wait list\n"); +		list_add_tail(&cb->list, &dev->amthif_cmd_list.list); +		return 0; +	} +	return mei_amthif_send_cmd(dev, cb); +} +/** + * mei_amthif_run_next_cmd + * + * @dev: the device structure + * + * returns 0 on success, <0 on failure. + */ +void mei_amthif_run_next_cmd(struct mei_device *dev) +{ +	struct mei_cl_cb *pos = NULL; +	struct mei_cl_cb *next = NULL; +	int status; + +	if (!dev) +		return; + +	dev->iamthif_msg_buf_size = 0; +	dev->iamthif_msg_buf_index = 0; +	dev->iamthif_canceled = false; +	dev->iamthif_ioctl = true; +	dev->iamthif_state = MEI_IAMTHIF_IDLE; +	dev->iamthif_timer = 0; +	dev->iamthif_file_object = NULL; + +	dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n"); + +	list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) { +		list_del(&pos->list); + +		if (pos->cl && pos->cl == &dev->iamthif_cl) { +			status = mei_amthif_send_cmd(dev, pos); +			if (status) { +				dev_dbg(&dev->pdev->dev, +					"amthif write failed status = %d\n", +						status); +				return; +			} +			break; +		} +	} +} + + +unsigned int mei_amthif_poll(struct mei_device *dev, +		struct file *file, poll_table *wait) +{ +	unsigned int mask = 0; + +	poll_wait(file, &dev->iamthif_cl.wait, wait); + +	mutex_lock(&dev->device_lock); +	if (!mei_cl_is_connected(&dev->iamthif_cl)) { + +		mask = POLLERR; + +	} else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && +		   dev->iamthif_file_object == file) { + +		mask |= (POLLIN | 
POLLRDNORM); +		dev_dbg(&dev->pdev->dev, "run next amthif cb\n"); +		mei_amthif_run_next_cmd(dev); +	} +	mutex_unlock(&dev->device_lock); + +	return mask; +} + + + +/** + * mei_amthif_irq_write - write iamthif command in irq thread context. + * + * @dev: the device structure. + * @cb_pos: callback block. + * @cl: private data of the file object. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, +			 struct mei_cl_cb *cmpl_list) +{ +	struct mei_device *dev = cl->dev; +	struct mei_msg_hdr mei_hdr; +	size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index; +	u32 msg_slots = mei_data2slots(len); +	int slots; +	int rets; + +	rets = mei_cl_flow_ctrl_creds(cl); +	if (rets < 0) +		return rets; + +	if (rets == 0) { +		cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); +		return 0; +	} + +	mei_hdr.host_addr = cl->host_client_id; +	mei_hdr.me_addr = cl->me_client_id; +	mei_hdr.reserved = 0; +	mei_hdr.internal = 0; + +	slots = mei_hbuf_empty_slots(dev); + +	if (slots >= msg_slots) { +		mei_hdr.length = len; +		mei_hdr.msg_complete = 1; +	/* Split the message only if we can write the whole host buffer */ +	} else if (slots == dev->hbuf_depth) { +		msg_slots = slots; +		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); +		mei_hdr.length = len; +		mei_hdr.msg_complete = 0; +	} else { +		/* wait for next time the host buffer is empty */ +		return 0; +	} + +	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT,  MEI_HDR_PRM(&mei_hdr)); + +	rets = mei_write_message(dev, &mei_hdr, +			dev->iamthif_msg_buf + dev->iamthif_msg_buf_index); +	if (rets) { +		dev->iamthif_state = MEI_IAMTHIF_IDLE; +		cl->status = rets; +		list_del(&cb->list); +		return rets; +	} + +	if (mei_cl_flow_ctrl_reduce(cl)) +		return -EIO; + +	dev->iamthif_msg_buf_index += mei_hdr.length; +	cl->status = 0; + +	if (mei_hdr.msg_complete) { +		dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; +		
dev->iamthif_flow_control_pending = true; + +		/* save iamthif cb sent to amthif client */ +		cb->buf_idx = dev->iamthif_msg_buf_index; +		dev->iamthif_current_cb = cb; + +		list_move_tail(&cb->list, &dev->write_waiting_list.list); +	} + + +	return 0; +} + +/** + * mei_amthif_irq_read_message - read routine after ISR to + *			handle the read amthif message + * + * @dev: the device structure + * @mei_hdr: header of amthif message + * @complete_list: An instance of our list structure + * + * returns 0 on success, <0 on failure. + */ +int mei_amthif_irq_read_msg(struct mei_device *dev, +			    struct mei_msg_hdr *mei_hdr, +			    struct mei_cl_cb *complete_list) +{ +	struct mei_cl_cb *cb; +	unsigned char *buffer; + +	BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id); +	BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING); + +	buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index; +	BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length); + +	mei_read_slots(dev, buffer, mei_hdr->length); + +	dev->iamthif_msg_buf_index += mei_hdr->length; + +	if (!mei_hdr->msg_complete) +		return 0; + +	dev_dbg(&dev->pdev->dev, "amthif_message_buffer_index =%d\n", +			mei_hdr->length); + +	dev_dbg(&dev->pdev->dev, "completed amthif read.\n "); +	if (!dev->iamthif_current_cb) +		return -ENODEV; + +	cb = dev->iamthif_current_cb; +	dev->iamthif_current_cb = NULL; + +	if (!cb->cl) +		return -ENODEV; + +	dev->iamthif_stall_timer = 0; +	cb->buf_idx = dev->iamthif_msg_buf_index; +	cb->read_time = jiffies; +	if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) { +		/* found the iamthif cb */ +		dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n "); +		dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n "); +		list_add_tail(&cb->list, &complete_list->list); +	} +	return 0; +} + +/** + * mei_amthif_irq_read - prepares to read amthif data. + * + * @dev: the device structure. + * @slots: free slots. + * + * returns 0, OK; otherwise, error. 
+ */ +int mei_amthif_irq_read(struct mei_device *dev, s32 *slots) +{ +	u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); + +	if (*slots < msg_slots) +		return -EMSGSIZE; + +	*slots -= msg_slots; + +	if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) { +		dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n"); +		return -EIO; +	} + +	dev_dbg(&dev->pdev->dev, "iamthif flow control success\n"); +	dev->iamthif_state = MEI_IAMTHIF_READING; +	dev->iamthif_flow_control_pending = false; +	dev->iamthif_msg_buf_index = 0; +	dev->iamthif_msg_buf_size = 0; +	dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER; +	dev->hbuf_is_ready = mei_hbuf_is_ready(dev); +	return 0; +} + +/** + * mei_amthif_complete - complete amthif callback. + * + * @dev: the device structure. + * @cb_pos: callback block. + */ +void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) +{ +	if (dev->iamthif_canceled != 1) { +		dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; +		dev->iamthif_stall_timer = 0; +		memcpy(cb->response_buffer.data, +				dev->iamthif_msg_buf, +				dev->iamthif_msg_buf_index); +		list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); +		dev_dbg(&dev->pdev->dev, "amthif read completed\n"); +		dev->iamthif_timer = jiffies; +		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", +				dev->iamthif_timer); +	} else { +		mei_amthif_run_next_cmd(dev); +	} + +	dev_dbg(&dev->pdev->dev, "completing amthif call back.\n"); +	wake_up_interruptible(&dev->iamthif_cl.wait); +} + +/** + * mei_clear_list - removes all callbacks associated with file + *		from mei_cb_list + * + * @dev: device structure. 
+ * @file: file structure + * @mei_cb_list: callbacks list + * + * mei_clear_list is called to clear resources associated with file + * when application calls close function or Ctrl-C was pressed + * + * returns true if callback removed from the list, false otherwise + */ +static bool mei_clear_list(struct mei_device *dev, +		const struct file *file, struct list_head *mei_cb_list) +{ +	struct mei_cl_cb *cb_pos = NULL; +	struct mei_cl_cb *cb_next = NULL; +	bool removed = false; + +	/* list all list member */ +	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) { +		/* check if list member associated with a file */ +		if (file == cb_pos->file_object) { +			/* remove member from the list */ +			list_del(&cb_pos->list); +			/* check if cb equal to current iamthif cb */ +			if (dev->iamthif_current_cb == cb_pos) { +				dev->iamthif_current_cb = NULL; +				/* send flow control to iamthif client */ +				mei_hbm_cl_flow_control_req(dev, +							&dev->iamthif_cl); +			} +			/* free all allocated buffers */ +			mei_io_cb_free(cb_pos); +			cb_pos = NULL; +			removed = true; +		} +	} +	return removed; +} + +/** + * mei_clear_lists - removes all callbacks associated with file + * + * @dev: device structure + * @file: file structure + * + * mei_clear_lists is called to clear resources associated with file + * when application calls close function or Ctrl-C was pressed + * + * returns true if callback removed from the list, false otherwise + */ +static bool mei_clear_lists(struct mei_device *dev, struct file *file) +{ +	bool removed = false; + +	/* remove callbacks associated with a file */ +	mei_clear_list(dev, file, &dev->amthif_cmd_list.list); +	if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list)) +		removed = true; + +	mei_clear_list(dev, file, &dev->ctrl_rd_list.list); + +	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.list)) +		removed = true; + +	if (mei_clear_list(dev, file, &dev->write_waiting_list.list)) +		removed = true; + +	if 
(mei_clear_list(dev, file, &dev->write_list.list)) +		removed = true; + +	/* check if iamthif_current_cb not NULL */ +	if (dev->iamthif_current_cb && !removed) { +		/* check file and iamthif current cb association */ +		if (dev->iamthif_current_cb->file_object == file) { +			/* remove cb */ +			mei_io_cb_free(dev->iamthif_current_cb); +			dev->iamthif_current_cb = NULL; +			removed = true; +		} +	} +	return removed; +} + +/** +* mei_amthif_release - the release function +* +*  @dev: device structure +*  @file: pointer to file structure +* +*  returns 0 on success, <0 on error +*/ +int mei_amthif_release(struct mei_device *dev, struct file *file) +{ +	if (dev->iamthif_open_count > 0) +		dev->iamthif_open_count--; + +	if (dev->iamthif_file_object == file && +	    dev->iamthif_state != MEI_IAMTHIF_IDLE) { + +		dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n", +		    dev->iamthif_state); +		dev->iamthif_canceled = true; +		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) { +			dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n"); +			mei_amthif_run_next_cmd(dev); +		} +	} + +	if (mei_clear_lists(dev, file)) +		dev->iamthif_state = MEI_IAMTHIF_IDLE; + +	return 0; +} diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c new file mode 100644 index 00000000000..0e993ef28b9 --- /dev/null +++ b/drivers/misc/mei/bus.c @@ -0,0 +1,548 @@ +/* + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2012-2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/mei_cl_bus.h> + +#include "mei_dev.h" +#include "client.h" + +#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) +#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) + +static int mei_cl_device_match(struct device *dev, struct device_driver *drv) +{ +	struct mei_cl_device *device = to_mei_cl_device(dev); +	struct mei_cl_driver *driver = to_mei_cl_driver(drv); +	const struct mei_cl_device_id *id; + +	if (!device) +		return 0; + +	if (!driver || !driver->id_table) +		return 0; + +	id = driver->id_table; + +	while (id->name[0]) { +		if (!strncmp(dev_name(dev), id->name, sizeof(id->name))) +			return 1; + +		id++; +	} + +	return 0; +} + +static int mei_cl_device_probe(struct device *dev) +{ +	struct mei_cl_device *device = to_mei_cl_device(dev); +	struct mei_cl_driver *driver; +	struct mei_cl_device_id id; + +	if (!device) +		return 0; + +	driver = to_mei_cl_driver(dev->driver); +	if (!driver || !driver->probe) +		return -ENODEV; + +	dev_dbg(dev, "Device probe\n"); + +	strncpy(id.name, dev_name(dev), sizeof(id.name)); + +	return driver->probe(device, &id); +} + +static int mei_cl_device_remove(struct device *dev) +{ +	struct mei_cl_device *device = to_mei_cl_device(dev); +	struct mei_cl_driver *driver; + +	if (!device || !dev->driver) +		return 0; + +	if (device->event_cb) { +		device->event_cb = NULL; +		cancel_work_sync(&device->event_work); +	} + +	driver = to_mei_cl_driver(dev->driver); +	if (!driver->remove) { +		dev->driver = NULL; + +		return 0; +	} + +	return driver->remove(device); +} + +static ssize_t modalias_show(struct device *dev, struct device_attribute *a, +			     char *buf) +{ +	int len; + +	len = snprintf(buf, PAGE_SIZE, 
"mei:%s\n", dev_name(dev)); + +	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *mei_cl_dev_attrs[] = { +	&dev_attr_modalias.attr, +	NULL, +}; +ATTRIBUTE_GROUPS(mei_cl_dev); + +static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env) +{ +	if (add_uevent_var(env, "MODALIAS=mei:%s", dev_name(dev))) +		return -ENOMEM; + +	return 0; +} + +static struct bus_type mei_cl_bus_type = { +	.name		= "mei", +	.dev_groups	= mei_cl_dev_groups, +	.match		= mei_cl_device_match, +	.probe		= mei_cl_device_probe, +	.remove		= mei_cl_device_remove, +	.uevent		= mei_cl_uevent, +}; + +static void mei_cl_dev_release(struct device *dev) +{ +	kfree(to_mei_cl_device(dev)); +} + +static struct device_type mei_cl_device_type = { +	.release	= mei_cl_dev_release, +}; + +static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev, +						uuid_le uuid) +{ +	struct mei_cl *cl; + +	list_for_each_entry(cl, &dev->device_list, device_link) { +		if (!uuid_le_cmp(uuid, cl->device_uuid)) +			return cl; +	} + +	return NULL; +} +struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, +					uuid_le uuid, char *name, +					struct mei_cl_ops *ops) +{ +	struct mei_cl_device *device; +	struct mei_cl *cl; +	int status; + +	cl = mei_bus_find_mei_cl_by_uuid(dev, uuid); +	if (cl == NULL) +		return NULL; + +	device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL); +	if (!device) +		return NULL; + +	device->cl = cl; +	device->ops = ops; + +	device->dev.parent = &dev->pdev->dev; +	device->dev.bus = &mei_cl_bus_type; +	device->dev.type = &mei_cl_device_type; + +	dev_set_name(&device->dev, "%s", name); + +	status = device_register(&device->dev); +	if (status) { +		dev_err(&dev->pdev->dev, "Failed to register MEI device\n"); +		kfree(device); +		return NULL; +	} + +	cl->device = device; + +	dev_dbg(&device->dev, "client %s registered\n", name); + +	return device; +} +EXPORT_SYMBOL_GPL(mei_cl_add_device); + +void 
mei_cl_remove_device(struct mei_cl_device *device) +{ +	device_unregister(&device->dev); +} +EXPORT_SYMBOL_GPL(mei_cl_remove_device); + +int __mei_cl_driver_register(struct mei_cl_driver *driver, struct module *owner) +{ +	int err; + +	driver->driver.name = driver->name; +	driver->driver.owner = owner; +	driver->driver.bus = &mei_cl_bus_type; + +	err = driver_register(&driver->driver); +	if (err) +		return err; + +	pr_debug("mei: driver [%s] registered\n", driver->driver.name); + +	return 0; +} +EXPORT_SYMBOL_GPL(__mei_cl_driver_register); + +void mei_cl_driver_unregister(struct mei_cl_driver *driver) +{ +	driver_unregister(&driver->driver); + +	pr_debug("mei: driver [%s] unregistered\n", driver->driver.name); +} +EXPORT_SYMBOL_GPL(mei_cl_driver_unregister); + +static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, +			bool blocking) +{ +	struct mei_device *dev; +	struct mei_cl_cb *cb; +	int id; +	int rets; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	if (cl->state != MEI_FILE_CONNECTED) +		return -ENODEV; + +	/* Check if we have an ME client device */ +	id = mei_me_cl_by_id(dev, cl->me_client_id); +	if (id < 0) +		return id; + +	if (length > dev->me_clients[id].props.max_msg_length) +		return -EFBIG; + +	cb = mei_io_cb_init(cl, NULL); +	if (!cb) +		return -ENOMEM; + +	rets = mei_io_cb_alloc_req_buf(cb, length); +	if (rets < 0) { +		mei_io_cb_free(cb); +		return rets; +	} + +	memcpy(cb->request_buffer.data, buf, length); + +	mutex_lock(&dev->device_lock); + +	rets = mei_cl_write(cl, cb, blocking); + +	mutex_unlock(&dev->device_lock); +	if (rets < 0) +		mei_io_cb_free(cb); + +	return rets; +} + +int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) +{ +	struct mei_device *dev; +	struct mei_cl_cb *cb; +	size_t r_length; +	int err; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	mutex_lock(&dev->device_lock); + +	if (!cl->read_cb) { +		err = mei_cl_read_start(cl, length); +		if (err < 0) { 
+			mutex_unlock(&dev->device_lock); +			return err; +		} +	} + +	if (cl->reading_state != MEI_READ_COMPLETE && +	    !waitqueue_active(&cl->rx_wait)) { + +		mutex_unlock(&dev->device_lock); + +		if (wait_event_interruptible(cl->rx_wait, +				cl->reading_state == MEI_READ_COMPLETE  || +				mei_cl_is_transitioning(cl))) { + +			if (signal_pending(current)) +				return -EINTR; +			return -ERESTARTSYS; +		} + +		mutex_lock(&dev->device_lock); +	} + +	cb = cl->read_cb; + +	if (cl->reading_state != MEI_READ_COMPLETE) { +		r_length = 0; +		goto out; +	} + +	r_length = min_t(size_t, length, cb->buf_idx); + +	memcpy(buf, cb->response_buffer.data, r_length); + +	mei_io_cb_free(cb); +	cl->reading_state = MEI_IDLE; +	cl->read_cb = NULL; + +out: +	mutex_unlock(&dev->device_lock); + +	return r_length; +} + +inline int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length) +{ +	return ___mei_cl_send(cl, buf, length, 0); +} + +inline int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length) +{ +	return ___mei_cl_send(cl, buf, length, 1); +} + +int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length) +{ +	struct mei_cl *cl = device->cl; + +	if (cl == NULL) +		return -ENODEV; + +	if (device->ops && device->ops->send) +		return device->ops->send(device, buf, length); + +	return __mei_cl_send(cl, buf, length); +} +EXPORT_SYMBOL_GPL(mei_cl_send); + +int mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length) +{ +	struct mei_cl *cl =  device->cl; + +	if (cl == NULL) +		return -ENODEV; + +	if (device->ops && device->ops->recv) +		return device->ops->recv(device, buf, length); + +	return __mei_cl_recv(cl, buf, length); +} +EXPORT_SYMBOL_GPL(mei_cl_recv); + +static void mei_bus_event_work(struct work_struct *work) +{ +	struct mei_cl_device *device; + +	device = container_of(work, struct mei_cl_device, event_work); + +	if (device->event_cb) +		device->event_cb(device, device->events, device->event_context); + +	device->events = 0; + +	/* Prepare for the 
next read */ +	mei_cl_read_start(device->cl, 0); +} + +int mei_cl_register_event_cb(struct mei_cl_device *device, +			  mei_cl_event_cb_t event_cb, void *context) +{ +	if (device->event_cb) +		return -EALREADY; + +	device->events = 0; +	device->event_cb = event_cb; +	device->event_context = context; +	INIT_WORK(&device->event_work, mei_bus_event_work); + +	mei_cl_read_start(device->cl, 0); + +	return 0; +} +EXPORT_SYMBOL_GPL(mei_cl_register_event_cb); + +void *mei_cl_get_drvdata(const struct mei_cl_device *device) +{ +	return dev_get_drvdata(&device->dev); +} +EXPORT_SYMBOL_GPL(mei_cl_get_drvdata); + +void mei_cl_set_drvdata(struct mei_cl_device *device, void *data) +{ +	dev_set_drvdata(&device->dev, data); +} +EXPORT_SYMBOL_GPL(mei_cl_set_drvdata); + +int mei_cl_enable_device(struct mei_cl_device *device) +{ +	int err; +	struct mei_device *dev; +	struct mei_cl *cl = device->cl; + +	if (cl == NULL) +		return -ENODEV; + +	dev = cl->dev; + +	mutex_lock(&dev->device_lock); + +	err = mei_cl_connect(cl, NULL); +	if (err < 0) { +		mutex_unlock(&dev->device_lock); +		dev_err(&dev->pdev->dev, "Could not connect to the ME client"); + +		return err; +	} + +	mutex_unlock(&dev->device_lock); + +	if (device->event_cb && !cl->read_cb) +		mei_cl_read_start(device->cl, 0); + +	if (!device->ops || !device->ops->enable) +		return 0; + +	return device->ops->enable(device); +} +EXPORT_SYMBOL_GPL(mei_cl_enable_device); + +int mei_cl_disable_device(struct mei_cl_device *device) +{ +	int err; +	struct mei_device *dev; +	struct mei_cl *cl = device->cl; + +	if (cl == NULL) +		return -ENODEV; + +	dev = cl->dev; + +	mutex_lock(&dev->device_lock); + +	if (cl->state != MEI_FILE_CONNECTED) { +		mutex_unlock(&dev->device_lock); +		dev_err(&dev->pdev->dev, "Already disconnected"); + +		return 0; +	} + +	cl->state = MEI_FILE_DISCONNECTING; + +	err = mei_cl_disconnect(cl); +	if (err < 0) { +		mutex_unlock(&dev->device_lock); +		dev_err(&dev->pdev->dev, +			"Could not disconnect from the ME 
client"); + +		return err; +	} + +	/* Flush queues and remove any pending read */ +	mei_cl_flush_queues(cl); + +	if (cl->read_cb) { +		struct mei_cl_cb *cb = NULL; + +		cb = mei_cl_find_read_cb(cl); +		/* Remove entry from read list */ +		if (cb) +			list_del(&cb->list); + +		cb = cl->read_cb; +		cl->read_cb = NULL; + +		if (cb) { +			mei_io_cb_free(cb); +			cb = NULL; +		} +	} + +	device->event_cb = NULL; + +	mutex_unlock(&dev->device_lock); + +	if (!device->ops || !device->ops->disable) +		return 0; + +	return device->ops->disable(device); +} +EXPORT_SYMBOL_GPL(mei_cl_disable_device); + +void mei_cl_bus_rx_event(struct mei_cl *cl) +{ +	struct mei_cl_device *device = cl->device; + +	if (!device || !device->event_cb) +		return; + +	set_bit(MEI_CL_EVENT_RX, &device->events); + +	schedule_work(&device->event_work); +} + +void mei_cl_bus_remove_devices(struct mei_device *dev) +{ +	struct mei_cl *cl, *next; + +	mutex_lock(&dev->device_lock); +	list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { +		if (cl->device) +			mei_cl_remove_device(cl->device); + +		list_del(&cl->device_link); +		mei_cl_unlink(cl); +		kfree(cl); +	} +	mutex_unlock(&dev->device_lock); +} + +int __init mei_cl_bus_init(void) +{ +	return bus_register(&mei_cl_bus_type); +} + +void __exit mei_cl_bus_exit(void) +{ +	bus_unregister(&mei_cl_bus_type); +} diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c new file mode 100644 index 00000000000..59d20c599b1 --- /dev/null +++ b/drivers/misc/mei/client.c @@ -0,0 +1,1067 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * mei_me_cl_by_uuid - locate index of me client
+ *
+ * @dev: mei device
+ * @uuid: me client uuid
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns me client index or -ENOENT if not found
+ */
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
+{
+	int i;
+
+	for (i = 0; i < dev->me_clients_num; ++i)
+		if (uuid_le_cmp(*uuid,
+				dev->me_clients[i].props.protocol_name) == 0)
+			return i;
+
+	return -ENOENT;
+}
+
+
+/**
+ * mei_me_cl_by_id - return index to me_clients for client_id
+ *
+ * @dev: the device structure
+ * @client_id: me client id
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns index on success, -ENOENT on failure.
+ */
+
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
+{
+	int i;
+
+	for (i = 0; i < dev->me_clients_num; i++)
+		if (dev->me_clients[i].client_id == client_id)
+			return i;
+
+	return -ENOENT;
+}
+
+
+/**
+ * mei_cl_cmp_id - tells if the clients are the same
+ *
+ * @cl1: host client 1
+ * @cl2: host client 2
+ *
+ * returns true  - if the clients has same host and me ids
+ *         false - otherwise
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+				const struct mei_cl *cl2)
+{
+	return cl1 && cl2 &&
+		(cl1->host_client_id == cl2->host_client_id) &&
+		(cl1->me_client_id == cl2->me_client_id);
+}
+
+/**
+ * __mei_io_list_flush - removes cbs belonging to cl.
+ * + * @list:  an instance of our list structure + * @cl:    host client, can be NULL for flushing the whole list + * @free:  whether to free the cbs + */ +static void __mei_io_list_flush(struct mei_cl_cb *list, +				struct mei_cl *cl, bool free) +{ +	struct mei_cl_cb *cb; +	struct mei_cl_cb *next; + +	/* enable removing everything if no cl is specified */ +	list_for_each_entry_safe(cb, next, &list->list, list) { +		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) { +			list_del(&cb->list); +			if (free) +				mei_io_cb_free(cb); +		} +	} +} + +/** + * mei_io_list_flush - removes list entry belonging to cl. + * + * @list:  An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +{ +	__mei_io_list_flush(list, cl, false); +} + + +/** + * mei_io_list_free - removes cb belonging to cl and free them + * + * @list:  An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) +{ +	__mei_io_list_flush(list, cl, true); +} + +/** + * mei_io_cb_free - free mei_cb_private related memory + * + * @cb: mei callback struct + */ +void mei_io_cb_free(struct mei_cl_cb *cb) +{ +	if (cb == NULL) +		return; + +	kfree(cb->request_buffer.data); +	kfree(cb->response_buffer.data); +	kfree(cb); +} + +/** + * mei_io_cb_init - allocate and initialize io callback + * + * @cl - mei client + * @fp: pointer to file structure + * + * returns mei_cl_cb pointer or NULL; + */ +struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) +{ +	struct mei_cl_cb *cb; + +	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL); +	if (!cb) +		return NULL; + +	mei_io_list_init(cb); + +	cb->file_object = fp; +	cb->cl = cl; +	cb->buf_idx = 0; +	return cb; +} + +/** + * mei_io_cb_alloc_req_buf - allocate request buffer + * + * @cb: io callback structure + * @length: size of the buffer + * + * returns 0 on success + *         -EINVAL if cb is 
NULL + *         -ENOMEM if allocation failed + */ +int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) +{ +	if (!cb) +		return -EINVAL; + +	if (length == 0) +		return 0; + +	cb->request_buffer.data = kmalloc(length, GFP_KERNEL); +	if (!cb->request_buffer.data) +		return -ENOMEM; +	cb->request_buffer.size = length; +	return 0; +} +/** + * mei_io_cb_alloc_resp_buf - allocate response buffer + * + * @cb: io callback structure + * @length: size of the buffer + * + * returns 0 on success + *         -EINVAL if cb is NULL + *         -ENOMEM if allocation failed + */ +int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) +{ +	if (!cb) +		return -EINVAL; + +	if (length == 0) +		return 0; + +	cb->response_buffer.data = kmalloc(length, GFP_KERNEL); +	if (!cb->response_buffer.data) +		return -ENOMEM; +	cb->response_buffer.size = length; +	return 0; +} + + + +/** + * mei_cl_flush_queues - flushes queue lists belonging to cl. + * + * @cl: host client + */ +int mei_cl_flush_queues(struct mei_cl *cl) +{ +	struct mei_device *dev; + +	if (WARN_ON(!cl || !cl->dev)) +		return -EINVAL; + +	dev = cl->dev; + +	cl_dbg(dev, cl, "remove list entry belonging to cl\n"); +	mei_io_list_flush(&cl->dev->read_list, cl); +	mei_io_list_free(&cl->dev->write_list, cl); +	mei_io_list_free(&cl->dev->write_waiting_list, cl); +	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); +	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); +	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); +	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); +	return 0; +} + + +/** + * mei_cl_init - initializes cl. 
+ * + * @cl: host client to be initialized + * @dev: mei device + */ +void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) +{ +	memset(cl, 0, sizeof(struct mei_cl)); +	init_waitqueue_head(&cl->wait); +	init_waitqueue_head(&cl->rx_wait); +	init_waitqueue_head(&cl->tx_wait); +	INIT_LIST_HEAD(&cl->link); +	INIT_LIST_HEAD(&cl->device_link); +	cl->reading_state = MEI_IDLE; +	cl->writing_state = MEI_IDLE; +	cl->dev = dev; +} + +/** + * mei_cl_allocate - allocates cl  structure and sets it up. + * + * @dev: mei device + * returns  The allocated file or NULL on failure + */ +struct mei_cl *mei_cl_allocate(struct mei_device *dev) +{ +	struct mei_cl *cl; + +	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL); +	if (!cl) +		return NULL; + +	mei_cl_init(cl, dev); + +	return cl; +} + +/** + * mei_cl_find_read_cb - find this cl's callback in the read list + * + * @cl: host client + * + * returns cb on success, NULL on error + */ +struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) +{ +	struct mei_device *dev = cl->dev; +	struct mei_cl_cb *cb; + +	list_for_each_entry(cb, &dev->read_list.list, list) +		if (mei_cl_cmp_id(cl, cb->cl)) +			return cb; +	return NULL; +} + +/** mei_cl_link: allocate host id in the host map + * + * @cl - host client + * @id - fixed host id or -1 for generic one + * + * returns 0 on success + *	-EINVAL on incorrect values + *	-ENONET if client not found + */ +int mei_cl_link(struct mei_cl *cl, int id) +{ +	struct mei_device *dev; +	long open_handle_count; + +	if (WARN_ON(!cl || !cl->dev)) +		return -EINVAL; + +	dev = cl->dev; + +	/* If Id is not assigned get one*/ +	if (id == MEI_HOST_CLIENT_ID_ANY) +		id = find_first_zero_bit(dev->host_clients_map, +					MEI_CLIENTS_MAX); + +	if (id >= MEI_CLIENTS_MAX) { +		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX); +		return -EMFILE; +	} + +	open_handle_count = dev->open_handle_count + dev->iamthif_open_count; +	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { +		dev_err(&dev->pdev->dev, 
"open_handle_count exceeded %d", +			MEI_MAX_OPEN_HANDLE_COUNT); +		return -EMFILE; +	} + +	dev->open_handle_count++; + +	cl->host_client_id = id; +	list_add_tail(&cl->link, &dev->file_list); + +	set_bit(id, dev->host_clients_map); + +	cl->state = MEI_FILE_INITIALIZING; + +	cl_dbg(dev, cl, "link cl\n"); +	return 0; +} + +/** + * mei_cl_unlink - remove me_cl from the list + * + * @cl: host client + */ +int mei_cl_unlink(struct mei_cl *cl) +{ +	struct mei_device *dev; + +	/* don't shout on error exit path */ +	if (!cl) +		return 0; + +	/* wd and amthif might not be initialized */ +	if (!cl->dev) +		return 0; + +	dev = cl->dev; + +	cl_dbg(dev, cl, "unlink client"); + +	if (dev->open_handle_count > 0) +		dev->open_handle_count--; + +	/* never clear the 0 bit */ +	if (cl->host_client_id) +		clear_bit(cl->host_client_id, dev->host_clients_map); + +	list_del_init(&cl->link); + +	cl->state = MEI_FILE_INITIALIZING; + +	return 0; +} + + +void mei_host_client_init(struct work_struct *work) +{ +	struct mei_device *dev = container_of(work, +					      struct mei_device, init_work); +	struct mei_client_properties *client_props; +	int i; + +	mutex_lock(&dev->device_lock); + +	for (i = 0; i < dev->me_clients_num; i++) { +		client_props = &dev->me_clients[i].props; + +		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid)) +			mei_amthif_host_init(dev); +		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) +			mei_wd_host_init(dev); +		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid)) +			mei_nfc_host_init(dev); + +	} + +	dev->dev_state = MEI_DEV_ENABLED; +	dev->reset_count = 0; + +	mutex_unlock(&dev->device_lock); + +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n"); +	pm_runtime_autosuspend(&dev->pdev->dev); +} + +/** + * mei_hbuf_acquire: try to acquire host buffer + * + * @dev: the device structure + * returns true if host buffer was acquired + */ +bool mei_hbuf_acquire(struct mei_device *dev) 
+{ +	if (mei_pg_state(dev) == MEI_PG_ON || +	    dev->pg_event == MEI_PG_EVENT_WAIT) { +		dev_dbg(&dev->pdev->dev, "device is in pg\n"); +		return false; +	} + +	if (!dev->hbuf_is_ready) { +		dev_dbg(&dev->pdev->dev, "hbuf is not ready\n"); +		return false; +	} + +	dev->hbuf_is_ready = false; + +	return true; +} + +/** + * mei_cl_disconnect - disconnect host client from the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. + */ +int mei_cl_disconnect(struct mei_cl *cl) +{ +	struct mei_device *dev; +	struct mei_cl_cb *cb; +	int rets, err; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	cl_dbg(dev, cl, "disconnecting"); + +	if (cl->state != MEI_FILE_DISCONNECTING) +		return 0; + +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets; +	} + +	cb = mei_io_cb_init(cl, NULL); +	if (!cb) { +		rets = -ENOMEM; +		goto free; +	} + +	cb->fop_type = MEI_FOP_CLOSE; +	if (mei_hbuf_acquire(dev)) { +		if (mei_hbm_cl_disconnect_req(dev, cl)) { +			rets = -ENODEV; +			cl_err(dev, cl, "failed to disconnect.\n"); +			goto free; +		} +		mdelay(10); /* Wait for hardware disconnection ready */ +		list_add_tail(&cb->list, &dev->ctrl_rd_list.list); +	} else { +		cl_dbg(dev, cl, "add disconnect cb to control write list\n"); +		list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + +	} +	mutex_unlock(&dev->device_lock); + +	err = wait_event_timeout(dev->wait_recvd_msg, +			MEI_FILE_DISCONNECTED == cl->state, +			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + +	mutex_lock(&dev->device_lock); +	if (MEI_FILE_DISCONNECTED == cl->state) { +		rets = 0; +		cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); +	} else { +		rets = -ENODEV; +		if (MEI_FILE_DISCONNECTED != cl->state) +			cl_err(dev, cl, "wrong status client disconnect.\n"); + +		if 
(err) +			cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err); + +		cl_err(dev, cl, "failed to disconnect from FW client.\n"); +	} + +	mei_io_list_flush(&dev->ctrl_rd_list, cl); +	mei_io_list_flush(&dev->ctrl_wr_list, cl); +free: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); + +	mei_io_cb_free(cb); +	return rets; +} + + +/** + * mei_cl_is_other_connecting - checks if other + *    client with the same me client id is connecting + * + * @cl: private data of the file object + * + * returns true if other client is connected, false - otherwise. + */ +bool mei_cl_is_other_connecting(struct mei_cl *cl) +{ +	struct mei_device *dev; +	struct mei_cl *ocl; /* the other client */ + +	if (WARN_ON(!cl || !cl->dev)) +		return false; + +	dev = cl->dev; + +	list_for_each_entry(ocl, &dev->file_list, link) { +		if (ocl->state == MEI_FILE_CONNECTING && +		    ocl != cl && +		    cl->me_client_id == ocl->me_client_id) +			return true; + +	} + +	return false; +} + +/** + * mei_cl_connect - connect host client to the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. 
+ */ +int mei_cl_connect(struct mei_cl *cl, struct file *file) +{ +	struct mei_device *dev; +	struct mei_cl_cb *cb; +	int rets; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets; +	} + +	cb = mei_io_cb_init(cl, file); +	if (!cb) { +		rets = -ENOMEM; +		goto out; +	} + +	cb->fop_type = MEI_FOP_CONNECT; + +	/* run hbuf acquire last so we don't have to undo */ +	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { +		cl->state = MEI_FILE_CONNECTING; +		if (mei_hbm_cl_connect_req(dev, cl)) { +			rets = -ENODEV; +			goto out; +		} +		cl->timer_count = MEI_CONNECT_TIMEOUT; +		list_add_tail(&cb->list, &dev->ctrl_rd_list.list); +	} else { +		list_add_tail(&cb->list, &dev->ctrl_wr_list.list); +	} + +	mutex_unlock(&dev->device_lock); +	wait_event_timeout(dev->wait_recvd_msg, +			(cl->state == MEI_FILE_CONNECTED || +			 cl->state == MEI_FILE_DISCONNECTED), +			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); +	mutex_lock(&dev->device_lock); + +	if (cl->state != MEI_FILE_CONNECTED) { +		/* something went really wrong */ +		if (!cl->status) +			cl->status = -EFAULT; + +		mei_io_list_flush(&dev->ctrl_rd_list, cl); +		mei_io_list_flush(&dev->ctrl_wr_list, cl); +	} + +	rets = cl->status; + +out: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); + +	mei_io_cb_free(cb); +	return rets; +} + +/** + * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. + * + * @cl: private data of the file object + * + * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. 
+ *	-ENOENT if mei_cl is not present + *	-EINVAL if single_recv_buf == 0 + */ +int mei_cl_flow_ctrl_creds(struct mei_cl *cl) +{ +	struct mei_device *dev; +	struct mei_me_client *me_cl; +	int id; + +	if (WARN_ON(!cl || !cl->dev)) +		return -EINVAL; + +	dev = cl->dev; + +	if (!dev->me_clients_num) +		return 0; + +	if (cl->mei_flow_ctrl_creds > 0) +		return 1; + +	id = mei_me_cl_by_id(dev, cl->me_client_id); +	if (id < 0) { +		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); +		return id; +	} + +	me_cl = &dev->me_clients[id]; +	if (me_cl->mei_flow_ctrl_creds) { +		if (WARN_ON(me_cl->props.single_recv_buf == 0)) +			return -EINVAL; +		return 1; +	} +	return 0; +} + +/** + * mei_cl_flow_ctrl_reduce - reduces flow_control. + * + * @cl: private data of the file object + * + * @returns + *	0 on success + *	-ENOENT when me client is not found + *	-EINVAL when ctrl credits are <= 0 + */ +int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) +{ +	struct mei_device *dev; +	struct mei_me_client *me_cl; +	int id; + +	if (WARN_ON(!cl || !cl->dev)) +		return -EINVAL; + +	dev = cl->dev; + +	id = mei_me_cl_by_id(dev, cl->me_client_id); +	if (id < 0) { +		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); +		return id; +	} + +	me_cl = &dev->me_clients[id]; +	if (me_cl->props.single_recv_buf != 0) { +		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) +			return -EINVAL; +		me_cl->mei_flow_ctrl_creds--; +	} else { +		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) +			return -EINVAL; +		cl->mei_flow_ctrl_creds--; +	} +	return 0; +} + +/** + * mei_cl_read_start - the start read client message function. + * + * @cl: host client + * + * returns 0 on success, <0 on failure. 
+ */ +int mei_cl_read_start(struct mei_cl *cl, size_t length) +{ +	struct mei_device *dev; +	struct mei_cl_cb *cb; +	int rets; +	int i; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	if (!mei_cl_is_connected(cl)) +		return -ENODEV; + +	if (cl->read_cb) { +		cl_dbg(dev, cl, "read is pending.\n"); +		return -EBUSY; +	} +	i = mei_me_cl_by_id(dev, cl->me_client_id); +	if (i < 0) { +		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); +		return  -ENOTTY; +	} + +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets; +	} + +	cb = mei_io_cb_init(cl, NULL); +	if (!cb) { +		rets = -ENOMEM; +		goto out; +	} + +	/* always allocate at least client max message */ +	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); +	rets = mei_io_cb_alloc_resp_buf(cb, length); +	if (rets) +		goto out; + +	cb->fop_type = MEI_FOP_READ; +	if (mei_hbuf_acquire(dev)) { +		rets = mei_hbm_cl_flow_control_req(dev, cl); +		if (rets < 0) +			goto out; + +		list_add_tail(&cb->list, &dev->read_list.list); +	} else { +		list_add_tail(&cb->list, &dev->ctrl_wr_list.list); +	} + +	cl->read_cb = cb; + +out: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); + +	if (rets) +		mei_io_cb_free(cb); + +	return rets; +} + +/** + * mei_cl_irq_write - write a message to device + *	from the interrupt thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise error. 
+ */
+int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
+		     struct mei_cl_cb *cmpl_list)
+{
+	struct mei_device *dev;
+	struct mei_msg_data *buf;
+	struct mei_msg_hdr mei_hdr;
+	size_t len;
+	u32 msg_slots;
+	int slots;
+	int rets;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	buf = &cb->request_buffer;
+
+	rets = mei_cl_flow_ctrl_creds(cl);
+	if (rets < 0)
+		return rets;
+
+	if (rets == 0) {
+		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+		return 0;
+	}
+
+	slots = mei_hbuf_empty_slots(dev);
+	len = buf->size - cb->buf_idx;
+	msg_slots = mei_data2slots(len);
+
+	mei_hdr.host_addr = cl->host_client_id;
+	mei_hdr.me_addr = cl->me_client_id;
+	mei_hdr.reserved = 0;
+	mei_hdr.internal = cb->internal;
+
+	if (slots >= msg_slots) {
+		mei_hdr.length = len;
+		mei_hdr.msg_complete = 1;
+	/* Split the message only if we can write the whole host buffer */
+	} else if (slots == dev->hbuf_depth) {
+		msg_slots = slots;
+		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+		mei_hdr.length = len;
+		mei_hdr.msg_complete = 0;
+	} else {
+		/* wait for next time the host buffer is empty */
+		return 0;
+	}
+
+	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
+			cb->request_buffer.size, cb->buf_idx);
+
+	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
+	if (rets) {
+		cl->status = rets;
+		list_move_tail(&cb->list, &cmpl_list->list);
+		return rets;
+	}
+
+	cl->status = 0;
+	cl->writing_state = MEI_WRITING;
+	cb->buf_idx += mei_hdr.length;
+
+	if (mei_hdr.msg_complete) {
+		if (mei_cl_flow_ctrl_reduce(cl))
+			return -EIO;
+		list_move_tail(&cb->list, &dev->write_waiting_list.list);
+	}
+
+	return 0;
+}
+
+/**
+ * mei_cl_write - submit a write cb to mei device
+ *	assumes device_lock is locked
+ *
+ * @cl: host client
+ * @cb: write callback with filled data
+ *
+ * returns number of bytes sent on success, <0 on failure.
+ */ +int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) +{ +	struct mei_device *dev; +	struct mei_msg_data *buf; +	struct mei_msg_hdr mei_hdr; +	int rets; + + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	if (WARN_ON(!cb)) +		return -EINVAL; + +	dev = cl->dev; + + +	buf = &cb->request_buffer; + +	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size); + +	rets = pm_runtime_get(&dev->pdev->dev); +	if (rets < 0 && rets != -EINPROGRESS) { +		pm_runtime_put_noidle(&dev->pdev->dev); +		cl_err(dev, cl, "rpm: get failed %d\n", rets); +		return rets; +	} + +	cb->fop_type = MEI_FOP_WRITE; +	cb->buf_idx = 0; +	cl->writing_state = MEI_IDLE; + +	mei_hdr.host_addr = cl->host_client_id; +	mei_hdr.me_addr = cl->me_client_id; +	mei_hdr.reserved = 0; +	mei_hdr.msg_complete = 0; +	mei_hdr.internal = cb->internal; + +	rets = mei_cl_flow_ctrl_creds(cl); +	if (rets < 0) +		goto err; + +	if (rets == 0) { +		cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); +		rets = buf->size; +		goto out; +	} +	if (!mei_hbuf_acquire(dev)) { +		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); +		rets = buf->size; +		goto out; +	} + +	/* Check for a maximum length */ +	if (buf->size > mei_hbuf_max_len(dev)) { +		mei_hdr.length = mei_hbuf_max_len(dev); +		mei_hdr.msg_complete = 0; +	} else { +		mei_hdr.length = buf->size; +		mei_hdr.msg_complete = 1; +	} + +	rets = mei_write_message(dev, &mei_hdr, buf->data); +	if (rets) +		goto err; + +	cl->writing_state = MEI_WRITING; +	cb->buf_idx = mei_hdr.length; + +out: +	if (mei_hdr.msg_complete) { +		rets = mei_cl_flow_ctrl_reduce(cl); +		if (rets < 0) +			goto err; + +		list_add_tail(&cb->list, &dev->write_waiting_list.list); +	} else { +		list_add_tail(&cb->list, &dev->write_list.list); +	} + + +	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { + +		mutex_unlock(&dev->device_lock); +		rets = wait_event_interruptible(cl->tx_wait, +				cl->writing_state == MEI_WRITE_COMPLETE); +		
mutex_lock(&dev->device_lock); +		/* wait_event_interruptible returns -ERESTARTSYS */ +		if (rets) { +			if (signal_pending(current)) +				rets = -EINTR; +			goto err; +		} +	} + +	rets = buf->size; +err: +	cl_dbg(dev, cl, "rpm: autosuspend\n"); +	pm_runtime_mark_last_busy(&dev->pdev->dev); +	pm_runtime_put_autosuspend(&dev->pdev->dev); + +	return rets; +} + + +/** + * mei_cl_complete - processes completed operation for a client + * + * @cl: private data of the file object. + * @cb: callback block. + */ +void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) +{ +	if (cb->fop_type == MEI_FOP_WRITE) { +		mei_io_cb_free(cb); +		cb = NULL; +		cl->writing_state = MEI_WRITE_COMPLETE; +		if (waitqueue_active(&cl->tx_wait)) +			wake_up_interruptible(&cl->tx_wait); + +	} else if (cb->fop_type == MEI_FOP_READ && +			MEI_READING == cl->reading_state) { +		cl->reading_state = MEI_READ_COMPLETE; +		if (waitqueue_active(&cl->rx_wait)) +			wake_up_interruptible(&cl->rx_wait); +		else +			mei_cl_bus_rx_event(cl); + +	} +} + + +/** + * mei_cl_all_disconnect - disconnect forcefully all connected clients + * + * @dev - mei device + */ + +void mei_cl_all_disconnect(struct mei_device *dev) +{ +	struct mei_cl *cl; + +	list_for_each_entry(cl, &dev->file_list, link) { +		cl->state = MEI_FILE_DISCONNECTED; +		cl->mei_flow_ctrl_creds = 0; +		cl->timer_count = 0; +	} +} + + +/** + * mei_cl_all_wakeup  - wake up all readers and writers they can be interrupted + * + * @dev  - mei device + */ +void mei_cl_all_wakeup(struct mei_device *dev) +{ +	struct mei_cl *cl; +	list_for_each_entry(cl, &dev->file_list, link) { +		if (waitqueue_active(&cl->rx_wait)) { +			cl_dbg(dev, cl, "Waking up reading client!\n"); +			wake_up_interruptible(&cl->rx_wait); +		} +		if (waitqueue_active(&cl->tx_wait)) { +			cl_dbg(dev, cl, "Waking up writing client!\n"); +			wake_up_interruptible(&cl->tx_wait); +		} +	} +} + +/** + * mei_cl_all_write_clear - clear all pending writes + + * @dev - mei device + */ +void 
mei_cl_all_write_clear(struct mei_device *dev) +{ +	mei_io_list_free(&dev->write_list, NULL); +	mei_io_list_free(&dev->write_waiting_list, NULL); +} + + diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h new file mode 100644 index 00000000000..96d5de0389f --- /dev/null +++ b/drivers/misc/mei/client.h @@ -0,0 +1,109 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_CLIENT_H_ +#define _MEI_CLIENT_H_ + +#include <linux/types.h> +#include <linux/watchdog.h> +#include <linux/poll.h> +#include <linux/mei.h> + +#include "mei_dev.h" + +int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid); +int mei_me_cl_by_id(struct mei_device *dev, u8 client_id); + +/* + * MEI IO Functions + */ +struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp); +void mei_io_cb_free(struct mei_cl_cb *priv_cb); +int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length); +int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length); + + +/** + * mei_io_list_init - Sets up a queue list. 
+ * + * @list: An instance cl callback structure + */ +static inline void mei_io_list_init(struct mei_cl_cb *list) +{ +	INIT_LIST_HEAD(&list->list); +} +/* + * MEI Host Client Functions + */ + +struct mei_cl *mei_cl_allocate(struct mei_device *dev); +void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); + + +int mei_cl_link(struct mei_cl *cl, int id); +int mei_cl_unlink(struct mei_cl *cl); + +int mei_cl_flush_queues(struct mei_cl *cl); +struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl); + + +int mei_cl_flow_ctrl_creds(struct mei_cl *cl); + +int mei_cl_flow_ctrl_reduce(struct mei_cl *cl); +/* + *  MEI input output function prototype + */ +static inline bool mei_cl_is_connected(struct mei_cl *cl) +{ +	return  cl->dev && +		cl->dev->dev_state == MEI_DEV_ENABLED && +		cl->state == MEI_FILE_CONNECTED; +} +static inline bool mei_cl_is_transitioning(struct mei_cl *cl) +{ +	return  MEI_FILE_INITIALIZING == cl->state || +		MEI_FILE_DISCONNECTED == cl->state || +		MEI_FILE_DISCONNECTING == cl->state; +} + +bool mei_cl_is_other_connecting(struct mei_cl *cl); +int mei_cl_disconnect(struct mei_cl *cl); +int mei_cl_connect(struct mei_cl *cl, struct file *file); +int mei_cl_read_start(struct mei_cl *cl, size_t length); +int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, +		     struct mei_cl_cb *cmpl_list); + +void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); + +void mei_host_client_init(struct work_struct *work); + + + +void mei_cl_all_disconnect(struct mei_device *dev); +void mei_cl_all_wakeup(struct mei_device *dev); +void mei_cl_all_write_clear(struct mei_device *dev); + +#define MEI_CL_FMT "cl:host=%02d me=%02d " +#define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id + +#define cl_dbg(dev, cl, format, arg...) \ +	dev_dbg(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#define cl_err(dev, cl, format, arg...) 
\ +	dev_err(&(dev)->pdev->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) + +#endif /* _MEI_CLIENT_H_ */ diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c new file mode 100644 index 00000000000..ced5b777c70 --- /dev/null +++ b/drivers/misc/mei/debugfs.c @@ -0,0 +1,197 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2012-2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/pci.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hw.h" + +static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, +					size_t cnt, loff_t *ppos) +{ +	struct mei_device *dev = fp->private_data; +	struct mei_me_client *cl; +	const size_t bufsz = 1024; +	char *buf = kzalloc(bufsz, GFP_KERNEL); +	int i; +	int pos = 0; +	int ret; + +	if  (!buf) +		return -ENOMEM; + +	pos += scnprintf(buf + pos, bufsz - pos, +			"  |id|addr|         UUID                       |con|msg len|\n"); + +	mutex_lock(&dev->device_lock); + +	/*  if the driver is not enabled the list won't be consistent */ +	if (dev->dev_state != MEI_DEV_ENABLED) +		goto out; + +	for (i = 0; i < dev->me_clients_num; i++) { +		cl = &dev->me_clients[i]; + +		/* skip me clients that cannot be connected */ +		if (cl->props.max_number_of_connections == 0) +			continue; + +		pos += scnprintf(buf + pos, bufsz - pos, +			"%2d|%2d|%4d|%pUl|%3d|%7d|\n", +			i, cl->client_id, +			
cl->props.fixed_address,
+			&cl->props.protocol_name,
+			cl->props.max_number_of_connections,
+			cl->props.max_msg_length);
+	}
+out:
+	mutex_unlock(&dev->device_lock);
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_meclients = {
+	.open = simple_open,
+	.read = mei_dbgfs_read_meclients,
+	.llseek = generic_file_llseek,
+};
+
+/*
+ * mei_dbgfs_read_active - dump the connected host clients (dev->file_list),
+ * one row per client: index, me/host addresses, state, rd/wr state
+ */
+static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct mei_device *dev = fp->private_data;
+	struct mei_cl *cl;
+	const size_t bufsz = 1024;
+	char *buf;
+	int i = 0;
+	int pos = 0;
+	int ret;
+
+	if (!dev)
+		return -ENODEV;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if  (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"  |me|host|state|rd|wr|\n");
+
+	mutex_lock(&dev->device_lock);
+
+	/* if the driver is not enabled the list won't be consistent */
+	if (dev->dev_state != MEI_DEV_ENABLED)
+		goto out;
+
+	list_for_each_entry(cl, &dev->file_list, link) {
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+			"%2d|%2d|%4d|%5d|%2d|%2d|\n",
+			i, cl->me_client_id, cl->host_client_id, cl->state,
+			cl->reading_state, cl->writing_state);
+		i++;
+	}
+out:
+	mutex_unlock(&dev->device_lock);
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_active = {
+	.open = simple_open,
+	.read = mei_dbgfs_read_active,
+	.llseek = generic_file_llseek,
+};
+
+/*
+ * mei_dbgfs_read_devstate - print the current device state string
+ */
+static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct mei_device *dev = fp->private_data;
+	const size_t bufsz = 1024;
+	char *buf = kzalloc(bufsz, GFP_KERNEL);
+	int pos = 0;
+	int ret;
+
+	if  (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+			mei_dev_state_str(dev->dev_state));
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+	kfree(buf); +	return ret; +} +static const struct file_operations mei_dbgfs_fops_devstate = { +	.open = simple_open, +	.read = mei_dbgfs_read_devstate, +	.llseek = generic_file_llseek, +}; + +/** + * mei_dbgfs_deregister - Remove the debugfs files and directories + * @mei - pointer to mei device private data + */ +void mei_dbgfs_deregister(struct mei_device *dev) +{ +	if (!dev->dbgfs_dir) +		return; +	debugfs_remove_recursive(dev->dbgfs_dir); +	dev->dbgfs_dir = NULL; +} + +/** + * Add the debugfs files + * + */ +int mei_dbgfs_register(struct mei_device *dev, const char *name) +{ +	struct dentry *dir, *f; +	dir = debugfs_create_dir(name, NULL); +	if (!dir) +		return -ENOMEM; + +	f = debugfs_create_file("meclients", S_IRUSR, dir, +				dev, &mei_dbgfs_fops_meclients); +	if (!f) { +		dev_err(&dev->pdev->dev, "meclients: registration failed\n"); +		goto err; +	} +	f = debugfs_create_file("active", S_IRUSR, dir, +				dev, &mei_dbgfs_fops_active); +	if (!f) { +		dev_err(&dev->pdev->dev, "meclients: registration failed\n"); +		goto err; +	} +	f = debugfs_create_file("devstate", S_IRUSR, dir, +				dev, &mei_dbgfs_fops_devstate); +	if (!f) { +		dev_err(&dev->pdev->dev, "devstate: registration failed\n"); +		goto err; +	} +	dev->dbgfs_dir = dir; +	return 0; +err: +	mei_dbgfs_deregister(dev); +	return -ENODEV; +} + diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c new file mode 100644 index 00000000000..804106209d7 --- /dev/null +++ b/drivers/misc/mei/hbm.c @@ -0,0 +1,879 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + +#include <linux/export.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/mei.h> +#include <linux/pm_runtime.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) +{ +#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status +	switch (status) { +	MEI_CL_CS(SUCCESS); +	MEI_CL_CS(NOT_FOUND); +	MEI_CL_CS(ALREADY_STARTED); +	MEI_CL_CS(OUT_OF_RESOURCES); +	MEI_CL_CS(MESSAGE_SMALL); +	default: return "unknown"; +	} +#undef MEI_CL_CCS +} + +/** + * mei_cl_conn_status_to_errno - convert client connect response + * status to error code + * + * @status: client connect response status + * + * returns corresponding error code + */ +static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) +{ +	switch (status) { +	case MEI_CL_CONN_SUCCESS:          return 0; +	case MEI_CL_CONN_NOT_FOUND:        return -ENOTTY; +	case MEI_CL_CONN_ALREADY_STARTED:  return -EBUSY; +	case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY; +	case MEI_CL_CONN_MESSAGE_SMALL:    return -EINVAL; +	default:                           return -EINVAL; +	} +} + +/** + * mei_hbm_idle - set hbm to idle state + * + * @dev: the device structure + */ +void mei_hbm_idle(struct mei_device *dev) +{ +	dev->init_clients_timer = 0; +	dev->hbm_state = MEI_HBM_IDLE; +} + +/** + * mei_hbm_reset - reset hbm counters and book keeping data structurs + * + * @dev: the device structure + */ +void mei_hbm_reset(struct mei_device *dev) +{ +	dev->me_clients_num = 0; +	dev->me_client_presentation_num = 0; +	dev->me_client_index = 0; + +	kfree(dev->me_clients); +	dev->me_clients = NULL; + +	mei_hbm_idle(dev); +} + +/** + * 
mei_hbm_me_cl_allocate - allocates storage for me clients + * + * @dev: the device structure + * + * returns 0 on success -ENOMEM on allocation failure + */ +static int mei_hbm_me_cl_allocate(struct mei_device *dev) +{ +	struct mei_me_client *clients; +	int b; + +	mei_hbm_reset(dev); + +	/* count how many ME clients we have */ +	for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) +		dev->me_clients_num++; + +	if (dev->me_clients_num == 0) +		return 0; + +	dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n", +		dev->me_clients_num * sizeof(struct mei_me_client)); +	/* allocate storage for ME clients representation */ +	clients = kcalloc(dev->me_clients_num, +			sizeof(struct mei_me_client), GFP_KERNEL); +	if (!clients) { +		dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); +		return -ENOMEM; +	} +	dev->me_clients = clients; +	return 0; +} + +/** + * mei_hbm_cl_hdr - construct client hbm header + * + * @cl: - client + * @hbm_cmd: host bus message command + * @buf: buffer for cl header + * @len: buffer length + */ +static inline +void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) +{ +	struct mei_hbm_cl_cmd *cmd = buf; + +	memset(cmd, 0, len); + +	cmd->hbm_cmd = hbm_cmd; +	cmd->host_addr = cl->host_client_id; +	cmd->me_addr = cl->me_client_id; +} + +/** + * mei_hbm_cl_addr_equal - tells if they have the same address + * + * @cl: - client + * @buf: buffer with cl header + * + * returns true if addresses are the same + */ +static inline +bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) +{ +	struct mei_hbm_cl_cmd *cmd = buf; +	return cl->host_client_id == cmd->host_addr && +		cl->me_client_id == cmd->me_addr; +} + + +int mei_hbm_start_wait(struct mei_device *dev) +{ +	int ret; +	if (dev->hbm_state > MEI_HBM_START) +		return 0; + +	mutex_unlock(&dev->device_lock); +	ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, +			dev->hbm_state == MEI_HBM_IDLE || +			dev->hbm_state >= 
MEI_HBM_STARTED, +			mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); +	mutex_lock(&dev->device_lock); + +	if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) { +		dev->hbm_state = MEI_HBM_IDLE; +		dev_err(&dev->pdev->dev, "waiting for mei start failed\n"); +		return -ETIME; +	} +	return 0; +} + +/** + * mei_hbm_start_req - sends start request message. + * + * @dev: the device structure + * + * returns 0 on success and < 0 on failure + */ +int mei_hbm_start_req(struct mei_device *dev) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	struct hbm_host_version_request *start_req; +	const size_t len = sizeof(struct hbm_host_version_request); +	int ret; + +	mei_hbm_hdr(mei_hdr, len); + +	/* host start message */ +	start_req = (struct hbm_host_version_request *)dev->wr_msg.data; +	memset(start_req, 0, len); +	start_req->hbm_cmd = HOST_START_REQ_CMD; +	start_req->host_version.major_version = HBM_MAJOR_VERSION; +	start_req->host_version.minor_version = HBM_MINOR_VERSION; + +	dev->hbm_state = MEI_HBM_IDLE; +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) { +		dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n", +			ret); +		return ret; +	} + +	dev->hbm_state = MEI_HBM_START; +	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; +	return 0; +} + +/* + * mei_hbm_enum_clients_req - sends enumeration client request message. 
+ * + * @dev: the device structure + * + * returns 0 on success and < 0 on failure + */ +static int mei_hbm_enum_clients_req(struct mei_device *dev) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	struct hbm_host_enum_request *enum_req; +	const size_t len = sizeof(struct hbm_host_enum_request); +	int ret; + +	/* enumerate clients */ +	mei_hbm_hdr(mei_hdr, len); + +	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; +	memset(enum_req, 0, len); +	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; + +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) { +		dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n", +			ret); +		return ret; +	} +	dev->hbm_state = MEI_HBM_ENUM_CLIENTS; +	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; +	return 0; +} + +/** + * mei_hbm_prop_req - request property for a single client + * + * @dev: the device structure + * + * returns 0 on success and < 0 on failure + */ + +static int mei_hbm_prop_req(struct mei_device *dev) +{ + +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	struct hbm_props_request *prop_req; +	const size_t len = sizeof(struct hbm_props_request); +	unsigned long next_client_index; +	unsigned long client_num; +	int ret; + +	client_num = dev->me_client_presentation_num; + +	next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, +					  dev->me_client_index); + +	/* We got all client properties */ +	if (next_client_index == MEI_CLIENTS_MAX) { +		dev->hbm_state = MEI_HBM_STARTED; +		schedule_work(&dev->init_work); + +		return 0; +	} + +	dev->me_clients[client_num].client_id = next_client_index; +	dev->me_clients[client_num].mei_flow_ctrl_creds = 0; + +	mei_hbm_hdr(mei_hdr, len); +	prop_req = (struct hbm_props_request *)dev->wr_msg.data; + +	memset(prop_req, 0, sizeof(struct hbm_props_request)); + + +	prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; +	prop_req->address = next_client_index; + +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) { 
+		dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n", +			ret); +		return ret; +	} + +	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; +	dev->me_client_index = next_client_index; + +	return 0; +} + +/* + * mei_hbm_pg - sends pg command + * + * @dev: the device structure + * @pg_cmd: the pg command code + * + * This function returns -EIO on write failure + */ +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	struct hbm_power_gate *req; +	const size_t len = sizeof(struct hbm_power_gate); +	int ret; + +	mei_hbm_hdr(mei_hdr, len); + +	req = (struct hbm_power_gate *)dev->wr_msg.data; +	memset(req, 0, len); +	req->hbm_cmd = pg_cmd; + +	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); +	if (ret) +		dev_err(&dev->pdev->dev, "power gate command write failed.\n"); +	return ret; +} +EXPORT_SYMBOL_GPL(mei_hbm_pg); + +/** + * mei_hbm_stop_req - send stop request message + * + * @dev - mei device + * @cl: client info + * + * This function returns -EIO on write failure + */ +static int mei_hbm_stop_req(struct mei_device *dev) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	struct hbm_host_stop_request *req = +			(struct hbm_host_stop_request *)dev->wr_msg.data; +	const size_t len = sizeof(struct hbm_host_stop_request); + +	mei_hbm_hdr(mei_hdr, len); + +	memset(req, 0, len); +	req->hbm_cmd = HOST_STOP_REQ_CMD; +	req->reason = DRIVER_STOP_REQUEST; + +	return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_flow_control_req - sends flow control request. 
+ * + * @dev: the device structure + * @cl: client info + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	const size_t len = sizeof(struct hbm_flow_control); + +	mei_hbm_hdr(mei_hdr, len); +	mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len); + +	cl_dbg(dev, cl, "sending flow control\n"); + +	return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_add_single_flow_creds - adds single buffer credentials. + * + * @dev: the device structure + * @flow: flow control. + * + * return 0 on success, < 0 otherwise + */ +static int mei_hbm_add_single_flow_creds(struct mei_device *dev, +				  struct hbm_flow_control *flow) +{ +	struct mei_me_client *me_cl; +	int id; + +	id = mei_me_cl_by_id(dev, flow->me_addr); +	if (id < 0) { +		dev_err(&dev->pdev->dev, "no such me client %d\n", +			flow->me_addr); +		return id; +	} + +	me_cl = &dev->me_clients[id]; +	if (me_cl->props.single_recv_buf) { +		me_cl->mei_flow_ctrl_creds++; +		dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", +		    flow->me_addr); +		dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", +		    me_cl->mei_flow_ctrl_creds); +	} else { +		BUG();	/* error in flow control */ +	} + +	return 0; +} + +/** + * mei_hbm_cl_flow_control_res - flow control response from me + * + * @dev: the device structure + * @flow_control: flow control response bus message + */ +static void mei_hbm_cl_flow_control_res(struct mei_device *dev, +		struct hbm_flow_control *flow_control) +{ +	struct mei_cl *cl; + +	if (!flow_control->host_addr) { +		/* single receive buffer */ +		mei_hbm_add_single_flow_creds(dev, flow_control); +		return; +	} + +	/* normal connection */ +	list_for_each_entry(cl, &dev->file_list, link) { +		if (mei_hbm_cl_addr_equal(cl, flow_control)) { +			cl->mei_flow_ctrl_creds++; +			dev_dbg(&dev->pdev->dev, "flow ctrl msg for host 
%d ME %d.\n", +				flow_control->host_addr, flow_control->me_addr); +			dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n", +				    cl->mei_flow_ctrl_creds); +				break; +		} +	} +} + + +/** + * mei_hbm_cl_disconnect_req - sends disconnect message to fw. + * + * @dev: the device structure + * @cl: a client to disconnect from + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	const size_t len = sizeof(struct hbm_client_connect_request); + +	mei_hbm_hdr(mei_hdr, len); +	mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len); + +	return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_disconnect_rsp - sends disconnect respose to the FW + * + * @dev: the device structure + * @cl: a client to disconnect from + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	const size_t len = sizeof(struct hbm_client_connect_response); + +	mei_hbm_hdr(mei_hdr, len); +	mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len); + +	return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** + * mei_hbm_cl_disconnect_res - disconnect response from ME + * + * @dev: the device structure + * @rs: disconnect response bus message + */ +static void mei_hbm_cl_disconnect_res(struct mei_device *dev, +		struct hbm_client_connect_response *rs) +{ +	struct mei_cl *cl; +	struct mei_cl_cb *cb, *next; + +	dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n", +			rs->me_addr, rs->host_addr, rs->status); + +	list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { +		cl = cb->cl; + +		/* this should not happen */ +		if (WARN_ON(!cl)) { +			list_del(&cb->list); +			return; +		} + +		if (mei_hbm_cl_addr_equal(cl, rs)) { +			
list_del(&cb->list); +			if (rs->status == MEI_CL_DISCONN_SUCCESS) +				cl->state = MEI_FILE_DISCONNECTED; + +			cl->status = 0; +			cl->timer_count = 0; +			break; +		} +	} +} + +/** + * mei_hbm_cl_connect_req - send connection request to specific me client + * + * @dev: the device structure + * @cl: a client to connect to + * + * returns -EIO on write failure + */ +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) +{ +	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; +	const size_t len = sizeof(struct hbm_client_connect_request); + +	mei_hbm_hdr(mei_hdr, len); +	mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len); + +	return mei_write_message(dev, mei_hdr,  dev->wr_msg.data); +} + +/** + * mei_hbm_cl_connect_res - connect response from the ME + * + * @dev: the device structure + * @rs: connect response bus message + */ +static void mei_hbm_cl_connect_res(struct mei_device *dev, +		struct hbm_client_connect_response *rs) +{ + +	struct mei_cl *cl; +	struct mei_cl_cb *cb, *next; + +	dev_dbg(&dev->pdev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n", +			rs->me_addr, rs->host_addr, +			mei_cl_conn_status_str(rs->status)); + +	cl = NULL; + +	list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { + +		cl = cb->cl; +		/* this should not happen */ +		if (WARN_ON(!cl)) { +			list_del_init(&cb->list); +			continue; +		} + +		if (cb->fop_type !=  MEI_FOP_CONNECT) +			continue; + +		if (mei_hbm_cl_addr_equal(cl, rs)) { +			list_del(&cb->list); +			break; +		} +	} + +	if (!cl) +		return; + +	cl->timer_count = 0; +	if (rs->status == MEI_CL_CONN_SUCCESS) +		cl->state = MEI_FILE_CONNECTED; +	else +		cl->state = MEI_FILE_DISCONNECTED; +	cl->status = mei_cl_conn_status_to_errno(rs->status); +} + + +/** + * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware + *  host sends disconnect response + * + * @dev: the device structure. 
+ * @disconnect_req: disconnect request bus message from the me + * + * returns -ENOMEM on allocation failure + */ +static int mei_hbm_fw_disconnect_req(struct mei_device *dev, +		struct hbm_client_connect_request *disconnect_req) +{ +	struct mei_cl *cl; +	struct mei_cl_cb *cb; + +	list_for_each_entry(cl, &dev->file_list, link) { +		if (mei_hbm_cl_addr_equal(cl, disconnect_req)) { +			dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n", +					disconnect_req->host_addr, +					disconnect_req->me_addr); +			cl->state = MEI_FILE_DISCONNECTED; +			cl->timer_count = 0; + +			cb = mei_io_cb_init(cl, NULL); +			if (!cb) +				return -ENOMEM; +			cb->fop_type = MEI_FOP_DISCONNECT_RSP; +			cl_dbg(dev, cl, "add disconnect response as first\n"); +			list_add(&cb->list, &dev->ctrl_wr_list.list); + +			break; +		} +	} +	return 0; +} + + +/** + * mei_hbm_version_is_supported - checks whether the driver can + *     support the hbm version of the device + * + * @dev: the device structure + * returns true if driver can support hbm version of the device + */ +bool mei_hbm_version_is_supported(struct mei_device *dev) +{ +	return	(dev->version.major_version < HBM_MAJOR_VERSION) || +		(dev->version.major_version == HBM_MAJOR_VERSION && +		 dev->version.minor_version <= HBM_MINOR_VERSION); +} + +/** + * mei_hbm_dispatch - bottom half read routine after ISR to + * handle the read bus message cmd processing. 
+ * + * @dev: the device structure + * @mei_hdr: header of bus message + * + * returns 0 on success and < 0 on failure + */ +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) +{ +	struct mei_bus_message *mei_msg; +	struct mei_me_client *me_client; +	struct hbm_host_version_response *version_res; +	struct hbm_client_connect_response *connect_res; +	struct hbm_client_connect_response *disconnect_res; +	struct hbm_client_connect_request *disconnect_req; +	struct hbm_flow_control *flow_control; +	struct hbm_props_response *props_res; +	struct hbm_host_enum_response *enum_res; + +	/* read the message to our buffer */ +	BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); +	mei_read_slots(dev, dev->rd_msg_buf, hdr->length); +	mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; + +	/* ignore spurious message and prevent reset nesting +	 * hbm is put to idle during system reset +	 */ +	if (dev->hbm_state == MEI_HBM_IDLE) { +		dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n"); +		return 0; +	} + +	switch (mei_msg->hbm_cmd) { +	case HOST_START_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n"); + +		dev->init_clients_timer = 0; + +		version_res = (struct hbm_host_version_response *)mei_msg; + +		dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", +				HBM_MAJOR_VERSION, HBM_MINOR_VERSION, +				version_res->me_max_version.major_version, +				version_res->me_max_version.minor_version); + +		if (version_res->host_version_supported) { +			dev->version.major_version = HBM_MAJOR_VERSION; +			dev->version.minor_version = HBM_MINOR_VERSION; +		} else { +			dev->version.major_version = +				version_res->me_max_version.major_version; +			dev->version.minor_version = +				version_res->me_max_version.minor_version; +		} + +		if (!mei_hbm_version_is_supported(dev)) { +			dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); + +			dev->hbm_state = MEI_HBM_STOPPED; +	
		if (mei_hbm_stop_req(dev)) { +				dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); +				return -EIO; +			} +			break; +		} + +		if (dev->dev_state != MEI_DEV_INIT_CLIENTS || +		    dev->hbm_state != MEI_HBM_START) { +			dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			return -EPROTO; +		} + +		dev->hbm_state = MEI_HBM_STARTED; + +		if (mei_hbm_enum_clients_req(dev)) { +			dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n"); +			return -EIO; +		} + +		wake_up_interruptible(&dev->wait_recvd_msg); +		break; + +	case CLIENT_CONNECT_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n"); + +		connect_res = (struct hbm_client_connect_response *) mei_msg; +		mei_hbm_cl_connect_res(dev, connect_res); +		wake_up(&dev->wait_recvd_msg); +		break; + +	case CLIENT_DISCONNECT_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n"); + +		disconnect_res = (struct hbm_client_connect_response *) mei_msg; +		mei_hbm_cl_disconnect_res(dev, disconnect_res); +		wake_up(&dev->wait_recvd_msg); +		break; + +	case MEI_FLOW_CONTROL_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n"); + +		flow_control = (struct hbm_flow_control *) mei_msg; +		mei_hbm_cl_flow_control_res(dev, flow_control); +		break; + +	case MEI_PG_ISOLATION_ENTRY_RES_CMD: +		dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n"); +		dev->pg_event = MEI_PG_EVENT_RECEIVED; +		if (waitqueue_active(&dev->wait_pg)) +			wake_up(&dev->wait_pg); +		break; + +	case MEI_PG_ISOLATION_EXIT_REQ_CMD: +		dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n"); +		dev->pg_event = MEI_PG_EVENT_RECEIVED; +		if (waitqueue_active(&dev->wait_pg)) +			wake_up(&dev->wait_pg); +		else +			/* +			* If the driver is not waiting on this then +			* this is HW initiated exit from PG. 
+			* Start runtime pm resume sequence to exit from PG. +			*/ +			pm_request_resume(&dev->pdev->dev); +		break; + +	case HOST_CLIENT_PROPERTIES_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); + +		dev->init_clients_timer = 0; + +		if (dev->me_clients == NULL) { +			dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n"); +			return -EPROTO; +		} + +		props_res = (struct hbm_props_response *)mei_msg; +		me_client = &dev->me_clients[dev->me_client_presentation_num]; + +		if (props_res->status) { +			dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n", +				props_res->status); +			return -EPROTO; +		} + +		if (me_client->client_id != props_res->address) { +			dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n", +				me_client->client_id, props_res->address); +			return -EPROTO; +		} + +		if (dev->dev_state != MEI_DEV_INIT_CLIENTS || +		    dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { +			dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			return -EPROTO; +		} + +		me_client->props = props_res->client_properties; +		dev->me_client_index++; +		dev->me_client_presentation_num++; + +		/* request property for the next client */ +		if (mei_hbm_prop_req(dev)) +			return -EIO; + +		break; + +	case HOST_ENUM_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n"); + +		dev->init_clients_timer = 0; + +		enum_res = (struct hbm_host_enum_response *) mei_msg; +		BUILD_BUG_ON(sizeof(dev->me_clients_map) +				< sizeof(enum_res->valid_addresses)); +		memcpy(dev->me_clients_map, enum_res->valid_addresses, +			sizeof(enum_res->valid_addresses)); + +		if (dev->dev_state != MEI_DEV_INIT_CLIENTS || +		    dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { +			dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			
return -EPROTO; +		} + +		if (mei_hbm_me_cl_allocate(dev)) { +			dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n"); +			return -ENOMEM; +		} + +		dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; + +		/* first property request */ +		if (mei_hbm_prop_req(dev)) +			return -EIO; + +		break; + +	case HOST_STOP_RES_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n"); + +		dev->init_clients_timer = 0; + +		if (dev->hbm_state != MEI_HBM_STOPPED) { +			dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", +				dev->dev_state, dev->hbm_state); +			return -EPROTO; +		} + +		dev->dev_state = MEI_DEV_POWER_DOWN; +		dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n"); +		/* force the reset */ +		return -EPROTO; +		break; + +	case CLIENT_DISCONNECT_REQ_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n"); + +		disconnect_req = (struct hbm_client_connect_request *)mei_msg; +		mei_hbm_fw_disconnect_req(dev, disconnect_req); +		break; + +	case ME_STOP_REQ_CMD: +		dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); +		dev->hbm_state = MEI_HBM_STOPPED; +		if (mei_hbm_stop_req(dev)) { +			dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); +			return -EIO; +		} +		break; +	default: +		BUG(); +		break; + +	} +	return 0; +} + diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h new file mode 100644 index 00000000000..683eb2835ce --- /dev/null +++ b/drivers/misc/mei/hbm.h @@ -0,0 +1,64 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_HBM_H_ +#define _MEI_HBM_H_ + +struct mei_device; +struct mei_msg_hdr; +struct mei_cl; + +/** + * enum mei_hbm_state - host bus message protocol state + * + * @MEI_HBM_IDLE : protocol not started + * @MEI_HBM_START : start request message was sent + * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent + * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties + */ +enum mei_hbm_state { +	MEI_HBM_IDLE = 0, +	MEI_HBM_START, +	MEI_HBM_STARTED, +	MEI_HBM_ENUM_CLIENTS, +	MEI_HBM_CLIENT_PROPERTIES, +	MEI_HBM_STOPPED, +}; + +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); + +static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) +{ +	hdr->host_addr = 0; +	hdr->me_addr = 0; +	hdr->length = length; +	hdr->msg_complete = 1; +	hdr->reserved = 0; +} + +void mei_hbm_idle(struct mei_device *dev); +void mei_hbm_reset(struct mei_device *dev); +int mei_hbm_start_req(struct mei_device *dev); +int mei_hbm_start_wait(struct mei_device *dev); +int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); +bool mei_hbm_version_is_supported(struct mei_device *dev); +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd); + +#endif /* _MEI_HBM_H_ */ + diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h new file mode 100644 index 00000000000..a7856c0ac57 --- /dev/null +++ b/drivers/misc/mei/hw-me-regs.h @@ -0,0 +1,185 @@ +/****************************************************************************** + * Intel Management Engine 
Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + *	Intel Corporation. + *	linux-mei@linux.intel.com + *	http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + *  * Redistributions of source code must retain the above copyright + *    notice, this list of conditions and the following disclaimer. + *  * Redistributions in binary form must reproduce the above copyright + *    notice, this list of conditions and the following disclaimer in + *    the documentation and/or other materials provided with the + *    distribution. 
+ *  * Neither the name Intel Corporation nor the names of its + *    contributors may be used to endorse or promote products derived + *    from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef _MEI_HW_MEI_REGS_H_ +#define _MEI_HW_MEI_REGS_H_ + +/* + * MEI device IDs + */ +#define MEI_DEV_ID_82946GZ    0x2974  /* 82946GZ/GL */ +#define MEI_DEV_ID_82G35      0x2984  /* 82G35 Express */ +#define MEI_DEV_ID_82Q965     0x2994  /* 82Q963/Q965 */ +#define MEI_DEV_ID_82G965     0x29A4  /* 82P965/G965 */ + +#define MEI_DEV_ID_82GM965    0x2A04  /* Mobile PM965/GM965 */ +#define MEI_DEV_ID_82GME965   0x2A14  /* Mobile GME965/GLE960 */ + +#define MEI_DEV_ID_ICH9_82Q35 0x29B4  /* 82Q35 Express */ +#define MEI_DEV_ID_ICH9_82G33 0x29C4  /* 82G33/G31/P35/P31 Express */ +#define MEI_DEV_ID_ICH9_82Q33 0x29D4  /* 82Q33 Express */ +#define MEI_DEV_ID_ICH9_82X38 0x29E4  /* 82X38/X48 Express */ +#define MEI_DEV_ID_ICH9_3200  0x29F4  /* 3200/3210 Server */ + +#define MEI_DEV_ID_ICH9_6     0x28B4  /* Bearlake */ +#define MEI_DEV_ID_ICH9_7     0x28C4  /* Bearlake */ +#define MEI_DEV_ID_ICH9_8     0x28D4  /* Bearlake */ +#define MEI_DEV_ID_ICH9_9     0x28E4  /* Bearlake */ +#define MEI_DEV_ID_ICH9_10    0x28F4  /* Bearlake */ + +#define MEI_DEV_ID_ICH9M_1    0x2A44  /* Cantiga */ +#define MEI_DEV_ID_ICH9M_2    0x2A54  /* Cantiga */ +#define MEI_DEV_ID_ICH9M_3    0x2A64  /* Cantiga */ +#define MEI_DEV_ID_ICH9M_4    0x2A74  /* Cantiga */ + +#define MEI_DEV_ID_ICH10_1    0x2E04  /* Eaglelake */ +#define MEI_DEV_ID_ICH10_2    0x2E14  /* Eaglelake */ +#define MEI_DEV_ID_ICH10_3    0x2E24  /* Eaglelake */ +#define MEI_DEV_ID_ICH10_4    0x2E34  /* Eaglelake */ + +#define MEI_DEV_ID_IBXPK_1    0x3B64  /* Calpella */ +#define MEI_DEV_ID_IBXPK_2    0x3B65  /* Calpella */ + +#define MEI_DEV_ID_CPT_1      0x1C3A  /* Couger Point */ +#define MEI_DEV_ID_PBG_1      0x1D3A  /* C600/X79 Patsburg */ + +#define MEI_DEV_ID_PPT_1      0x1E3A  /* Panther Point */ +#define MEI_DEV_ID_PPT_2      0x1CBA  /* Panther Point */ +#define MEI_DEV_ID_PPT_3      0x1DBA  /* Panther Point */ + +#define MEI_DEV_ID_LPT_H   
   0x8C3A  /* Lynx Point H */ +#define MEI_DEV_ID_LPT_W      0x8D3A  /* Lynx Point - Wellsburg */ +#define MEI_DEV_ID_LPT_LP     0x9C3A  /* Lynx Point LP */ +#define MEI_DEV_ID_LPT_HR     0x8CBA  /* Lynx Point H Refresh */ + +#define MEI_DEV_ID_WPT_LP     0x9CBA  /* Wildcat Point LP */ + +/* Host Firmware Status Registers in PCI Config Space */ +#define PCI_CFG_HFS_1         0x40 +#define PCI_CFG_HFS_2         0x48 + +/* + * MEI HW Section + */ + +/* MEI registers */ +/* H_CB_WW - Host Circular Buffer (CB) Write Window register */ +#define H_CB_WW    0 +/* H_CSR - Host Control Status register */ +#define H_CSR      4 +/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */ +#define ME_CB_RW   8 +/* ME_CSR_HA - ME Control Status Host Access register (read only) */ +#define ME_CSR_HA  0xC +/* H_HGC_CSR - PGI register */ +#define H_HPG_CSR  0x10 + + +/* register bits of H_CSR (Host Control Status register) */ +/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ +#define H_CBD             0xFF000000 +/* Host Circular Buffer Write Pointer */ +#define H_CBWP            0x00FF0000 +/* Host Circular Buffer Read Pointer */ +#define H_CBRP            0x0000FF00 +/* Host Reset */ +#define H_RST             0x00000010 +/* Host Ready */ +#define H_RDY             0x00000008 +/* Host Interrupt Generate */ +#define H_IG              0x00000004 +/* Host Interrupt Status */ +#define H_IS              0x00000002 +/* Host Interrupt Enable */ +#define H_IE              0x00000001 + + +/* register bits of ME_CSR_HA (ME Control Status Host Access register) */ +/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only +access to ME_CBD */ +#define ME_CBD_HRA        0xFF000000 +/* ME CB Write Pointer HRA - host read only access to ME_CBWP */ +#define ME_CBWP_HRA       0x00FF0000 +/* ME CB Read Pointer HRA - host read only access to ME_CBRP */ +#define ME_CBRP_HRA       0x0000FF00 +/* ME Power Gate Isolation Capability HRA  - host ready 
only access */ +#define ME_PGIC_HRA       0x00000040 +/* ME Reset HRA - host read only access to ME_RST */ +#define ME_RST_HRA        0x00000010 +/* ME Ready HRA - host read only access to ME_RDY */ +#define ME_RDY_HRA        0x00000008 +/* ME Interrupt Generate HRA - host read only access to ME_IG */ +#define ME_IG_HRA         0x00000004 +/* ME Interrupt Status HRA - host read only access to ME_IS */ +#define ME_IS_HRA         0x00000002 +/* ME Interrupt Enable HRA - host read only access to ME_IE */ +#define ME_IE_HRA         0x00000001 + + +/* register bits - H_HPG_CSR */ +#define H_HPG_CSR_PGIHEXR       0x00000001 +#define H_HPG_CSR_PGI           0x00000002 + +#endif /* _MEI_HW_MEI_REGS_H_ */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c new file mode 100644 index 00000000000..6a2d272cea4 --- /dev/null +++ b/drivers/misc/mei/hw-me.c @@ -0,0 +1,885 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/pci.h> + +#include <linux/kthread.h> +#include <linux/interrupt.h> + +#include "mei_dev.h" +#include "hbm.h" + +#include "hw-me.h" +#include "hw-me-regs.h" + +/** + * mei_me_reg_read - Reads 32bit data from the mei device + * + * @dev: the device structure + * @offset: offset from which to read the data + * + * returns register value (u32) + */ +static inline u32 mei_me_reg_read(const struct mei_me_hw *hw, +			       unsigned long offset) +{ +	return ioread32(hw->mem_addr + offset); +} + + +/** + * mei_me_reg_write - Writes 32bit data to the mei device + * + * @dev: the device structure + * @offset: offset from which to write the data + * @value: register value to write (u32) + */ +static inline void mei_me_reg_write(const struct mei_me_hw *hw, +				 unsigned long offset, u32 value) +{ +	iowrite32(value, hw->mem_addr + offset); +} + +/** + * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer + *  read window register + * + * @dev: the device structure + * + * returns ME_CB_RW register value (u32) + */ +static u32 mei_me_mecbrw_read(const struct mei_device *dev) +{ +	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW); +} +/** + * mei_me_mecsr_read - Reads 32bit data from the ME CSR + * + * @dev: the device structure + * + * returns ME_CSR_HA register value (u32) + */ +static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw) +{ +	return mei_me_reg_read(hw, ME_CSR_HA); +} + +/** + * mei_hcsr_read - Reads 32bit data from the host CSR + * + * @dev: the device structure + * + * returns H_CSR register value (u32) + */ +static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) +{ +	return mei_me_reg_read(hw, H_CSR); +} + +/** + * mei_hcsr_set - writes H_CSR register to the mei device, + * and ignores the H_IS bit for it is write-one-to-zero. 
+ * + * @dev: the device structure + */ +static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) +{ +	hcsr &= ~H_IS; +	mei_me_reg_write(hw, H_CSR, hcsr); +} + + +/** + * mei_me_hw_config - configure hw dependent settings + * + * @dev: mei device + */ +static void mei_me_hw_config(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 hcsr = mei_hcsr_read(to_me_hw(dev)); +	/* Doesn't change in runtime */ +	dev->hbuf_depth = (hcsr & H_CBD) >> 24; + +	hw->pg_state = MEI_PG_OFF; +} + +/** + * mei_me_pg_state  - translate internal pg state + *   to the mei power gating state + * + * @hw -  me hardware + * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	return hw->pg_state; +} + +/** + * mei_clear_interrupts - clear and stop interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_clear(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 hcsr = mei_hcsr_read(hw); +	if ((hcsr & H_IS) == H_IS) +		mei_me_reg_write(hw, H_CSR, hcsr); +} +/** + * mei_me_intr_enable - enables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_enable(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 hcsr = mei_hcsr_read(hw); +	hcsr |= H_IE; +	mei_hcsr_set(hw, hcsr); +} + +/** + * mei_disable_interrupts - disables mei device interrupts + * + * @dev: the device structure + */ +static void mei_me_intr_disable(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 hcsr = mei_hcsr_read(hw); +	hcsr  &= ~H_IE; +	mei_hcsr_set(hw, hcsr); +} + +/** + * mei_me_hw_reset_release - release device from the reset + * + * @dev: the device structure + */ +static void mei_me_hw_reset_release(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 hcsr = mei_hcsr_read(hw); + +	hcsr |= H_IG; +	hcsr &= ~H_RST; +	
mei_hcsr_set(hw, hcsr); + +	/* complete this write before we set host ready on another CPU */ +	mmiowb(); +} +/** + * mei_me_hw_reset - resets fw via mei csr register. + * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. + */ +static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 hcsr = mei_hcsr_read(hw); + +	hcsr |= H_RST | H_IG | H_IS; + +	if (intr_enable) +		hcsr |= H_IE; +	else +		hcsr &= ~H_IE; + +	dev->recvd_hw_ready = false; +	mei_me_reg_write(hw, H_CSR, hcsr); + +	/* +	 * Host reads the H_CSR once to ensure that the +	 * posted write to H_CSR completes. +	 */ +	hcsr = mei_hcsr_read(hw); + +	if ((hcsr & H_RST) == 0) +		dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr); + +	if ((hcsr & H_RDY) == H_RDY) +		dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr); + +	if (intr_enable == false) +		mei_me_hw_reset_release(dev); + +	return 0; +} + +/** + * mei_me_host_set_ready - enable device + * + * @dev - mei device + * returns bool + */ + +static void mei_me_host_set_ready(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	hw->host_hw_state = mei_hcsr_read(hw); +	hw->host_hw_state |= H_IE | H_IG | H_RDY; +	mei_hcsr_set(hw, hw->host_hw_state); +} +/** + * mei_me_host_is_ready - check whether the host has turned ready + * + * @dev - mei device + * returns bool + */ +static bool mei_me_host_is_ready(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	hw->host_hw_state = mei_hcsr_read(hw); +	return (hw->host_hw_state & H_RDY) == H_RDY; +} + +/** + * mei_me_hw_is_ready - check whether the me(hw) has turned ready + * + * @dev - mei device + * returns bool + */ +static bool mei_me_hw_is_ready(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	hw->me_hw_state = mei_me_mecsr_read(hw); +	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA; +} + +static int mei_me_hw_ready_wait(struct mei_device 
*dev) +{ +	int err; + +	mutex_unlock(&dev->device_lock); +	err = wait_event_interruptible_timeout(dev->wait_hw_ready, +			dev->recvd_hw_ready, +			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); +	mutex_lock(&dev->device_lock); +	if (!err && !dev->recvd_hw_ready) { +		if (!err) +			err = -ETIME; +		dev_err(&dev->pdev->dev, +			"wait hw ready failed. status = %d\n", err); +		return err; +	} + +	dev->recvd_hw_ready = false; +	return 0; +} + +static int mei_me_hw_start(struct mei_device *dev) +{ +	int ret = mei_me_hw_ready_wait(dev); +	if (ret) +		return ret; +	dev_dbg(&dev->pdev->dev, "hw is ready\n"); + +	mei_me_host_set_ready(dev); +	return ret; +} + + +/** + * mei_hbuf_filled_slots - gets number of device filled buffer slots + * + * @dev: the device structure + * + * returns number of filled slots + */ +static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	char read_ptr, write_ptr; + +	hw->host_hw_state = mei_hcsr_read(hw); + +	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8); +	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16); + +	return (unsigned char) (write_ptr - read_ptr); +} + +/** + * mei_me_hbuf_is_empty - checks if host buffer is empty. + * + * @dev: the device structure + * + * returns true if empty, false - otherwise. + */ +static bool mei_me_hbuf_is_empty(struct mei_device *dev) +{ +	return mei_hbuf_filled_slots(dev) == 0; +} + +/** + * mei_me_hbuf_empty_slots - counts write empty slots. 
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise empty slots count
+ */
+static int mei_me_hbuf_empty_slots(struct mei_device *dev)
+{
+	unsigned char filled_slots, empty_slots;
+
+	filled_slots = mei_hbuf_filled_slots(dev);
+	empty_slots = dev->hbuf_depth - filled_slots;
+
+	/* check for overflow */
+	if (filled_slots > dev->hbuf_depth)
+		return -EOVERFLOW;
+
+	return empty_slots;
+}
+
+static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+{
+	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
+}
+
+
+/**
+ * mei_me_write_message - writes a message to mei device.
+ *
+ * @dev: the device structure
+ * @header: mei HECI header of message
+ * @buf: message payload will be written
+ *
+ * This function returns -EIO if write has failed
+ */
+static int mei_me_write_message(struct mei_device *dev,
+			struct mei_msg_hdr *header,
+			unsigned char *buf)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long rem;
+	unsigned long length = header->length;
+	u32 *reg_buf = (u32 *)buf;
+	u32 hcsr;
+	u32 dw_cnt;
+	int i;
+	int empty_slots;
+
+	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+	empty_slots = mei_hbuf_empty_slots(dev);
+	dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
+
+	dw_cnt = mei_data2slots(length);
+	if (empty_slots < 0 || dw_cnt > empty_slots)
+		return -EMSGSIZE;
+
+	mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
+
+	for (i = 0; i < length / 4; i++)
+		mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
+
+	rem = length & 0x3;
+	if (rem > 0) {
+		u32 reg = 0;
+		memcpy(&reg, &buf[length - rem], rem);
+		mei_me_reg_write(hw, H_CB_WW, reg);
+	}
+
+	hcsr = mei_hcsr_read(hw) | H_IG;
+	mei_hcsr_set(hw, hcsr);
+	if (!mei_me_hw_is_ready(dev))
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * mei_me_count_full_read_slots - counts read full slots. 
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise filled slots count
+ */
+static int mei_me_count_full_read_slots(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	char read_ptr, write_ptr;
+	unsigned char buffer_depth, filled_slots;
+
+	hw->me_hw_state = mei_me_mecsr_read(hw);
+	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
+	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
+	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
+	filled_slots = (unsigned char) (write_ptr - read_ptr);
+
+	/* check for overflow */
+	if (filled_slots > buffer_depth)
+		return -EOVERFLOW;
+
+	dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
+	return (int)filled_slots;
+}
+
+/**
+ * mei_me_read_slots - reads a message from mei device.
+ *
+ * @dev: the device structure
+ * @buffer: message buffer will be written
+ * @buffer_length: message size will be read
+ */
+static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
+		    unsigned long buffer_length)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 *reg_buf = (u32 *)buffer;
+	u32 hcsr;
+
+	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+		*reg_buf++ = mei_me_mecbrw_read(dev);
+
+	if (buffer_length > 0) {
+		u32 reg = mei_me_mecbrw_read(dev);
+		memcpy(reg_buf, &reg, buffer_length);
+	}
+
+	hcsr = mei_hcsr_read(hw) | H_IG;
+	mei_hcsr_set(hw, hcsr);
+	return 0;
+}
+
+/**
+ * mei_me_pg_enter - write pg enter register to mei device.
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_enter(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+	reg |= H_HPG_CSR_PGI;
+	mei_me_reg_write(hw, H_HPG_CSR, reg);
+}
+
+/**
+ * mei_me_pg_exit - write pg exit register to mei device. 
+ * + * @dev: the device structure + */ +static void mei_me_pg_exit(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 reg = mei_me_reg_read(hw, H_HPG_CSR); + +	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); + +	reg |= H_HPG_CSR_PGIHEXR; +	mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_set_sync - perform pg entry procedure + * + * @dev: the device structure + * + * returns 0 on success an error code otherwise + */ +int mei_me_pg_set_sync(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); +	int ret; + +	dev->pg_event = MEI_PG_EVENT_WAIT; + +	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); +	if (ret) +		return ret; + +	mutex_unlock(&dev->device_lock); +	wait_event_timeout(dev->wait_pg, +		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); +	mutex_lock(&dev->device_lock); + +	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { +		mei_me_pg_enter(dev); +		ret = 0; +	} else { +		ret = -ETIME; +	} + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	hw->pg_state = MEI_PG_ON; + +	return ret; +} + +/** + * mei_me_pg_unset_sync - perform pg exit procedure + * + * @dev: the device structure + * + * returns 0 on success an error code otherwise + */ +int mei_me_pg_unset_sync(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); +	int ret; + +	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) +		goto reply; + +	dev->pg_event = MEI_PG_EVENT_WAIT; + +	mei_me_pg_exit(dev); + +	mutex_unlock(&dev->device_lock); +	wait_event_timeout(dev->wait_pg, +		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); +	mutex_lock(&dev->device_lock); + +reply: +	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) +		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); +	else +		ret = -ETIME; + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	hw->pg_state = MEI_PG_OFF; + +	return ret; +} + +/** + * mei_me_pg_is_enabled - detect if PG is 
supported by HW + * + * @dev: the device structure + * + * returns: true is pg supported, false otherwise + */ +static bool mei_me_pg_is_enabled(struct mei_device *dev) +{ +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 reg = mei_me_reg_read(hw, ME_CSR_HA); + +	if ((reg & ME_PGIC_HRA) == 0) +		goto notsupported; + +	if (dev->version.major_version < HBM_MAJOR_VERSION_PGI) +		goto notsupported; + +	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && +	    dev->version.minor_version < HBM_MINOR_VERSION_PGI) +		goto notsupported; + +	return true; + +notsupported: +	dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", +		!!(reg & ME_PGIC_HRA), +		dev->version.major_version, +		dev->version.minor_version, +		HBM_MAJOR_VERSION_PGI, +		HBM_MINOR_VERSION_PGI); + +	return false; +} + +/** + * mei_me_irq_quick_handler - The ISR of the MEI device + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + */ + +irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) +{ +	struct mei_device *dev = (struct mei_device *) dev_id; +	struct mei_me_hw *hw = to_me_hw(dev); +	u32 csr_reg = mei_hcsr_read(hw); + +	if ((csr_reg & H_IS) != H_IS) +		return IRQ_NONE; + +	/* clear H_IS bit in H_CSR */ +	mei_me_reg_write(hw, H_CSR, csr_reg); + +	return IRQ_WAKE_THREAD; +} + +/** + * mei_me_irq_thread_handler - function called after ISR to handle the interrupt + * processing. 
+ * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + * + */ +irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) +{ +	struct mei_device *dev = (struct mei_device *) dev_id; +	struct mei_cl_cb complete_list; +	s32 slots; +	int rets = 0; + +	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); +	/* initialize our complete list */ +	mutex_lock(&dev->device_lock); +	mei_io_list_init(&complete_list); + +	/* Ack the interrupt here +	 * In case of MSI we don't go through the quick handler */ +	if (pci_dev_msi_enabled(dev->pdev)) +		mei_clear_interrupts(dev); + +	/* check if ME wants a reset */ +	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { +		dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); +		schedule_work(&dev->reset_work); +		goto end; +	} + +	/*  check if we need to start the dev */ +	if (!mei_host_is_ready(dev)) { +		if (mei_hw_is_ready(dev)) { +			mei_me_hw_reset_release(dev); +			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); + +			dev->recvd_hw_ready = true; +			wake_up_interruptible(&dev->wait_hw_ready); +		} else { +			dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n"); +		} +		goto end; +	} +	/* check slots available for reading */ +	slots = mei_count_full_read_slots(dev); +	while (slots > 0) { +		dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots); +		rets = mei_irq_read_handler(dev, &complete_list, &slots); +		/* There is a race between ME write and interrupt delivery: +		 * Not all data is always available immediately after the +		 * interrupt, so try to read again on the next interrupt. 
+		 */ +		if (rets == -ENODATA) +			break; + +		if (rets && dev->dev_state != MEI_DEV_RESETTING) { +			dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n", +						rets); +			schedule_work(&dev->reset_work); +			goto end; +		} +	} + +	dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + +	/* +	 * During PG handshake only allowed write is the replay to the +	 * PG exit message, so block calling write function +	 * if the pg state is not idle +	 */ +	if (dev->pg_event == MEI_PG_EVENT_IDLE) { +		rets = mei_irq_write_handler(dev, &complete_list); +		dev->hbuf_is_ready = mei_hbuf_is_ready(dev); +	} + +	mei_irq_compl_handler(dev, &complete_list); + +end: +	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); +	mutex_unlock(&dev->device_lock); +	return IRQ_HANDLED; +} + +/** + * mei_me_fw_status - retrieve fw status from the pci config space + * + * @dev: the device structure + * @fw_status: fw status registers storage + * + * returns 0 on success an error code otherwise + */ +static int mei_me_fw_status(struct mei_device *dev, +			    struct mei_fw_status *fw_status) +{ +	const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2}; +	int i; + +	if (!fw_status) +		return -EINVAL; + +	switch (dev->pdev->device) { +	case MEI_DEV_ID_IBXPK_1: +	case MEI_DEV_ID_IBXPK_2: +	case MEI_DEV_ID_CPT_1: +	case MEI_DEV_ID_PBG_1: +	case MEI_DEV_ID_PPT_1: +	case MEI_DEV_ID_PPT_2: +	case MEI_DEV_ID_PPT_3: +	case MEI_DEV_ID_LPT_H: +	case MEI_DEV_ID_LPT_W: +	case MEI_DEV_ID_LPT_LP: +	case MEI_DEV_ID_LPT_HR: +	case MEI_DEV_ID_WPT_LP: +		fw_status->count = 2; +		break; +	case MEI_DEV_ID_ICH10_1: +	case MEI_DEV_ID_ICH10_2: +	case MEI_DEV_ID_ICH10_3: +	case MEI_DEV_ID_ICH10_4: +		fw_status->count = 1; +		break; +	default: +		fw_status->count = 0; +		break; +	} + +	for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { +		int ret; +		ret = pci_read_config_dword(dev->pdev, +				pci_cfg_reg[i], &fw_status->status[i]); +		if (ret) +			return ret; +	} +	return 0; +} + +static 
const struct mei_hw_ops mei_me_hw_ops = {
+
+	.pg_state  = mei_me_pg_state,
+
+	.fw_status = mei_me_fw_status,
+	.host_is_ready = mei_me_host_is_ready,
+
+	.hw_is_ready = mei_me_hw_is_ready,
+	.hw_reset = mei_me_hw_reset,
+	.hw_config = mei_me_hw_config,
+	.hw_start = mei_me_hw_start,
+
+	.pg_is_enabled = mei_me_pg_is_enabled,
+
+	.intr_clear = mei_me_intr_clear,
+	.intr_enable = mei_me_intr_enable,
+	.intr_disable = mei_me_intr_disable,
+
+	.hbuf_free_slots = mei_me_hbuf_empty_slots,
+	.hbuf_is_ready = mei_me_hbuf_is_empty,
+	.hbuf_max_len = mei_me_hbuf_max_len,
+
+	.write = mei_me_write_message,
+
+	.rdbuf_full_slots = mei_me_count_full_read_slots,
+	.read_hdr = mei_me_mecbrw_read,
+	.read = mei_me_read_slots
+};
+
+static bool mei_me_fw_type_nm(struct pci_dev *pdev)
+{
+	u32 reg;
+	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
+	return (reg & 0x600) == 0x200;
+}
+
+#define MEI_CFG_FW_NM                           \
+	.quirk_probe = mei_me_fw_type_nm
+
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+	u32 reg;
+	/* Read ME FW Status check for SPS Firmware */
+	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+	/* if bits [19:16] = 15, running SPS Firmware */
+	return (reg & 0xf0000) == 0xf0000;
+}
+
+#define MEI_CFG_FW_SPS                           \
+	.quirk_probe = mei_me_fw_type_sps
+
+
+#define MEI_CFG_LEGACY_HFS                      \
+	.fw_status.count = 0
+
+#define MEI_CFG_ICH_HFS                        \
+	.fw_status.count = 1,                   \
+	.fw_status.status[0] = PCI_CFG_HFS_1
+
+#define MEI_CFG_PCH_HFS                         \
+	.fw_status.count = 2,                   \
+	.fw_status.status[0] = PCI_CFG_HFS_1,   \
+	.fw_status.status[1] = PCI_CFG_HFS_2
+
+
+/* ICH Legacy devices */
+const struct mei_cfg mei_me_legacy_cfg = {
+	MEI_CFG_LEGACY_HFS,
+};
+
+/* ICH devices */
+const struct mei_cfg mei_me_ich_cfg = {
+	MEI_CFG_ICH_HFS,
+};
+
+/* PCH devices */
+const struct 
mei_cfg mei_me_pch_cfg = { +	MEI_CFG_PCH_HFS, +}; + + +/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ +const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { +	MEI_CFG_PCH_HFS, +	MEI_CFG_FW_NM, +}; + +/* PCH Lynx Point with quirk for SPS Firmware exclusion */ +const struct mei_cfg mei_me_lpt_cfg = { +	MEI_CFG_PCH_HFS, +	MEI_CFG_FW_SPS, +}; + +/** + * mei_me_dev_init - allocates and initializes the mei device structure + * + * @pdev: The pci device structure + * @cfg: per device generation config + * + * returns The mei_device_device pointer on success, NULL on failure. + */ +struct mei_device *mei_me_dev_init(struct pci_dev *pdev, +				   const struct mei_cfg *cfg) +{ +	struct mei_device *dev; + +	dev = kzalloc(sizeof(struct mei_device) + +			 sizeof(struct mei_me_hw), GFP_KERNEL); +	if (!dev) +		return NULL; + +	mei_device_init(dev, cfg); + +	dev->ops = &mei_me_hw_ops; + +	dev->pdev = pdev; +	return dev; +} + diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h new file mode 100644 index 00000000000..12b0f4bbe1f --- /dev/null +++ b/drivers/misc/mei/hw-me.h @@ -0,0 +1,56 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + + + +#ifndef _MEI_INTERFACE_H_ +#define _MEI_INTERFACE_H_ + +#include <linux/mei.h> +#include <linux/irqreturn.h> +#include "mei_dev.h" +#include "client.h" + +#define MEI_ME_RPM_TIMEOUT    500 /* ms */ + +struct mei_me_hw { +	void __iomem *mem_addr; +	/* +	 * hw states of host and fw(ME) +	 */ +	u32 host_hw_state; +	u32 me_hw_state; +	enum mei_pg_state pg_state; +}; + +#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) + +extern const struct mei_cfg mei_me_legacy_cfg; +extern const struct mei_cfg mei_me_ich_cfg; +extern const struct mei_cfg mei_me_pch_cfg; +extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg; +extern const struct mei_cfg mei_me_lpt_cfg; + +struct mei_device *mei_me_dev_init(struct pci_dev *pdev, +				   const struct mei_cfg *cfg); + +int mei_me_pg_set_sync(struct mei_device *dev); +int mei_me_pg_unset_sync(struct mei_device *dev); + +irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); + +#endif /* _MEI_INTERFACE_H_ */ diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h new file mode 100644 index 00000000000..f19229c4e65 --- /dev/null +++ b/drivers/misc/mei/hw-txe-regs.h @@ -0,0 +1,294 @@ +/****************************************************************************** + * Intel Management Engine Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING + * + * Contact Information: + *	Intel Corporation. + *	linux-mei@linux.intel.com + *	http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + *  * Redistributions of source code must retain the above copyright + *    notice, this list of conditions and the following disclaimer. + *  * Redistributions in binary form must reproduce the above copyright + *    notice, this list of conditions and the following disclaimer in + *    the documentation and/or other materials provided with the + *    distribution. + *  * Neither the name Intel Corporation nor the names of its + *    contributors may be used to endorse or promote products derived + *    from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _MEI_HW_TXE_REGS_H_ +#define _MEI_HW_TXE_REGS_H_ + +#include "hw.h" + +#define SEC_ALIVENESS_TIMER_TIMEOUT        (5 * MSEC_PER_SEC) +#define SEC_ALIVENESS_WAIT_TIMEOUT         (1 * MSEC_PER_SEC) +#define SEC_RESET_WAIT_TIMEOUT             (1 * MSEC_PER_SEC) +#define SEC_READY_WAIT_TIMEOUT             (5 * MSEC_PER_SEC) +#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define RESET_CANCEL_WAIT_TIMEOUT          (1 * MSEC_PER_SEC) + +enum { +	SEC_BAR, +	BRIDGE_BAR, + +	NUM_OF_MEM_BARS +}; + +/* SeC FW Status Register + * + * FW uses this register in order to report its status to host. + * This register resides in PCI-E config space. 
+ */ +#define PCI_CFG_TXE_FW_STS0   0x40 +#  define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK    0x0000000F +#  define PCI_CFG_TXE_FW_STS0_OP_ST_MSK     0x000001C0 +#  define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200 +#  define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK  0x0000F000 +#  define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK   0x000F0000 +#  define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK   0x00F00000 +#define PCI_CFG_TXE_FW_STS1   0x48 + +#define IPC_BASE_ADDR	0x80400 /* SeC IPC Base Address */ + +/* IPC Input Doorbell Register */ +#define SEC_IPC_INPUT_DOORBELL_REG       (0x0000 + IPC_BASE_ADDR) + +/* IPC Input Status Register + * This register indicates whether or not processing of + * the most recent command has been completed by the SEC + * New commands and payloads should not be written by the Host + * until this indicates that the previous command has been processed. + */ +#define SEC_IPC_INPUT_STATUS_REG         (0x0008 + IPC_BASE_ADDR) +#  define SEC_IPC_INPUT_STATUS_RDY    BIT(0) + +/* IPC Host Interrupt Status Register */ +#define SEC_IPC_HOST_INT_STATUS_REG      (0x0010 + IPC_BASE_ADDR) +#define   SEC_IPC_HOST_INT_STATUS_OUT_DB             BIT(0) +#define   SEC_IPC_HOST_INT_STATUS_IN_RDY             BIT(1) +#define   SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD       BIT(5) +#define   SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS     BIT(17) +#define   SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR       BIT(18) +#define   SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR       BIT(19) +#define   SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW    BIT(21) + +/* Convenient mask for pending interrupts */ +#define   SEC_IPC_HOST_INT_STATUS_PENDING \ +		(SEC_IPC_HOST_INT_STATUS_OUT_DB| \ +		SEC_IPC_HOST_INT_STATUS_IN_RDY) + +/* IPC Host Interrupt Mask Register */ +#define SEC_IPC_HOST_INT_MASK_REG        (0x0014 + IPC_BASE_ADDR) + +#  define SEC_IPC_HOST_INT_MASK_OUT_DB	BIT(0) /* Output Doorbell Int Mask */ +#  define SEC_IPC_HOST_INT_MASK_IN_RDY	BIT(1) /* Input Ready Int Mask */ + +/* IPC Input Payload RAM */ +#define 
SEC_IPC_INPUT_PAYLOAD_REG        (0x0100 + IPC_BASE_ADDR) +/* IPC Shared Payload RAM */ +#define IPC_SHARED_PAYLOAD_REG           (0x0200 + IPC_BASE_ADDR) + +/* SeC Address Translation Table Entry 2 - Ctrl + * + * This register resides also in SeC's PCI-E Memory space. + */ +#define SATT2_CTRL_REG                   0x1040 +#  define SATT2_CTRL_VALID_MSK            BIT(0) +#  define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8 +#  define SATT2_CTRL_BRIDGE_HOST_EN_MSK   BIT(12) + +/* SATT Table Entry 2 SAP Base Address Register */ +#define SATT2_SAP_BA_REG                 0x1044 +/* SATT Table Entry 2 SAP Size Register. */ +#define SATT2_SAP_SIZE_REG               0x1048 + /* SATT Table Entry 2 SAP Bridge Address - LSB Register */ +#define SATT2_BRG_BA_LSB_REG             0x104C + +/* Host High-level Interrupt Status Register */ +#define HHISR_REG                        0x2020 +/* Host High-level Interrupt Enable Register + * + * Resides in PCI memory space. This is the top hierarchy for + * interrupts from SeC to host, aggregating both interrupts that + * arrive through HICR registers as well as interrupts + * that arrive via IPC. + */ +#define HHIER_REG                        0x2024 +#define   IPC_HHIER_SEC	BIT(0) +#define   IPC_HHIER_BRIDGE	BIT(1) +#define   IPC_HHIER_MSK	(IPC_HHIER_SEC | IPC_HHIER_BRIDGE) + +/* Host High-level Interrupt Mask Register. + * + * Resides in PCI memory space. + * This is the top hierarchy for masking interrupts from SeC to host. + */ +#define HHIMR_REG                        0x2028 +#define   IPC_HHIMR_SEC       BIT(0) +#define   IPC_HHIMR_BRIDGE    BIT(1) + +/* Host High-level IRQ Status Register */ +#define HHIRQSR_REG                      0x202C + +/* Host Interrupt Cause Register 0 - SeC IPC Readiness + * + * This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * This register is used by SeC's IPC driver in order + * to synchronize with host about IPC interface state. 
+ */ +#define HICR_SEC_IPC_READINESS_REG       0x2040 +#define   HICR_SEC_IPC_READINESS_HOST_RDY  BIT(0) +#define   HICR_SEC_IPC_READINESS_SEC_RDY   BIT(1) +#define   HICR_SEC_IPC_READINESS_SYS_RDY     \ +	  (HICR_SEC_IPC_READINESS_HOST_RDY | \ +	   HICR_SEC_IPC_READINESS_SEC_RDY) +#define   HICR_SEC_IPC_READINESS_RDY_CLR   BIT(2) + +/* Host Interrupt Cause Register 1 - Aliveness Response */ +/* This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * The register may be used by SeC to ACK a host request for aliveness. + */ +#define HICR_HOST_ALIVENESS_RESP_REG     0x2044 +#define   HICR_HOST_ALIVENESS_RESP_ACK    BIT(0) + +/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */ +#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048 + +/* Host Interrupt Status Register. + * + * Resides in PCI memory space. + * This is the main register involved in generating interrupts + * from SeC to host via HICRs. + * The interrupt generation rules are as follows: + * An interrupt will be generated whenever for any i, + * there is a transition from a state where at least one of + * the following conditions did not hold, to a state where + * ALL the following conditions hold: + * A) HISR.INT[i]_STS == 1. + * B) HIER.INT[i]_EN == 1. + */ +#define HISR_REG                         0x2060 +#define   HISR_INT_0_STS      BIT(0) +#define   HISR_INT_1_STS      BIT(1) +#define   HISR_INT_2_STS      BIT(2) +#define   HISR_INT_3_STS      BIT(3) +#define   HISR_INT_4_STS      BIT(4) +#define   HISR_INT_5_STS      BIT(5) +#define   HISR_INT_6_STS      BIT(6) +#define   HISR_INT_7_STS      BIT(7) +#define   HISR_INT_STS_MSK \ +	(HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS) + +/* Host Interrupt Enable Register. Resides in PCI memory space. 
*/ +#define HIER_REG                         0x2064 +#define   HIER_INT_0_EN      BIT(0) +#define   HIER_INT_1_EN      BIT(1) +#define   HIER_INT_2_EN      BIT(2) +#define   HIER_INT_3_EN      BIT(3) +#define   HIER_INT_4_EN      BIT(4) +#define   HIER_INT_5_EN      BIT(5) +#define   HIER_INT_6_EN      BIT(6) +#define   HIER_INT_7_EN      BIT(7) + +#define   HIER_INT_EN_MSK \ +	 (HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN) + + +/* SEC Memory Space IPC output payload. + * + * This register is part of the output payload which SEC provides to host. + */ +#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG    0x20C0 + +/* SeC Interrupt Cause Register - Host Aliveness Request + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * The register is used by host to request SeC aliveness. + */ +#define SICR_HOST_ALIVENESS_REQ_REG      0x214C +#define   SICR_HOST_ALIVENESS_REQ_REQUESTED    BIT(0) + + +/* SeC Interrupt Cause Register - Host IPC Readiness + * + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * This register is used by the host's SeC driver uses in order + * to synchronize with SeC about IPC interface state. + */ +#define SICR_HOST_IPC_READINESS_REQ_REG  0x2150 + + +#define SICR_HOST_IPC_READINESS_HOST_RDY  BIT(0) +#define SICR_HOST_IPC_READINESS_SEC_RDY   BIT(1) +#define SICR_HOST_IPC_READINESS_SYS_RDY     \ +	(SICR_HOST_IPC_READINESS_HOST_RDY | \ +	 SICR_HOST_IPC_READINESS_SEC_RDY) +#define SICR_HOST_IPC_READINESS_RDY_CLR   BIT(2) + +/* SeC Interrupt Cause Register - SeC IPC Output Status + * + * This register indicates whether or not processing of the most recent + * command has been completed by the Host. + * New commands and payloads should not be written by SeC until this + * register indicates that the previous command has been processed. 
+ */ +#define SICR_SEC_IPC_OUTPUT_STATUS_REG   0x2154 +#  define SEC_IPC_OUTPUT_STATUS_RDY BIT(0) + + + +/*  MEI IPC Message payload size 64 bytes */ +#define PAYLOAD_SIZE        64 + +/* MAX size for SATT range 32MB */ +#define SATT_RANGE_MAX     (32 << 20) + + +#endif /* _MEI_HW_TXE_REGS_H_ */ + diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c new file mode 100644 index 00000000000..93273783dec --- /dev/null +++ b/drivers/misc/mei/hw-txe.c @@ -0,0 +1,1190 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
 *
 */

#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/irqreturn.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"
#include "client.h"
#include "hbm.h"

/**
 * mei_txe_reg_read - Reads 32bit data from the device
 *
 * @base_addr: registers base address
 * @offset: register offset
 *
 */
static inline u32 mei_txe_reg_read(void __iomem *base_addr,
					unsigned long offset)
{
	return ioread32(base_addr + offset);
}

/**
 * mei_txe_reg_write - Writes 32bit data to the device
 *
 * @base_addr: registers base address
 * @offset: register offset
 * @value: the value to write
 */
static inline void mei_txe_reg_write(void __iomem *base_addr,
				unsigned long offset, u32 value)
{
	iowrite32(value, base_addr + offset);
}

/**
 * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 *
 * Doesn't check for aliveness while reading 32bit data from the SeC BAR;
 * use only where a read is known to be safe without aliveness asserted.
 */
static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
				unsigned long offset)
{
	return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
}

/**
 * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 *
 * Reads 32bit data from the SeC BAR and shout loud if aliveness is not set
 */
static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
				unsigned long offset)
{
	WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
	return mei_txe_sec_reg_read_silent(hw, offset);
}
/**
 * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
 *   doesn't check for aliveness
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 * @value: value to write
 *
 * Doesn't check for aliveness while writing 32bit data to the SeC BAR
 */
static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
				unsigned long offset, u32 value)
{
	mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
}

/**
 * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
 *
 * @hw: the txe hardware structure
 * @offset: register offset
 * @value: value to write
 *
 * Writes 32bit data to the SeC BAR and shout loud if aliveness is not set
 */
static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
				unsigned long offset, u32 value)
{
	WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
	mei_txe_sec_reg_write_silent(hw, offset, value);
}
/**
 * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
 *
 * @hw: the device structure
 * @offset: offset from which to read the data
 *
 */
static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
				unsigned long offset)
{
	return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
}

/**
 * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
 *
 * @hw: the device structure
 * @offset: offset to which to write the data
 * @value: the value to write
 */
static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
				unsigned long offset, u32 value)
{
	mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
}

/**
 * mei_txe_aliveness_set - request for aliveness change
 *
 * @dev: the device structure
 * @req: requested aliveness value
 *
 * Request for aliveness change and returns true if the change is
 *   really needed and false if aliveness is already
 *   in the requested state
 * Requires device lock to be held
 */
static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
{

	struct mei_txe_hw *hw = to_txe_hw(dev);
	bool do_req = hw->aliveness != req;

	dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n",
				hw->aliveness, req);
	if (do_req) {
		/* mark a request in flight; the IRQ path flips this to
		 * MEI_PG_EVENT_RECEIVED when SeC acknowledges */
		dev->pg_event = MEI_PG_EVENT_WAIT;
		mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
	}
	return do_req;
}


/**
 *
mei_txe_aliveness_req_get - get aliveness requested register value
 *
 * @dev: the device structure
 *
 * Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit from
 * the SICR_HOST_ALIVENESS_REQ register value
 */
static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg;
	reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
	return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
}

/**
 * mei_txe_aliveness_get - get aliveness response register value
 * @dev: the device structure
 *
 * Extract HICR_HOST_ALIVENESS_RESP_ACK bit
 * from HICR_HOST_ALIVENESS_RESP register value
 */
static u32 mei_txe_aliveness_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg;
	reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
	return reg & HICR_HOST_ALIVENESS_RESP_ACK;
}

/**
 * mei_txe_aliveness_poll - waits for aliveness to settle
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set.
 * The device lock is dropped around each sleep interval.
 * returns > 0 if the expected value was received, -ETIME otherwise
 */
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	int t = 0;

	do {
		hw->aliveness = mei_txe_aliveness_get(dev);
		if (hw->aliveness == expected) {
			dev->pg_event = MEI_PG_EVENT_IDLE;
			dev_dbg(&dev->pdev->dev,
				"aliveness settled after %d msecs\n", t);
			return t;
		}
		/* sleep in 200ms steps; must not hold the lock while sleeping */
		mutex_unlock(&dev->device_lock);
		msleep(MSEC_PER_SEC / 5);
		mutex_lock(&dev->device_lock);
		t += MSEC_PER_SEC / 5;
	} while (t < SEC_ALIVENESS_WAIT_TIMEOUT);

	dev->pg_event = MEI_PG_EVENT_IDLE;
	dev_err(&dev->pdev->dev, "aliveness timed out\n");
	return -ETIME;
}

/**
 * mei_txe_aliveness_wait - waits for aliveness to settle
 *
 * @dev: the device structure
 * @expected: expected aliveness value
 *
 * Waits (interrupt driven) for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP
 * to be set; the device lock is dropped for the duration of the wait.
 * returns 0 on success and < 0 otherwise
 */
static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	const unsigned long timeout =
			msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
	long err;
	int ret;

	hw->aliveness = mei_txe_aliveness_get(dev);
	if (hw->aliveness == expected)
		return 0;

	mutex_unlock(&dev->device_lock);
	err = wait_event_timeout(hw->wait_aliveness_resp,
			dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	/* re-read the register; the IRQ thread may have updated it */
	hw->aliveness = mei_txe_aliveness_get(dev);
	ret = hw->aliveness == expected ? 0 : -ETIME;

	if (ret)
		dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
			err, hw->aliveness, dev->pg_event);
	else
		dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
			jiffies_to_msecs(timeout - err),
			hw->aliveness, dev->pg_event);

	dev->pg_event = MEI_PG_EVENT_IDLE;
	return ret;
}

/**
 * mei_txe_aliveness_set_sync - sets aliveness and waits for it to complete
 *
 * @dev: the device structure
 * @req: requested aliveness value
 *
 * returns 0 on success and < 0 otherwise
 */
int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
{
	if (mei_txe_aliveness_set(dev, req))
		return mei_txe_aliveness_wait(dev, req);
	return 0;
}

/**
 * mei_txe_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * returns: true is pg supported, false otherwise
 */
static bool mei_txe_pg_is_enabled(struct mei_device *dev)
{
	return true;
}

/**
 * mei_txe_pg_state  - translate aliveness register value
 *   to the mei power gating state
 *
 * @dev: the device structure
 *
 * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
 */
static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	return hw->aliveness ?
MEI_PG_OFF : MEI_PG_ON;
}

/**
 * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
 *
 * @dev: the device structure
 */
static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hintmsk;
	/* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
	hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
	hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
	mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
}

/**
 * mei_txe_input_doorbell_set
 *   - Sets bit 0 in SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL.
 * @hw: the txe hardware structure
 */
static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
{
	/* Clear the interrupt cause */
	clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
}

/**
 * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
 *
 * @hw: the txe hardware structure
 */
static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
{
	mei_txe_br_reg_write(hw,
			SICR_SEC_IPC_OUTPUT_STATUS_REG,
			SEC_IPC_OUTPUT_STATUS_RDY);
}

/**
 * mei_txe_is_input_ready - check if TXE is ready for receiving data
 *
 * @dev: the device structure
 */
static bool mei_txe_is_input_ready(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 status;
	status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
	return !!(SEC_IPC_INPUT_STATUS_RDY & status);
}

/**
 * mei_txe_intr_clear - clear all interrupts
 *
 * @dev: the device structure
 */
static inline void mei_txe_intr_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
		SEC_IPC_HOST_INT_STATUS_PENDING);
	mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
	mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
}

/**
 * mei_txe_intr_disable - disable all interrupts
 *
 * @dev: the device structure
 */
static void mei_txe_intr_disable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	mei_txe_br_reg_write(hw, HHIER_REG, 0);
	mei_txe_br_reg_write(hw, HIER_REG, 0);
}
/**
 * mei_txe_intr_enable - enable all interrupts
 *
 * @dev: the device structure
 */
static void mei_txe_intr_enable(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
	mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
}

/**
 * mei_txe_pending_interrupts - check if there are pending interrupts
 *	only Aliveness, Input ready, and output doorbell are of relevance
 *
 * @dev: the device structure
 *
 * Checks if there are pending interrupts
 * only Aliveness, Readiness, Input ready, and Output doorbell are relevant
 */
static bool mei_txe_pending_interrupts(struct mei_device *dev)
{

	struct mei_txe_hw *hw = to_txe_hw(dev);
	bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
				      TXE_INTR_ALIVENESS |
				      TXE_INTR_IN_READY  |
				      TXE_INTR_OUT_DB));

	if (ret) {
		dev_dbg(&dev->pdev->dev,
			"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
			!!(hw->intr_cause & TXE_INTR_IN_READY),
			!!(hw->intr_cause & TXE_INTR_READINESS),
			!!(hw->intr_cause & TXE_INTR_ALIVENESS),
			!!(hw->intr_cause & TXE_INTR_OUT_DB));
	}
	return ret;
}

/**
 * mei_txe_input_payload_write - write a dword to the host buffer
 *	at offset idx
 *
 * @dev: the device structure
 * @idx: index in the host buffer
 * @value: value
 */
static void mei_txe_input_payload_write(struct mei_device *dev,
			unsigned long idx, u32 value)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
			(idx * sizeof(u32)), value);
}

/**
 * mei_txe_out_data_read - read dword from the device buffer
 *	at offset idx
 *
 * @dev: the device structure
 * @idx: index in the device buffer
 *
 * returns register value at index
 */
static u32 mei_txe_out_data_read(const struct mei_device *dev,
					unsigned long idx)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	return mei_txe_br_reg_read(hw,
		BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
}

/* Readiness */

/**
 * mei_txe_readiness_set_host_rdy - set host readiness bit
 *
 * @dev: the device structure
 */
static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	mei_txe_br_reg_write(hw,
		SICR_HOST_IPC_READINESS_REQ_REG,
		SICR_HOST_IPC_READINESS_HOST_RDY);
}

/**
 * mei_txe_readiness_clear - clear host readiness bit
 *
 * @dev: the device structure
 */
static void mei_txe_readiness_clear(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
				SICR_HOST_IPC_READINESS_RDY_CLR);
}
/**
 * mei_txe_readiness_get - Reads and returns
 *	the HICR_SEC_IPC_READINESS register value
 *
 * @dev: the device structure
 */
static u32 mei_txe_readiness_get(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
}


/**
 * mei_txe_readiness_is_sec_rdy - check readiness
 *  for HICR_SEC_IPC_READINESS_SEC_RDY
 *
 * @readiness - cached readiness state
 */
static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
{
	return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
}

/**
 * mei_txe_hw_is_ready - check if the hw is ready
 *
 * @dev: the device structure
 */
static bool mei_txe_hw_is_ready(struct mei_device *dev)
{
	u32 readiness =  mei_txe_readiness_get(dev);
	return mei_txe_readiness_is_sec_rdy(readiness);
}

/**
 * mei_txe_host_is_ready - check if the host is ready
 *
 * @dev: the device structure
 */
static inline bool mei_txe_host_is_ready(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
	return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
}

/**
 * mei_txe_readiness_wait - wait till readiness settles
 *
 * @dev: the device structure
 *
 * The device lock is dropped for the duration of the wait.
 * returns 0 on success and -ETIME on timeout
 */
static int mei_txe_readiness_wait(struct mei_device *dev)
{
	if (mei_txe_hw_is_ready(dev))
		return 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
			msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(&dev->pdev->dev, "wait for readiness failed\n");
		return -ETIME;
	}

	dev->recvd_hw_ready = false;
	return 0;
}

/**
 *  mei_txe_hw_config - configure hardware at the start of the devices
 *
 * @dev: the device structure
 *
 * Configure hardware at the start of the device should be done only
 *   once at the device probe time
 */
static void mei_txe_hw_config(struct mei_device *dev)
{

	struct mei_txe_hw *hw = to_txe_hw(dev);
	/* Doesn't change in runtime */
	dev->hbuf_depth = PAYLOAD_SIZE / 4;

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
		hw->aliveness, hw->readiness);
}


/**
 * mei_txe_write - writes a message to device.
 *
 * @dev: the device structure
 * @header: header of message
 * @buf: message buffer will be written
 *
 * returns 0 on success, a negative error code otherwise.
+ */ + +static int mei_txe_write(struct mei_device *dev, +		struct mei_msg_hdr *header, unsigned char *buf) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	unsigned long rem; +	unsigned long length; +	int slots = dev->hbuf_depth; +	u32 *reg_buf = (u32 *)buf; +	u32 dw_cnt; +	int i; + +	if (WARN_ON(!header || !buf)) +		return -EINVAL; + +	length = header->length; + +	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + +	dw_cnt = mei_data2slots(length); +	if (dw_cnt > slots) +		return -EMSGSIZE; + +	if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n")) +		return -EAGAIN; + +	/* Enable Input Ready Interrupt. */ +	mei_txe_input_ready_interrupt_enable(dev); + +	if (!mei_txe_is_input_ready(dev)) { +		struct mei_fw_status fw_status; +		mei_fw_status(dev, &fw_status); +		dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n", +			FW_STS_PRM(fw_status)); +		return -EAGAIN; +	} + +	mei_txe_input_payload_write(dev, 0, *((u32 *)header)); + +	for (i = 0; i < length / 4; i++) +		mei_txe_input_payload_write(dev, i + 1, reg_buf[i]); + +	rem = length & 0x3; +	if (rem > 0) { +		u32 reg = 0; +		memcpy(®, &buf[length - rem], rem); +		mei_txe_input_payload_write(dev, i + 1, reg); +	} + +	/* after each write the whole buffer is consumed */ +	hw->slots = 0; + +	/* Set Input-Doorbell */ +	mei_txe_input_doorbell_set(hw); + +	return 0; +} + +/** + * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * returns the PAYLOAD_SIZE - 4 + */ +static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) +{ +	return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr); +} + +/** + * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * returns always hbuf_depth + */ +static int mei_txe_hbuf_empty_slots(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	return hw->slots; +} + +/** + * mei_txe_count_full_read_slots - mimics the me device circular buffer + * + * 
@dev: the device structure + * + * returns always buffer size in dwords count + */ +static int mei_txe_count_full_read_slots(struct mei_device *dev) +{ +	/* read buffers has static size */ +	return  PAYLOAD_SIZE / 4; +} + +/** + * mei_txe_read_hdr - read message header which is always in 4 first bytes + * + * @dev: the device structure + * + * returns mei message header + */ + +static u32 mei_txe_read_hdr(const struct mei_device *dev) +{ +	return mei_txe_out_data_read(dev, 0); +} +/** + * mei_txe_read - reads a message from the txe device. + * + * @dev: the device structure + * @buf: message buffer will be written + * @len: message size will be read + * + * returns -EINVAL on error wrong argument and 0 on success + */ +static int mei_txe_read(struct mei_device *dev, +		unsigned char *buf, unsigned long len) +{ + +	struct mei_txe_hw *hw = to_txe_hw(dev); +	u32 i; +	u32 *reg_buf = (u32 *)buf; +	u32 rem = len & 0x3; + +	if (WARN_ON(!buf || !len)) +		return -EINVAL; + +	dev_dbg(&dev->pdev->dev, +		"buffer-length = %lu buf[0]0x%08X\n", +		len, mei_txe_out_data_read(dev, 0)); + +	for (i = 0; i < len / 4; i++) { +		/* skip header: index starts from 1 */ +		u32 reg = mei_txe_out_data_read(dev, i + 1); +		dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg); +		*reg_buf++ = reg; +	} + +	if (rem) { +		u32 reg = mei_txe_out_data_read(dev, i + 1); +		memcpy(reg_buf, ®, rem); +	} + +	mei_txe_output_ready_set(hw); +	return 0; +} + +/** + * mei_txe_hw_reset - resets host and fw. + * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. 
+ * + * returns 0 on success and < 0 in case of error + */ +static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); + +	u32 aliveness_req; +	/* +	 * read input doorbell to ensure consistency between  Bridge and SeC +	 * return value might be garbage return +	 */ +	(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG); + +	aliveness_req = mei_txe_aliveness_req_get(dev); +	hw->aliveness = mei_txe_aliveness_get(dev); + +	/* Disable interrupts in this stage we will poll */ +	mei_txe_intr_disable(dev); + +	/* +	 * If Aliveness Request and Aliveness Response are not equal then +	 * wait for them to be equal +	 * Since we might have interrupts disabled - poll for it +	 */ +	if (aliveness_req != hw->aliveness) +		if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) { +			dev_err(&dev->pdev->dev, +				"wait for aliveness settle failed ... bailing out\n"); +			return -EIO; +		} + +	/* +	 * If Aliveness Request and Aliveness Response are set then clear them +	 */ +	if (aliveness_req) { +		mei_txe_aliveness_set(dev, 0); +		if (mei_txe_aliveness_poll(dev, 0) < 0) { +			dev_err(&dev->pdev->dev, +				"wait for aliveness failed ... bailing out\n"); +			return -EIO; +		} +	} + +	/* +	 * Set rediness RDY_CLR bit +	 */ +	mei_txe_readiness_clear(dev); + +	return 0; +} + +/** + * mei_txe_hw_start - start the hardware after reset + * + * @dev: the device structure + * + * returns 0 on success and < 0 in case of error + */ +static int mei_txe_hw_start(struct mei_device *dev) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); +	int ret; + +	u32 hisr; + +	/* bring back interrupts */ +	mei_txe_intr_enable(dev); + +	ret = mei_txe_readiness_wait(dev); +	if (ret < 0) { +		dev_err(&dev->pdev->dev, "wating for readiness failed\n"); +		return ret; +	} + +	/* +	 * If HISR.INT2_STS interrupt status bit is set then clear it. 
	 */
	hisr = mei_txe_br_reg_read(hw, HISR_REG);
	if (hisr & HISR_INT_2_STS)
		mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);

	/* Clear the interrupt cause of OutputDoorbell */
	clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);

	ret = mei_txe_aliveness_set_sync(dev, 1);
	if (ret < 0) {
		dev_err(&dev->pdev->dev, "wait for aliveness failed ... bailing out\n");
		return ret;
	}

	/* enable input ready interrupts:
	 * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
	 */
	mei_txe_input_ready_interrupt_enable(dev);


	/*  Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
	mei_txe_output_ready_set(hw);

	/* Set bit SICR_HOST_IPC_READINESS.HOST_RDY
	 */
	mei_txe_readiness_set_host_rdy(dev);

	return 0;
}

/**
 * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
 *  single bit mask and acknowledge the interrupts
 *
 * @dev: the device structure
 * @do_ack: acknowledge interrupts
 */
static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	u32 hisr;
	u32 hhisr;
	u32 ipc_isr;
	u32 aliveness;
	bool generated;

	/* read interrupt registers */
	hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
	generated = (hhisr & IPC_HHIER_MSK);
	if (!generated)
		goto out;

	hisr = mei_txe_br_reg_read(hw, HISR_REG);

	/* the SeC BAR may only be touched when aliveness is asserted */
	aliveness = mei_txe_aliveness_get(dev);
	if (hhisr & IPC_HHIER_SEC && aliveness)
		ipc_isr = mei_txe_sec_reg_read_silent(hw,
				SEC_IPC_HOST_INT_STATUS_REG);
	else
		ipc_isr = 0;

	generated = generated ||
		(hisr & HISR_INT_STS_MSK) ||
		(ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);

	if (generated && do_ack) {
		/* Save the interrupt causes */
		hw->intr_cause |= hisr & HISR_INT_STS_MSK;
		if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
			hw->intr_cause |= TXE_INTR_IN_READY;


		mei_txe_intr_disable(dev);
		/* Clear the interrupts in hierarchy:
		 * IPC and Bridge, then the High Level */
		mei_txe_sec_reg_write_silent(hw,
			SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
		mei_txe_br_reg_write(hw, HISR_REG, hisr);
		mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
	}

out:
	return generated;
}

/**
 * mei_txe_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 */
irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = dev_id;

	if (mei_txe_check_and_ack_intrs(dev, true))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}


/**
 * mei_txe_irq_thread_handler - txe interrupt thread
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * returns irqreturn_t
 *
 */
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_txe_hw *hw = to_txe_hw(dev);
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
		mei_txe_br_reg_read(hw, HHISR_REG),
		mei_txe_br_reg_read(hw, HISR_REG),
		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));


	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	if (pci_dev_msi_enabled(dev->pdev))
		mei_txe_check_and_ack_intrs(dev, true);

	/* show irq events */
	mei_txe_pending_interrupts(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	/* Readiness:
	 * Detection of TXE driver going through reset
	 * or TXE driver resetting the HECI interface.
	 */
	if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
		dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n");

		/* Check if SeC is going through reset */
		if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
		} else {
			dev->recvd_hw_ready = false;
			if (dev->dev_state != MEI_DEV_RESETTING) {

				dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
				schedule_work(&dev->reset_work);
				goto end;

			}
		}
		wake_up(&dev->wait_hw_ready);
	}

	/************************************************************/
	/* Check interrupt cause:
	 * Aliveness: Detection of SeC acknowledge of host request that
	 * it remain alive or host cancellation of that request.
	 */

	if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
		/* Clear the interrupt cause */
		dev_dbg(&dev->pdev->dev,
			"Aliveness Interrupt: Status: %d\n", hw->aliveness);
		dev->pg_event = MEI_PG_EVENT_RECEIVED;
		if (waitqueue_active(&hw->wait_aliveness_resp))
			wake_up(&hw->wait_aliveness_resp);
	}


	/* Output Doorbell:
	 * Detection of SeC having sent output to host
	 */
	slots = mei_count_full_read_slots(dev);
	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
		/* Read from TXE */
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(&dev->pdev->dev,
				"mei_irq_read_handler ret = %d.\n", rets);

			schedule_work(&dev->reset_work);
			goto end;
		}
	}
	/* Input Ready: Detection if host can write to SeC */
	if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
		dev->hbuf_is_ready = true;
		hw->slots = dev->hbuf_depth;
	}

	if (hw->aliveness && dev->hbuf_is_ready) {
		/* get the real register value */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
		rets = mei_irq_write_handler(dev, &complete_list);
		if (rets && rets != -EMSGSIZE)
			dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n",
				rets);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);

	mutex_unlock(&dev->device_lock);

	mei_enable_interrupts(dev);
	return IRQ_HANDLED;
}


/**
 * mei_txe_fw_status - retrieve fw status from the pci config space
 *
 * @dev: the device structure
 * @fw_status: fw status registers storage
 *
 * returns: 0 on success an error code otherwise
 */
static int mei_txe_fw_status(struct mei_device *dev,
			     struct mei_fw_status *fw_status)
{
	const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1};
	int i;

	if (!fw_status)
		return -EINVAL;

	fw_status->count = 2;

	for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
		int ret;
		ret = pci_read_config_dword(dev->pdev,
				pci_cfg_reg[i], &fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/* hw ops vtable wiring the TXE implementation into the generic MEI core */
static const struct mei_hw_ops mei_txe_hw_ops = {

	.fw_status = mei_txe_fw_status,
	.host_is_ready = mei_txe_host_is_ready,

	.pg_state = mei_txe_pg_state,

	.hw_is_ready = mei_txe_hw_is_ready,
	.hw_reset = mei_txe_hw_reset,
	.hw_config = mei_txe_hw_config,
	.hw_start = mei_txe_hw_start,

	.pg_is_enabled = mei_txe_pg_is_enabled,

	.intr_clear = mei_txe_intr_clear,
	.intr_enable = mei_txe_intr_enable,
	.intr_disable = mei_txe_intr_disable,

	.hbuf_free_slots = mei_txe_hbuf_empty_slots,
	.hbuf_is_ready = mei_txe_is_input_ready,
	.hbuf_max_len = mei_txe_hbuf_max_len,

	.write = mei_txe_write,

	.rdbuf_full_slots = mei_txe_count_full_read_slots,
	.read_hdr = mei_txe_read_hdr,

	.read = mei_txe_read,

};

#define MEI_CFG_TXE_FW_STS                            \
	.fw_status.count = 2,                         \
	.fw_status.status[0] = PCI_CFG_TXE_FW_STS0,   \
	.fw_status.status[1] = \
PCI_CFG_TXE_FW_STS1 + +const struct mei_cfg mei_txe_cfg = { +	MEI_CFG_TXE_FW_STS, +}; + + +/** + * mei_txe_dev_init - allocates and initializes txe hardware specific structure + * + * @pdev - pci device + * @cfg - per device generation config + * + * returns struct mei_device * on success or NULL; + * + */ +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, +				    const struct mei_cfg *cfg) +{ +	struct mei_device *dev; +	struct mei_txe_hw *hw; + +	dev = kzalloc(sizeof(struct mei_device) + +			 sizeof(struct mei_txe_hw), GFP_KERNEL); +	if (!dev) +		return NULL; + +	mei_device_init(dev, cfg); + +	hw = to_txe_hw(dev); + +	init_waitqueue_head(&hw->wait_aliveness_resp); + +	dev->ops = &mei_txe_hw_ops; + +	dev->pdev = pdev; +	return dev; +} + +/** + * mei_txe_setup_satt2 - SATT2 configuration for DMA support. + * + * @dev:   the device structure + * @addr:  physical address start of the range + * @range: physical range size + */ +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) +{ +	struct mei_txe_hw *hw = to_txe_hw(dev); + +	u32 lo32 = lower_32_bits(addr); +	u32 hi32 = upper_32_bits(addr); +	u32 ctrl; + +	/* SATT is limited to 36 Bits */ +	if (hi32 & ~0xF) +		return -EINVAL; + +	/* SATT has to be 16Byte aligned */ +	if (lo32 & 0xF) +		return -EINVAL; + +	/* SATT range has to be 4Bytes aligned */ +	if (range & 0x4) +		return -EINVAL; + +	/* SATT is limited to 32 MB range*/ +	if (range > SATT_RANGE_MAX) +		return -EINVAL; + +	ctrl = SATT2_CTRL_VALID_MSK; +	ctrl |= hi32  << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT; + +	mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range); +	mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32); +	mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl); +	dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n", +		range, lo32, ctrl); + +	return 0; +} diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h new file mode 100644 index 00000000000..e244af79167 --- 
/dev/null +++ b/drivers/misc/mei/hw-txe.h @@ -0,0 +1,77 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_HW_TXE_H_ +#define _MEI_HW_TXE_H_ + +#include <linux/irqreturn.h> + +#include "hw.h" +#include "hw-txe-regs.h" + +#define MEI_TXI_RPM_TIMEOUT    500 /* ms */ + +/* Flatten Hierarchy interrupt cause */ +#define TXE_INTR_READINESS_BIT  0 /* HISR_INT_0_STS */ +#define TXE_INTR_READINESS      HISR_INT_0_STS +#define TXE_INTR_ALIVENESS_BIT  1 /* HISR_INT_1_STS */ +#define TXE_INTR_ALIVENESS      HISR_INT_1_STS +#define TXE_INTR_OUT_DB_BIT     2 /* HISR_INT_2_STS */ +#define TXE_INTR_OUT_DB         HISR_INT_2_STS +#define TXE_INTR_IN_READY_BIT   8 /* beyond HISR */ +#define TXE_INTR_IN_READY       BIT(8) + +/** + * struct mei_txe_hw - txe hardware specifics + * + * @mem_addr:            SeC and BRIDGE bars + * @aliveness:           aliveness (power gating) state of the hardware + * @readiness:           readiness state of the hardware + * @wait_aliveness_resp: aliveness wait queue + * @intr_cause:          translated interrupt cause + */ +struct mei_txe_hw { +	void __iomem *mem_addr[NUM_OF_MEM_BARS]; +	u32 aliveness; +	u32 readiness; +	u32 slots; + +	wait_queue_head_t wait_aliveness_resp; + +	unsigned long intr_cause; +}; + +#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw) + +static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw) +{ +	return container_of((void *)hw, struct mei_device, hw); +} + 
+extern const struct mei_cfg mei_txe_cfg; + +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, +	const struct mei_cfg *cfg); + +irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); + +int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req); + +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range); + + +#endif /* _MEI_HW_TXE_H_ */ diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h new file mode 100644 index 00000000000..dd448e58cc8 --- /dev/null +++ b/drivers/misc/mei/hw.h @@ -0,0 +1,274 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + +#ifndef _MEI_HW_TYPES_H_ +#define _MEI_HW_TYPES_H_ + +#include <linux/uuid.h> + +/* + * Timeouts in Seconds + */ +#define MEI_HW_READY_TIMEOUT        2  /* Timeout on ready message */ +#define MEI_CONNECT_TIMEOUT         3  /* HPS: at least 2 seconds */ + +#define MEI_CL_CONNECT_TIMEOUT     15  /* HPS: Client Connect Timeout */ +#define MEI_CLIENTS_INIT_TIMEOUT   15  /* HPS: Clients Enumeration Timeout */ + +#define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */ +#define MEI_IAMTHIF_READ_TIMER     10  /* HPS */ + +#define MEI_PGI_TIMEOUT            1  /* PG Isolation time response 1 sec */ +#define MEI_HBM_TIMEOUT            1   /* 1 second */ + +/* + * MEI Version + */ +#define HBM_MINOR_VERSION                   1 +#define HBM_MAJOR_VERSION                   1 + +/* + * MEI version with PGI support + */ +#define HBM_MINOR_VERSION_PGI               1 +#define HBM_MAJOR_VERSION_PGI               1 + +/* Host bus message command opcode */ +#define MEI_HBM_CMD_OP_MSK                  0x7f +/* Host bus message command RESPONSE */ +#define MEI_HBM_CMD_RES_MSK                 0x80 + +/* + * MEI Bus Message Command IDs + */ +#define HOST_START_REQ_CMD                  0x01 +#define HOST_START_RES_CMD                  0x81 + +#define HOST_STOP_REQ_CMD                   0x02 +#define HOST_STOP_RES_CMD                   0x82 + +#define ME_STOP_REQ_CMD                     0x03 + +#define HOST_ENUM_REQ_CMD                   0x04 +#define HOST_ENUM_RES_CMD                   0x84 + +#define HOST_CLIENT_PROPERTIES_REQ_CMD      0x05 +#define HOST_CLIENT_PROPERTIES_RES_CMD      0x85 + +#define CLIENT_CONNECT_REQ_CMD              0x06 +#define CLIENT_CONNECT_RES_CMD              0x86 + +#define CLIENT_DISCONNECT_REQ_CMD           0x07 +#define CLIENT_DISCONNECT_RES_CMD           0x87 + +#define MEI_FLOW_CONTROL_CMD                0x08 + +#define MEI_PG_ISOLATION_ENTRY_REQ_CMD      0x0a +#define MEI_PG_ISOLATION_ENTRY_RES_CMD      0x8a +#define MEI_PG_ISOLATION_EXIT_REQ_CMD    
   0x0b +#define MEI_PG_ISOLATION_EXIT_RES_CMD       0x8b + +/* + * MEI Stop Reason + * used by hbm_host_stop_request.reason + */ +enum mei_stop_reason_types { +	DRIVER_STOP_REQUEST = 0x00, +	DEVICE_D1_ENTRY = 0x01, +	DEVICE_D2_ENTRY = 0x02, +	DEVICE_D3_ENTRY = 0x03, +	SYSTEM_S1_ENTRY = 0x04, +	SYSTEM_S2_ENTRY = 0x05, +	SYSTEM_S3_ENTRY = 0x06, +	SYSTEM_S4_ENTRY = 0x07, +	SYSTEM_S5_ENTRY = 0x08 +}; + +/* + * Client Connect Status + * used by hbm_client_connect_response.status + */ +enum mei_cl_connect_status { +	MEI_CL_CONN_SUCCESS          = 0x00, +	MEI_CL_CONN_NOT_FOUND        = 0x01, +	MEI_CL_CONN_ALREADY_STARTED  = 0x02, +	MEI_CL_CONN_OUT_OF_RESOURCES = 0x03, +	MEI_CL_CONN_MESSAGE_SMALL    = 0x04 +}; + +/* + * Client Disconnect Status + */ +enum  mei_cl_disconnect_status { +	MEI_CL_DISCONN_SUCCESS = 0x00 +}; + +/* + *  MEI BUS Interface Section + */ +struct mei_msg_hdr { +	u32 me_addr:8; +	u32 host_addr:8; +	u32 length:9; +	u32 reserved:5; +	u32 internal:1; +	u32 msg_complete:1; +} __packed; + + +struct mei_bus_message { +	u8 hbm_cmd; +	u8 data[0]; +} __packed; + +/** + * struct hbm_cl_cmd - client specific host bus command + *	CONNECT, DISCONNECT, and FlOW CONTROL + * + * @hbm_cmd - bus message command header + * @me_addr - address of the client in ME + * @host_addr - address of the client in the driver + * @data + */ +struct mei_hbm_cl_cmd { +	u8 hbm_cmd; +	u8 me_addr; +	u8 host_addr; +	u8 data; +}; + +struct hbm_version { +	u8 minor_version; +	u8 major_version; +} __packed; + +struct hbm_host_version_request { +	u8 hbm_cmd; +	u8 reserved; +	struct hbm_version host_version; +} __packed; + +struct hbm_host_version_response { +	u8 hbm_cmd; +	u8 host_version_supported; +	struct hbm_version me_max_version; +} __packed; + +struct hbm_host_stop_request { +	u8 hbm_cmd; +	u8 reason; +	u8 reserved[2]; +} __packed; + +struct hbm_host_stop_response { +	u8 hbm_cmd; +	u8 reserved[3]; +} __packed; + +struct hbm_me_stop_request { +	u8 hbm_cmd; +	u8 reason; +	u8 reserved[2]; 
+} __packed; + +struct hbm_host_enum_request { +	u8 hbm_cmd; +	u8 reserved[3]; +} __packed; + +struct hbm_host_enum_response { +	u8 hbm_cmd; +	u8 reserved[3]; +	u8 valid_addresses[32]; +} __packed; + +struct mei_client_properties { +	uuid_le protocol_name; +	u8 protocol_version; +	u8 max_number_of_connections; +	u8 fixed_address; +	u8 single_recv_buf; +	u32 max_msg_length; +} __packed; + +struct hbm_props_request { +	u8 hbm_cmd; +	u8 address; +	u8 reserved[2]; +} __packed; + + +struct hbm_props_response { +	u8 hbm_cmd; +	u8 address; +	u8 status; +	u8 reserved[1]; +	struct mei_client_properties client_properties; +} __packed; + +/** + * struct hbm_power_gate - power gate request/response + * + * @hbm_cmd - bus message command header + * @reserved[3] + */ +struct hbm_power_gate { +	u8 hbm_cmd; +	u8 reserved[3]; +} __packed; + +/** + * struct hbm_client_connect_request - connect/disconnect request + * + * @hbm_cmd - bus message command header + * @me_addr - address of the client in ME + * @host_addr - address of the client in the driver + * @reserved + */ +struct hbm_client_connect_request { +	u8 hbm_cmd; +	u8 me_addr; +	u8 host_addr; +	u8 reserved; +} __packed; + +/** + * struct hbm_client_connect_response - connect/disconnect response + * + * @hbm_cmd - bus message command header + * @me_addr - address of the client in ME + * @host_addr - address of the client in the driver + * @status - status of the request + */ +struct hbm_client_connect_response { +	u8 hbm_cmd; +	u8 me_addr; +	u8 host_addr; +	u8 status; +} __packed; + + +#define MEI_FC_MESSAGE_RESERVED_LENGTH           5 + +struct hbm_flow_control { +	u8 hbm_cmd; +	u8 me_addr; +	u8 host_addr; +	u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH]; +} __packed; + + +#endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c new file mode 100644 index 00000000000..00692922248 --- /dev/null +++ b/drivers/misc/mei/init.c @@ -0,0 +1,395 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * 
Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + +#include <linux/export.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/delay.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +const char *mei_dev_state_str(int state) +{ +#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state +	switch (state) { +	MEI_DEV_STATE(INITIALIZING); +	MEI_DEV_STATE(INIT_CLIENTS); +	MEI_DEV_STATE(ENABLED); +	MEI_DEV_STATE(RESETTING); +	MEI_DEV_STATE(DISABLED); +	MEI_DEV_STATE(POWER_DOWN); +	MEI_DEV_STATE(POWER_UP); +	default: +		return "unknown"; +	} +#undef MEI_DEV_STATE +} + + +/** + * mei_cancel_work. Cancel mei background jobs + * + * @dev: the device structure + * + * returns 0 on success or < 0 if the reset hasn't succeeded + */ +void mei_cancel_work(struct mei_device *dev) +{ +	cancel_work_sync(&dev->init_work); +	cancel_work_sync(&dev->reset_work); + +	cancel_delayed_work(&dev->timer_work); +} +EXPORT_SYMBOL_GPL(mei_cancel_work); + +/** + * mei_reset - resets host and fw. 
+ * + * @dev: the device structure + */ +int mei_reset(struct mei_device *dev) +{ +	enum mei_dev_state state = dev->dev_state; +	bool interrupts_enabled; +	int ret; + +	if (state != MEI_DEV_INITIALIZING && +	    state != MEI_DEV_DISABLED && +	    state != MEI_DEV_POWER_DOWN && +	    state != MEI_DEV_POWER_UP) { +		struct mei_fw_status fw_status; +		mei_fw_status(dev, &fw_status); +		dev_warn(&dev->pdev->dev, +			"unexpected reset: dev_state = %s " FW_STS_FMT "\n", +			mei_dev_state_str(state), FW_STS_PRM(fw_status)); +	} + +	/* we're already in reset, cancel the init timer +	 * if the reset was called due the hbm protocol error +	 * we need to call it before hw start +	 * so the hbm watchdog won't kick in +	 */ +	mei_hbm_idle(dev); + +	/* enter reset flow */ +	interrupts_enabled = state != MEI_DEV_POWER_DOWN; +	dev->dev_state = MEI_DEV_RESETTING; + +	dev->reset_count++; +	if (dev->reset_count > MEI_MAX_CONSEC_RESET) { +		dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); +		dev->dev_state = MEI_DEV_DISABLED; +		return -ENODEV; +	} + +	ret = mei_hw_reset(dev, interrupts_enabled); +	/* fall through and remove the sw state even if hw reset has failed */ + +	/* no need to clean up software state in case of power up */ +	if (state != MEI_DEV_INITIALIZING && +	    state != MEI_DEV_POWER_UP) { + +		/* remove all waiting requests */ +		mei_cl_all_write_clear(dev); + +		mei_cl_all_disconnect(dev); + +		/* wake up all readers and writers so they can be interrupted */ +		mei_cl_all_wakeup(dev); + +		/* remove entry if already in list */ +		dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); +		mei_cl_unlink(&dev->wd_cl); +		mei_cl_unlink(&dev->iamthif_cl); +		mei_amthif_reset_params(dev); +	} + +	mei_hbm_reset(dev); + +	dev->rd_msg_hdr = 0; +	dev->wd_pending = false; + +	if (ret) { +		dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); +		return ret; +	} + +	if (state == MEI_DEV_POWER_DOWN) { +		
dev_dbg(&dev->pdev->dev, "powering down: end of reset\n"); +		dev->dev_state = MEI_DEV_DISABLED; +		return 0; +	} + +	ret = mei_hw_start(dev); +	if (ret) { +		dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); +		return ret; +	} + +	dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); + +	dev->dev_state = MEI_DEV_INIT_CLIENTS; +	ret = mei_hbm_start_req(dev); +	if (ret) { +		dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); +		dev->dev_state = MEI_DEV_RESETTING; +		return ret; +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(mei_reset); + +/** + * mei_start - initializes host and fw to start work. + * + * @dev: the device structure + * + * returns 0 on success, <0 on failure. + */ +int mei_start(struct mei_device *dev) +{ +	int ret; +	mutex_lock(&dev->device_lock); + +	/* acknowledge interrupt and stop interrupts */ +	mei_clear_interrupts(dev); + +	mei_hw_config(dev); + +	dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); + +	dev->reset_count = 0; +	do { +		dev->dev_state = MEI_DEV_INITIALIZING; +		ret = mei_reset(dev); + +		if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { +			dev_err(&dev->pdev->dev, "reset failed ret = %d", ret); +			goto err; +		} +	} while (ret); + +	/* we cannot start the device w/o hbm start message completed */ +	if (dev->dev_state == MEI_DEV_DISABLED) { +		dev_err(&dev->pdev->dev, "reset failed"); +		goto err; +	} + +	if (mei_hbm_start_wait(dev)) { +		dev_err(&dev->pdev->dev, "HBM haven't started"); +		goto err; +	} + +	if (!mei_host_is_ready(dev)) { +		dev_err(&dev->pdev->dev, "host is not ready.\n"); +		goto err; +	} + +	if (!mei_hw_is_ready(dev)) { +		dev_err(&dev->pdev->dev, "ME is not ready.\n"); +		goto err; +	} + +	if (!mei_hbm_version_is_supported(dev)) { +		dev_dbg(&dev->pdev->dev, "MEI start failed.\n"); +		goto err; +	} + +	dev_dbg(&dev->pdev->dev, "link layer has been established.\n"); + +	mutex_unlock(&dev->device_lock); +	return 0; +err: +	dev_err(&dev->pdev->dev, "link 
layer initialization failed.\n"); +	dev->dev_state = MEI_DEV_DISABLED; +	mutex_unlock(&dev->device_lock); +	return -ENODEV; +} +EXPORT_SYMBOL_GPL(mei_start); + +/** + * mei_restart - restart device after suspend + * + * @dev: the device structure + * + * returns 0 on success or -ENODEV if the restart hasn't succeeded + */ +int mei_restart(struct mei_device *dev) +{ +	int err; + +	mutex_lock(&dev->device_lock); + +	mei_clear_interrupts(dev); + +	dev->dev_state = MEI_DEV_POWER_UP; +	dev->reset_count = 0; + +	err = mei_reset(dev); + +	mutex_unlock(&dev->device_lock); + +	if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { +		dev_err(&dev->pdev->dev, "device disabled = %d\n", err); +		return -ENODEV; +	} + +	/* try to start again */ +	if (err) +		schedule_work(&dev->reset_work); + + +	return 0; +} +EXPORT_SYMBOL_GPL(mei_restart); + +static void mei_reset_work(struct work_struct *work) +{ +	struct mei_device *dev = +		container_of(work, struct mei_device,  reset_work); +	int ret; + +	mutex_lock(&dev->device_lock); + +	ret = mei_reset(dev); + +	mutex_unlock(&dev->device_lock); + +	if (dev->dev_state == MEI_DEV_DISABLED) { +		dev_err(&dev->pdev->dev, "device disabled = %d\n", ret); +		return; +	} + +	/* retry reset in case of failure */ +	if (ret) +		schedule_work(&dev->reset_work); +} + +void mei_stop(struct mei_device *dev) +{ +	dev_dbg(&dev->pdev->dev, "stopping the device.\n"); + +	mei_cancel_work(dev); + +	mei_nfc_host_exit(dev); + +	mei_cl_bus_remove_devices(dev); + +	mutex_lock(&dev->device_lock); + +	mei_wd_stop(dev); + +	dev->dev_state = MEI_DEV_POWER_DOWN; +	mei_reset(dev); + +	mutex_unlock(&dev->device_lock); + +	mei_watchdog_unregister(dev); +} +EXPORT_SYMBOL_GPL(mei_stop); + +/** + * mei_write_is_idle - check if the write queues are idle + * + * @dev: the device structure + * + * returns true of there is no pending write + */ +bool mei_write_is_idle(struct mei_device *dev) +{ +	bool idle = (dev->dev_state == MEI_DEV_ENABLED && +		
list_empty(&dev->ctrl_wr_list.list) && +		list_empty(&dev->write_list.list)); + +	dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", +		idle, +		mei_dev_state_str(dev->dev_state), +		list_empty(&dev->ctrl_wr_list.list), +		list_empty(&dev->write_list.list)); + +	return idle; +} +EXPORT_SYMBOL_GPL(mei_write_is_idle); + +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) +{ +	int i; +	const struct mei_fw_status *fw_src = &dev->cfg->fw_status; + +	if (!fw_status) +		return -EINVAL; + +	fw_status->count = fw_src->count; +	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { +		int ret; +		ret = pci_read_config_dword(dev->pdev, +			fw_src->status[i], &fw_status->status[i]); +		if (ret) +			return ret; +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(mei_fw_status); + +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg) +{ +	/* setup our list array */ +	INIT_LIST_HEAD(&dev->file_list); +	INIT_LIST_HEAD(&dev->device_list); +	mutex_init(&dev->device_lock); +	init_waitqueue_head(&dev->wait_hw_ready); +	init_waitqueue_head(&dev->wait_pg); +	init_waitqueue_head(&dev->wait_recvd_msg); +	init_waitqueue_head(&dev->wait_stop_wd); +	dev->dev_state = MEI_DEV_INITIALIZING; +	dev->reset_count = 0; + +	mei_io_list_init(&dev->read_list); +	mei_io_list_init(&dev->write_list); +	mei_io_list_init(&dev->write_waiting_list); +	mei_io_list_init(&dev->ctrl_wr_list); +	mei_io_list_init(&dev->ctrl_rd_list); + +	INIT_DELAYED_WORK(&dev->timer_work, mei_timer); +	INIT_WORK(&dev->init_work, mei_host_client_init); +	INIT_WORK(&dev->reset_work, mei_reset_work); + +	INIT_LIST_HEAD(&dev->wd_cl.link); +	INIT_LIST_HEAD(&dev->iamthif_cl.link); +	mei_io_list_init(&dev->amthif_cmd_list); +	mei_io_list_init(&dev->amthif_rd_complete_list); + +	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); +	dev->open_handle_count = 0; + +	/* +	 * Reserving the first client ID +	 * 0: Reserved for MEI Bus Message communications +	 */ +	
bitmap_set(dev->host_clients_map, 0, 1); + +	dev->pg_event = MEI_PG_EVENT_IDLE; +	dev->cfg      = cfg; +} +EXPORT_SYMBOL_GPL(mei_device_init); + diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c new file mode 100644 index 00000000000..4e3cba6da3f --- /dev/null +++ b/drivers/misc/mei/interrupt.c @@ -0,0 +1,659 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + + +#include <linux/export.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/fs.h> +#include <linux/jiffies.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + + +/** + * mei_irq_compl_handler - dispatch complete handlers + *	for the completed callbacks + * + * @dev - mei device + * @compl_list - list of completed cbs + */ +void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) +{ +	struct mei_cl_cb *cb, *next; +	struct mei_cl *cl; + +	list_for_each_entry_safe(cb, next, &compl_list->list, list) { +		cl = cb->cl; +		list_del(&cb->list); +		if (!cl) +			continue; + +		dev_dbg(&dev->pdev->dev, "completing call back.\n"); +		if (cl == &dev->iamthif_cl) +			mei_amthif_complete(dev, cb); +		else +			mei_cl_complete(cl, cb); +	} +} +EXPORT_SYMBOL_GPL(mei_irq_compl_handler); + +/** + * mei_cl_hbm_equal - check if hbm is addressed to the client + * + * @cl: host client + * @mei_hdr: header of mei client message + * + * returns true if matches, 
false otherwise + */ +static inline int mei_cl_hbm_equal(struct mei_cl *cl, +			struct mei_msg_hdr *mei_hdr) +{ +	return cl->host_client_id == mei_hdr->host_addr && +		cl->me_client_id == mei_hdr->me_addr; +} +/** + * mei_cl_is_reading - checks if the client +		is the one to read this message + * + * @cl: mei client + * @mei_hdr: header of mei message + * + * returns true on match and false otherwise + */ +static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr) +{ +	return mei_cl_hbm_equal(cl, mei_hdr) && +		cl->state == MEI_FILE_CONNECTED && +		cl->reading_state != MEI_READ_COMPLETE; +} + +/** + * mei_irq_read_client_message - process client message + * + * @dev: the device structure + * @mei_hdr: header of mei client message + * @complete_list: An instance of our list structure + * + * returns 0 on success, <0 on failure. + */ +static int mei_cl_irq_read_msg(struct mei_device *dev, +			       struct mei_msg_hdr *mei_hdr, +			       struct mei_cl_cb *complete_list) +{ +	struct mei_cl *cl; +	struct mei_cl_cb *cb, *next; +	unsigned char *buffer = NULL; + +	list_for_each_entry_safe(cb, next, &dev->read_list.list, list) { +		cl = cb->cl; +		if (!cl || !mei_cl_is_reading(cl, mei_hdr)) +			continue; + +		cl->reading_state = MEI_READING; + +		if (cb->response_buffer.size == 0 || +		    cb->response_buffer.data == NULL) { +			cl_err(dev, cl, "response buffer is not allocated.\n"); +			list_del(&cb->list); +			return -ENOMEM; +		} + +		if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) { +			cl_dbg(dev, cl, "message overflow. 
size %d len %d idx %ld\n", +				cb->response_buffer.size, +				mei_hdr->length, cb->buf_idx); +			buffer = krealloc(cb->response_buffer.data, +					  mei_hdr->length + cb->buf_idx, +					  GFP_KERNEL); + +			if (!buffer) { +				cl_err(dev, cl, "allocation failed.\n"); +				list_del(&cb->list); +				return -ENOMEM; +			} +			cb->response_buffer.data = buffer; +			cb->response_buffer.size = +				mei_hdr->length + cb->buf_idx; +		} + +		buffer = cb->response_buffer.data + cb->buf_idx; +		mei_read_slots(dev, buffer, mei_hdr->length); + +		cb->buf_idx += mei_hdr->length; +		if (mei_hdr->msg_complete) { +			cl->status = 0; +			list_del(&cb->list); +			cl_dbg(dev, cl, "completed read length = %lu\n", +				cb->buf_idx); +			list_add_tail(&cb->list, &complete_list->list); +		} +		break; +	} + +	dev_dbg(&dev->pdev->dev, "message read\n"); +	if (!buffer) { +		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); +		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n", +				MEI_HDR_PRM(mei_hdr)); +	} + +	return 0; +} + +/** + * mei_cl_irq_disconnect_rsp - send disconnection response message + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, +				     struct mei_cl_cb *cmpl_list) +{ +	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; +	int ret; + +	slots = mei_hbuf_empty_slots(dev); +	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response)); + +	if (slots < msg_slots) +		return -EMSGSIZE; + +	ret = mei_hbm_cl_disconnect_rsp(dev, cl); + +	cl->state = MEI_FILE_DISCONNECTED; +	cl->status = 0; +	list_del(&cb->list); +	mei_io_cb_free(cb); + +	return ret; +} + + + +/** + * mei_cl_irq_close - processes close related operation from + *	interrupt thread context - send disconnect request + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. 
+ * + * returns 0, OK; otherwise, error. + */ +static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, +			    struct mei_cl_cb *cmpl_list) +{ +	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; + +	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); +	slots = mei_hbuf_empty_slots(dev); + +	if (slots < msg_slots) +		return -EMSGSIZE; + +	if (mei_hbm_cl_disconnect_req(dev, cl)) { +		cl->status = 0; +		cb->buf_idx = 0; +		list_move_tail(&cb->list, &cmpl_list->list); +		return -EIO; +	} + +	cl->state = MEI_FILE_DISCONNECTING; +	cl->status = 0; +	cb->buf_idx = 0; +	list_move_tail(&cb->list, &dev->ctrl_rd_list.list); +	cl->timer_count = MEI_CONNECT_TIMEOUT; + +	return 0; +} + + +/** + * mei_cl_irq_close - processes client read related operation from the + *	interrupt thread context - request for flow control credits + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. + */ +static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, +			   struct mei_cl_cb *cmpl_list) +{ +	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; +	int ret; + +	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); +	slots = mei_hbuf_empty_slots(dev); + +	if (slots < msg_slots) +		return -EMSGSIZE; + +	ret = mei_hbm_cl_flow_control_req(dev, cl); +	if (ret) { +		cl->status = ret; +		cb->buf_idx = 0; +		list_move_tail(&cb->list, &cmpl_list->list); +		return ret; +	} + +	list_move_tail(&cb->list, &dev->read_list.list); + +	return 0; +} + + +/** + * mei_cl_irq_connect - send connect request in irq_thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. 
+ */ +static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, +			      struct mei_cl_cb *cmpl_list) +{ +	struct mei_device *dev = cl->dev; +	u32 msg_slots; +	int slots; +	int ret; + +	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); +	slots = mei_hbuf_empty_slots(dev); + +	if (mei_cl_is_other_connecting(cl)) +		return 0; + +	if (slots < msg_slots) +		return -EMSGSIZE; + +	cl->state = MEI_FILE_CONNECTING; + +	ret = mei_hbm_cl_connect_req(dev, cl); +	if (ret) { +		cl->status = ret; +		cb->buf_idx = 0; +		list_del(&cb->list); +		return ret; +	} + +	list_move_tail(&cb->list, &dev->ctrl_rd_list.list); +	cl->timer_count = MEI_CONNECT_TIMEOUT; +	return 0; +} + + +/** + * mei_irq_read_handler - bottom half read routine after ISR to + * handle the read processing. + * + * @dev: the device structure + * @cmpl_list: An instance of our list structure + * @slots: slots to read. + * + * returns 0 on success, <0 on failure. + */ +int mei_irq_read_handler(struct mei_device *dev, +		struct mei_cl_cb *cmpl_list, s32 *slots) +{ +	struct mei_msg_hdr *mei_hdr; +	struct mei_cl *cl; +	int ret; + +	if (!dev->rd_msg_hdr) { +		dev->rd_msg_hdr = mei_read_hdr(dev); +		(*slots)--; +		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); +	} +	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr; +	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); + +	if (mei_hdr->reserved || !dev->rd_msg_hdr) { +		dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n", +				dev->rd_msg_hdr); +		ret = -EBADMSG; +		goto end; +	} + +	if (mei_slots2data(*slots) < mei_hdr->length) { +		dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", +				*slots); +		/* we can't read the message */ +		ret = -ENODATA; +		goto end; +	} + +	/*  HBM message */ +	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { +		ret = mei_hbm_dispatch(dev, mei_hdr); +		if (ret) { +			dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n", +					ret); +			goto end; +		} 
+		goto reset_slots; +	} + +	/* find recipient cl */ +	list_for_each_entry(cl, &dev->file_list, link) { +		if (mei_cl_hbm_equal(cl, mei_hdr)) { +			cl_dbg(dev, cl, "got a message\n"); +			break; +		} +	} + +	/* if no recipient cl was found we assume corrupted header */ +	if (&cl->link == &dev->file_list) { +		dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n", +				dev->rd_msg_hdr); +		ret = -EBADMSG; +		goto end; +	} + +	if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && +	    MEI_FILE_CONNECTED == dev->iamthif_cl.state && +	    dev->iamthif_state == MEI_IAMTHIF_READING) { + +		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); +		if (ret) { +			dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n", +					ret); +			goto end; +		} +	} else { +		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); +		if (ret) { +			dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n", +					ret); +			goto end; +		} +	} + +reset_slots: +	/* reset the number of slots and header */ +	*slots = mei_count_full_read_slots(dev); +	dev->rd_msg_hdr = 0; + +	if (*slots == -EOVERFLOW) { +		/* overflow - reset */ +		dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n"); +		/* set the event since message has been read */ +		ret = -ERANGE; +		goto end; +	} +end: +	return ret; +} +EXPORT_SYMBOL_GPL(mei_irq_read_handler); + + +/** + * mei_irq_write_handler -  dispatch write requests + *  after irq received + * + * @dev: the device structure + * @cmpl_list: An instance of our list structure + * + * returns 0 on success, <0 on failure. 
+ */ +int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) +{ + +	struct mei_cl *cl; +	struct mei_cl_cb *cb, *next; +	struct mei_cl_cb *list; +	s32 slots; +	int ret; + + +	if (!mei_hbuf_acquire(dev)) +		return 0; + +	slots = mei_hbuf_empty_slots(dev); +	if (slots <= 0) +		return -EMSGSIZE; + +	/* complete all waiting for write CB */ +	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n"); + +	list = &dev->write_waiting_list; +	list_for_each_entry_safe(cb, next, &list->list, list) { +		cl = cb->cl; +		if (cl == NULL) +			continue; + +		cl->status = 0; +		list_del(&cb->list); +		if (cb->fop_type == MEI_FOP_WRITE && +		    cl != &dev->iamthif_cl) { +			cl_dbg(dev, cl, "MEI WRITE COMPLETE\n"); +			cl->writing_state = MEI_WRITE_COMPLETE; +			list_add_tail(&cb->list, &cmpl_list->list); +		} +		if (cl == &dev->iamthif_cl) { +			cl_dbg(dev, cl, "check iamthif flow control.\n"); +			if (dev->iamthif_flow_control_pending) { +				ret = mei_amthif_irq_read(dev, &slots); +				if (ret) +					return ret; +			} +		} +	} + +	if (dev->wd_state == MEI_WD_STOPPING) { +		dev->wd_state = MEI_WD_IDLE; +		wake_up(&dev->wait_stop_wd); +	} + +	if (mei_cl_is_connected(&dev->wd_cl)) { +		if (dev->wd_pending && +		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { +			ret = mei_wd_send(dev); +			if (ret) +				return ret; +			dev->wd_pending = false; +		} +	} + +	/* complete control write list CB */ +	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n"); +	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { +		cl = cb->cl; +		if (!cl) { +			list_del(&cb->list); +			return -ENODEV; +		} +		switch (cb->fop_type) { +		case MEI_FOP_CLOSE: +			/* send disconnect message */ +			ret = mei_cl_irq_close(cl, cb, cmpl_list); +			if (ret) +				return ret; + +			break; +		case MEI_FOP_READ: +			/* send flow control message */ +			ret = mei_cl_irq_read(cl, cb, cmpl_list); +			if (ret) +				return ret; + +			break; +		case MEI_FOP_CONNECT: +			/* 
connect message */ +			ret = mei_cl_irq_connect(cl, cb, cmpl_list); +			if (ret) +				return ret; + +			break; +		case MEI_FOP_DISCONNECT_RSP: +			/* send disconnect resp */ +			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); +			if (ret) +				return ret; +			break; +		default: +			BUG(); +		} + +	} +	/* complete  write list CB */ +	dev_dbg(&dev->pdev->dev, "complete write list cb.\n"); +	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { +		cl = cb->cl; +		if (cl == NULL) +			continue; +		if (cl == &dev->iamthif_cl) +			ret = mei_amthif_irq_write(cl, cb, cmpl_list); +		else +			ret = mei_cl_irq_write(cl, cb, cmpl_list); +		if (ret) +			return ret; +	} +	return 0; +} +EXPORT_SYMBOL_GPL(mei_irq_write_handler); + + + +/** + * mei_timer - timer function. + * + * @work: pointer to the work_struct structure + * + */ +void mei_timer(struct work_struct *work) +{ +	unsigned long timeout; +	struct mei_cl *cl; +	struct mei_cl_cb  *cb_pos = NULL; +	struct mei_cl_cb  *cb_next = NULL; + +	struct mei_device *dev = container_of(work, +					struct mei_device, timer_work.work); + + +	mutex_lock(&dev->device_lock); + +	/* Catch interrupt stalls during HBM init handshake */ +	if (dev->dev_state == MEI_DEV_INIT_CLIENTS && +	    dev->hbm_state != MEI_HBM_IDLE) { + +		if (dev->init_clients_timer) { +			if (--dev->init_clients_timer == 0) { +				dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n", +					dev->hbm_state); +				mei_reset(dev); +				goto out; +			} +		} +	} + +	if (dev->dev_state != MEI_DEV_ENABLED) +		goto out; + +	/*** connect/disconnect timeouts ***/ +	list_for_each_entry(cl, &dev->file_list, link) { +		if (cl->timer_count) { +			if (--cl->timer_count == 0) { +				dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); +				mei_reset(dev); +				goto out; +			} +		} +	} + +	if (!mei_cl_is_connected(&dev->iamthif_cl)) +		goto out; + +	if (dev->iamthif_stall_timer) { +		if (--dev->iamthif_stall_timer == 0) { +			
dev_err(&dev->pdev->dev, "timer: amthif  hanged.\n"); +			mei_reset(dev); +			dev->iamthif_msg_buf_size = 0; +			dev->iamthif_msg_buf_index = 0; +			dev->iamthif_canceled = false; +			dev->iamthif_ioctl = true; +			dev->iamthif_state = MEI_IAMTHIF_IDLE; +			dev->iamthif_timer = 0; + +			mei_io_cb_free(dev->iamthif_current_cb); +			dev->iamthif_current_cb = NULL; + +			dev->iamthif_file_object = NULL; +			mei_amthif_run_next_cmd(dev); +		} +	} + +	if (dev->iamthif_timer) { + +		timeout = dev->iamthif_timer + +			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); + +		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n", +				dev->iamthif_timer); +		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout); +		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies); +		if (time_after(jiffies, timeout)) { +			/* +			 * User didn't read the AMTHI data on time (15sec) +			 * freeing AMTHI for other requests +			 */ + +			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n"); + +			list_for_each_entry_safe(cb_pos, cb_next, +				&dev->amthif_rd_complete_list.list, list) { + +				cl = cb_pos->file_object->private_data; + +				/* Finding the AMTHI entry. */ +				if (cl == &dev->iamthif_cl) +					list_del(&cb_pos->list); +			} +			mei_io_cb_free(dev->iamthif_current_cb); +			dev->iamthif_current_cb = NULL; + +			dev->iamthif_file_object->private_data = NULL; +			dev->iamthif_file_object = NULL; +			dev->iamthif_timer = 0; +			mei_amthif_run_next_cmd(dev); + +		} +	} +out: +	if (dev->dev_state != MEI_DEV_DISABLED) +		schedule_delayed_work(&dev->timer_work, 2 * HZ); +	mutex_unlock(&dev->device_lock); +} + diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c new file mode 100644 index 00000000000..66f0a1a0645 --- /dev/null +++ b/drivers/misc/mei/main.c @@ -0,0 +1,719 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/aio.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/compat.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" + +/** + * mei_open - the open function + * + * @inode: pointer to inode structure + * @file: pointer to file structure + * + * returns 0 on success, <0 on error + */ +static int mei_open(struct inode *inode, struct file *file) +{ +	struct miscdevice *misc = file->private_data; +	struct pci_dev *pdev; +	struct mei_cl *cl; +	struct mei_device *dev; + +	int err; + +	if (!misc->parent) +		return -ENODEV; + +	pdev = container_of(misc->parent, struct pci_dev, dev); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	cl = NULL; + +	err = -ENODEV; +	if (dev->dev_state != MEI_DEV_ENABLED) { +		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED  dev_state = %s\n", +		    mei_dev_state_str(dev->dev_state)); +		goto err_unlock; +	} + +	err = -ENOMEM; +	cl = mei_cl_allocate(dev); +	if (!cl) +		goto err_unlock; + +	/* open_handle_count check is handled in the mei_cl_link */ +	
err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY); +	if (err) +		goto err_unlock; + +	file->private_data = cl; + +	mutex_unlock(&dev->device_lock); + +	return nonseekable_open(inode, file); + +err_unlock: +	mutex_unlock(&dev->device_lock); +	kfree(cl); +	return err; +} + +/** + * mei_release - the release function + * + * @inode: pointer to inode structure + * @file: pointer to file structure + * + * returns 0 on success, <0 on error + */ +static int mei_release(struct inode *inode, struct file *file) +{ +	struct mei_cl *cl = file->private_data; +	struct mei_cl_cb *cb; +	struct mei_device *dev; +	int rets = 0; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	mutex_lock(&dev->device_lock); +	if (cl == &dev->iamthif_cl) { +		rets = mei_amthif_release(dev, file); +		goto out; +	} +	if (cl->state == MEI_FILE_CONNECTED) { +		cl->state = MEI_FILE_DISCONNECTING; +		cl_dbg(dev, cl, "disconnecting\n"); +		rets = mei_cl_disconnect(cl); +	} +	mei_cl_flush_queues(cl); +	cl_dbg(dev, cl, "removing\n"); + +	mei_cl_unlink(cl); + + +	/* free read cb */ +	cb = NULL; +	if (cl->read_cb) { +		cb = mei_cl_find_read_cb(cl); +		/* Remove entry from read list */ +		if (cb) +			list_del(&cb->list); + +		cb = cl->read_cb; +		cl->read_cb = NULL; +	} + +	file->private_data = NULL; + +	mei_io_cb_free(cb); + +	kfree(cl); +out: +	mutex_unlock(&dev->device_lock); +	return rets; +} + + +/** + * mei_read - the read function. 
+ * + * @file: pointer to file structure + * @ubuf: pointer to user buffer + * @length: buffer length + * @offset: data offset in buffer + * + * returns >=0 data length on success , <0 on error + */ +static ssize_t mei_read(struct file *file, char __user *ubuf, +			size_t length, loff_t *offset) +{ +	struct mei_cl *cl = file->private_data; +	struct mei_cl_cb *cb_pos = NULL; +	struct mei_cl_cb *cb = NULL; +	struct mei_device *dev; +	int rets; +	int err; + + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + + +	mutex_lock(&dev->device_lock); +	if (dev->dev_state != MEI_DEV_ENABLED) { +		rets = -ENODEV; +		goto out; +	} + +	if (length == 0) { +		rets = 0; +		goto out; +	} + +	if (cl == &dev->iamthif_cl) { +		rets = mei_amthif_read(dev, file, ubuf, length, offset); +		goto out; +	} + +	if (cl->read_cb) { +		cb = cl->read_cb; +		/* read what left */ +		if (cb->buf_idx > *offset) +			goto copy_buffer; +		/* offset is beyond buf_idx we have no more data return 0 */ +		if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { +			rets = 0; +			goto free; +		} +		/* Offset needs to be cleaned for contiguous reads*/ +		if (cb->buf_idx == 0 && *offset > 0) +			*offset = 0; +	} else if (*offset > 0) { +		*offset = 0; +	} + +	err = mei_cl_read_start(cl, length); +	if (err && err != -EBUSY) { +		dev_dbg(&dev->pdev->dev, +			"mei start read failure with status = %d\n", err); +		rets = err; +		goto out; +	} + +	if (MEI_READ_COMPLETE != cl->reading_state && +			!waitqueue_active(&cl->rx_wait)) { +		if (file->f_flags & O_NONBLOCK) { +			rets = -EAGAIN; +			goto out; +		} + +		mutex_unlock(&dev->device_lock); + +		if (wait_event_interruptible(cl->rx_wait, +				MEI_READ_COMPLETE == cl->reading_state || +				mei_cl_is_transitioning(cl))) { + +			if (signal_pending(current)) +				return -EINTR; +			return -ERESTARTSYS; +		} + +		mutex_lock(&dev->device_lock); +		if (mei_cl_is_transitioning(cl)) { +			rets = -EBUSY; +			goto out; +		} +	} + +	cb = cl->read_cb; + +	if (!cb) { 
+		rets = -ENODEV; +		goto out; +	} +	if (cl->reading_state != MEI_READ_COMPLETE) { +		rets = 0; +		goto out; +	} +	/* now copy the data to user space */ +copy_buffer: +	dev_dbg(&dev->pdev->dev, "buf.size = %d buf.idx= %ld\n", +	    cb->response_buffer.size, cb->buf_idx); +	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { +		rets = -EMSGSIZE; +		goto free; +	} + +	/* length is being truncated to PAGE_SIZE, +	 * however buf_idx may point beyond that */ +	length = min_t(size_t, length, cb->buf_idx - *offset); + +	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { +		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); +		rets = -EFAULT; +		goto free; +	} + +	rets = length; +	*offset += length; +	if ((unsigned long)*offset < cb->buf_idx) +		goto out; + +free: +	cb_pos = mei_cl_find_read_cb(cl); +	/* Remove entry from read list */ +	if (cb_pos) +		list_del(&cb_pos->list); +	mei_io_cb_free(cb); +	cl->reading_state = MEI_IDLE; +	cl->read_cb = NULL; +out: +	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets); +	mutex_unlock(&dev->device_lock); +	return rets; +} +/** + * mei_write - the write function. 
+ * + * @file: pointer to file structure + * @ubuf: pointer to user buffer + * @length: buffer length + * @offset: data offset in buffer + * + * returns >=0 data length on success , <0 on error + */ +static ssize_t mei_write(struct file *file, const char __user *ubuf, +			 size_t length, loff_t *offset) +{ +	struct mei_cl *cl = file->private_data; +	struct mei_cl_cb *write_cb = NULL; +	struct mei_device *dev; +	unsigned long timeout = 0; +	int rets; +	int id; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	mutex_lock(&dev->device_lock); + +	if (dev->dev_state != MEI_DEV_ENABLED) { +		rets = -ENODEV; +		goto out; +	} + +	id = mei_me_cl_by_id(dev, cl->me_client_id); +	if (id < 0) { +		rets = -ENOTTY; +		goto out; +	} + +	if (length == 0) { +		rets = 0; +		goto out; +	} + +	if (length > dev->me_clients[id].props.max_msg_length) { +		rets = -EFBIG; +		goto out; +	} + +	if (cl->state != MEI_FILE_CONNECTED) { +		dev_err(&dev->pdev->dev, "host client = %d,  is not connected to ME client = %d", +			cl->host_client_id, cl->me_client_id); +		rets = -ENODEV; +		goto out; +	} +	if (cl == &dev->iamthif_cl) { +		write_cb = mei_amthif_find_read_list_entry(dev, file); + +		if (write_cb) { +			timeout = write_cb->read_time + +				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); + +			if (time_after(jiffies, timeout) || +			    cl->reading_state == MEI_READ_COMPLETE) { +				*offset = 0; +				list_del(&write_cb->list); +				mei_io_cb_free(write_cb); +				write_cb = NULL; +			} +		} +	} + +	/* free entry used in read */ +	if (cl->reading_state == MEI_READ_COMPLETE) { +		*offset = 0; +		write_cb = mei_cl_find_read_cb(cl); +		if (write_cb) { +			list_del(&write_cb->list); +			mei_io_cb_free(write_cb); +			write_cb = NULL; +			cl->reading_state = MEI_IDLE; +			cl->read_cb = NULL; +		} +	} else if (cl->reading_state == MEI_IDLE) +		*offset = 0; + + +	write_cb = mei_io_cb_init(cl, file); +	if (!write_cb) { +		dev_err(&dev->pdev->dev, "write cb allocation failed\n"); 
+		rets = -ENOMEM; +		goto out; +	} +	rets = mei_io_cb_alloc_req_buf(write_cb, length); +	if (rets) +		goto out; + +	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); +	if (rets) { +		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); +		rets = -EFAULT; +		goto out; +	} + +	if (cl == &dev->iamthif_cl) { +		rets = mei_amthif_write(dev, write_cb); + +		if (rets) { +			dev_err(&dev->pdev->dev, +				"amthif write failed with status = %d\n", rets); +			goto out; +		} +		mutex_unlock(&dev->device_lock); +		return length; +	} + +	rets = mei_cl_write(cl, write_cb, false); +out: +	mutex_unlock(&dev->device_lock); +	if (rets < 0) +		mei_io_cb_free(write_cb); +	return rets; +} + +/** + * mei_ioctl_connect_client - the connect to fw client IOCTL function + * + * @dev: the device structure + * @data: IOCTL connect data, input and output parameters + * @file: private data of the file object + * + * Locking: called under "dev->device_lock" lock + * + * returns 0 on success, <0 on failure. 
+ */ +static int mei_ioctl_connect_client(struct file *file, +			struct mei_connect_client_data *data) +{ +	struct mei_device *dev; +	struct mei_client *client; +	struct mei_cl *cl; +	int i; +	int rets; + +	cl = file->private_data; +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	if (dev->dev_state != MEI_DEV_ENABLED) { +		rets = -ENODEV; +		goto end; +	} + +	if (cl->state != MEI_FILE_INITIALIZING && +	    cl->state != MEI_FILE_DISCONNECTED) { +		rets = -EBUSY; +		goto end; +	} + +	/* find ME client we're trying to connect to */ +	i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); +	if (i < 0 || dev->me_clients[i].props.fixed_address) { +		dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", +				&data->in_client_uuid); +		rets = -ENOTTY; +		goto end; +	} + +	cl->me_client_id = dev->me_clients[i].client_id; + +	dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", +			cl->me_client_id); +	dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", +			dev->me_clients[i].props.protocol_version); +	dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n", +			dev->me_clients[i].props.max_msg_length); + +	/* if we're connecting to amthif client then we will use the +	 * existing connection +	 */ +	if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { +		dev_dbg(&dev->pdev->dev, "FW Client is amthi\n"); +		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { +			rets = -ENODEV; +			goto end; +		} +		mei_cl_unlink(cl); + +		kfree(cl); +		cl = NULL; +		dev->iamthif_open_count++; +		file->private_data = &dev->iamthif_cl; + +		client = &data->out_client_properties; +		client->max_msg_length = +			dev->me_clients[i].props.max_msg_length; +		client->protocol_version = +			dev->me_clients[i].props.protocol_version; +		rets = dev->iamthif_cl.status; + +		goto end; +	} + + +	/* prepare the output buffer */ +	client = &data->out_client_properties; +	client->max_msg_length = dev->me_clients[i].props.max_msg_length; +	
client->protocol_version = dev->me_clients[i].props.protocol_version; +	dev_dbg(&dev->pdev->dev, "Can connect?\n"); + + +	rets = mei_cl_connect(cl, file); + +end: +	return rets; +} + + +/** + * mei_ioctl - the IOCTL function + * + * @file: pointer to file structure + * @cmd: ioctl command + * @data: pointer to mei message structure + * + * returns 0 on success , <0 on error + */ +static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) +{ +	struct mei_device *dev; +	struct mei_cl *cl = file->private_data; +	struct mei_connect_client_data *connect_data = NULL; +	int rets; + +	if (cmd != IOCTL_MEI_CONNECT_CLIENT) +		return -EINVAL; + +	if (WARN_ON(!cl || !cl->dev)) +		return -ENODEV; + +	dev = cl->dev; + +	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd); + +	mutex_lock(&dev->device_lock); +	if (dev->dev_state != MEI_DEV_ENABLED) { +		rets = -ENODEV; +		goto out; +	} + +	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); + +	connect_data = kzalloc(sizeof(struct mei_connect_client_data), +							GFP_KERNEL); +	if (!connect_data) { +		rets = -ENOMEM; +		goto out; +	} +	dev_dbg(&dev->pdev->dev, "copy connect data from user\n"); +	if (copy_from_user(connect_data, (char __user *)data, +				sizeof(struct mei_connect_client_data))) { +		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); +		rets = -EFAULT; +		goto out; +	} + +	rets = mei_ioctl_connect_client(file, connect_data); + +	/* if all is ok, copying the data back to user. 
*/ +	if (rets) +		goto out; + +	dev_dbg(&dev->pdev->dev, "copy connect data to user\n"); +	if (copy_to_user((char __user *)data, connect_data, +				sizeof(struct mei_connect_client_data))) { +		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); +		rets = -EFAULT; +		goto out; +	} + +out: +	kfree(connect_data); +	mutex_unlock(&dev->device_lock); +	return rets; +} + +/** + * mei_compat_ioctl - the compat IOCTL function + * + * @file: pointer to file structure + * @cmd: ioctl command + * @data: pointer to mei message structure + * + * returns 0 on success , <0 on error + */ +#ifdef CONFIG_COMPAT +static long mei_compat_ioctl(struct file *file, +			unsigned int cmd, unsigned long data) +{ +	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data)); +} +#endif + + +/** + * mei_poll - the poll function + * + * @file: pointer to file structure + * @wait: pointer to poll_table structure + * + * returns poll mask + */ +static unsigned int mei_poll(struct file *file, poll_table *wait) +{ +	struct mei_cl *cl = file->private_data; +	struct mei_device *dev; +	unsigned int mask = 0; + +	if (WARN_ON(!cl || !cl->dev)) +		return POLLERR; + +	dev = cl->dev; + +	mutex_lock(&dev->device_lock); + +	if (!mei_cl_is_connected(cl)) { +		mask = POLLERR; +		goto out; +	} + +	mutex_unlock(&dev->device_lock); + + +	if (cl == &dev->iamthif_cl) +		return mei_amthif_poll(dev, file, wait); + +	poll_wait(file, &cl->tx_wait, wait); + +	mutex_lock(&dev->device_lock); + +	if (!mei_cl_is_connected(cl)) { +		mask = POLLERR; +		goto out; +	} + +	mask |= (POLLIN | POLLRDNORM); + +out: +	mutex_unlock(&dev->device_lock); +	return mask; +} + +/* + * file operations structure will be used for mei char device. 
+ */ +static const struct file_operations mei_fops = { +	.owner = THIS_MODULE, +	.read = mei_read, +	.unlocked_ioctl = mei_ioctl, +#ifdef CONFIG_COMPAT +	.compat_ioctl = mei_compat_ioctl, +#endif +	.open = mei_open, +	.release = mei_release, +	.write = mei_write, +	.poll = mei_poll, +	.llseek = no_llseek +}; + +/* + * Misc Device Struct + */ +static struct miscdevice  mei_misc_device = { +		.name = "mei", +		.fops = &mei_fops, +		.minor = MISC_DYNAMIC_MINOR, +}; + + +int mei_register(struct mei_device *dev) +{ +	int ret; +	mei_misc_device.parent = &dev->pdev->dev; +	ret = misc_register(&mei_misc_device); +	if (ret) +		return ret; + +	if (mei_dbgfs_register(dev, mei_misc_device.name)) +		dev_err(&dev->pdev->dev, "cannot register debugfs\n"); + +	return 0; +} +EXPORT_SYMBOL_GPL(mei_register); + +void mei_deregister(struct mei_device *dev) +{ +	mei_dbgfs_deregister(dev); +	misc_deregister(&mei_misc_device); +	mei_misc_device.parent = NULL; +} +EXPORT_SYMBOL_GPL(mei_deregister); + +static int __init mei_init(void) +{ +	return mei_cl_bus_init(); +} + +static void __exit mei_exit(void) +{ +	mei_cl_bus_exit(); +} + +module_init(mei_init); +module_exit(mei_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h new file mode 100644 index 00000000000..5c7e990e2f2 --- /dev/null +++ b/drivers/misc/mei/mei_dev.h @@ -0,0 +1,752 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ + +#ifndef _MEI_DEV_H_ +#define _MEI_DEV_H_ + +#include <linux/types.h> +#include <linux/watchdog.h> +#include <linux/poll.h> +#include <linux/mei.h> +#include <linux/mei_cl_bus.h> + +#include "hw.h" +#include "hbm.h" + +/* + * watch dog definition + */ +#define MEI_WD_HDR_SIZE       4 +#define MEI_WD_STOP_MSG_SIZE  MEI_WD_HDR_SIZE +#define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16) + +#define MEI_WD_DEFAULT_TIMEOUT   120  /* seconds */ +#define MEI_WD_MIN_TIMEOUT       120  /* seconds */ +#define MEI_WD_MAX_TIMEOUT     65535  /* seconds */ + +#define MEI_WD_STOP_TIMEOUT      10 /* msecs */ + +#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT       (1 << 0) + +#define MEI_RD_MSG_BUF_SIZE           (128 * sizeof(u32)) + + +/* + * AMTHI Client UUID + */ +extern const uuid_le mei_amthif_guid; + +/* + * Watchdog Client UUID + */ +extern const uuid_le mei_wd_guid; + +/* + * Number of Maximum MEI Clients + */ +#define MEI_CLIENTS_MAX 256 + +/* + * maximum number of consecutive resets + */ +#define MEI_MAX_CONSEC_RESET  3 + +/* + * Number of File descriptors/handles + * that can be opened to the driver. 
+ * + * Limit to 255: 256 Total Clients + * minus internal client for MEI Bus Messages + */ +#define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) + +/* + * Internal Clients Number + */ +#define MEI_HOST_CLIENT_ID_ANY        (-1) +#define MEI_HBM_HOST_CLIENT_ID         0 /* not used, just for documentation */ +#define MEI_WD_HOST_CLIENT_ID          1 +#define MEI_IAMTHIF_HOST_CLIENT_ID     2 + + +/* File state */ +enum file_state { +	MEI_FILE_INITIALIZING = 0, +	MEI_FILE_CONNECTING, +	MEI_FILE_CONNECTED, +	MEI_FILE_DISCONNECTING, +	MEI_FILE_DISCONNECTED +}; + +/* MEI device states */ +enum mei_dev_state { +	MEI_DEV_INITIALIZING = 0, +	MEI_DEV_INIT_CLIENTS, +	MEI_DEV_ENABLED, +	MEI_DEV_RESETTING, +	MEI_DEV_DISABLED, +	MEI_DEV_POWER_DOWN, +	MEI_DEV_POWER_UP +}; + +const char *mei_dev_state_str(int state); + +enum iamthif_states { +	MEI_IAMTHIF_IDLE, +	MEI_IAMTHIF_WRITING, +	MEI_IAMTHIF_FLOW_CONTROL, +	MEI_IAMTHIF_READING, +	MEI_IAMTHIF_READ_COMPLETE +}; + +enum mei_file_transaction_states { +	MEI_IDLE, +	MEI_WRITING, +	MEI_WRITE_COMPLETE, +	MEI_FLOW_CONTROL, +	MEI_READING, +	MEI_READ_COMPLETE +}; + +enum mei_wd_states { +	MEI_WD_IDLE, +	MEI_WD_RUNNING, +	MEI_WD_STOPPING, +}; + +/** + * enum mei_cb_file_ops  - file operation associated with the callback + * @MEI_FOP_READ      - read + * @MEI_FOP_WRITE     - write + * @MEI_FOP_CONNECT   - connect + * @MEI_FOP_DISCONNECT_RSP - disconnect response + * @MEI_FOP_OPEN      - open + * @MEI_FOP_CLOSE     - close + */ +enum mei_cb_file_ops { +	MEI_FOP_READ = 0, +	MEI_FOP_WRITE, +	MEI_FOP_CONNECT, +	MEI_FOP_DISCONNECT_RSP, +	MEI_FOP_OPEN, +	MEI_FOP_CLOSE +}; + +/* + * Intel MEI message data struct + */ +struct mei_msg_data { +	u32 size; +	unsigned char *data; +}; + +/* Maximum number of processed FW status registers */ +#define MEI_FW_STATUS_MAX 2 + +/* + * struct mei_fw_status - storage of FW status data + * + * @count - number of actually available elements in array + * @status - FW status registers + */ +struct 
mei_fw_status { +	int count; +	u32 status[MEI_FW_STATUS_MAX]; +}; + +/** + * struct mei_me_client - representation of me (fw) client + * + * @props  - client properties + * @client_id - me client id + * @mei_flow_ctrl_creds - flow control credits + */ +struct mei_me_client { +	struct mei_client_properties props; +	u8 client_id; +	u8 mei_flow_ctrl_creds; +}; + + +struct mei_cl; + +/** + * struct mei_cl_cb - file operation callback structure + * + * @cl - file client who is running this operation + * @fop_type - file operation type + */ +struct mei_cl_cb { +	struct list_head list; +	struct mei_cl *cl; +	enum mei_cb_file_ops fop_type; +	struct mei_msg_data request_buffer; +	struct mei_msg_data response_buffer; +	unsigned long buf_idx; +	unsigned long read_time; +	struct file *file_object; +	u32 internal:1; +}; + +/* MEI client instance carried as file->private_data*/ +struct mei_cl { +	struct list_head link; +	struct mei_device *dev; +	enum file_state state; +	wait_queue_head_t tx_wait; +	wait_queue_head_t rx_wait; +	wait_queue_head_t wait; +	int status; +	/* ID of client connected */ +	u8 host_client_id; +	u8 me_client_id; +	u8 mei_flow_ctrl_creds; +	u8 timer_count; +	enum mei_file_transaction_states reading_state; +	enum mei_file_transaction_states writing_state; +	struct mei_cl_cb *read_cb; + +	/* MEI CL bus data */ +	struct mei_cl_device *device; +	struct list_head device_link; +	uuid_le device_uuid; +}; + +/** struct mei_hw_ops + * + * @fw_status        - read FW status from PCI config space + * @host_is_ready    - query for host readiness + + * @hw_is_ready      - query if hw is ready + * @hw_reset         - reset hw + * @hw_start         - start hw after reset + * @hw_config        - configure hw + + * @pg_state         - power gating state of the device + * @pg_is_enabled    - is power gating enabled + + * @intr_clear       - clear pending interrupts + * @intr_enable      - enable interrupts + * @intr_disable     - disable interrupts + + * @hbuf_free_slots  - 
query for write buffer empty slots + * @hbuf_is_ready    - query if write buffer is empty + * @hbuf_max_len     - query for write buffer max len + + * @write            - write a message to FW + + * @rdbuf_full_slots - query how many slots are filled + + * @read_hdr         - get first 4 bytes (header) + * @read             - read a buffer from the FW + */ +struct mei_hw_ops { + +	int (*fw_status)(struct mei_device *dev, +		struct mei_fw_status *fw_status); +	bool (*host_is_ready)(struct mei_device *dev); + +	bool (*hw_is_ready)(struct mei_device *dev); +	int (*hw_reset)(struct mei_device *dev, bool enable); +	int (*hw_start)(struct mei_device *dev); +	void (*hw_config)(struct mei_device *dev); + +	enum mei_pg_state (*pg_state)(struct mei_device *dev); +	bool (*pg_is_enabled)(struct mei_device *dev); + +	void (*intr_clear)(struct mei_device *dev); +	void (*intr_enable)(struct mei_device *dev); +	void (*intr_disable)(struct mei_device *dev); + +	int (*hbuf_free_slots)(struct mei_device *dev); +	bool (*hbuf_is_ready)(struct mei_device *dev); +	size_t (*hbuf_max_len)(const struct mei_device *dev); + +	int (*write)(struct mei_device *dev, +		     struct mei_msg_hdr *hdr, +		     unsigned char *buf); + +	int (*rdbuf_full_slots)(struct mei_device *dev); + +	u32 (*read_hdr)(const struct mei_device *dev); +	int (*read)(struct mei_device *dev, +		     unsigned char *buf, unsigned long len); +}; + +/* MEI bus API*/ + +/** + * struct mei_cl_ops - MEI CL device ops + * This structure allows ME host clients to implement technology + * specific operations. + * + * @enable: Enable an MEI CL device. Some devices require specific + *	HECI commands to initialize completely. + * @disable: Disable an MEI CL device. + * @send: Tx hook for the device. This allows ME host clients to trap + *	the device driver buffers before actually physically + *	pushing it to the ME. + * @recv: Rx hook for the device. 
This allows ME host clients to trap the + *	ME buffers before forwarding them to the device driver. + */ +struct mei_cl_ops { +	int (*enable)(struct mei_cl_device *device); +	int (*disable)(struct mei_cl_device *device); +	int (*send)(struct mei_cl_device *device, u8 *buf, size_t length); +	int (*recv)(struct mei_cl_device *device, u8 *buf, size_t length); +}; + +struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, +					uuid_le uuid, char *name, +					struct mei_cl_ops *ops); +void mei_cl_remove_device(struct mei_cl_device *device); + +int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length); +int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length); +int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); +void mei_cl_bus_rx_event(struct mei_cl *cl); +void mei_cl_bus_remove_devices(struct mei_device *dev); +int mei_cl_bus_init(void); +void mei_cl_bus_exit(void); + + +/** + * struct mei_cl_device - MEI device handle + * An mei_cl_device pointer is returned from mei_add_device() + * and links MEI bus clients to their actual ME host client pointer. + * Drivers for MEI devices will get an mei_cl_device pointer + * when being probed and shall use it for doing ME bus I/O. + * + * @dev: linux driver model device pointer + * @uuid: me client uuid + * @cl: mei client + * @ops: ME transport ops + * @event_cb: Drivers register this callback to get asynchronous ME + *	events (e.g. Rx buffer pending) notifications. + * @events: Events bitmask sent to the driver. 
+ * @priv_data: client private data + */ +struct mei_cl_device { +	struct device dev; + +	struct mei_cl *cl; + +	const struct mei_cl_ops *ops; + +	struct work_struct event_work; +	mei_cl_event_cb_t event_cb; +	void *event_context; +	unsigned long events; + +	void *priv_data; +}; + + + /** + * enum mei_pg_event - power gating transition events + * + * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition + * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete + * @MEI_PG_EVENT_RECEIVED: the driver received pg event + */ +enum mei_pg_event { +	MEI_PG_EVENT_IDLE, +	MEI_PG_EVENT_WAIT, +	MEI_PG_EVENT_RECEIVED, +}; + +/** + * enum mei_pg_state - device internal power gating state + * + * @MEI_PG_OFF: device is not power gated - it is active + * @MEI_PG_ON:  device is power gated - it is in lower power state + */ +enum mei_pg_state { +	MEI_PG_OFF = 0, +	MEI_PG_ON =  1, +}; + +/* + * mei_cfg + * + * @fw_status - FW status + * @quirk_probe - device exclusion quirk + */ +struct mei_cfg { +	const struct mei_fw_status fw_status; +	bool (*quirk_probe)(struct pci_dev *pdev); +}; + + +#define MEI_PCI_DEVICE(dev, cfg) \ +	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ +	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ +	.driver_data = (kernel_ulong_t)&(cfg) + + +/** + * struct mei_device -  MEI private device struct + + * @reset_count - limits the number of consecutive resets + * @hbm_state - state of host bus message protocol + * @pg_event - power gating event + * @mem_addr - mem mapped base register address + + * @hbuf_depth - depth of hardware host/write buffer is slots + * @hbuf_is_ready - query if the host host/write buffer is ready + * @wr_msg - the buffer for hbm control messages + * @cfg - per device generation config and ops + */ +struct mei_device { +	struct pci_dev *pdev;	/* pointer to pci device struct */ +	/* +	 * lists of queues +	 */ +	/* array of pointers to aio lists */ +	struct mei_cl_cb read_list;		/* driver read queue */ +	
struct mei_cl_cb write_list;		/* driver write queue */ +	struct mei_cl_cb write_waiting_list;	/* write waiting queue */ +	struct mei_cl_cb ctrl_wr_list;		/* managed write IOCTL list */ +	struct mei_cl_cb ctrl_rd_list;		/* managed read IOCTL list */ + +	/* +	 * list of files +	 */ +	struct list_head file_list; +	long open_handle_count; + +	/* +	 * lock for the device +	 */ +	struct mutex device_lock; /* device lock */ +	struct delayed_work timer_work;	/* MEI timer delayed work (timeouts) */ + +	bool recvd_hw_ready; +	/* +	 * waiting queue for receive message from FW +	 */ +	wait_queue_head_t wait_hw_ready; +	wait_queue_head_t wait_pg; +	wait_queue_head_t wait_recvd_msg; +	wait_queue_head_t wait_stop_wd; + +	/* +	 * mei device  states +	 */ +	unsigned long reset_count; +	enum mei_dev_state dev_state; +	enum mei_hbm_state hbm_state; +	u16 init_clients_timer; + +	/* +	 * Power Gating support +	 */ +	enum mei_pg_event pg_event; +#ifdef CONFIG_PM_RUNTIME +	struct dev_pm_domain pg_domain; +#endif /* CONFIG_PM_RUNTIME */ + +	unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];	/* control messages */ +	u32 rd_msg_hdr; + +	/* write buffer */ +	u8 hbuf_depth; +	bool hbuf_is_ready; + +	/* used for control messages */ +	struct { +		struct mei_msg_hdr hdr; +		unsigned char data[128]; +	} wr_msg; + +	struct hbm_version version; + +	struct mei_me_client *me_clients; /* Note: memory has to be allocated */ +	DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); +	DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); +	unsigned long me_clients_num; +	unsigned long me_client_presentation_num; +	unsigned long me_client_index; + +	struct mei_cl wd_cl; +	enum mei_wd_states wd_state; +	bool wd_pending; +	u16 wd_timeout; +	unsigned char wd_data[MEI_WD_START_MSG_SIZE]; + + +	/* amthif list for cmd waiting */ +	struct mei_cl_cb amthif_cmd_list; +	/* driver managed amthif list for reading completed amthif cmd data */ +	struct mei_cl_cb amthif_rd_complete_list; +	struct file *iamthif_file_object; +	struct 
mei_cl iamthif_cl; +	struct mei_cl_cb *iamthif_current_cb; +	long iamthif_open_count; +	int iamthif_mtu; +	unsigned long iamthif_timer; +	u32 iamthif_stall_timer; +	unsigned char *iamthif_msg_buf; /* Note: memory has to be allocated */ +	u32 iamthif_msg_buf_size; +	u32 iamthif_msg_buf_index; +	enum iamthif_states iamthif_state; +	bool iamthif_flow_control_pending; +	bool iamthif_ioctl; +	bool iamthif_canceled; + +	struct work_struct init_work; +	struct work_struct reset_work; + +	/* List of bus devices */ +	struct list_head device_list; + +#if IS_ENABLED(CONFIG_DEBUG_FS) +	struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ + + +	const struct mei_hw_ops *ops; +	const struct mei_cfg *cfg; +	char hw[0] __aligned(sizeof(void *)); +}; + +static inline unsigned long mei_secs_to_jiffies(unsigned long sec) +{ +	return msecs_to_jiffies(sec * MSEC_PER_SEC); +} + +/** + * mei_data2slots - get slots - number of (dwords) from a message length + *	+ size of the mei header + * @length - size of the messages in bytes + * returns  - number of slots + */ +static inline u32 mei_data2slots(size_t length) +{ +	return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); +} + +/** + * mei_slots2data- get data in slots - bytes from slots + * @slots -  number of available slots + * returns  - number of bytes in slots + */ +static inline u32 mei_slots2data(int slots) +{ +	return slots * 4; +} + +/* + * mei init function prototypes + */ +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg); +int mei_reset(struct mei_device *dev); +int mei_start(struct mei_device *dev); +int mei_restart(struct mei_device *dev); +void mei_stop(struct mei_device *dev); +void mei_cancel_work(struct mei_device *dev); + +/* + *  MEI interrupt functions prototype + */ + +void mei_timer(struct work_struct *work); +int mei_irq_read_handler(struct mei_device *dev, +		struct mei_cl_cb *cmpl_list, s32 *slots); + +int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list); 
+void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list); + +/* + * AMTHIF - AMT Host Interface Functions + */ +void mei_amthif_reset_params(struct mei_device *dev); + +int mei_amthif_host_init(struct mei_device *dev); + +int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb); + +int mei_amthif_read(struct mei_device *dev, struct file *file, +		char __user *ubuf, size_t length, loff_t *offset); + +unsigned int mei_amthif_poll(struct mei_device *dev, +		struct file *file, poll_table *wait); + +int mei_amthif_release(struct mei_device *dev, struct file *file); + +struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, +						struct file *file); + +void mei_amthif_run_next_cmd(struct mei_device *dev); + +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, +			struct mei_cl_cb *cmpl_list); + +void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); +int mei_amthif_irq_read_msg(struct mei_device *dev, +			    struct mei_msg_hdr *mei_hdr, +			    struct mei_cl_cb *complete_list); +int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); + +/* + * NFC functions + */ +int mei_nfc_host_init(struct mei_device *dev); +void mei_nfc_host_exit(struct mei_device *dev); + +/* + * NFC Client UUID + */ +extern const uuid_le mei_nfc_guid; + +int mei_wd_send(struct mei_device *dev); +int mei_wd_stop(struct mei_device *dev); +int mei_wd_host_init(struct mei_device *dev); +/* + * mei_watchdog_register  - Registering watchdog interface + *   once we got connection to the WD Client + * @dev - mei device + */ +int mei_watchdog_register(struct mei_device *dev); +/* + * mei_watchdog_unregister  - Unregistering watchdog interface + * @dev - mei device + */ +void mei_watchdog_unregister(struct mei_device *dev); + +/* + * Register Access Function + */ + + +static inline void mei_hw_config(struct mei_device *dev) +{ +	dev->ops->hw_config(dev); +} + +static inline enum mei_pg_state mei_pg_state(struct 
mei_device *dev) +{ +	return dev->ops->pg_state(dev); +} + +static inline bool mei_pg_is_enabled(struct mei_device *dev) +{ +	return dev->ops->pg_is_enabled(dev); +} + +static inline int mei_hw_reset(struct mei_device *dev, bool enable) +{ +	return dev->ops->hw_reset(dev, enable); +} + +static inline int mei_hw_start(struct mei_device *dev) +{ +	return dev->ops->hw_start(dev); +} + +static inline void mei_clear_interrupts(struct mei_device *dev) +{ +	dev->ops->intr_clear(dev); +} + +static inline void mei_enable_interrupts(struct mei_device *dev) +{ +	dev->ops->intr_enable(dev); +} + +static inline void mei_disable_interrupts(struct mei_device *dev) +{ +	dev->ops->intr_disable(dev); +} + +static inline bool mei_host_is_ready(struct mei_device *dev) +{ +	return dev->ops->host_is_ready(dev); +} +static inline bool mei_hw_is_ready(struct mei_device *dev) +{ +	return dev->ops->hw_is_ready(dev); +} + +static inline bool mei_hbuf_is_ready(struct mei_device *dev) +{ +	return dev->ops->hbuf_is_ready(dev); +} + +static inline int mei_hbuf_empty_slots(struct mei_device *dev) +{ +	return dev->ops->hbuf_free_slots(dev); +} + +static inline size_t mei_hbuf_max_len(const struct mei_device *dev) +{ +	return dev->ops->hbuf_max_len(dev); +} + +static inline int mei_write_message(struct mei_device *dev, +			struct mei_msg_hdr *hdr, +			unsigned char *buf) +{ +	return dev->ops->write(dev, hdr, buf); +} + +static inline u32 mei_read_hdr(const struct mei_device *dev) +{ +	return dev->ops->read_hdr(dev); +} + +static inline void mei_read_slots(struct mei_device *dev, +		     unsigned char *buf, unsigned long len) +{ +	dev->ops->read(dev, buf, len); +} + +static inline int mei_count_full_read_slots(struct mei_device *dev) +{ +	return dev->ops->rdbuf_full_slots(dev); +} + +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status); + +#define FW_STS_FMT "%08X %08X" +#define FW_STS_PRM(fw_status) \ +	(fw_status).count > 0 ? 
(fw_status).status[0] : 0xDEADBEEF, \ +	(fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF + +bool mei_hbuf_acquire(struct mei_device *dev); + +bool mei_write_is_idle(struct mei_device *dev); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +int mei_dbgfs_register(struct mei_device *dev, const char *name); +void mei_dbgfs_deregister(struct mei_device *dev); +#else +static inline int mei_dbgfs_register(struct mei_device *dev, const char *name) +{ +	return 0; +} +static inline void mei_dbgfs_deregister(struct mei_device *dev) {} +#endif /* CONFIG_DEBUG_FS */ + +int mei_register(struct mei_device *dev); +void mei_deregister(struct mei_device *dev); + +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" +#define MEI_HDR_PRM(hdr)                  \ +	(hdr)->host_addr, (hdr)->me_addr, \ +	(hdr)->length, (hdr)->internal, (hdr)->msg_complete + +#endif diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c new file mode 100644 index 00000000000..3095fc514a6 --- /dev/null +++ b/drivers/misc/mei/nfc.c @@ -0,0 +1,558 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/mei_cl_bus.h> + +#include "mei_dev.h" +#include "client.h" + +struct mei_nfc_cmd { +	u8 command; +	u8 status; +	u16 req_id; +	u32 reserved; +	u16 data_size; +	u8 sub_command; +	u8 data[]; +} __packed; + +struct mei_nfc_reply { +	u8 command; +	u8 status; +	u16 req_id; +	u32 reserved; +	u16 data_size; +	u8 sub_command; +	u8 reply_status; +	u8 data[]; +} __packed; + +struct mei_nfc_if_version { +	u8 radio_version_sw[3]; +	u8 reserved[3]; +	u8 radio_version_hw[3]; +	u8 i2c_addr; +	u8 fw_ivn; +	u8 vendor_id; +	u8 radio_type; +} __packed; + +struct mei_nfc_connect { +	u8 fw_ivn; +	u8 vendor_id; +} __packed; + +struct mei_nfc_connect_resp { +	u8 fw_ivn; +	u8 vendor_id; +	u16 me_major; +	u16 me_minor; +	u16 me_hotfix; +	u16 me_build; +} __packed; + +struct mei_nfc_hci_hdr { +	u8 cmd; +	u8 status; +	u16 req_id; +	u32 reserved; +	u16 data_size; +} __packed; + +#define MEI_NFC_CMD_MAINTENANCE 0x00 +#define MEI_NFC_CMD_HCI_SEND 0x01 +#define MEI_NFC_CMD_HCI_RECV 0x02 + +#define MEI_NFC_SUBCMD_CONNECT    0x00 +#define MEI_NFC_SUBCMD_IF_VERSION 0x01 + +#define MEI_NFC_HEADER_SIZE 10 + +/** mei_nfc_dev - NFC mei device + * + * @cl: NFC host client + * @cl_info: NFC info host client + * @init_work: perform connection to the info client + * @fw_ivn: NFC Interface Version Number + * @vendor_id: NFC manufacturer ID + * @radio_type: NFC radio type + */ +struct mei_nfc_dev { +	struct mei_cl *cl; +	struct mei_cl *cl_info; +	struct work_struct init_work; +	wait_queue_head_t send_wq; +	u8 fw_ivn; +	u8 vendor_id; +	u8 radio_type; +	char *bus_name; + +	u16 req_id; +	u16 recv_req_id; +}; + +static struct mei_nfc_dev nfc_dev; + +/* UUIDs for NFC F/W clients */ +const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, +				     0x94, 0xd4, 0x50, 0x26, +				     0x67, 0x23, 0x77, 0x5c); + +static 
const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d, +					0x48, 0xa4, 0xef, 0xab, +					0xba, 0x8a, 0x12, 0x06); + +/* Vendors */ +#define MEI_NFC_VENDOR_INSIDE 0x00 +#define MEI_NFC_VENDOR_NXP    0x01 + +/* Radio types */ +#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00 +#define MEI_NFC_VENDOR_NXP_PN544    0x01 + +static void mei_nfc_free(struct mei_nfc_dev *ndev) +{ +	if (ndev->cl) { +		list_del(&ndev->cl->device_link); +		mei_cl_unlink(ndev->cl); +		kfree(ndev->cl); +	} + +	if (ndev->cl_info) { +		list_del(&ndev->cl_info->device_link); +		mei_cl_unlink(ndev->cl_info); +		kfree(ndev->cl_info); +	} + +	memset(ndev, 0, sizeof(struct mei_nfc_dev)); +} + +static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) +{ +	struct mei_device *dev; + +	if (!ndev->cl) +		return -ENODEV; + +	dev = ndev->cl->dev; + +	switch (ndev->vendor_id) { +	case MEI_NFC_VENDOR_INSIDE: +		switch (ndev->radio_type) { +		case MEI_NFC_VENDOR_INSIDE_UREAD: +			ndev->bus_name = "microread"; +			return 0; + +		default: +			dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", +				ndev->radio_type); + +			return -EINVAL; +		} + +	case MEI_NFC_VENDOR_NXP: +		switch (ndev->radio_type) { +		case MEI_NFC_VENDOR_NXP_PN544: +			ndev->bus_name = "pn544"; +			return 0; +		default: +			dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", +				ndev->radio_type); + +			return -EINVAL; +		} + +	default: +		dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n", +			ndev->vendor_id); + +		return -EINVAL; +	} + +	return 0; +} + +static int mei_nfc_connect(struct mei_nfc_dev *ndev) +{ +	struct mei_device *dev; +	struct mei_cl *cl; +	struct mei_nfc_cmd *cmd, *reply; +	struct mei_nfc_connect *connect; +	struct mei_nfc_connect_resp *connect_resp; +	size_t connect_length, connect_resp_length; +	int bytes_recv, ret; + +	cl = ndev->cl; +	dev = cl->dev; + +	connect_length = sizeof(struct mei_nfc_cmd) + +			sizeof(struct mei_nfc_connect); + +	connect_resp_length = sizeof(struct mei_nfc_cmd) + +			
sizeof(struct mei_nfc_connect_resp); + +	cmd = kzalloc(connect_length, GFP_KERNEL); +	if (!cmd) +		return -ENOMEM; +	connect = (struct mei_nfc_connect *)cmd->data; + +	reply = kzalloc(connect_resp_length, GFP_KERNEL); +	if (!reply) { +		kfree(cmd); +		return -ENOMEM; +	} + +	connect_resp = (struct mei_nfc_connect_resp *)reply->data; + +	cmd->command = MEI_NFC_CMD_MAINTENANCE; +	cmd->data_size = 3; +	cmd->sub_command = MEI_NFC_SUBCMD_CONNECT; +	connect->fw_ivn = ndev->fw_ivn; +	connect->vendor_id = ndev->vendor_id; + +	ret = __mei_cl_send(cl, (u8 *)cmd, connect_length); +	if (ret < 0) { +		dev_err(&dev->pdev->dev, "Could not send connect cmd\n"); +		goto err; +	} + +	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, connect_resp_length); +	if (bytes_recv < 0) { +		dev_err(&dev->pdev->dev, "Could not read connect response\n"); +		ret = bytes_recv; +		goto err; +	} + +	dev_info(&dev->pdev->dev, "IVN 0x%x Vendor ID 0x%x\n", +		 connect_resp->fw_ivn, connect_resp->vendor_id); + +	dev_info(&dev->pdev->dev, "ME FW %d.%d.%d.%d\n", +		connect_resp->me_major, connect_resp->me_minor, +		connect_resp->me_hotfix, connect_resp->me_build); + +	ret = 0; + +err: +	kfree(reply); +	kfree(cmd); + +	return ret; +} + +static int mei_nfc_if_version(struct mei_nfc_dev *ndev) +{ +	struct mei_device *dev; +	struct mei_cl *cl; + +	struct mei_nfc_cmd cmd; +	struct mei_nfc_reply *reply = NULL; +	struct mei_nfc_if_version *version; +	size_t if_version_length; +	int bytes_recv, ret; + +	cl = ndev->cl_info; +	dev = cl->dev; + +	memset(&cmd, 0, sizeof(struct mei_nfc_cmd)); +	cmd.command = MEI_NFC_CMD_MAINTENANCE; +	cmd.data_size = 1; +	cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION; + +	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd)); +	if (ret < 0) { +		dev_err(&dev->pdev->dev, "Could not send IF version cmd\n"); +		return ret; +	} + +	/* to be sure on the stack we alloc memory */ +	if_version_length = sizeof(struct mei_nfc_reply) + +		sizeof(struct mei_nfc_if_version); + +	reply = 
kzalloc(if_version_length, GFP_KERNEL); +	if (!reply) +		return -ENOMEM; + +	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); +	if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { +		dev_err(&dev->pdev->dev, "Could not read IF version\n"); +		ret = -EIO; +		goto err; +	} + +	version = (struct mei_nfc_if_version *)reply->data; + +	ndev->fw_ivn = version->fw_ivn; +	ndev->vendor_id = version->vendor_id; +	ndev->radio_type = version->radio_type; + +err: +	kfree(reply); +	return ret; +} + +static int mei_nfc_enable(struct mei_cl_device *cldev) +{ +	struct mei_device *dev; +	struct mei_nfc_dev *ndev = &nfc_dev; +	int ret; + +	dev = ndev->cl->dev; + +	ret = mei_nfc_connect(ndev); +	if (ret < 0) { +		dev_err(&dev->pdev->dev, "Could not connect to NFC"); +		return ret; +	} + +	return 0; +} + +static int mei_nfc_disable(struct mei_cl_device *cldev) +{ +	return 0; +} + +static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ +	struct mei_device *dev; +	struct mei_nfc_dev *ndev; +	struct mei_nfc_hci_hdr *hdr; +	u8 *mei_buf; +	int err; + +	ndev = (struct mei_nfc_dev *) cldev->priv_data; +	dev = ndev->cl->dev; + +	mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL); +	if (!mei_buf) +		return -ENOMEM; + +	hdr = (struct mei_nfc_hci_hdr *) mei_buf; +	hdr->cmd = MEI_NFC_CMD_HCI_SEND; +	hdr->status = 0; +	hdr->req_id = ndev->req_id; +	hdr->reserved = 0; +	hdr->data_size = length; + +	memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length); + +	err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE); +	if (err < 0) +		return err; + +	kfree(mei_buf); + +	if (!wait_event_interruptible_timeout(ndev->send_wq, +				ndev->recv_req_id == ndev->req_id, HZ)) { +		dev_err(&dev->pdev->dev, "NFC MEI command timeout\n"); +		err = -ETIME; +	} else { +		ndev->req_id++; +	} + +	return err; +} + +static int mei_nfc_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ +	struct mei_nfc_dev *ndev; +	struct mei_nfc_hci_hdr 
*hci_hdr; +	int received_length; + +	ndev = (struct mei_nfc_dev *)cldev->priv_data; + +	received_length = __mei_cl_recv(ndev->cl, buf, length); +	if (received_length < 0) +		return received_length; + +	hci_hdr = (struct mei_nfc_hci_hdr *) buf; + +	if (hci_hdr->cmd == MEI_NFC_CMD_HCI_SEND) { +		ndev->recv_req_id = hci_hdr->req_id; +		wake_up(&ndev->send_wq); + +		return 0; +	} + +	return received_length; +} + +static struct mei_cl_ops nfc_ops = { +	.enable = mei_nfc_enable, +	.disable = mei_nfc_disable, +	.send = mei_nfc_send, +	.recv = mei_nfc_recv, +}; + +static void mei_nfc_init(struct work_struct *work) +{ +	struct mei_device *dev; +	struct mei_cl_device *cldev; +	struct mei_nfc_dev *ndev; +	struct mei_cl *cl_info; + +	ndev = container_of(work, struct mei_nfc_dev, init_work); + +	cl_info = ndev->cl_info; +	dev = cl_info->dev; + +	mutex_lock(&dev->device_lock); + +	if (mei_cl_connect(cl_info, NULL) < 0) { +		mutex_unlock(&dev->device_lock); +		dev_err(&dev->pdev->dev, +			"Could not connect to the NFC INFO ME client"); + +		goto err; +	} + +	mutex_unlock(&dev->device_lock); + +	if (mei_nfc_if_version(ndev) < 0) { +		dev_err(&dev->pdev->dev, "Could not get the NFC interface version"); + +		goto err; +	} + +	dev_info(&dev->pdev->dev, +		"NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", +		ndev->fw_ivn, ndev->vendor_id, ndev->radio_type); + +	mutex_lock(&dev->device_lock); + +	if (mei_cl_disconnect(cl_info) < 0) { +		mutex_unlock(&dev->device_lock); +		dev_err(&dev->pdev->dev, +			"Could not disconnect the NFC INFO ME client"); + +		goto err; +	} + +	mutex_unlock(&dev->device_lock); + +	if (mei_nfc_build_bus_name(ndev) < 0) { +		dev_err(&dev->pdev->dev, +			"Could not build the bus ID name\n"); +		return; +	} + +	cldev = mei_cl_add_device(dev, mei_nfc_guid, ndev->bus_name, &nfc_ops); +	if (!cldev) { +		dev_err(&dev->pdev->dev, +			"Could not add the NFC device to the MEI bus\n"); + +		goto err; +	} + +	cldev->priv_data = ndev; + + +	return; + +err: +	
mutex_lock(&dev->device_lock); +	mei_nfc_free(ndev); +	mutex_unlock(&dev->device_lock); + +	return; +} + + +int mei_nfc_host_init(struct mei_device *dev) +{ +	struct mei_nfc_dev *ndev = &nfc_dev; +	struct mei_cl *cl_info, *cl = NULL; +	int i, ret; + +	/* already initialized */ +	if (ndev->cl_info) +		return 0; + +	ndev->cl_info = mei_cl_allocate(dev); +	ndev->cl = mei_cl_allocate(dev); + +	cl = ndev->cl; +	cl_info = ndev->cl_info; + +	if (!cl || !cl_info) { +		ret = -ENOMEM; +		goto err; +	} + +	/* check for valid client id */ +	i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); +	if (i < 0) { +		dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); +		ret = -ENOTTY; +		goto err; +	} + +	cl_info->me_client_id = dev->me_clients[i].client_id; + +	ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY); +	if (ret) +		goto err; + +	cl_info->device_uuid = mei_nfc_info_guid; + +	list_add_tail(&cl_info->device_link, &dev->device_list); + +	/* check for valid client id */ +	i = mei_me_cl_by_uuid(dev, &mei_nfc_guid); +	if (i < 0) { +		dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); +		ret = -ENOTTY; +		goto err; +	} + +	cl->me_client_id = dev->me_clients[i].client_id; + +	ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY); +	if (ret) +		goto err; + +	cl->device_uuid = mei_nfc_guid; + + +	list_add_tail(&cl->device_link, &dev->device_list); + +	ndev->req_id = 1; + +	INIT_WORK(&ndev->init_work, mei_nfc_init); +	init_waitqueue_head(&ndev->send_wq); +	schedule_work(&ndev->init_work); + +	return 0; + +err: +	mei_nfc_free(ndev); + +	return ret; +} + +void mei_nfc_host_exit(struct mei_device *dev) +{ +	struct mei_nfc_dev *ndev = &nfc_dev; +	cancel_work_sync(&ndev->init_work); +} + + diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c new file mode 100644 index 00000000000..1b46c64a649 --- /dev/null +++ b/drivers/misc/mei/pci-me.c @@ -0,0 +1,488 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel 
Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + */ +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/aio.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/compat.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> + +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "client.h" +#include "hw-me-regs.h" +#include "hw-me.h" + +/* mei_pci_tbl - PCI Device ID Table */ +static const struct pci_device_id mei_me_pci_tbl[] = { +	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)}, + +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)}, +	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, 
mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},
+
+	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
+
+	/* required last entry */
+	{0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
+
+#ifdef CONFIG_PM_RUNTIME
+static inline void mei_me_set_pm_domain(struct mei_device *dev);
+static inline void mei_me_unset_pm_domain(struct mei_device *dev);
+#else
+static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
+static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
+#endif /* CONFIG_PM_RUNTIME */
+
+/**
+ * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
+ *
+ * @pdev: PCI device structure
+ * @cfg: per generation config
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool 
mei_me_quirk_probe(struct pci_dev *pdev, +				const struct mei_cfg *cfg) +{ +	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) { +		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); +		return false; +	} + +	return true; +} + +/** + * mei_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in kcs_pci_tbl + * + * returns 0 on success, <0 on failure. + */ +static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ +	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); +	struct mei_device *dev; +	struct mei_me_hw *hw; +	int err; + + +	if (!mei_me_quirk_probe(pdev, cfg)) +		return -ENODEV; + +	/* enable pci dev */ +	err = pci_enable_device(pdev); +	if (err) { +		dev_err(&pdev->dev, "failed to enable pci device.\n"); +		goto end; +	} +	/* set PCI host mastering  */ +	pci_set_master(pdev); +	/* pci request regions for mei driver */ +	err = pci_request_regions(pdev, KBUILD_MODNAME); +	if (err) { +		dev_err(&pdev->dev, "failed to get pci regions.\n"); +		goto disable_device; +	} + +	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || +	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + +		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); +		if (err) +			err = dma_set_coherent_mask(&pdev->dev, +						    DMA_BIT_MASK(32)); +	} +	if (err) { +		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); +		goto release_regions; +	} + + +	/* allocates and initializes the mei dev structure */ +	dev = mei_me_dev_init(pdev, cfg); +	if (!dev) { +		err = -ENOMEM; +		goto release_regions; +	} +	hw = to_me_hw(dev); +	/* mapping  IO device memory */ +	hw->mem_addr = pci_iomap(pdev, 0, 0); +	if (!hw->mem_addr) { +		dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); +		err = -ENOMEM; +		goto free_device; +	} +	pci_enable_msi(pdev); + +	 /* request and enable interrupt */ +	if (pci_dev_msi_enabled(pdev)) +		err = request_threaded_irq(pdev->irq, +			NULL, +			
mei_me_irq_thread_handler, +			IRQF_ONESHOT, KBUILD_MODNAME, dev); +	else +		err = request_threaded_irq(pdev->irq, +			mei_me_irq_quick_handler, +			mei_me_irq_thread_handler, +			IRQF_SHARED, KBUILD_MODNAME, dev); + +	if (err) { +		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", +		       pdev->irq); +		goto disable_msi; +	} + +	if (mei_start(dev)) { +		dev_err(&pdev->dev, "init hw failure.\n"); +		err = -ENODEV; +		goto release_irq; +	} + +	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); +	pm_runtime_use_autosuspend(&pdev->dev); + +	err = mei_register(dev); +	if (err) +		goto release_irq; + +	pci_set_drvdata(pdev, dev); + +	schedule_delayed_work(&dev->timer_work, HZ); + +	/* +	* For not wake-able HW runtime pm framework +	* can't be used on pci device level. +	* Use domain runtime pm callbacks instead. +	*/ +	if (!pci_dev_run_wake(pdev)) +		mei_me_set_pm_domain(dev); + +	if (mei_pg_is_enabled(dev)) +		pm_runtime_put_noidle(&pdev->dev); + +	dev_dbg(&pdev->dev, "initialization successful.\n"); + +	return 0; + +release_irq: +	mei_cancel_work(dev); +	mei_disable_interrupts(dev); +	free_irq(pdev->irq, dev); +disable_msi: +	pci_disable_msi(pdev); +	pci_iounmap(pdev, hw->mem_addr); +free_device: +	kfree(dev); +release_regions: +	pci_release_regions(pdev); +disable_device: +	pci_disable_device(pdev); +end: +	dev_err(&pdev->dev, "initialization failed.\n"); +	return err; +} + +/** + * mei_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
+ */ +static void mei_me_remove(struct pci_dev *pdev) +{ +	struct mei_device *dev; +	struct mei_me_hw *hw; + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return; + +	if (mei_pg_is_enabled(dev)) +		pm_runtime_get_noresume(&pdev->dev); + +	hw = to_me_hw(dev); + + +	dev_dbg(&pdev->dev, "stop\n"); +	mei_stop(dev); + +	if (!pci_dev_run_wake(pdev)) +		mei_me_unset_pm_domain(dev); + +	/* disable interrupts */ +	mei_disable_interrupts(dev); + +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +	if (hw->mem_addr) +		pci_iounmap(pdev, hw->mem_addr); + +	mei_deregister(dev); + +	kfree(dev); + +	pci_release_regions(pdev); +	pci_disable_device(pdev); + + +} +#ifdef CONFIG_PM_SLEEP +static int mei_me_pci_suspend(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev = pci_get_drvdata(pdev); + +	if (!dev) +		return -ENODEV; + +	dev_dbg(&pdev->dev, "suspend\n"); + +	mei_stop(dev); + +	mei_disable_interrupts(dev); + +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +	return 0; +} + +static int mei_me_pci_resume(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int err; + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	pci_enable_msi(pdev); + +	/* request and enable interrupt */ +	if (pci_dev_msi_enabled(pdev)) +		err = request_threaded_irq(pdev->irq, +			NULL, +			mei_me_irq_thread_handler, +			IRQF_ONESHOT, KBUILD_MODNAME, dev); +	else +		err = request_threaded_irq(pdev->irq, +			mei_me_irq_quick_handler, +			mei_me_irq_thread_handler, +			IRQF_SHARED, KBUILD_MODNAME, dev); + +	if (err) { +		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", +				pdev->irq); +		return err; +	} + +	err = mei_restart(dev); +	if (err) +		return err; + +	/* Start timer if stopped in suspend */ +	schedule_delayed_work(&dev->timer_work, HZ); + +	return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_me_pm_runtime_idle(struct device *device) +{ 
struct pci_dev *pdev = to_pci_dev(device);
+	struct mei_device *dev;
+
+	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");
+
+	dev = pci_get_drvdata(pdev);
+	if (!dev)
+		return -ENODEV;
+	if (mei_write_is_idle(dev))
+		pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2);
+
+	return -EBUSY;
+}
+
+static int mei_me_pm_runtime_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct mei_device *dev;
+	int ret;
+
+	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");
+
+	dev = pci_get_drvdata(pdev);
+	if (!dev)
+		return -ENODEV;
+
+	mutex_lock(&dev->device_lock);
+
+	if (mei_write_is_idle(dev))
+		ret = mei_me_pg_set_sync(dev);
+	else
+		ret = -EAGAIN;
+
+	mutex_unlock(&dev->device_lock);
+
+	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);
+
+	return ret;
+}
+
+static int mei_me_pm_runtime_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct mei_device *dev;
+	int ret;
+
+	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");
+
+	dev = pci_get_drvdata(pdev);
+	if (!dev)
+		return -ENODEV;
+
+	mutex_lock(&dev->device_lock);
+
+	ret = mei_me_pg_unset_sync(dev);
+
+	mutex_unlock(&dev->device_lock);
+
+	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * mei_me_set_pm_domain - fill and set pm domain structure for device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_me_set_pm_domain(struct mei_device *dev)
+{
+	struct pci_dev *pdev  = dev->pdev;
+
+	if (pdev->dev.bus && pdev->dev.bus->pm) {
+		dev->pg_domain.ops = *pdev->dev.bus->pm;
+
+		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
+		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
+		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;
+
+		pdev->dev.pm_domain = &dev->pg_domain;
+	}
+}
+
+/**
+ * mei_me_unset_pm_domain - clean pm domain structure for device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_me_unset_pm_domain(struct mei_device *dev)
+{
+	/* 
stop using pm callbacks if any */ +	dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops mei_me_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, +				mei_me_pci_resume) +	SET_RUNTIME_PM_OPS( +		mei_me_pm_runtime_suspend, +		mei_me_pm_runtime_resume, +		mei_me_pm_runtime_idle) +}; + +#define MEI_ME_PM_OPS	(&mei_me_pm_ops) +#else +#define MEI_ME_PM_OPS	NULL +#endif /* CONFIG_PM */ +/* + *  PCI driver structure + */ +static struct pci_driver mei_me_driver = { +	.name = KBUILD_MODNAME, +	.id_table = mei_me_pci_tbl, +	.probe = mei_me_probe, +	.remove = mei_me_remove, +	.shutdown = mei_me_remove, +	.driver.pm = MEI_ME_PM_OPS, +}; + +module_pci_driver(mei_me_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Management Engine Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c new file mode 100644 index 00000000000..2343c6236df --- /dev/null +++ b/drivers/misc/mei/pci-txe.c @@ -0,0 +1,436 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + + +#include "mei_dev.h" +#include "hw-txe.h" + +static const struct pci_device_id mei_txe_pci_tbl[] = { +	{MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */ +	{0, } +}; +MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); + +#ifdef CONFIG_PM_RUNTIME +static inline void mei_txe_set_pm_domain(struct mei_device *dev); +static inline void mei_txe_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM_RUNTIME */ + +static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) +{ +	int i; +	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { +		if (hw->mem_addr[i]) { +			pci_iounmap(pdev, hw->mem_addr[i]); +			hw->mem_addr[i] = NULL; +		} +	} +} +/** + * mei_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_txe_pci_tbl + * + * returns 0 on success, <0 on failure. 
+ */ +static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ +	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); +	struct mei_device *dev; +	struct mei_txe_hw *hw; +	int err; +	int i; + +	/* enable pci dev */ +	err = pci_enable_device(pdev); +	if (err) { +		dev_err(&pdev->dev, "failed to enable pci device.\n"); +		goto end; +	} +	/* set PCI host mastering  */ +	pci_set_master(pdev); +	/* pci request regions for mei driver */ +	err = pci_request_regions(pdev, KBUILD_MODNAME); +	if (err) { +		dev_err(&pdev->dev, "failed to get pci regions.\n"); +		goto disable_device; +	} + +	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); +	if (err) { +		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); +		if (err) { +			dev_err(&pdev->dev, "No suitable DMA available.\n"); +			goto release_regions; +		} +	} + +	/* allocates and initializes the mei dev structure */ +	dev = mei_txe_dev_init(pdev, cfg); +	if (!dev) { +		err = -ENOMEM; +		goto release_regions; +	} +	hw = to_txe_hw(dev); + +	/* mapping  IO device memory */ +	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { +		hw->mem_addr[i] = pci_iomap(pdev, i, 0); +		if (!hw->mem_addr[i]) { +			dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); +			err = -ENOMEM; +			goto free_device; +		} +	} + + +	pci_enable_msi(pdev); + +	/* clear spurious interrupts */ +	mei_clear_interrupts(dev); + +	/* request and enable interrupt  */ +	if (pci_dev_msi_enabled(pdev)) +		err = request_threaded_irq(pdev->irq, +			NULL, +			mei_txe_irq_thread_handler, +			IRQF_ONESHOT, KBUILD_MODNAME, dev); +	else +		err = request_threaded_irq(pdev->irq, +			mei_txe_irq_quick_handler, +			mei_txe_irq_thread_handler, +			IRQF_SHARED, KBUILD_MODNAME, dev); +	if (err) { +		dev_err(&pdev->dev, "mei: request_threaded_irq failure. 
irq = %d\n", +			pdev->irq); +		goto free_device; +	} + +	if (mei_start(dev)) { +		dev_err(&pdev->dev, "init hw failure.\n"); +		err = -ENODEV; +		goto release_irq; +	} + +	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); +	pm_runtime_use_autosuspend(&pdev->dev); + +	err = mei_register(dev); +	if (err) +		goto release_irq; + +	pci_set_drvdata(pdev, dev); + +	/* +	* For not wake-able HW runtime pm framework +	* can't be used on pci device level. +	* Use domain runtime pm callbacks instead. +	*/ +	if (!pci_dev_run_wake(pdev)) +		mei_txe_set_pm_domain(dev); + +	pm_runtime_put_noidle(&pdev->dev); + +	return 0; + +release_irq: + +	mei_cancel_work(dev); + +	/* disable interrupts */ +	mei_disable_interrupts(dev); + +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +free_device: +	mei_txe_pci_iounmap(pdev, hw); + +	kfree(dev); +release_regions: +	pci_release_regions(pdev); +disable_device: +	pci_disable_device(pdev); +end: +	dev_err(&pdev->dev, "initialization failed.\n"); +	return err; +} + +/** + * mei_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
+ */ +static void mei_txe_remove(struct pci_dev *pdev) +{ +	struct mei_device *dev; +	struct mei_txe_hw *hw; + +	dev = pci_get_drvdata(pdev); +	if (!dev) { +		dev_err(&pdev->dev, "mei: dev =NULL\n"); +		return; +	} + +	pm_runtime_get_noresume(&pdev->dev); + +	hw = to_txe_hw(dev); + +	mei_stop(dev); + +	if (!pci_dev_run_wake(pdev)) +		mei_txe_unset_pm_domain(dev); + +	/* disable interrupts */ +	mei_disable_interrupts(dev); +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +	pci_set_drvdata(pdev, NULL); + +	mei_txe_pci_iounmap(pdev, hw); + +	mei_deregister(dev); + +	kfree(dev); + +	pci_release_regions(pdev); +	pci_disable_device(pdev); +} + + +#ifdef CONFIG_PM_SLEEP +static int mei_txe_pci_suspend(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev = pci_get_drvdata(pdev); + +	if (!dev) +		return -ENODEV; + +	dev_dbg(&pdev->dev, "suspend\n"); + +	mei_stop(dev); + +	mei_disable_interrupts(dev); + +	free_irq(pdev->irq, dev); +	pci_disable_msi(pdev); + +	return 0; +} + +static int mei_txe_pci_resume(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int err; + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	pci_enable_msi(pdev); + +	mei_clear_interrupts(dev); + +	/* request and enable interrupt */ +	if (pci_dev_msi_enabled(pdev)) +		err = request_threaded_irq(pdev->irq, +			NULL, +			mei_txe_irq_thread_handler, +			IRQF_ONESHOT, KBUILD_MODNAME, dev); +	else +		err = request_threaded_irq(pdev->irq, +			mei_txe_irq_quick_handler, +			mei_txe_irq_thread_handler, +			IRQF_SHARED, KBUILD_MODNAME, dev); +	if (err) { +		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", +				pdev->irq); +		return err; +	} + +	err = mei_restart(dev); + +	return err; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_txe_pm_runtime_idle(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; + +	
dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; +	if (mei_write_is_idle(dev)) +		pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); + +	return -EBUSY; +} +static int mei_txe_pm_runtime_suspend(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int ret; + +	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	if (mei_write_is_idle(dev)) +		ret = mei_txe_aliveness_set_sync(dev, 0); +	else +		ret = -EAGAIN; + +	/* +	 * If everything is okay we're about to enter PCI low +	 * power state (D3) therefor we need to disable the +	 * interrupts towards host. +	 * However if device is not wakeable we do not enter +	 * D-low state and we need to keep the interrupt kicking +	 */ +	 if (!ret && pci_dev_run_wake(pdev)) +		mei_disable_interrupts(dev); + +	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); + +	mutex_unlock(&dev->device_lock); +	return ret; +} + +static int mei_txe_pm_runtime_resume(struct device *device) +{ +	struct pci_dev *pdev = to_pci_dev(device); +	struct mei_device *dev; +	int ret; + +	dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); + +	dev = pci_get_drvdata(pdev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	mei_enable_interrupts(dev); + +	ret = mei_txe_aliveness_set_sync(dev, 1); + +	mutex_unlock(&dev->device_lock); + +	dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); + +	return ret; +} + +/** + * mei_txe_set_pm_domain - fill and set pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_set_pm_domain(struct mei_device *dev) +{ +	struct pci_dev *pdev  = dev->pdev; + +	if (pdev->dev.bus && pdev->dev.bus->pm) { +		dev->pg_domain.ops = *pdev->dev.bus->pm; + +		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend; +		
dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume; +		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle; + +		pdev->dev.pm_domain = &dev->pg_domain; +	} +} + +/** + * mei_txe_unset_pm_domain - clean pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) +{ +	/* stop using pm callbacks if any */ +	dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops mei_txe_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, +				mei_txe_pci_resume) +	SET_RUNTIME_PM_OPS( +		mei_txe_pm_runtime_suspend, +		mei_txe_pm_runtime_resume, +		mei_txe_pm_runtime_idle) +}; + +#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops) +#else +#define MEI_TXE_PM_OPS	NULL +#endif /* CONFIG_PM */ + +/* + *  PCI driver structure + */ +static struct pci_driver mei_txe_driver = { +	.name = KBUILD_MODNAME, +	.id_table = mei_txe_pci_tbl, +	.probe = mei_txe_probe, +	.remove = mei_txe_remove, +	.shutdown = mei_txe_remove, +	.driver.pm = MEI_TXE_PM_OPS, +}; + +module_pci_driver(mei_txe_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c new file mode 100644 index 00000000000..a84a664dfcc --- /dev/null +++ b/drivers/misc/mei/wd.c @@ -0,0 +1,401 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2012, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for + * more details. + * + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/sched.h> +#include <linux/watchdog.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; +static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 }; + +/* + * AMT Watchdog Device + */ +#define INTEL_AMT_WATCHDOG_ID "INTCAMT" + +/* UUIDs for AMT F/W clients */ +const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89, +						0x9D, 0xA9, 0x15, 0x14, 0xCB, +						0x32, 0xAB); + +static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) +{ +	dev_dbg(&dev->pdev->dev, "wd: set timeout=%d.\n", timeout); +	memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE); +	memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16)); +} + +/** + * mei_wd_host_init - connect to the watchdog client + * + * @dev: the device structure + * + * returns -ENOTTY if wd client cannot be found + *         -EIO if write has failed + *         0 on success + */ +int mei_wd_host_init(struct mei_device *dev) +{ +	struct mei_cl *cl = &dev->wd_cl; +	int id; +	int ret; + +	mei_cl_init(cl, dev); + +	dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT; +	dev->wd_state = MEI_WD_IDLE; + + +	/* check for valid client id */ +	id = mei_me_cl_by_uuid(dev, &mei_wd_guid); +	if (id < 0) { +		dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); +		return -ENOTTY; +	} + +	cl->me_client_id = dev->me_clients[id].client_id; + +	ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); + +	if (ret < 0) { +		dev_info(&dev->pdev->dev, "wd: failed link client\n"); +		return ret; +	} + +	ret = mei_cl_connect(cl, NULL); + +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret); +		mei_cl_unlink(cl); +		return ret; +	} + +	ret = 
mei_watchdog_register(dev); +	if (ret) { +		mei_cl_disconnect(cl); +		mei_cl_unlink(cl); +	} +	return ret; +} + +/** + * mei_wd_send - sends watch dog message to fw. + * + * @dev: the device structure + * + * returns 0 if success, + *	-EIO when message send fails + *	-EINVAL when invalid message is to be sent + *	-ENODEV on flow control failure + */ +int mei_wd_send(struct mei_device *dev) +{ +	struct mei_cl *cl = &dev->wd_cl; +	struct mei_msg_hdr hdr; +	int ret; + +	hdr.host_addr = cl->host_client_id; +	hdr.me_addr = cl->me_client_id; +	hdr.msg_complete = 1; +	hdr.reserved = 0; +	hdr.internal = 0; + +	if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) +		hdr.length = MEI_WD_START_MSG_SIZE; +	else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) +		hdr.length = MEI_WD_STOP_MSG_SIZE; +	else { +		dev_err(&dev->pdev->dev, "wd: invalid message is to be sent, aborting\n"); +		return -EINVAL; +	} + +	ret = mei_write_message(dev, &hdr, dev->wd_data); +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: write message failed\n"); +		return ret; +	} + +	ret = mei_cl_flow_ctrl_reduce(cl); +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n"); +		return ret; +	} + +	return 0; +} + +/** + * mei_wd_stop - sends watchdog stop message to fw. 
+ * + * @dev: the device structure + * @preserve: indicate if to keep the timeout value + * + * returns 0 if success + * on error: + *	-EIO    when message send fails + *	-EINVAL when invalid message is to be sent + *	-ETIME  on message timeout + */ +int mei_wd_stop(struct mei_device *dev) +{ +	int ret; + +	if (dev->wd_cl.state != MEI_FILE_CONNECTED || +	    dev->wd_state != MEI_WD_RUNNING) +		return 0; + +	memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE); + +	dev->wd_state = MEI_WD_STOPPING; + +	ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); +	if (ret < 0) +		goto err; + +	if (ret && mei_hbuf_acquire(dev)) { +		ret = mei_wd_send(dev); +		if (ret) +			goto err; +		dev->wd_pending = false; +	} else { +		dev->wd_pending = true; +	} + +	mutex_unlock(&dev->device_lock); + +	ret = wait_event_timeout(dev->wait_stop_wd, +				dev->wd_state == MEI_WD_IDLE, +				msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); +	mutex_lock(&dev->device_lock); +	if (dev->wd_state != MEI_WD_IDLE) { +		/* timeout */ +		ret = -ETIME; +		dev_warn(&dev->pdev->dev, +			"wd: stop failed to complete ret=%d.\n", ret); +		goto err; +	} +	dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n", +			MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); +	return 0; +err: +	return ret; +} + +/* + * mei_wd_ops_start - wd start command from the watchdog core. 
+ * + * @wd_dev - watchdog device struct + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_start(struct watchdog_device *wd_dev) +{ +	int err = -ENODEV; +	struct mei_device *dev; + +	dev = watchdog_get_drvdata(wd_dev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	if (dev->dev_state != MEI_DEV_ENABLED) { +		dev_dbg(&dev->pdev->dev, +			"wd: dev_state != MEI_DEV_ENABLED  dev_state = %s\n", +			mei_dev_state_str(dev->dev_state)); +		goto end_unlock; +	} + +	if (dev->wd_cl.state != MEI_FILE_CONNECTED)	{ +		dev_dbg(&dev->pdev->dev, +			"MEI Driver is not connected to Watchdog Client\n"); +		goto end_unlock; +	} + +	mei_wd_set_start_timeout(dev, dev->wd_timeout); + +	err = 0; +end_unlock: +	mutex_unlock(&dev->device_lock); +	return err; +} + +/* + * mei_wd_ops_stop -  wd stop command from the watchdog core. + * + * @wd_dev - watchdog device struct + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_stop(struct watchdog_device *wd_dev) +{ +	struct mei_device *dev; + +	dev = watchdog_get_drvdata(wd_dev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); +	mei_wd_stop(dev); +	mutex_unlock(&dev->device_lock); + +	return 0; +} + +/* + * mei_wd_ops_ping - wd ping command from the watchdog core. 
+ * + * @wd_dev - watchdog device struct + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_ping(struct watchdog_device *wd_dev) +{ +	struct mei_device *dev; +	int ret; + +	dev = watchdog_get_drvdata(wd_dev); +	if (!dev) +		return -ENODEV; + +	mutex_lock(&dev->device_lock); + +	if (dev->wd_cl.state != MEI_FILE_CONNECTED) { +		dev_err(&dev->pdev->dev, "wd: not connected.\n"); +		ret = -ENODEV; +		goto end; +	} + +	dev->wd_state = MEI_WD_RUNNING; + +	ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); +	if (ret < 0) +		goto end; +	/* Check if we can send the ping to HW*/ +	if (ret && mei_hbuf_acquire(dev)) { + +		dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); + +		ret = mei_wd_send(dev); +		if (ret) +			goto end; +		dev->wd_pending = false; +	} else { +		dev->wd_pending = true; +	} + +end: +	mutex_unlock(&dev->device_lock); +	return ret; +} + +/* + * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core. + * + * @wd_dev - watchdog device struct + * @timeout - timeout value to set + * + * returns 0 if success, negative errno code for failure + */ +static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, +		unsigned int timeout) +{ +	struct mei_device *dev; + +	dev = watchdog_get_drvdata(wd_dev); +	if (!dev) +		return -ENODEV; + +	/* Check Timeout value */ +	if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT) +		return -EINVAL; + +	mutex_lock(&dev->device_lock); + +	dev->wd_timeout = timeout; +	wd_dev->timeout = timeout; +	mei_wd_set_start_timeout(dev, dev->wd_timeout); + +	mutex_unlock(&dev->device_lock); + +	return 0; +} + +/* + * Watchdog Device structs + */ +static const struct watchdog_ops wd_ops = { +		.owner = THIS_MODULE, +		.start = mei_wd_ops_start, +		.stop = mei_wd_ops_stop, +		.ping = mei_wd_ops_ping, +		.set_timeout = mei_wd_ops_set_timeout, +}; +static const struct watchdog_info wd_info = { +		.identity = INTEL_AMT_WATCHDOG_ID, +		.options = WDIOF_KEEPALIVEPING | +			   
WDIOF_SETTIMEOUT | +			   WDIOF_ALARMONLY, +}; + +static struct watchdog_device amt_wd_dev = { +		.info = &wd_info, +		.ops = &wd_ops, +		.timeout = MEI_WD_DEFAULT_TIMEOUT, +		.min_timeout = MEI_WD_MIN_TIMEOUT, +		.max_timeout = MEI_WD_MAX_TIMEOUT, +}; + + +int mei_watchdog_register(struct mei_device *dev) +{ + +	int ret; + +	/* unlock to perserve correct locking order */ +	mutex_unlock(&dev->device_lock); +	ret = watchdog_register_device(&amt_wd_dev); +	mutex_lock(&dev->device_lock); +	if (ret) { +		dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n", +			ret); +		return ret; +	} + +	dev_dbg(&dev->pdev->dev, +		"wd: successfully register watchdog interface.\n"); +	watchdog_set_drvdata(&amt_wd_dev, dev); +	return 0; +} + +void mei_watchdog_unregister(struct mei_device *dev) +{ +	if (watchdog_get_drvdata(&amt_wd_dev) == NULL) +		return; + +	watchdog_set_drvdata(&amt_wd_dev, NULL); +	watchdog_unregister_device(&amt_wd_dev); +} +  | 
