Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/Kconfig          |   2
-rw-r--r--  drivers/hv/Makefile         |   2
-rw-r--r--  drivers/hv/channel.c        | 208
-rw-r--r--  drivers/hv/channel_mgmt.c   | 349
-rw-r--r--  drivers/hv/connection.c     | 295
-rw-r--r--  drivers/hv/hv.c             | 143
-rw-r--r--  drivers/hv/hv_balloon.c     | 639
-rw-r--r--  drivers/hv/hv_fcopy.c       | 414
-rw-r--r--  drivers/hv/hv_kvp.c         |  61
-rw-r--r--  drivers/hv/hv_snapshot.c    | 281
-rw-r--r--  drivers/hv/hv_util.c        | 134
-rw-r--r--  drivers/hv/hyperv_vmbus.h   |  95
-rw-r--r--  drivers/hv/ring_buffer.c    | 150
-rw-r--r--  drivers/hv/vmbus_drv.c      | 640
14 files changed, 2764 insertions, 649 deletions
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index b38ef6d8d04..0403b51d20b 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -2,7 +2,7 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
- depends on X86 && ACPI && PCI
+ depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index e6abfa02d8b..5e4dfa4cfe2 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o
-hv_utils-y := hv_util.o hv_kvp.o
+hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f..284cf66489f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/uio.h>
#include "hyperv_vmbus.h"
@@ -47,63 +48,19 @@ static void vmbus_setevent(struct vmbus_channel *channel)
(unsigned long *) vmbus_connection.send_int_page +
(channel->offermsg.child_relid >> 5));
- monitorpage = vmbus_connection.monitor_pages;
- monitorpage++; /* Get the child to parent monitor page */
+ /* Get the child to parent monitor page */
+ monitorpage = vmbus_connection.monitor_pages[1];
sync_set_bit(channel->monitor_bit,
(unsigned long *)&monitorpage->trigger_group
[channel->monitor_grp].pending);
} else {
- vmbus_set_event(channel->offermsg.child_relid);
+ vmbus_set_event(channel);
}
}
/*
- * vmbus_get_debug_info -Retrieve various channel debug info
- */
-void vmbus_get_debug_info(struct vmbus_channel *channel,
- struct vmbus_channel_debug_info *debuginfo)
-{
- struct hv_monitor_page *monitorpage;
- u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
- u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;
-
- debuginfo->relid = channel->offermsg.child_relid;
- debuginfo->state = channel->state;
- memcpy(&debuginfo->interfacetype,
- &channel->offermsg.offer.if_type, sizeof(uuid_le));
- memcpy(&debuginfo->interface_instance,
- &channel->offermsg.offer.if_instance,
- sizeof(uuid_le));
-
- monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;
-
- debuginfo->monitorid = channel->offermsg.monitorid;
-
- debuginfo->servermonitor_pending =
- monitorpage->trigger_group[monitor_group].pending;
- debuginfo->servermonitor_latency =
- monitorpage->latency[monitor_group][monitor_offset];
- debuginfo->servermonitor_connectionid =
- monitorpage->parameter[monitor_group]
- [monitor_offset].connectionid.u.id;
-
- monitorpage++;
-
- debuginfo->clientmonitor_pending =
- monitorpage->trigger_group[monitor_group].pending;
- debuginfo->clientmonitor_latency =
- monitorpage->latency[monitor_group][monitor_offset];
- debuginfo->clientmonitor_connectionid =
- monitorpage->parameter[monitor_group]
- [monitor_offset].connectionid.u.id;
-
- hv_ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
- hv_ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
-}
-
-/*
* vmbus_open - Open the specified channel.
*/
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
@@ -116,6 +73,15 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
unsigned long flags;
int ret, t, err = 0;
+ spin_lock_irqsave(&newchannel->sc_lock, flags);
+ if (newchannel->state == CHANNEL_OPEN_STATE) {
+ newchannel->state = CHANNEL_OPENING_STATE;
+ } else {
+ spin_unlock_irqrestore(&newchannel->sc_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&newchannel->sc_lock, flags);
+
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
@@ -181,7 +147,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
PAGE_SHIFT;
- open_msg->server_contextarea_gpadlhandle = 0;
+ open_msg->target_vp = newchannel->target_vp;
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
@@ -216,6 +182,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (err == 0)
+ newchannel->state = CHANNEL_OPENED_STATE;
+
kfree(open_info);
return err;
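
For reference, a minimal caller-side sketch of the open path under the new state machine; the ring sizes, callback, and helper name are illustrative, not part of this patch:

/* Illustrative only: open a channel with 4-page rings and no user data. */
static int example_open(struct vmbus_channel *chan,
			void (*cb)(void *ctx), void *ctx)
{
	/* Fails with -EINVAL unless the channel is in CHANNEL_OPEN_STATE. */
	return vmbus_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
			  NULL, 0, cb, ctx);
}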
@@ -241,7 +210,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
{
int i;
int pagecount;
- unsigned long long pfn;
struct vmbus_channel_gpadl_header *gpadl_header;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msgheader;
@@ -251,7 +219,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
pagecount = size >> PAGE_SHIFT;
- pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
/* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -280,7 +247,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
+ gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
*messagecount = 1;
@@ -333,7 +301,9 @@ static int create_gpadl_header(void *kbuffer, u32 size,
		 * so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = pfn + pfnsum + i;
+ gpadl_body->pfn[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * (pfnsum + i)) >>
+ PAGE_SHIFT;
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -359,7 +329,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = pfn+i;
+ gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+ kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
*msginfo = msgheader;
*messagecount = 1;
@@ -376,7 +347,7 @@ nomem:
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
*
* @channel: a channel
- * @kbuffer: from kmalloc
+ * @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
* @gpadl_handle: some funky thing
*/
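
Because the buffer may now come from vmalloc, its pages need not be physically contiguous, which is why the hunks above resolve every page with slow_virt_to_phys() instead of offsetting a single base PFN. A hedged restatement of that per-page translation (helper name is illustrative):

/* Illustrative only: build a PFN list for a possibly vmalloc'ed buffer. */
static void example_fill_pfns(void *kbuffer, u32 size, u64 *pfn_array)
{
	int i;
	int pagecount = size >> PAGE_SHIFT;

	for (i = 0; i < pagecount; i++)
		/* slow_virt_to_phys() walks the page tables, so it also
		 * works for vmalloc addresses where virt_to_phys() would
		 * give a wrong answer. */
		pfn_array[i] = slow_virt_to_phys(kbuffer + PAGE_SIZE * i)
				>> PAGE_SHIFT;
}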
@@ -500,19 +471,26 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+static void reset_channel_cb(void *arg)
+{
+ struct vmbus_channel *channel = arg;
+
+ channel->onchannel_callback = NULL;
+}
+
+static void vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
int ret;
- unsigned long flags;
+ channel->state = CHANNEL_OPEN_STATE;
+ channel->sc_creation_callback = NULL;
/* Stop callback and cancel the timer asap */
- spin_lock_irqsave(&channel->inbound_lock, flags);
- channel->onchannel_callback = NULL;
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu, reset_channel_cb,
+ channel, true);
+ else
+ reset_channel_cb(channel);
/* Send a closing message */
@@ -538,6 +516,37 @@ void vmbus_close(struct vmbus_channel *channel)
}
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+ struct list_head *cur, *tmp;
+ struct vmbus_channel *cur_channel;
+
+ if (channel->primary_channel != NULL) {
+ /*
+ * We will only close sub-channels when
+ * the primary is closed.
+ */
+ return;
+ }
+ /*
+ * Close all the sub-channels first and then close the
+ * primary channel.
+ */
+ list_for_each_safe(cur, tmp, &channel->sc_list) {
+ cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+ if (cur_channel->state != CHANNEL_OPENED_STATE)
+ continue;
+ vmbus_close_internal(cur_channel);
+ }
+ /*
+ * Now close the primary.
+ */
+ vmbus_close_internal(channel);
+}
EXPORT_SYMBOL_GPL(vmbus_close);
/**
@@ -554,16 +563,17 @@ EXPORT_SYMBOL_GPL(vmbus_close);
*
* Mainly used by Hyper-V drivers.
*/
-int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
+int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u64 requestid,
enum vmbus_packet_type type, u32 flags)
{
struct vmpacket_descriptor desc;
u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
int ret;
+ bool signal = false;
/* Setup the descriptor */
@@ -574,15 +584,16 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
desc.len8 = (u16)(packetlen_aligned >> 3);
desc.trans_id = requestid;
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
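
A hedged usage sketch of the in-band send path as it looks after the kvec conversion; the request layout and completion flag are illustrative:

/* Illustrative only: send a small in-band request on an open channel. */
static int example_send(struct vmbus_channel *chan)
{
	struct { u32 op; u32 arg; } req = { .op = 1, .arg = 0 };

	/* requestid comes back as trans_id in the completion packet. */
	return vmbus_sendpacket(chan, &req, sizeof(req),
				(unsigned long)&req, VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}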
@@ -604,8 +615,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -635,15 +647,16 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.range[i].pfn = pagebuffers[i].pfn;
}
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = descsize;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -663,13 +676,13 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
- struct scatterlist bufferlist[3];
+ struct kvec bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
-
- if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
+ if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
return -EINVAL;
/*
@@ -697,15 +710,16 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
pfncount * sizeof(u64));
- sg_init_table(bufferlist, 3);
- sg_set_buf(&bufferlist[0], &desc, descsize);
- sg_set_buf(&bufferlist[1], buffer, bufferlen);
- sg_set_buf(&bufferlist[2], &aligned_data,
- packetlen_aligned - packetlen);
+ bufferlist[0].iov_base = &desc;
+ bufferlist[0].iov_len = descsize;
+ bufferlist[1].iov_base = buffer;
+ bufferlist[1].iov_len = bufferlen;
+ bufferlist[2].iov_base = &aligned_data;
+ bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -732,6 +746,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 packetlen;
u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -758,8 +773,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
/* Copy over the packet to the user buffer */
ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
- (desc.offset8 << 3));
+ (desc.offset8 << 3), &signal);
+ if (signal)
+ vmbus_setevent(channel);
return 0;
}
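
And the matching receive side, typically called from the channel callback; buffer size and logging are illustrative:

/* Illustrative only: drain one packet from the in-bound ring. */
static void example_recv(struct vmbus_channel *chan)
{
	u8 buf[256];
	u32 got = 0;
	u64 reqid = 0;

	if (vmbus_recvpacket(chan, buf, sizeof(buf), &got, &reqid) == 0 &&
	    got > 0)
		pr_info("example: %u bytes, trans_id %llu\n", got, reqid);
}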
@@ -774,8 +791,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
{
struct vmpacket_descriptor desc;
u32 packetlen;
- u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -788,7 +805,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
packetlen = desc.len8 << 3;
- userlen = packetlen - (desc.offset8 << 3);
*buffer_actual_len = packetlen;
@@ -802,8 +818,12 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
*requestid = desc.trans_id;
/* Copy over the entire packet to the user buffer */
- ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+ ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
+ &signal);
- return 0;
+ if (signal)
+ vmbus_setevent(channel);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f84c5cff8d..ed9350d4276 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -48,30 +48,39 @@ struct vmbus_channel_message_table_entry {
* @negop is of type &struct icmsg_negotiate.
 * Set up and fill in the default negotiate response message.
*
- * The max_fw_version specifies the maximum framework version that
- * we can support and max _srv_version specifies the maximum service
- * version we can support. A special value MAX_SRV_VER can be
- * specified to indicate that we can handle the maximum version
- * exposed by the host.
+ * The fw_version specifies the framework version that
+ * we can support and srv_version specifies the service
+ * version we can support.
*
* Mainly used by Hyper-V drivers.
*/
-void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
+bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
struct icmsg_negotiate *negop, u8 *buf,
- int max_fw_version, int max_srv_version)
+ int fw_version, int srv_version)
{
- int icframe_vercnt;
- int icmsg_vercnt;
+ int icframe_major, icframe_minor;
+ int icmsg_major, icmsg_minor;
+ int fw_major, fw_minor;
+ int srv_major, srv_minor;
int i;
+ bool found_match = false;
icmsghdrp->icmsgsize = 0x10;
+ fw_major = (fw_version >> 16);
+ fw_minor = (fw_version & 0xFFFF);
+
+ srv_major = (srv_version >> 16);
+ srv_minor = (srv_version & 0xFFFF);
negop = (struct icmsg_negotiate *)&buf[
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
- icframe_vercnt = negop->icframe_vercnt;
- icmsg_vercnt = negop->icmsg_vercnt;
+ icframe_major = negop->icframe_vercnt;
+ icframe_minor = 0;
+
+ icmsg_major = negop->icmsg_vercnt;
+ icmsg_minor = 0;
/*
* Select the framework version number we will
@@ -79,26 +88,48 @@ void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
*/
for (i = 0; i < negop->icframe_vercnt; i++) {
- if (negop->icversion_data[i].major <= max_fw_version)
- icframe_vercnt = negop->icversion_data[i].major;
+ if ((negop->icversion_data[i].major == fw_major) &&
+ (negop->icversion_data[i].minor == fw_minor)) {
+ icframe_major = negop->icversion_data[i].major;
+ icframe_minor = negop->icversion_data[i].minor;
+ found_match = true;
+ }
}
+ if (!found_match)
+ goto fw_error;
+
+ found_match = false;
+
for (i = negop->icframe_vercnt;
(i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
- if (negop->icversion_data[i].major <= max_srv_version)
- icmsg_vercnt = negop->icversion_data[i].major;
+ if ((negop->icversion_data[i].major == srv_major) &&
+ (negop->icversion_data[i].minor == srv_minor)) {
+ icmsg_major = negop->icversion_data[i].major;
+ icmsg_minor = negop->icversion_data[i].minor;
+ found_match = true;
+ }
}
/*
- * Respond with the maximum framework and service
+ * Respond with the framework and service
* version numbers we can support.
*/
- negop->icframe_vercnt = 1;
- negop->icmsg_vercnt = 1;
- negop->icversion_data[0].major = icframe_vercnt;
- negop->icversion_data[0].minor = 0;
- negop->icversion_data[1].major = icmsg_vercnt;
- negop->icversion_data[1].minor = 0;
+
+fw_error:
+ if (!found_match) {
+ negop->icframe_vercnt = 0;
+ negop->icmsg_vercnt = 0;
+ } else {
+ negop->icframe_vercnt = 1;
+ negop->icmsg_vercnt = 1;
+ }
+
+ negop->icversion_data[0].major = icframe_major;
+ negop->icversion_data[0].minor = icframe_minor;
+ negop->icversion_data[1].major = icmsg_major;
+ negop->icversion_data[1].minor = icmsg_minor;
+ return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
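
Both version arguments pack major and minor numbers into a single 32-bit value, major in the upper 16 bits, matching the fw_major/fw_minor split above. A hedged caller-side sketch (the constants are illustrative, not any real service's versions):

/* Illustrative only: a utility service negotiating exact versions. */
#define EXAMPLE_FW_VERSION	((3 << 16) | 0)	/* framework 3.0 */
#define EXAMPLE_SRV_VERSION	((1 << 16) | 1)	/* service 1.1 */

static void example_negotiate(struct icmsg_hdr *hdr, u8 *buf)
{
	if (!vmbus_prep_negotiate_resp(hdr, NULL, buf, EXAMPLE_FW_VERSION,
				       EXAMPLE_SRV_VERSION))
		pr_warn("example: no version match with host\n");
}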
@@ -115,6 +146,10 @@ static struct vmbus_channel *alloc_channel(void)
return NULL;
spin_lock_init(&channel->inbound_lock);
+ spin_lock_init(&channel->sc_lock);
+
+ INIT_LIST_HEAD(&channel->sc_list);
+ INIT_LIST_HEAD(&channel->percpu_list);
channel->controlwq = create_workqueue("hv_vmbus_ctl");
if (!channel->controlwq) {
@@ -154,7 +189,20 @@ static void free_channel(struct vmbus_channel *channel)
queue_work(vmbus_connection.work_queue, &channel->work);
}
+static void percpu_channel_enq(void *arg)
+{
+ struct vmbus_channel *channel = arg;
+ int cpu = smp_processor_id();
+ list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+}
+
+static void percpu_channel_deq(void *arg)
+{
+ struct vmbus_channel *channel = arg;
+
+ list_del(&channel->percpu_list);
+}
/*
* vmbus_process_rescind_offer -
@@ -165,8 +213,34 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
struct vmbus_channel *channel = container_of(work,
struct vmbus_channel,
work);
+ unsigned long flags;
+ struct vmbus_channel *primary_channel;
+ struct vmbus_channel_relid_released msg;
+
+ if (channel->device_obj)
+ vmbus_device_unregister(channel->device_obj);
+ memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
+ msg.child_relid = channel->offermsg.child_relid;
+ msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
+ vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
+
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu,
+ percpu_channel_deq, channel, true);
+ else
+ percpu_channel_deq(channel);
- vmbus_device_unregister(channel->device_obj);
+ if (channel->primary_channel == NULL) {
+ spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
+ list_del(&channel->listentry);
+ spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+ } else {
+ primary_channel = channel->primary_channel;
+ spin_lock_irqsave(&primary_channel->sc_lock, flags);
+ list_del(&channel->sc_list);
+ spin_unlock_irqrestore(&primary_channel->sc_lock, flags);
+ }
+ free_channel(channel);
}
void vmbus_free_channels(void)
@@ -191,6 +265,7 @@ static void vmbus_process_offer(struct work_struct *work)
work);
struct vmbus_channel *channel;
bool fnew = true;
+ bool enq = false;
int ret;
unsigned long flags;
@@ -210,18 +285,61 @@ static void vmbus_process_offer(struct work_struct *work)
}
}
- if (fnew)
+ if (fnew) {
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
+ enq = true;
+ }
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+ if (enq) {
+ if (newchannel->target_cpu != smp_processor_id())
+ smp_call_function_single(newchannel->target_cpu,
+ percpu_channel_enq,
+ newchannel, true);
+ else
+ percpu_channel_enq(newchannel);
+ }
if (!fnew) {
+ /*
+ * Check to see if this is a sub-channel.
+ */
+ if (newchannel->offermsg.offer.sub_channel_index != 0) {
+ /*
+ * Process the sub-channel.
+ */
+ newchannel->primary_channel = channel;
+ spin_lock_irqsave(&channel->sc_lock, flags);
+ list_add_tail(&newchannel->sc_list, &channel->sc_list);
+ spin_unlock_irqrestore(&channel->sc_lock, flags);
+
+ if (newchannel->target_cpu != smp_processor_id())
+ smp_call_function_single(newchannel->target_cpu,
+ percpu_channel_enq,
+ newchannel, true);
+ else
+ percpu_channel_enq(newchannel);
+
+ newchannel->state = CHANNEL_OPEN_STATE;
+ if (channel->sc_creation_callback != NULL)
+ channel->sc_creation_callback(newchannel);
+
+ return;
+ }
+
free_channel(newchannel);
return;
}
/*
+ * This state is used to indicate a successful open
+ * so that when we do close the channel normally, we
+	 * can clean up properly
+ */
+ newchannel->state = CHANNEL_OPEN_STATE;
+
+ /*
* Start the process of binding this offer to the driver
* We need to set the DeviceObject field before calling
* vmbus_child_dev_add()
@@ -247,14 +365,74 @@ static void vmbus_process_offer(struct work_struct *work)
kfree(newchannel->device_obj);
free_channel(newchannel);
- } else {
+ }
+}
+
+enum {
+ IDE = 0,
+ SCSI,
+ NIC,
+ MAX_PERF_CHN,
+};
+
+/*
+ * This is an array of device_ids (device types) that are performance critical.
+ * We attempt to distribute the interrupt load for these devices across
+ * all available CPUs.
+ */
+static const struct hv_vmbus_device_id hp_devs[] = {
+ /* IDE */
+ { HV_IDE_GUID, },
+ /* Storage - SCSI */
+ { HV_SCSI_GUID, },
+ /* Network */
+ { HV_NIC_GUID, },
+};
+
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static u32 next_vp;
+
+/*
+ * Starting with Win8, we can statically distribute the incoming
+ * channel interrupt load by binding a channel to a VCPU. We
+ * implement here a simple round robin scheme for distributing
+ * the interrupt load.
+ * We will bind channels that are not performance critical to cpu 0 and
+ * performance critical channels (IDE, SCSI and Network) will be uniformly
+ * distributed across all available CPUs.
+ */
+static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
+{
+ u32 cur_cpu;
+ int i;
+ bool perf_chn = false;
+ u32 max_cpus = num_online_cpus();
+
+ for (i = IDE; i < MAX_PERF_CHN; i++) {
+ if (!memcmp(type_guid->b, hp_devs[i].guid,
+ sizeof(uuid_le))) {
+ perf_chn = true;
+ break;
+ }
+ }
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
/*
- * This state is used to indicate a successful open
- * so that when we do close the channel normally, we
- * can cleanup properly
+ * Prior to win8, all channel interrupts are
+ * delivered on cpu 0.
+ * Also if the channel is not a performance critical
+ * channel, bind it to cpu 0.
*/
- newchannel->state = CHANNEL_OPEN_STATE;
+ channel->target_cpu = 0;
+ channel->target_vp = 0;
+ return;
}
+ cur_cpu = (++next_vp % max_cpus);
+ channel->target_cpu = cur_cpu;
+ channel->target_vp = hv_context.vp_index[cur_cpu];
}
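
For instance, with four online CPUs, successive performance-critical offers land on VCPUs 1, 2, 3, 0, 1, ... (next_vp is pre-incremented), while all other channels stay on VCPU 0. The selection step in isolation, as a hedged sketch:

/* Illustrative only: the round-robin step used by init_vp_index(). */
static u32 example_pick_cpu(bool perf_chn, u32 *next_vp, u32 online_cpus)
{
	if (!perf_chn)
		return 0;	/* non-critical channels stay on CPU 0 */
	return ++(*next_vp) % online_cpus;
}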
/*
@@ -275,6 +453,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
return;
}
+ /*
+ * By default we setup state to enable batched
+ * reading. A specific service can choose to
+ * disable this prior to opening the channel.
+ */
+ newchannel->batched_reading = true;
+
+ /*
+ * Setup state for signalling the host.
+ */
+ newchannel->sig_event = (struct hv_input_signal_event *)
+ (ALIGN((unsigned long)
+ &newchannel->sig_buf,
+ HV_HYPERCALL_PARAM_ALIGN));
+
+ newchannel->sig_event->connectionid.asu32 = 0;
+ newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
+ newchannel->sig_event->flag_number = 0;
+ newchannel->sig_event->rsvdz = 0;
+
+ if (vmbus_proto_version != VERSION_WS2008) {
+ newchannel->is_dedicated_interrupt =
+ (offer->is_dedicated_interrupt != 0);
+ newchannel->sig_event->connectionid.u.id =
+ offer->connection_id;
+ }
+
+ init_vp_index(newchannel, &offer->offer.if_type);
+
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
newchannel->monitor_grp = (u8)offer->monitorid / 32;
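
The sig_event pointer set up above is carved out of an embedded sig_buf with ALIGN() because the hypercall input must sit on an HV_HYPERCALL_PARAM_ALIGN (8-byte) boundary. A hedged standalone illustration of the same trick (structure and names are hypothetical):

/* Illustrative only: align an embedded buffer for hypercall input. */
struct example_state {
	u8 sig_buf[sizeof(struct hv_input_signal_event) + 8];
	struct hv_input_signal_event *sig_event;
};

static void example_setup(struct example_state *s)
{
	s->sig_event = (struct hv_input_signal_event *)
		ALIGN((unsigned long)&s->sig_buf,
		      HV_HYPERCALL_PARAM_ALIGN);
}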
@@ -581,4 +788,86 @@ cleanup:
return ret;
}
-/* eof */
+/*
+ * Retrieve the (sub) channel on which to send an outgoing request.
+ * When a primary channel has multiple sub-channels, we choose a
+ * channel whose VCPU binding is closest to the VCPU on which
+ * this call is being made.
+ */
+struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
+{
+ struct list_head *cur, *tmp;
+ int cur_cpu = hv_context.vp_index[smp_processor_id()];
+ struct vmbus_channel *cur_channel;
+ struct vmbus_channel *outgoing_channel = primary;
+ int cpu_distance, new_cpu_distance;
+
+ if (list_empty(&primary->sc_list))
+ return outgoing_channel;
+
+ list_for_each_safe(cur, tmp, &primary->sc_list) {
+ cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+ if (cur_channel->state != CHANNEL_OPENED_STATE)
+ continue;
+
+ if (cur_channel->target_vp == cur_cpu)
+ return cur_channel;
+
+ cpu_distance = ((outgoing_channel->target_vp > cur_cpu) ?
+ (outgoing_channel->target_vp - cur_cpu) :
+ (cur_cpu - outgoing_channel->target_vp));
+
+ new_cpu_distance = ((cur_channel->target_vp > cur_cpu) ?
+ (cur_channel->target_vp - cur_cpu) :
+ (cur_cpu - cur_channel->target_vp));
+
+ if (cpu_distance < new_cpu_distance)
+ continue;
+
+ outgoing_channel = cur_channel;
+ }
+
+ return outgoing_channel;
+}
+EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
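
Worked example of the selection above: with sub-channels bound to VCPUs 2 and 5 and the caller running on VCPU 4, the distances are 2 and 1, so the VCPU-5 sub-channel wins; an exact match on the caller's VCPU returns immediately.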
+
+static void invoke_sc_cb(struct vmbus_channel *primary_channel)
+{
+ struct list_head *cur, *tmp;
+ struct vmbus_channel *cur_channel;
+
+ if (primary_channel->sc_creation_callback == NULL)
+ return;
+
+ list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
+ cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+
+ primary_channel->sc_creation_callback(cur_channel);
+ }
+}
+
+void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
+ void (*sc_cr_cb)(struct vmbus_channel *new_sc))
+{
+ primary_channel->sc_creation_callback = sc_cr_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
+
+bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
+{
+ bool ret;
+
+ ret = !list_empty(&primary->sc_list);
+
+ if (ret) {
+ /*
+ * Invoke the callback on sub-channel creation.
+ * This will present a uniform interface to the
+ * clients.
+ */
+ invoke_sc_cb(primary);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
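
A hedged driver-side sketch of the new sub-channel interface; the open logic in the callback is elided and all names are illustrative:

/* Illustrative only: arm the sub-channel creation callback. */
static void example_on_new_sc(struct vmbus_channel *new_sc)
{
	/* Typically vmbus_open(new_sc, ...) with the same callback and
	 * ring sizes as the primary channel. */
}

static void example_setup_subchannels(struct vmbus_channel *primary)
{
	vmbus_set_sc_create_callback(primary, example_on_new_sc);
	if (vmbus_are_subchannels_present(primary))
		pr_info("example: sub-channels already offered\n");
}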
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 650c9f0b664..ae22e3c1fc4 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -40,15 +41,93 @@ struct vmbus_connection vmbus_connection = {
};
/*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+static __u32 vmbus_get_next_version(__u32 current_version)
+{
+ switch (current_version) {
+ case (VERSION_WIN7):
+ return VERSION_WS2008;
+
+ case (VERSION_WIN8):
+ return VERSION_WIN7;
+
+ case (VERSION_WIN8_1):
+ return VERSION_WIN8;
+
+ case (VERSION_WS2008):
+ default:
+ return VERSION_INVAL;
+ }
+}
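
The connect path below starts at VERSION_CURRENT (VERSION_WIN8_1 at this point in the tree) and calls this helper after each refusal, so against an older host the attempt order is WIN8_1 -> WIN8 -> WIN7 -> WS2008 before giving up with VERSION_INVAL. A hedged trace of that walk:

/* Illustrative only: exhaust the downgrade chain. */
static __u32 example_walk_versions(void)
{
	__u32 v = VERSION_WIN8_1;

	while (v != VERSION_INVAL)
		v = vmbus_get_next_version(v); /* WIN8, WIN7, WS2008, INVAL */
	return v;
}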
+
+static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ __u32 version)
+{
+ int ret = 0;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+
+ init_completion(&msginfo->waitevent);
+
+ msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+ msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+ msg->vmbus_version_requested = version;
+ msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+ msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
+ msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ if (version == VERSION_WIN8_1)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+
+ /*
+ * Add to list before we send the request since we may
+ * receive the response before returning from this routine
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&msginfo->msglistentry,
+ &vmbus_connection.chn_msg_list);
+
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_initiate_contact));
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return ret;
+ }
+
+ /* Wait for the connection response */
+ wait_for_completion(&msginfo->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ /* Check if successful */
+ if (msginfo->response.version_response.version_supported) {
+ vmbus_connection.conn_state = CONNECTED;
+ } else {
+ return -ECONNREFUSED;
+ }
+
+ return ret;
+}
+
+/*
* vmbus_connect - Sends a connect request on the partition service connection
*/
int vmbus_connect(void)
{
int ret = 0;
- int t;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_initiate_contact *msg;
- unsigned long flags;
+ __u32 version;
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
@@ -84,9 +163,10 @@ int vmbus_connect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages =
- (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 1);
- if (vmbus_connection.monitor_pages == NULL) {
+ vmbus_connection.monitor_pages[0] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
+ vmbus_connection.monitor_pages[1] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
+ if ((vmbus_connection.monitor_pages[0] == NULL) ||
+ (vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
goto cleanup;
}
@@ -99,69 +179,41 @@ int vmbus_connect(void)
goto cleanup;
}
- init_completion(&msginfo->waitevent);
-
- msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
-
- msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
- msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
- msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
- msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
- msg->monitor_page2 = virt_to_phys(
- (void *)((unsigned long)vmbus_connection.monitor_pages +
- PAGE_SIZE));
-
/*
- * Add to list before we send the request since we may
- * receive the response before returning from this routine
+ * Negotiate a compatible VMBUS version number with the
+ * host. We start with the highest number we can support
+ * and work our way down until we negotiate a compatible
+ * version.
*/
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&msginfo->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ version = VERSION_CURRENT;
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_initiate_contact));
- if (ret != 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- goto cleanup;
- }
+ do {
+ ret = vmbus_negotiate_version(msginfo, version);
+ if (ret == -ETIMEDOUT)
+ goto cleanup;
- /* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (vmbus_connection.conn_state == CONNECTED)
+ break;
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ version = vmbus_get_next_version(version);
+ } while (version != VERSION_INVAL);
- /* Check if successful */
- if (msginfo->response.version_response.version_supported) {
- vmbus_connection.conn_state = CONNECTED;
- } else {
- pr_err("Unable to connect, "
- "Version %d not supported by Hyper-V\n",
- VMBUS_REVISION_NUMBER);
- ret = -ECONNREFUSED;
+ if (version == VERSION_INVAL)
goto cleanup;
- }
+
+ vmbus_proto_version = version;
+ pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
+ host_info_eax, host_info_ebx >> 16,
+ host_info_ebx & 0xFFFF, host_info_ecx,
+ host_info_edx >> 24, host_info_edx & 0xFFFFFF,
+ version >> 16, version & 0xFFFF);
kfree(msginfo);
return 0;
cleanup:
+ pr_err("Unable to connect to host\n");
vmbus_connection.conn_state = DISCONNECTED;
if (vmbus_connection.work_queue)
@@ -172,16 +224,38 @@ cleanup:
vmbus_connection.int_page = NULL;
}
- if (vmbus_connection.monitor_pages) {
- free_pages((unsigned long)vmbus_connection.monitor_pages, 1);
- vmbus_connection.monitor_pages = NULL;
- }
+ free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0);
+ free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages[1] = NULL;
kfree(msginfo);
return ret;
}
+/*
+ * Map the given relid to the corresponding channel based on the
+ * per-cpu list of channels that have been affinitized to this CPU.
+ * This will be used in the channel callback path as we can do this
+ * mapping in a lock-free fashion.
+ */
+static struct vmbus_channel *pcpu_relid2channel(u32 relid)
+{
+ struct vmbus_channel *channel;
+ struct vmbus_channel *found_channel = NULL;
+ int cpu = smp_processor_id();
+ struct list_head *pcpu_head = &hv_context.percpu_list[cpu];
+
+ list_for_each_entry(channel, pcpu_head, percpu_list) {
+ if (channel->offermsg.child_relid == relid) {
+ found_channel = channel;
+ break;
+ }
+ }
+
+ return found_channel;
+}
/*
* relid2channel - Get the channel object given its
@@ -192,12 +266,26 @@ struct vmbus_channel *relid2channel(u32 relid)
struct vmbus_channel *channel;
struct vmbus_channel *found_channel = NULL;
unsigned long flags;
+ struct list_head *cur, *tmp;
+ struct vmbus_channel *cur_sc;
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->offermsg.child_relid == relid) {
found_channel = channel;
break;
+ } else if (!list_empty(&channel->sc_list)) {
+ /*
+ * Deal with sub-channels.
+ */
+ list_for_each_safe(cur, tmp, &channel->sc_list) {
+ cur_sc = list_entry(cur, struct vmbus_channel,
+ sc_list);
+ if (cur_sc->offermsg.child_relid == relid) {
+ found_channel = cur_sc;
+ break;
+ }
+ }
}
}
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
@@ -211,13 +299,15 @@ struct vmbus_channel *relid2channel(u32 relid)
static void process_chn_event(u32 relid)
{
struct vmbus_channel *channel;
- unsigned long flags;
+ void *arg;
+ bool read_state;
+ u32 bytes_to_read;
/*
* Find the channel based on this relid and invokes the
* channel callback to process the event
*/
- channel = relid2channel(relid);
+ channel = pcpu_relid2channel(relid);
if (!channel) {
pr_err("channel not found for relid - %u\n", relid);
@@ -227,19 +317,40 @@ static void process_chn_event(u32 relid)
/*
* A channel once created is persistent even when there
* is no driver handling the device. An unloading driver
- * sets the onchannel_callback to NULL under the
- * protection of the channel inbound_lock. Thus, checking
- * and invoking the driver specific callback takes care of
- * orderly unloading of the driver.
+ * sets the onchannel_callback to NULL on the same CPU
+	 * that handles this interrupt (in interrupt context).
+ * Thus, checking and invoking the driver specific callback takes
+ * care of orderly unloading of the driver.
*/
- spin_lock_irqsave(&channel->inbound_lock, flags);
- if (channel->onchannel_callback != NULL)
- channel->onchannel_callback(channel->channel_callback_context);
- else
+ if (channel->onchannel_callback != NULL) {
+ arg = channel->channel_callback_context;
+ read_state = channel->batched_reading;
+ /*
+ * This callback reads the messages sent by the host.
+ * We can optimize host to guest signaling by ensuring:
+ * 1. While reading the channel, we disable interrupts from
+ * host.
+ * 2. Ensure that we process all posted messages from the host
+ * before returning from this callback.
+ * 3. Once we return, enable signaling from the host. Once this
+ * state is set we check to see if additional packets are
+ * available to read. In this case we repeat the process.
+ */
+
+ do {
+ if (read_state)
+ hv_begin_read(&channel->inbound);
+ channel->onchannel_callback(arg);
+ if (read_state)
+ bytes_to_read = hv_end_read(&channel->inbound);
+ else
+ bytes_to_read = 0;
+ } while (read_state && (bytes_to_read != 0));
+ } else {
pr_err("no channel callback for relid - %u\n", relid);
+ }
- spin_unlock_irqrestore(&channel->inbound_lock, flags);
}
/*
@@ -248,10 +359,32 @@ static void process_chn_event(u32 relid)
void vmbus_on_event(unsigned long data)
{
u32 dword;
- u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ u32 maxdword;
int bit;
u32 relid;
- u32 *recv_int_page = vmbus_connection.recv_int_page;
+ u32 *recv_int_page = NULL;
+ void *page_addr;
+ int cpu = smp_processor_id();
+ union hv_synic_event_flags *event;
+
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
+ maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ recv_int_page = vmbus_connection.recv_int_page;
+ } else {
+ /*
+ * When the host is win8 and beyond, the event page
+ * can be directly checked to get the id of the channel
+ * that has the interrupt pending.
+ */
+ maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
+ page_addr = hv_context.synic_event_page[cpu];
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ recv_int_page = event->flags32;
+ }
+
+
/* Check events */
if (!recv_int_page)
@@ -307,12 +440,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
/*
* vmbus_set_event - Send an event notification to the parent
*/
-int vmbus_set_event(u32 child_relid)
+int vmbus_set_event(struct vmbus_channel *channel)
{
- /* Each u32 represents 32 channels */
- sync_set_bit(child_relid & 31,
- (unsigned long *)vmbus_connection.send_int_page +
- (child_relid >> 5));
+ u32 child_relid = channel->offermsg.child_relid;
+
+ if (!channel->is_dedicated_interrupt) {
+ /* Each u32 represents 32 channels */
+ sync_set_bit(child_relid & 31,
+ (unsigned long *)vmbus_connection.send_int_page +
+ (child_relid >> 5));
+ }
- return hv_signal_event();
+ return hv_signal_event(channel->sig_event);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3648f8f0f36..edfc8488cb0 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
+#include <linux/interrupt.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -34,13 +35,16 @@
struct hv_context hv_context = {
.synic_initialized = false,
.hypercall_page = NULL,
- .signal_event_param = NULL,
- .signal_event_buffer = NULL,
};
/*
* query_hypervisor_info - Get version info of the windows hypervisor
*/
+unsigned int host_info_eax;
+unsigned int host_info_ebx;
+unsigned int host_info_ecx;
+unsigned int host_info_edx;
+
static int query_hypervisor_info(void)
{
unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
edx = 0;
op = HVCPUID_VERSION;
cpuid(op, &eax, &ebx, &ecx, &edx);
- pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
- eax,
- ebx >> 16,
- ebx & 0xFFFF,
- ecx,
- edx >> 24,
- edx & 0xFFFFFF);
+ host_info_eax = eax;
+ host_info_ebx = ebx;
+ host_info_ecx = ecx;
+ host_info_edx = edx;
}
return max_leaf;
}
@@ -137,6 +138,10 @@ int hv_init(void)
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
memset(hv_context.synic_message_page, 0,
sizeof(void *) * NR_CPUS);
+ memset(hv_context.vp_index, 0,
+ sizeof(int) * NR_CPUS);
+ memset(hv_context.event_dpc, 0,
+ sizeof(void *) * NR_CPUS);
max_leaf = query_hypervisor_info();
@@ -168,24 +173,6 @@ int hv_init(void)
hv_context.hypercall_page = virtaddr;
- /* Setup the global signal event param for the signal event hypercall */
- hv_context.signal_event_buffer =
- kmalloc(sizeof(struct hv_input_signal_event_buffer),
- GFP_KERNEL);
- if (!hv_context.signal_event_buffer)
- goto cleanup;
-
- hv_context.signal_event_param =
- (struct hv_input_signal_event *)
- (ALIGN((unsigned long)
- hv_context.signal_event_buffer,
- HV_HYPERCALL_PARAM_ALIGN));
- hv_context.signal_event_param->connectionid.asu32 = 0;
- hv_context.signal_event_param->connectionid.u.id =
- VMBUS_EVENT_CONNECTION_ID;
- hv_context.signal_event_param->flag_number = 0;
- hv_context.signal_event_param->rsvdz = 0;
-
return 0;
cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
- kfree(hv_context.signal_event_buffer);
- hv_context.signal_event_buffer = NULL;
- hv_context.signal_event_param = NULL;
-
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,16 +256,68 @@ int hv_post_message(union hv_connection_id connection_id,
*
* This involves a hypercall.
*/
-u16 hv_signal_event(void)
+u16 hv_signal_event(void *con_id)
{
u16 status;
- status = do_hypercall(HVCALL_SIGNAL_EVENT,
- hv_context.signal_event_param,
- NULL) & 0xFFFF;
+ status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
+
return status;
}
+
+int hv_synic_alloc(void)
+{
+ size_t size = sizeof(struct tasklet_struct);
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+ if (hv_context.event_dpc[cpu] == NULL) {
+ pr_err("Unable to allocate event dpc\n");
+ goto err;
+ }
+ tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
+ hv_context.synic_message_page[cpu] =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+
+ if (hv_context.synic_message_page[cpu] == NULL) {
+ pr_err("Unable to allocate SYNIC message page\n");
+ goto err;
+ }
+
+ hv_context.synic_event_page[cpu] =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+
+ if (hv_context.synic_event_page[cpu] == NULL) {
+ pr_err("Unable to allocate SYNIC event page\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ return -ENOMEM;
+}
+
+static void hv_synic_free_cpu(int cpu)
+{
+ kfree(hv_context.event_dpc[cpu]);
+ if (hv_context.synic_event_page[cpu])
+ free_page((unsigned long)hv_context.synic_event_page[cpu]);
+ if (hv_context.synic_message_page[cpu])
+ free_page((unsigned long)hv_context.synic_message_page[cpu]);
+}
+
+void hv_synic_free(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ hv_synic_free_cpu(cpu);
+}
+
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
*
@@ -290,15 +325,15 @@ u16 hv_signal_event(void)
* retrieve the initialized message and event pages. Otherwise, we create and
* initialize the message and event pages.
*/
-void hv_synic_init(void *irqarg)
+void hv_synic_init(void *arg)
{
u64 version;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
+ u64 vp_index;
- u32 irq_vector = *((u32 *)(irqarg));
int cpu = smp_processor_id();
if (!hv_context.hypercall_page)
@@ -307,22 +342,6 @@ void hv_synic_init(void *irqarg)
/* Check the version */
rdmsrl(HV_X64_MSR_SVERSION, version);
- hv_context.synic_message_page[cpu] =
- (void *)get_zeroed_page(GFP_ATOMIC);
-
- if (hv_context.synic_message_page[cpu] == NULL) {
- pr_err("Unable to allocate SYNIC message page\n");
- goto cleanup;
- }
-
- hv_context.synic_event_page[cpu] =
- (void *)get_zeroed_page(GFP_ATOMIC);
-
- if (hv_context.synic_event_page[cpu] == NULL) {
- pr_err("Unable to allocate SYNIC event page\n");
- goto cleanup;
- }
-
/* Setup the Synic's message page */
rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
simp.simp_enabled = 1;
@@ -343,9 +362,9 @@ void hv_synic_init(void *irqarg)
rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.as_uint64 = 0;
- shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
+ shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
shared_sint.masked = false;
- shared_sint.auto_eoi = false;
+ shared_sint.auto_eoi = true;
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
@@ -356,14 +375,16 @@ void hv_synic_init(void *irqarg)
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
hv_context.synic_initialized = true;
- return;
-cleanup:
- if (hv_context.synic_event_page[cpu])
- free_page((unsigned long)hv_context.synic_event_page[cpu]);
+ /*
+ * Setup the mapping between Hyper-V's notion
+	 * of cpuid and Linux's notion of cpuid.
+ * This array will be indexed using Linux cpuid.
+ */
+ rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+ hv_context.vp_index[cpu] = (u32)vp_index;
- if (hv_context.synic_message_page[cpu])
- free_page((unsigned long)hv_context.synic_message_page[cpu]);
+ INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
return;
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index dd289fd179c..5e90c5d771a 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -29,7 +30,6 @@
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
-#include <linux/mman.h>
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
@@ -118,7 +118,14 @@ union dm_caps {
struct {
__u64 balloon:1;
__u64 hot_add:1;
- __u64 reservedz:62;
+ /*
+ * To support guests that may have alignment
+ * limitations on hot-add, the guest can specify
+ * its alignment requirements; a value of n
+		 * represents an alignment of 2^n in megabytes.
+ */
+ __u64 hot_add_alignment:4;
+ __u64 reservedz:58;
} cap_bits;
__u64 caps;
} __packed;
@@ -413,12 +420,56 @@ struct dm_info_msg {
* End protocol definitions.
*/
-static bool hot_add;
+/*
+ * State to manage hot adding memory into the guest.
+ * The range start_pfn : end_pfn specifies the range
+ * that the host has asked us to hot add. The range
+ * start_pfn : ha_end_pfn specifies the range that we have
+ * currently hot added. We hot add in multiples of 128M
+ * chunks; it is possible that we may not be able to bring
+ * online all the pages in the region. The range
+ * covered_start_pfn : covered_end_pfn defines the pages that can
+ * be brought online.
+ */
+
+struct hv_hotadd_state {
+ struct list_head list;
+ unsigned long start_pfn;
+ unsigned long covered_start_pfn;
+ unsigned long covered_end_pfn;
+ unsigned long ha_end_pfn;
+ unsigned long end_pfn;
+};
+
+struct balloon_state {
+ __u32 num_pages;
+ struct work_struct wrk;
+};
+
+struct hot_add_wrk {
+ union dm_mem_page_range ha_page_range;
+ union dm_mem_page_range ha_region_range;
+ struct work_struct wrk;
+};
+
+static bool hot_add = true;
static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 45;
+
+/*
+ * The last time we posted a pressure report to host.
+ */
+static unsigned long last_post_time;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = (5 * PAGE_SIZE);
@@ -440,6 +491,7 @@ enum hv_dm_state {
static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
+#define HA_CHUNK (32 * 1024)	/* hot-add granularity: 32768 4-KiB pages == 128 MiB */
struct hv_dynmem_device {
struct hv_device *dev;
@@ -453,7 +505,28 @@ struct hv_dynmem_device {
unsigned int num_pages_ballooned;
/*
- * This thread handles both balloon/hot-add
+ * State to manage the ballooning (up) operation.
+ */
+ struct balloon_state balloon_wrk;
+
+ /*
+ * State to execute the "hot-add" operation.
+ */
+ struct hot_add_wrk ha_wrk;
+
+ /*
+ * This state tracks if the host has specified a hot-add
+ * region.
+ */
+ bool host_specified_ha_region;
+
+ /*
+ * State to synchronize hot-add.
+ */
+ struct completion ol_waitevent;
+ bool ha_waiting;
+ /*
+ * This thread handles hot-add
* requests from the host as well as notifying
* the host with regards to memory pressure in
* the guest.
@@ -461,6 +534,11 @@ struct hv_dynmem_device {
struct task_struct *thread;
/*
+ * A list of hot-add regions.
+ */
+ struct list_head ha_region_list;
+
+ /*
* We start with the highest version we can support
* and downgrade based on the host; we save here the
* next version to try.
@@ -470,35 +548,356 @@ struct hv_dynmem_device {
static struct hv_dynmem_device dm_device;
-static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
+static void post_status(struct hv_dynmem_device *dm);
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
+ int i;
- struct dm_hot_add_response resp;
+ for (i = 0; i < size; i++) {
+ struct page *pg;
+ pg = pfn_to_page(start_pfn + i);
+ __online_page_set_limits(pg);
+ __online_page_increment_counters(pg);
+ __online_page_free(pg);
+ }
+}
+
+static void hv_mem_hot_add(unsigned long start, unsigned long size,
+ unsigned long pfn_count,
+ struct hv_hotadd_state *has)
+{
+ int ret = 0;
+ int i, nid;
+ unsigned long start_pfn;
+ unsigned long processed_pfn;
+ unsigned long total_pfn = pfn_count;
+
+ for (i = 0; i < (size/HA_CHUNK); i++) {
+ start_pfn = start + (i * HA_CHUNK);
+ has->ha_end_pfn += HA_CHUNK;
+
+ if (total_pfn > HA_CHUNK) {
+ processed_pfn = HA_CHUNK;
+ total_pfn -= HA_CHUNK;
+ } else {
+ processed_pfn = total_pfn;
+ total_pfn = 0;
+ }
+
+ has->covered_end_pfn += processed_pfn;
+
+ init_completion(&dm_device.ol_waitevent);
+ dm_device.ha_waiting = true;
+
+ nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
+ ret = add_memory(nid, PFN_PHYS((start_pfn)),
+ (HA_CHUNK << PAGE_SHIFT));
+
+ if (ret) {
+			pr_info("hot_add memory failed, error is %d\n", ret);
+ if (ret == -EEXIST) {
+				 * This error indicates that the failure
+				 * is not transient. This is the
+ * is not a transient failure. This is the
+ * case where the guest's physical address map
+ * precludes hot adding memory. Stop all further
+ * memory hot-add.
+ */
+ do_hot_add = false;
+ }
+ has->ha_end_pfn -= HA_CHUNK;
+ has->covered_end_pfn -= processed_pfn;
+ break;
+ }
+
+ /*
+ * Wait for the memory block to be onlined.
+ * Since the hot add has succeeded, it is ok to
+ * proceed even if the pages in the hot added region
+ * have not been "onlined" within the allowed time.
+ */
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
+ post_status(&dm_device);
+ }
+
+ return;
+}
+
+static void hv_online_page(struct page *pg)
+{
+ struct list_head *cur;
+ struct hv_hotadd_state *has;
+ unsigned long cur_start_pgp;
+ unsigned long cur_end_pgp;
+
+ if (dm_device.ha_waiting) {
+ dm_device.ha_waiting = false;
+ complete(&dm_device.ol_waitevent);
+ }
+
+ list_for_each(cur, &dm_device.ha_region_list) {
+ has = list_entry(cur, struct hv_hotadd_state, list);
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(has->covered_start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+
+ if (((unsigned long)pg >= cur_start_pgp) &&
+ ((unsigned long)pg < cur_end_pgp)) {
+ /*
+ * This frame is currently backed; online the
+ * page.
+ */
+ __online_page_set_limits(pg);
+ __online_page_increment_counters(pg);
+ __online_page_free(pg);
+ has->covered_start_pfn++;
+ }
+ }
+}
+
+static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+{
+ struct list_head *cur;
+ struct hv_hotadd_state *has;
+ unsigned long residual, new_inc;
+
+ if (list_empty(&dm_device.ha_region_list))
+ return false;
+
+ list_for_each(cur, &dm_device.ha_region_list) {
+ has = list_entry(cur, struct hv_hotadd_state, list);
+
+ /*
+ * If the pfn range we are dealing with is not in the current
+ * "hot add block", move on.
+ */
+ if ((start_pfn >= has->end_pfn))
+ continue;
+ /*
+		 * If the current hot-add request extends beyond
+		 * our current limit, extend it.
+ */
+ if ((start_pfn + pfn_cnt) > has->end_pfn) {
+ residual = (start_pfn + pfn_cnt - has->end_pfn);
+ /*
+ * Extend the region by multiples of HA_CHUNK.
+ */
+ new_inc = (residual / HA_CHUNK) * HA_CHUNK;
+ if (residual % HA_CHUNK)
+ new_inc += HA_CHUNK;
+
+ has->end_pfn += new_inc;
+ }
+
+ /*
+ * If the current start pfn is not where the covered_end
+ * is, update it.
+ */
+
+ if (has->covered_end_pfn != start_pfn) {
+ has->covered_end_pfn = start_pfn;
+ has->covered_start_pfn = start_pfn;
+ }
+ return true;
+
+ }
+
+ return false;
+}
+
+static unsigned long handle_pg_range(unsigned long pg_start,
+ unsigned long pg_count)
+{
+ unsigned long start_pfn = pg_start;
+ unsigned long pfn_cnt = pg_count;
+ unsigned long size;
+ struct list_head *cur;
+ struct hv_hotadd_state *has;
+ unsigned long pgs_ol = 0;
+ unsigned long old_covered_state;
+
+ if (list_empty(&dm_device.ha_region_list))
+ return 0;
+
+ list_for_each(cur, &dm_device.ha_region_list) {
+ has = list_entry(cur, struct hv_hotadd_state, list);
+
+ /*
+ * If the pfn range we are dealing with is not in the current
+ * "hot add block", move on.
+ */
+ if ((start_pfn >= has->end_pfn))
+ continue;
- if (do_hot_add) {
+ old_covered_state = has->covered_end_pfn;
- pr_info("Memory hot add not supported\n");
+ if (start_pfn < has->ha_end_pfn) {
+ /*
+ * This is the case where we are backing pages
+ * in an already hot added region. Bring
+ * these pages online first.
+ */
+ pgs_ol = has->ha_end_pfn - start_pfn;
+ if (pgs_ol > pfn_cnt)
+ pgs_ol = pfn_cnt;
+ hv_bring_pgs_online(start_pfn, pgs_ol);
+ has->covered_end_pfn += pgs_ol;
+ has->covered_start_pfn += pgs_ol;
+ pfn_cnt -= pgs_ol;
+ }
+ if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
+ /*
+ * We have some residual hot add range
+ * that needs to be hot added; hot add
+ * it now. Hot add a multiple of
+			 * HA_CHUNK that fully covers the pages
+ * we have.
+ */
+ size = (has->end_pfn - has->ha_end_pfn);
+ if (pfn_cnt <= size) {
+ size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
+ if (pfn_cnt % HA_CHUNK)
+ size += HA_CHUNK;
+ } else {
+ pfn_cnt = size;
+ }
+ hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
+ }
/*
- * Currently we do not support hot add.
- * Just fail the request.
+ * If we managed to online any pages that were given to us,
+ * we declare success.
*/
+ return has->covered_end_pfn - old_covered_state;
+
+ }
+
+ return 0;
+}
+
+static unsigned long process_hot_add(unsigned long pg_start,
+ unsigned long pfn_cnt,
+ unsigned long rg_start,
+ unsigned long rg_size)
+{
+ struct hv_hotadd_state *ha_region = NULL;
+
+ if (pfn_cnt == 0)
+ return 0;
+
+ if (!dm_device.host_specified_ha_region)
+ if (pfn_covered(pg_start, pfn_cnt))
+ goto do_pg_range;
+
+ /*
+	 * If the host has specified a hot-add range, deal with it first.
+ */
+
+ if (rg_size != 0) {
+ ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
+ if (!ha_region)
+ return 0;
+
+ INIT_LIST_HEAD(&ha_region->list);
+
+ list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+ ha_region->start_pfn = rg_start;
+ ha_region->ha_end_pfn = rg_start;
+ ha_region->covered_start_pfn = pg_start;
+ ha_region->covered_end_pfn = pg_start;
+ ha_region->end_pfn = rg_start + rg_size;
}
+do_pg_range:
+ /*
+	 * Process the specified page range, bringing the pages
+	 * online where possible.
+ */
+ return handle_pg_range(pg_start, pfn_cnt);
+}
+
+#endif
+
+static void hot_add_req(struct work_struct *dummy)
+{
+ struct dm_hot_add_response resp;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unsigned long pg_start, pfn_cnt;
+ unsigned long rg_start, rg_sz;
+#endif
+ struct hv_dynmem_device *dm = &dm_device;
+
memset(&resp, 0, sizeof(struct dm_hot_add_response));
resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
resp.hdr.size = sizeof(struct dm_hot_add_response);
- resp.hdr.trans_id = atomic_inc_return(&trans_id);
- resp.page_count = 0;
- resp.result = 0;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
+ pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
+
+ rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
+ rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
+
+ if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
+ unsigned long region_size;
+ unsigned long region_start;
+
+ /*
+ * The host has not specified the hot-add region.
+ * Based on the hot-add page range being specified,
+		 * compute a hot-add region that can cover the pages
+		 * that need to be hot-added while honoring the
+		 * alignment and size requirements Linux imposes on hot-add.
+ */
+ region_start = pg_start;
+ region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
+ if (pfn_cnt % HA_CHUNK)
+ region_size += HA_CHUNK;
+
+ region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
+
+ rg_start = region_start;
+ rg_sz = region_size;
+ }
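[Editor's note: a worked illustration of the rounding above, with hypothetical values; HA_CHUNK is defined earlier in this file as 32 * 1024 pfns, i.e. 128 MiB of 4K pages. For pg_start = 0x8a000 and pfn_cnt = 2000, region_size rounds 2000 up to one full HA_CHUNK (0x8000 pfns) and region_start rounds 0x8a000 down to 0x88000, so the computed region [0x88000, 0x90000) is 128 MiB aligned and covers every requested page.]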
+
+ if (do_hot_add)
+ resp.page_count = process_hot_add(pg_start, pfn_cnt,
+ rg_start, rg_sz);
+#endif
+ /*
+ * The result field of the response structure has the
+ * following semantics:
+ *
+	 * 1. If all or some pages were hot-added: the guest should return success.
+ *
+ * 2. If no pages could be hot-added:
+ *
+ * If the guest returns success, then the host
+ * will not attempt any further hot-add operations. This
+ * signifies a permanent failure.
+ *
+ * If the guest returns failure, then this failure will be
+ * treated as a transient failure and the host may retry the
+ * hot-add operation after some delay.
+ */
+ if (resp.page_count > 0)
+ resp.result = 1;
+ else if (!do_hot_add)
+ resp.result = 1;
+ else
+ resp.result = 0;
+
+ if (!do_hot_add || (resp.page_count == 0))
+ pr_info("Memory hot add failed\n");
dm->state = DM_INITIALIZED;
+ resp.hdr.trans_id = atomic_inc_return(&trans_id);
vmbus_sendpacket(dm->dev->channel, &resp,
sizeof(struct dm_hot_add_response),
(unsigned long)NULL,
VM_PKT_DATA_INBAND, 0);
-
}
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
@@ -517,6 +916,34 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
}
}
+static unsigned long compute_balloon_floor(void)
+{
+ unsigned long min_pages;
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+	/* Simple continuous piecewise linear function:
+ * max MiB -> min MiB gradient
+ * 0 0
+ * 16 16
+ * 32 24
+ * 128 72 (1/2)
+ * 512 168 (1/4)
+ * 2048 360 (1/8)
+ * 8192 552 (1/32)
+ * 32768 1320
+ * 131072 4392
+ */
+ if (totalram_pages < MB2PAGES(128))
+ min_pages = MB2PAGES(8) + (totalram_pages >> 1);
+ else if (totalram_pages < MB2PAGES(512))
+ min_pages = MB2PAGES(40) + (totalram_pages >> 2);
+ else if (totalram_pages < MB2PAGES(2048))
+ min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+ else
+ min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+#undef MB2PAGES
+ return min_pages;
+}
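[Editor's note: a worked example of the piecewise function, not part of the patch. A guest with 1024 MiB of RAM falls in the 512-2048 MiB segment, so min_pages = MB2PAGES(104) + MB2PAGES(1024)/8 = MB2PAGES(232); the driver will not balloon the guest below roughly 232 MiB, consistent with the gradient table above (168 MiB at 512 MiB, 360 MiB at 2048 MiB).]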
+
/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects guests to post this status
@@ -530,16 +957,53 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
+ struct sysinfo val;
+ unsigned long now = jiffies;
+ unsigned long last_post = last_post_time;
+
+ if (pressure_report_delay > 0) {
+ --pressure_report_delay;
+ return;
+ }
+ if (!time_after(now, (last_post_time + HZ)))
+ return;
+ si_meminfo(&val);
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
status.hdr.trans_id = atomic_inc_return(&trans_id);
+ /*
+ * The host expects the guest to report free memory.
+ * Further, the host expects the pressure information to
+ * include the ballooned out pages.
+ * For a given amount of memory that we are managing, we
+ * need to compute a floor below which we should not balloon.
+ * Compute this and add it to the pressure report.
+ */
+ status.num_avail = val.freeram;
+ status.num_committed = vm_memory_committed() +
+ dm->num_pages_ballooned +
+ compute_balloon_floor();
- status.num_committed = vm_memory_committed();
+ /*
+ * If our transaction ID is no longer current, just don't
+ * send the status. This can happen if we were interrupted
+ * after we picked our transaction ID.
+ */
+ if (status.hdr.trans_id != atomic_read(&trans_id))
+ return;
+ /*
+ * If the last post time that we sampled has changed,
+	 * we have raced; don't post the status.
+ */
+ if (last_post != last_post_time)
+ return;
+
+ last_post_time = jiffies;
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
(unsigned long)NULL,
@@ -547,8 +1011,6 @@ static void post_status(struct hv_dynmem_device *dm)
}
-
-
static void free_balloon_pages(struct hv_dynmem_device *dm,
union dm_mem_page_range *range_array)
{
@@ -597,6 +1059,14 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
dm->num_pages_ballooned += alloc_unit;
+ /*
+	 * If we allocated 2M pages, split them so we
+	 * can free the resulting 4k pages in any order.
+ */
+
+ if (alloc_unit != 1)
+ split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
+
bl_resp->range_count++;
bl_resp->range_array[i].finfo.start_page =
page_to_pfn(pg);
@@ -610,9 +1080,9 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
-static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
+static void balloon_up(struct work_struct *dummy)
{
- int num_pages = req->num_pages;
+ int num_pages = dm_device.balloon_wrk.num_pages;
int num_ballooned = 0;
struct dm_balloon_response *bl_resp;
int alloc_unit;
@@ -623,28 +1093,33 @@ static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
/*
- * Currently, we only support 4k allocations.
+ * We will attempt 2M allocations. However, if we fail to
+	 * allocate 2M chunks, we will fall back to 4k allocations.
*/
- alloc_unit = 1;
+ alloc_unit = 512;
while (!done) {
bl_resp = (struct dm_balloon_response *)send_buffer;
memset(send_buffer, 0, PAGE_SIZE);
bl_resp->hdr.type = DM_BALLOON_RESPONSE;
- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
bl_resp->more_pages = 1;
num_pages -= num_ballooned;
- num_ballooned = alloc_balloon_pages(dm, num_pages,
+ num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
bl_resp, alloc_unit,
&alloc_error);
+ if ((alloc_error) && (alloc_unit != 1)) {
+ alloc_unit = 1;
+ continue;
+ }
+
if ((alloc_error) || (num_ballooned == num_pages)) {
bl_resp->more_pages = 0;
done = true;
- dm->state = DM_INITIALIZED;
+ dm_device.state = DM_INITIALIZED;
}
/*
@@ -654,6 +1129,7 @@ static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
*/
do {
+ bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
ret = vmbus_sendpacket(dm_device.dev->channel,
bl_resp,
bl_resp->hdr.size,
@@ -662,7 +1138,7 @@ static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
if (ret == -EAGAIN)
msleep(20);
-
+ post_status(&dm_device);
} while (ret == -EAGAIN);
if (ret) {
@@ -672,7 +1148,7 @@ static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
pr_info("Balloon response failed\n");
for (i = 0; i < bl_resp->range_count; i++)
- free_balloon_pages(dm,
+ free_balloon_pages(&dm_device,
&bl_resp->range_array[i]);
done = true;
@@ -689,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm,
struct dm_unballoon_response resp;
int i;
- for (i = 0; i < range_count; i++)
+ for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
+ post_status(&dm_device);
+ }
if (req->more_pages == 1)
return;
@@ -714,10 +1192,10 @@ static int dm_thread_func(void *dm_dev)
{
struct hv_dynmem_device *dm = dm_dev;
int t;
- unsigned long scan_start;
while (!kthread_should_stop()) {
- t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ);
+ t = wait_for_completion_interruptible_timeout(
+ &dm_device.config_event, 1*HZ);
/*
* The host expects us to post information on the memory
* pressure every second.
@@ -726,22 +1204,6 @@ static int dm_thread_func(void *dm_dev)
if (t == 0)
post_status(dm);
- scan_start = jiffies;
- switch (dm->state) {
- case DM_BALLOON_UP:
- balloon_up(dm, (struct dm_balloon *)recv_buffer);
- break;
-
- case DM_HOT_ADD:
- hot_add_req(dm, (struct dm_hot_add *)recv_buffer);
- break;
- default:
- break;
- }
-
- if (!time_in_range(jiffies, scan_start, scan_start + HZ))
- post_status(dm);
-
}
return 0;
@@ -814,6 +1276,10 @@ static void balloon_onchannelcallback(void *context)
struct dm_message *dm_msg;
struct dm_header *dm_hdr;
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
+ struct dm_balloon *bal_msg;
+ struct dm_hot_add *ha_msg;
+ union dm_mem_page_range *ha_pg_range;
+ union dm_mem_page_range *ha_region;
memset(recv_buffer, 0, sizeof(recv_buffer));
vmbus_recvpacket(dev->channel, recv_buffer,
@@ -835,8 +1301,12 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_BALLOON_REQUEST:
+ if (dm->state == DM_BALLOON_UP)
+ pr_warn("Currently ballooning\n");
+ bal_msg = (struct dm_balloon *)recv_buffer;
dm->state = DM_BALLOON_UP;
- complete(&dm->config_event);
+ dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
+ schedule_work(&dm_device.balloon_wrk.wrk);
break;
case DM_UNBALLOON_REQUEST:
@@ -846,8 +1316,31 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_MEM_HOT_ADD_REQUEST:
+ if (dm->state == DM_HOT_ADD)
+ pr_warn("Currently hot-adding\n");
dm->state = DM_HOT_ADD;
- complete(&dm->config_event);
+ ha_msg = (struct dm_hot_add *)recv_buffer;
+ if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
+ /*
+ * This is a normal hot-add request specifying
+ * hot-add memory.
+ */
+ ha_pg_range = &ha_msg->range;
+ dm->ha_wrk.ha_page_range = *ha_pg_range;
+ dm->ha_wrk.ha_region_range.page_range = 0;
+ } else {
+ /*
+ * Host is specifying that we first hot-add
+ * a region and then partially populate this
+ * region.
+ */
+ dm->host_specified_ha_region = true;
+ ha_pg_range = &ha_msg->range;
+ ha_region = &ha_pg_range[1];
+ dm->ha_wrk.ha_page_range = *ha_pg_range;
+ dm->ha_wrk.ha_region_range = *ha_region;
+ }
+ schedule_work(&dm_device.ha_wrk.wrk);
break;
case DM_INFO_MESSAGE:
@@ -890,6 +1383,10 @@ static int balloon_probe(struct hv_device *dev,
dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
init_completion(&dm_device.host_event);
init_completion(&dm_device.config_event);
+ INIT_LIST_HEAD(&dm_device.ha_region_list);
+ INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
+ INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
+ dm_device.host_specified_ha_region = false;
dm_device.thread =
kthread_run(dm_thread_func, &dm_device, "hv_balloon");
@@ -898,6 +1395,10 @@ static int balloon_probe(struct hv_device *dev,
goto probe_error1;
}
+#ifdef CONFIG_MEMORY_HOTPLUG
+ set_online_page_callback(&hv_online_page);
+#endif
+
hv_set_drvdata(dev, &dm_device);
/*
	 * Initiate the handshake with the host and negotiate
@@ -915,8 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
ret = vmbus_sendpacket(dev->channel, &version_req,
sizeof(struct dm_version_request),
(unsigned long)NULL,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ VM_PKT_DATA_INBAND, 0);
if (ret)
goto probe_error2;
@@ -943,13 +1443,13 @@ static int balloon_probe(struct hv_device *dev,
cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
cap_msg.caps.cap_bits.balloon = 1;
+ cap_msg.caps.cap_bits.hot_add = 1;
+
/*
- * While we currently don't support hot-add,
- * we still advertise this capability since the
- * host requires that guests partcipating in the
- * dynamic memory protocol support hot add.
+	 * Specify our alignment requirements as they relate to
+	 * memory hot-add. Specify 128MB alignment.
*/
- cap_msg.caps.cap_bits.hot_add = 1;
+ cap_msg.caps.cap_bits.hot_add_alignment = 7;
/*
* Currently the host does not use these
@@ -962,8 +1462,7 @@ static int balloon_probe(struct hv_device *dev,
ret = vmbus_sendpacket(dev->channel, &cap_msg,
sizeof(struct dm_capabilities),
(unsigned long)NULL,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ VM_PKT_DATA_INBAND, 0);
if (ret)
goto probe_error2;
@@ -987,6 +1486,9 @@ static int balloon_probe(struct hv_device *dev,
return 0;
probe_error2:
+#ifdef CONFIG_MEMORY_HOTPLUG
+ restore_online_page_callback(&hv_online_page);
+#endif
kthread_stop(dm_device.thread);
probe_error1:
@@ -999,13 +1501,26 @@ probe_error0:
static int balloon_remove(struct hv_device *dev)
{
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
+ struct list_head *cur, *tmp;
+ struct hv_hotadd_state *has;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+ cancel_work_sync(&dm->balloon_wrk.wrk);
+ cancel_work_sync(&dm->ha_wrk.wrk);
+
vmbus_close(dev->channel);
kthread_stop(dm->thread);
kfree(send_buffer);
+#ifdef CONFIG_MEMORY_HOTPLUG
+ restore_online_page_callback(&hv_online_page);
+#endif
+ list_for_each_safe(cur, tmp, &dm->ha_region_list) {
+ has = list_entry(cur, struct hv_hotadd_state, list);
+ list_del(&has->list);
+ kfree(has);
+ }
return 0;
}
@@ -1013,9 +1528,7 @@ static int balloon_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
- { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
- 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
- },
+ { HV_DM_GUID, },
{ },
};
@@ -1034,15 +1547,7 @@ static int __init init_balloon_drv(void)
return vmbus_driver_register(&balloon_drv);
}
-static void exit_balloon_drv(void)
-{
-
- vmbus_driver_unregister(&balloon_drv);
-}
-
module_init(init_balloon_drv);
-module_exit(exit_balloon_drv);
MODULE_DESCRIPTION("Hyper-V Balloon");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
new file mode 100644
index 00000000000..23b2ce294c4
--- /dev/null
+++ b/drivers/hv/hv_fcopy.c
@@ -0,0 +1,414 @@
+/*
+ * An implementation of file copy service.
+ *
+ * Copyright (C) 2014, Microsoft, Inc.
+ *
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/nls.h>
+#include <linux/workqueue.h>
+#include <linux/cdev.h>
+#include <linux/hyperv.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "hyperv_vmbus.h"
+
+#define WIN8_SRV_MAJOR 1
+#define WIN8_SRV_MINOR 1
+#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+
+/*
+ * Global state maintained for transaction that is being processed.
+ * For a class of integration services, including the "file copy service",
+ * the specified protocol is a "request/response" protocol which means that
+ * there can only be a single outstanding transaction from the host at any
+ * given point in time. We use this to simplify memory management in this
+ * driver - we cache and process only one message at a time.
+ *
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBus until the current packet is fully
+ * handled.
+ *
+ * The transaction "active" state is set when we receive a request from the
+ * host, and we clean up this state when the transaction is completed - when
+ * we respond to the host with our response. While the transaction active
+ * state is set, we defer handling incoming packets.
+ */
+
+static struct {
+ bool active; /* transaction status - active or not */
+ int recv_len; /* number of bytes received. */
+ struct hv_fcopy_hdr *fcopy_msg; /* current message */
+ struct hv_start_fcopy message; /* sent to daemon */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+ void *fcopy_context; /* for the channel callback */
+ struct semaphore read_sema;
+} fcopy_transaction;
+
+static bool opened; /* currently device opened */
+
+/*
+ * Before we can accept copy messages from the host, we need
+ * to handshake with the user level daemon. This state tracks
+ * if we are in the handshake phase.
+ */
+static bool in_hand_shake = true;
+static void fcopy_send_data(void);
+static void fcopy_respond_to_host(int error);
+static void fcopy_work_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(fcopy_work, fcopy_work_func);
+static u8 *recv_buffer;
+
+static void fcopy_work_func(struct work_struct *dummy)
+{
+ /*
+ * If the timer fires, the user-mode component has not responded;
+ * process the pending transaction.
+ */
+ fcopy_respond_to_host(HV_E_FAIL);
+}
+
+static int fcopy_handle_handshake(u32 version)
+{
+ switch (version) {
+ case FCOPY_CURRENT_VERSION:
+ break;
+ default:
+ /*
+ * For now we will fail the registration.
+ * If and when we have multiple versions to
+ * deal with, we will be backward compatible.
+ * We will add this code when needed.
+ */
+ return -EINVAL;
+ }
+ pr_info("FCP: user-mode registering done. Daemon version: %d\n",
+ version);
+ fcopy_transaction.active = false;
+ if (fcopy_transaction.fcopy_context)
+ hv_fcopy_onchannelcallback(fcopy_transaction.fcopy_context);
+ in_hand_shake = false;
+ return 0;
+}
+
+static void fcopy_send_data(void)
+{
+ struct hv_start_fcopy *smsg_out = &fcopy_transaction.message;
+ int operation = fcopy_transaction.fcopy_msg->operation;
+ struct hv_start_fcopy *smsg_in;
+
+ /*
+	 * The strings sent from the host are encoded
+	 * in utf16; convert them to utf8 strings.
+ * The host assures us that the utf16 strings will not exceed
+	 * the max lengths specified. We will, however, reserve room
+	 * for the string terminating character - in the utf16s_to_utf8s()
+	 * calls below we limit the size of the buffer where the converted
+	 * string is placed to W_MAX_PATH - 1 to guarantee
+	 * that the strings can be properly terminated!
+ */
+
+ switch (operation) {
+ case START_FILE_COPY:
+ memset(smsg_out, 0, sizeof(struct hv_start_fcopy));
+ smsg_out->hdr.operation = operation;
+ smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)smsg_out->file_name, W_MAX_PATH - 1);
+
+ utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
+ UTF16_LITTLE_ENDIAN,
+ (__u8 *)smsg_out->path_name, W_MAX_PATH - 1);
+
+ smsg_out->copy_flags = smsg_in->copy_flags;
+ smsg_out->file_size = smsg_in->file_size;
+ break;
+
+ default:
+ break;
+ }
+ up(&fcopy_transaction.read_sema);
+ return;
+}
+
+/*
+ * Send a response back to the host.
+ */
+
+static void
+fcopy_respond_to_host(int error)
+{
+ struct icmsg_hdr *icmsghdr;
+ u32 buf_len;
+ struct vmbus_channel *channel;
+ u64 req_id;
+
+ /*
+	 * Copy the global state for completing the transaction. Note that
+	 * only one transaction can be active at a time: this is guaranteed
+	 * by the file copy protocol implemented by the host and further
+	 * enforced by the "transaction active" state that we maintain in
+	 * this driver.
+ */
+
+ buf_len = fcopy_transaction.recv_len;
+ channel = fcopy_transaction.recv_channel;
+ req_id = fcopy_transaction.recv_req_id;
+
+ fcopy_transaction.active = false;
+
+ icmsghdr = (struct icmsg_hdr *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with util driver being unloaded;
+ * silently return.
+ */
+ return;
+
+ icmsghdr->status = error;
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+void hv_fcopy_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct hv_fcopy_hdr *fcopy_msg;
+ struct icmsg_hdr *icmsghdr;
+ struct icmsg_negotiate *negop = NULL;
+ int util_fw_version;
+ int fcopy_srv_version;
+
+ if (fcopy_transaction.active) {
+ /*
+ * We will defer processing this callback once
+ * the current transaction is complete.
+ */
+ fcopy_transaction.fcopy_context = context;
+ return;
+ }
+
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ &requestid);
+ if (recvlen <= 0)
+ return;
+
+ icmsghdr = (struct icmsg_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr)];
+ if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ util_fw_version = UTIL_FW_VERSION;
+ fcopy_srv_version = WIN8_SRV_VERSION;
+ vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
+ util_fw_version, fcopy_srv_version);
+ } else {
+ fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
+
+ fcopy_transaction.active = true;
+ fcopy_transaction.recv_len = recvlen;
+ fcopy_transaction.recv_channel = channel;
+ fcopy_transaction.recv_req_id = requestid;
+ fcopy_transaction.fcopy_msg = fcopy_msg;
+
+ /*
+ * Send the information to the user-level daemon.
+ */
+ schedule_delayed_work(&fcopy_work, 5*HZ);
+ fcopy_send_data();
+ return;
+ }
+ icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+}
+
+/*
+ * Create a char device that can support read/write for passing
+ * the payload.
+ */
+
+static ssize_t fcopy_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ void *src;
+ size_t copy_size;
+ int operation;
+
+ /*
+ * Wait until there is something to be read.
+ */
+ if (down_interruptible(&fcopy_transaction.read_sema))
+ return -EINTR;
+
+ /*
+	 * The channel may be rescinded; in this case, we will wake up
+	 * the thread blocked on the semaphore and use the opened
+	 * state to correctly handle this case.
+ */
+ if (!opened)
+ return -ENODEV;
+
+ operation = fcopy_transaction.fcopy_msg->operation;
+
+ if (operation == START_FILE_COPY) {
+ src = &fcopy_transaction.message;
+ copy_size = sizeof(struct hv_start_fcopy);
+ if (count < copy_size)
+ return 0;
+ } else {
+ src = fcopy_transaction.fcopy_msg;
+ copy_size = sizeof(struct hv_do_fcopy);
+ if (count < copy_size)
+ return 0;
+ }
+ if (copy_to_user(buf, src, copy_size))
+ return -EFAULT;
+
+ return copy_size;
+}
+
+static ssize_t fcopy_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int response = 0;
+
+ if (count != sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_user(&response, buf, sizeof(int)))
+ return -EFAULT;
+
+ if (in_hand_shake) {
+ if (fcopy_handle_handshake(response))
+ return -EINVAL;
+ return sizeof(int);
+ }
+
+ /*
+ * Complete the transaction by forwarding the result
+ * to the host. But first, cancel the timeout.
+ */
+ if (cancel_delayed_work_sync(&fcopy_work))
+ fcopy_respond_to_host(response);
+
+ return sizeof(int);
+}
+
+static int fcopy_open(struct inode *inode, struct file *f)
+{
+ /*
+ * The user level daemon that will open this device is
+	 * really an extension of this driver. We can have only
+	 * one active open at a time.
+ */
+ if (opened)
+ return -EBUSY;
+
+ /*
+	 * The daemon is alive; set up the state.
+ */
+ opened = true;
+ return 0;
+}
+
+static int fcopy_release(struct inode *inode, struct file *f)
+{
+ /*
+ * The daemon has exited; reset the state.
+ */
+ in_hand_shake = true;
+ opened = false;
+ return 0;
+}
+
+
+static const struct file_operations fcopy_fops = {
+ .read = fcopy_read,
+ .write = fcopy_write,
+ .release = fcopy_release,
+ .open = fcopy_open,
+};
+
+static struct miscdevice fcopy_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vmbus/hv_fcopy",
+ .fops = &fcopy_fops,
+};
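[Editor's note: for context, a minimal sketch of the user-space side of this char-device protocol. This is a hypothetical illustration, not part of this patch; it assumes FCOPY_CURRENT_VERSION and HV_E_FAIL from the uapi linux/hyperv.h header. The daemon writes its version once to complete the handshake handled by fcopy_write(), then loops: each read() blocks on read_sema until fcopy_send_data() posts a request, and each write() of an int completes the transaction via fcopy_respond_to_host().

#include <fcntl.h>
#include <unistd.h>
#include <linux/hyperv.h>

int main(void)
{
	int fd, error, version = FCOPY_CURRENT_VERSION;
	char buf[4096];

	fd = open("/dev/vmbus/hv_fcopy", O_RDWR);
	if (fd < 0)
		return 1;
	/* Handshake: the driver treats the first int written as the version. */
	if (write(fd, &version, sizeof(int)) != sizeof(int))
		return 1;
	for (;;) {
		/* Blocks until the host sends a request (or the device goes away). */
		if (read(fd, buf, sizeof(buf)) <= 0)
			break;
		/* ...perform the requested file operation on buf here... */
		error = 0;			/* or HV_E_FAIL on failure */
		write(fd, &error, sizeof(int));	/* complete the transaction */
	}
	close(fd);
	return 0;
}]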
+
+static int fcopy_dev_init(void)
+{
+ return misc_register(&fcopy_misc);
+}
+
+static void fcopy_dev_deinit(void)
+{
+
+ /*
+ * The device is going away - perhaps because the
+	 * host has rescinded the channel. Set up state so that the
+	 * user level daemon can gracefully exit if it is blocked
+ * on the read semaphore.
+ */
+ opened = false;
+ /*
+ * Signal the semaphore as the device is
+ * going away.
+ */
+ up(&fcopy_transaction.read_sema);
+ misc_deregister(&fcopy_misc);
+}
+
+int hv_fcopy_init(struct hv_util_service *srv)
+{
+ recv_buffer = srv->recv_buffer;
+
+ /*
+ * When this driver loads, the user level daemon that
+ * processes the host requests may not yet be running.
+ * Defer processing channel callbacks until the daemon
+ * has registered.
+ */
+ fcopy_transaction.active = true;
+ sema_init(&fcopy_transaction.read_sema, 0);
+
+ return fcopy_dev_init();
+}
+
+void hv_fcopy_deinit(void)
+{
+ cancel_delayed_work_sync(&fcopy_work);
+ fcopy_dev_deinit();
+}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ed50e9e83c6..521c14625b3 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -29,6 +29,20 @@
#include <linux/hyperv.h>
+/*
+ * Pre-Win8 version numbers used in WS2008 and WS2008 R2 (Win7)
+ */
+#define WS2008_SRV_MAJOR 1
+#define WS2008_SRV_MINOR 0
+#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
+
+#define WIN7_SRV_MAJOR 3
+#define WIN7_SRV_MINOR 0
+#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
+
+#define WIN8_SRV_MAJOR 4
+#define WIN8_SRV_MINOR 0
+#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
/*
* Global state maintained for transaction that is being processed.
@@ -76,7 +90,9 @@ static u8 *recv_buffer;
/*
* Register the kernel component with the user-level daemon.
* As part of this registration, pass the LIC version number.
+ * This number has no meaning; it merely satisfies the registration protocol.
*/
+#define HV_DRV_VERSION "3.1"
static void
kvp_register(int reg_value)
@@ -97,7 +113,7 @@ kvp_register(int reg_value)
kvp_msg->kvp_hdr.operation = reg_value;
strcpy(version, HV_DRV_VERSION);
msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
}
}
@@ -111,6 +127,17 @@ kvp_work_func(struct work_struct *dummy)
kvp_respond_to_host(NULL, HV_E_FAIL);
}
+static void poll_channel(struct vmbus_channel *channel)
+{
+ if (channel->target_cpu != smp_processor_id())
+ smp_call_function_single(channel->target_cpu,
+ hv_kvp_onchannelcallback,
+ channel, true);
+ else
+ hv_kvp_onchannelcallback(channel);
+}
+
+
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
int ret = 1;
@@ -139,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
kvp_register(dm_reg_value);
kvp_transaction.active = false;
if (kvp_transaction.kvp_context)
- hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
+ poll_channel(kvp_transaction.kvp_context);
}
return ret;
}
@@ -419,7 +446,7 @@ kvp_send_key(struct work_struct *dummy)
}
msg->len = sizeof(struct hv_kvp_msg);
- cn_netlink_send(msg, 0, GFP_ATOMIC);
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
kfree(msg);
return;
@@ -552,7 +579,7 @@ response_done:
vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
VM_PKT_DATA_INBAND, 0);
-
+ poll_channel(channel);
}
/*
@@ -575,6 +602,8 @@ void hv_kvp_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
+ int util_fw_version;
+ int kvp_srv_version;
if (kvp_transaction.active) {
/*
@@ -585,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context)
return;
}
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
if (recvlen > 0) {
@@ -593,8 +622,28 @@ void hv_kvp_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ /*
+			 * Based on the host, select the appropriate
+ * framework and service versions we will
+ * negotiate.
+ */
+ switch (vmbus_proto_version) {
+ case (VERSION_WS2008):
+ util_fw_version = UTIL_WS2K8_FW_VERSION;
+ kvp_srv_version = WS2008_SRV_VERSION;
+ break;
+ case (VERSION_WIN7):
+ util_fw_version = UTIL_FW_VERSION;
+ kvp_srv_version = WIN7_SRV_VERSION;
+ break;
+ default:
+ util_fw_version = UTIL_FW_VERSION;
+ kvp_srv_version = WIN8_SRV_VERSION;
+ }
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, MAX_SRV_VER, MAX_SRV_VER);
+ recv_buffer, util_fw_version,
+ kvp_srv_version);
+
} else {
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
new file mode 100644
index 00000000000..34f14fddb66
--- /dev/null
+++ b/drivers/hv/hv_snapshot.c
@@ -0,0 +1,281 @@
+/*
+ * An implementation of host initiated guest snapshot.
+ *
+ *
+ * Copyright (C) 2013, Microsoft, Inc.
+ * Author : K. Y. Srinivasan <kys@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/net.h>
+#include <linux/nls.h>
+#include <linux/connector.h>
+#include <linux/workqueue.h>
+#include <linux/hyperv.h>
+
+#define VSS_MAJOR 5
+#define VSS_MINOR 0
+#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
+
+
+
+/*
+ * Global state maintained for transaction that is being processed.
+ * Note that only one transaction can be active at any point in time.
+ *
+ * This state is set when we receive a request from the host; we
+ * clean up this state when the transaction is completed - when we respond
+ * to the host with our response.
+ */
+
+static struct {
+ bool active; /* transaction status - active or not */
+ int recv_len; /* number of bytes received. */
+ struct vmbus_channel *recv_channel; /* chn we got the request */
+ u64 recv_req_id; /* request ID. */
+ struct hv_vss_msg *msg; /* current message */
+} vss_transaction;
+
+
+static void vss_respond_to_host(int error);
+
+static struct cb_id vss_id = { CN_VSS_IDX, CN_VSS_VAL };
+static const char vss_name[] = "vss_kernel_module";
+static __u8 *recv_buffer;
+
+static void vss_send_op(struct work_struct *dummy);
+static DECLARE_WORK(vss_send_op_work, vss_send_op);
+
+/*
+ * Callback when data is received from user mode.
+ */
+
+static void
+vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+{
+ struct hv_vss_msg *vss_msg;
+
+ vss_msg = (struct hv_vss_msg *)msg->data;
+
+ if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER) {
+ pr_info("VSS daemon registered\n");
+ vss_transaction.active = false;
+ if (vss_transaction.recv_channel != NULL)
+ hv_vss_onchannelcallback(vss_transaction.recv_channel);
+ return;
+
+ }
+ vss_respond_to_host(vss_msg->error);
+}
+
+
+static void vss_send_op(struct work_struct *dummy)
+{
+ int op = vss_transaction.msg->vss_hdr.operation;
+ struct cn_msg *msg;
+ struct hv_vss_msg *vss_msg;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*vss_msg), GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ vss_msg = (struct hv_vss_msg *)msg->data;
+
+ msg->id.idx = CN_VSS_IDX;
+ msg->id.val = CN_VSS_VAL;
+
+ vss_msg->vss_hdr.operation = op;
+ msg->len = sizeof(struct hv_vss_msg);
+
+ cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
+ kfree(msg);
+
+ return;
+}
+
+/*
+ * Send a response back to the host.
+ */
+
+static void
+vss_respond_to_host(int error)
+{
+ struct icmsg_hdr *icmsghdrp;
+ u32 buf_len;
+ struct vmbus_channel *channel;
+ u64 req_id;
+
+ /*
+	 * If a transaction is not active, log and return.
+ */
+
+ if (!vss_transaction.active) {
+ /*
+ * This is a spurious call!
+ */
+ pr_warn("VSS: Transaction not active\n");
+ return;
+ }
+ /*
+ * Copy the global state for completing the transaction. Note that
+ * only one transaction can be active at a time.
+ */
+
+ buf_len = vss_transaction.recv_len;
+ channel = vss_transaction.recv_channel;
+ req_id = vss_transaction.recv_req_id;
+ vss_transaction.active = false;
+
+ icmsghdrp = (struct icmsg_hdr *)
+ &recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (channel->onchannel_callback == NULL)
+ /*
+ * We have raced with util driver being unloaded;
+ * silently return.
+ */
+ return;
+
+ icmsghdrp->status = error;
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
+ VM_PKT_DATA_INBAND, 0);
+
+}
+
+/*
+ * This callback is invoked when we get a VSS message from the host.
+ * The host ensures that only one VSS transaction can be active at a time.
+ */
+
+void hv_vss_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u32 recvlen;
+ u64 requestid;
+ struct hv_vss_msg *vss_msg;
+
+
+ struct icmsg_hdr *icmsghdrp;
+ struct icmsg_negotiate *negop = NULL;
+
+ if (vss_transaction.active) {
+ /*
+		 * We will defer processing this callback until
+		 * the current transaction is complete.
+ */
+ vss_transaction.recv_channel = channel;
+ return;
+ }
+
+ vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ &requestid);
+
+ if (recvlen > 0) {
+ icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ vmbus_prep_negotiate_resp(icmsghdrp, negop,
+ recv_buffer, UTIL_FW_VERSION,
+ VSS_VERSION);
+ } else {
+ vss_msg = (struct hv_vss_msg *)&recv_buffer[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
+
+ vss_transaction.recv_len = recvlen;
+ vss_transaction.recv_channel = channel;
+ vss_transaction.recv_req_id = requestid;
+ vss_transaction.active = true;
+ vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
+
+ switch (vss_msg->vss_hdr.operation) {
+ /*
+ * Initiate a "freeze/thaw"
+ * operation in the guest.
+ * We respond to the host once
+ * the operation is complete.
+ *
+ * We send the message to the
+ * user space daemon and the
+ * operation is performed in
+ * the daemon.
+ */
+ case VSS_OP_FREEZE:
+ case VSS_OP_THAW:
+ schedule_work(&vss_send_op_work);
+ return;
+
+ case VSS_OP_HOT_BACKUP:
+ vss_msg->vss_cf.flags =
+ VSS_HBU_NO_AUTO_RECOVERY;
+ vss_respond_to_host(0);
+ return;
+
+ case VSS_OP_GET_DM_INFO:
+ vss_msg->dm_info.flags = 0;
+ vss_respond_to_host(0);
+ return;
+
+ default:
+ vss_respond_to_host(0);
+ return;
+
+ }
+
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, recv_buffer,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+ }
+
+}
+
+int
+hv_vss_init(struct hv_util_service *srv)
+{
+ int err;
+
+ err = cn_add_callback(&vss_id, vss_name, vss_cn_callback);
+ if (err)
+ return err;
+ recv_buffer = srv->recv_buffer;
+
+ /*
+ * When this driver loads, the user level daemon that
+ * processes the host requests may not yet be running.
+ * Defer processing channel callbacks until the daemon
+ * has registered.
+ */
+ vss_transaction.active = true;
+ return 0;
+}
+
+void hv_vss_deinit(void)
+{
+ cn_del_callback(&vss_id);
+ cancel_work_sync(&vss_send_op_work);
+}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a0667de7a04..3b9c9ef0deb 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,6 +28,34 @@
#include <linux/reboot.h>
#include <linux/hyperv.h>
+#include "hyperv_vmbus.h"
+
+#define SD_MAJOR 3
+#define SD_MINOR 0
+#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
+
+#define SD_WS2008_MAJOR 1
+#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
+
+#define TS_MAJOR 3
+#define TS_MINOR 0
+#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
+
+#define TS_WS2008_MAJOR 1
+#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
+
+#define HB_MAJOR 3
+#define HB_MINOR 0
+#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
+
+#define HB_WS2008_MAJOR 1
+#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
+
+static int sd_srv_version;
+static int ts_srv_version;
+static int hb_srv_version;
+static int util_fw_version;
+
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
.util_cb = shutdown_onchannelcallback,
@@ -49,12 +77,34 @@ static struct hv_util_service util_kvp = {
.util_deinit = hv_kvp_deinit,
};
+static struct hv_util_service util_vss = {
+ .util_cb = hv_vss_onchannelcallback,
+ .util_init = hv_vss_init,
+ .util_deinit = hv_vss_deinit,
+};
+
+static struct hv_util_service util_fcopy = {
+ .util_cb = hv_fcopy_onchannelcallback,
+ .util_init = hv_fcopy_init,
+ .util_deinit = hv_fcopy_deinit,
+};
+
+static void perform_shutdown(struct work_struct *dummy)
+{
+ orderly_poweroff(true);
+}
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
- u8 execute_shutdown = false;
+ bool execute_shutdown = false;
u8 *shut_txf_buf = util_shutdown.recv_buffer;
struct shutdown_msg_data *shutdown_msg;
@@ -71,7 +121,8 @@ static void shutdown_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- shut_txf_buf, MAX_SRV_VER, MAX_SRV_VER);
+ shut_txf_buf, util_fw_version,
+ sd_srv_version);
} else {
shutdown_msg =
(struct shutdown_msg_data *)&shut_txf_buf[
@@ -106,7 +157,7 @@ static void shutdown_onchannelcallback(void *context)
}
if (execute_shutdown == true)
- orderly_poweroff(true);
+ schedule_work(&shutdown_work);
}
/*
@@ -187,6 +238,7 @@ static void timesync_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct ictimesync_data *timedatap;
u8 *time_txf_buf = util_timesynch.recv_buffer;
+ struct icmsg_negotiate *negop = NULL;
vmbus_recvpacket(channel, time_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
@@ -196,8 +248,10 @@ static void timesync_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf,
- MAX_SRV_VER, MAX_SRV_VER);
+ vmbus_prep_negotiate_resp(icmsghdrp, negop,
+ time_txf_buf,
+ util_fw_version,
+ ts_srv_version);
} else {
timedatap = (struct ictimesync_data *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr) +
@@ -227,6 +281,7 @@ static void heartbeat_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct heartbeat_msg_data *heartbeat_msg;
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+ struct icmsg_negotiate *negop = NULL;
vmbus_recvpacket(channel, hbeat_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
@@ -236,8 +291,9 @@ static void heartbeat_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, NULL,
- hbeat_txf_buf, MAX_SRV_VER, MAX_SRV_VER);
+ vmbus_prep_negotiate_resp(icmsghdrp, negop,
+ hbeat_txf_buf, util_fw_version,
+ hb_srv_version);
} else {
heartbeat_msg =
(struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -263,7 +319,7 @@ static int util_probe(struct hv_device *dev,
(struct hv_util_service *)dev_id->driver_data;
int ret;
- srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+ srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
if (srv->util_init) {
@@ -274,12 +330,41 @@ static int util_probe(struct hv_device *dev,
}
}
+ /*
+	 * The set of services managed by the util driver is not performance
+	 * critical and does not need batched reading. Furthermore, some
+	 * services such as KVP can only handle one message from the host at a time.
+ * Turn off batched reading for all util drivers before we open the
+ * channel.
+ */
+
+ set_channel_read_state(dev->channel, false);
+
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
srv->util_cb, dev->channel);
if (ret)
goto error;
hv_set_drvdata(dev, srv);
+ /*
+	 * Based on the host, initialize the framework and
+	 * service version numbers we will negotiate.
+ */
+ switch (vmbus_proto_version) {
+ case (VERSION_WS2008):
+ util_fw_version = UTIL_WS2K8_FW_VERSION;
+ sd_srv_version = SD_WS2008_VERSION;
+ ts_srv_version = TS_WS2008_VERSION;
+ hb_srv_version = HB_WS2008_VERSION;
+ break;
+
+ default:
+ util_fw_version = UTIL_FW_VERSION;
+ sd_srv_version = SD_VERSION;
+ ts_srv_version = TS_VERSION;
+ hb_srv_version = HB_VERSION;
+ }
+
return 0;
error:
@@ -304,21 +389,29 @@ static int util_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */
- { VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
- 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
- .driver_data = (unsigned long)&util_shutdown },
+ { HV_SHUTDOWN_GUID,
+ .driver_data = (unsigned long)&util_shutdown
+ },
/* Time synch guid */
- { VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
- 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
- .driver_data = (unsigned long)&util_timesynch },
+ { HV_TS_GUID,
+ .driver_data = (unsigned long)&util_timesynch
+ },
/* Heartbeat guid */
- { VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
- 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
- .driver_data = (unsigned long)&util_heartbeat },
+ { HV_HEART_BEAT_GUID,
+ .driver_data = (unsigned long)&util_heartbeat
+ },
/* KVP guid */
- { VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
- 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6)
- .driver_data = (unsigned long)&util_kvp },
+ { HV_KVP_GUID,
+ .driver_data = (unsigned long)&util_kvp
+ },
+ /* VSS GUID */
+ { HV_VSS_GUID,
+ .driver_data = (unsigned long)&util_vss
+ },
+ /* File copy GUID */
+ { HV_FCOPY_GUID,
+ .driver_data = (unsigned long)&util_fcopy
+ },
{ },
};
@@ -350,5 +443,4 @@ module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);
MODULE_DESCRIPTION("Hyper-V Utilities");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d8d1fadb398..22b750749a3 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -101,15 +101,6 @@ enum hv_message_type {
/* Define invalid partition identifier. */
#define HV_PARTITION_ID_INVALID ((u64)0x0)
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
/* Define port identifier type. */
union hv_port_id {
u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
- union hv_connection_id connectionid;
- u16 flag_number;
- u16 rsvdz;
-};
-
/*
* Versioning definitions used for guests reporting themselves to the
 * hypervisor, and vice versa.
@@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
-struct hv_input_signal_event_buffer {
- u64 align8;
- struct hv_input_signal_event event;
-};
-
struct hv_context {
/* We only support running on top of Hyper-V
* So at this point this really can only contain the Hyper-V ID
@@ -513,20 +492,40 @@ struct hv_context {
bool synic_initialized;
- /*
- * This is used as an input param to HvCallSignalEvent hypercall. The
- * input param is immutable in our usage and must be dynamic mem (vs
- * stack or global). */
- struct hv_input_signal_event_buffer *signal_event_buffer;
- /* 8-bytes aligned of the buffer above */
- struct hv_input_signal_event *signal_event_param;
-
void *synic_message_page[NR_CPUS];
void *synic_event_page[NR_CPUS];
+ /*
+	 * The hypervisor's notion of virtual processor ID differs from
+	 * Linux's notion of CPU ID. This information can only be retrieved
+	 * in the context of the calling CPU. Set up a map for easy access
+	 * to this information:
+	 *
+	 * vp_index[a] is Hyper-V's processor ID corresponding to
+	 * Linux cpuid 'a'.
+ */
+ u32 vp_index[NR_CPUS];
+ /*
+ * Starting with win8, we can take channel interrupts on any CPU;
+ * we will manage the tasklet that handles events on a per CPU
+ * basis.
+ */
+ struct tasklet_struct *event_dpc[NR_CPUS];
+ /*
+ * To optimize the mapping of relid to channel, maintain
+ * per-cpu list of the channels based on their CPU affinity.
+ */
+ struct list_head percpu_list[NR_CPUS];
};
extern struct hv_context hv_context;
+struct hv_ring_buffer_debug_info {
+ u32 current_interrupt_mask;
+ u32 current_read_index;
+ u32 current_write_index;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
+};
/* Hv Interface */
@@ -538,12 +537,23 @@ extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
-extern u16 hv_signal_event(void);
+extern u16 hv_signal_event(void *con_id);
+
+extern int hv_synic_alloc(void);
+
+extern void hv_synic_free(void);
extern void hv_synic_init(void *irqarg);
extern void hv_synic_cleanup(void *arg);
+/*
+ * Host version information.
+ */
+extern unsigned int host_info_eax;
+extern unsigned int host_info_ebx;
+extern unsigned int host_info_ecx;
+extern unsigned int host_info_edx;
/* Interface */
@@ -554,8 +564,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
- struct scatterlist *sglist,
- u32 sgcount);
+ struct kvec *kv_list,
+ u32 kv_count, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
@@ -563,13 +573,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
void *buffer,
u32 buflen,
- u32 offset);
+ u32 offset, bool *signal);
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+void hv_begin_read(struct hv_ring_buffer_info *rbi);
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+
/*
* Maximum channels is determined by the size of the interrupt page
* which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -611,7 +624,7 @@ struct vmbus_connection {
* 2 pages - 1st page for parent->child notification and 2nd
* is child->parent notification
*/
- void *monitor_pages;
+ struct hv_monitor_page *monitor_pages[2];
struct list_head chn_msg_list;
spinlock_t channelmsg_lock;
@@ -636,9 +649,9 @@ extern struct vmbus_connection vmbus_connection;
/* General vmbus interface */
-struct hv_device *vmbus_device_create(uuid_le *type,
- uuid_le *instance,
- struct vmbus_channel *channel);
+struct hv_device *vmbus_device_create(const uuid_le *type,
+ const uuid_le *instance,
+ struct vmbus_channel *channel);
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
@@ -657,9 +670,13 @@ int vmbus_connect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
-int vmbus_set_event(u32 child_relid);
+int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
+int hv_fcopy_init(struct hv_util_service *);
+void hv_fcopy_deinit(void);
+void hv_fcopy_onchannelcallback(void *);
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 7233c88f01b..15db66b7414 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -26,9 +26,112 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
+#include <linux/uio.h>
#include "hyperv_vmbus.h"
+void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+ rbi->ring_buffer->interrupt_mask = 1;
+ mb();
+}
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+ u32 read;
+ u32 write;
+
+ rbi->ring_buffer->interrupt_mask = 0;
+ mb();
+
+ /*
+ * Now check to see if the ring buffer is still empty.
+ * If it is not, we raced and we need to process new
+ * incoming messages.
+ */
+ hv_get_ringbuffer_availbytes(rbi, &read, &write);
+
+ return read;
+}
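[Editor's note: the intended consumer pattern for this pair is mask, drain, unmask, and re-check, so a packet that arrives in the race window is never left unprocessed. A hedged sketch of a call site (the real callers live in the channel event-processing path; rbi is a struct hv_ring_buffer_info *):

	hv_begin_read(rbi);		/* mask further host interrupts */
	/* ...drain every available packet from the ring... */
	if (hv_end_read(rbi) != 0) {
		/* Data raced in after the drain; process it before returning. */
	}]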
+
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ * 1. The host guarantees that while it is draining the
+ * ring buffer, it will set the interrupt_mask to
+ * indicate it does not need to be interrupted when
+ * new data is placed.
+ *
+ * 2. The host guarantees that it will completely drain
+ * the ring buffer before exiting the read loop. Further,
+ * once the ring buffer is empty, it will clear the
+ * interrupt_mask and re-check to see if new data has
+ * arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+ mb();
+ if (rbi->ring_buffer->interrupt_mask)
+ return false;
+
+ /* check interrupt_mask before read_index */
+ rmb();
+ /*
+ * This is the only case we need to signal when the
+ * ring transitions from being empty to non-empty.
+ */
+ if (old_write == rbi->ring_buffer->read_index)
+ return true;
+
+ return false;
+}
+
+/*
+ * To optimize flow management on the send side,
+ * when the sender is blocked because of lack of
+ * sufficient space in the ring buffer, the
+ * consumer of the ring buffer can signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ *    producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz is set to indicate if
+ * the consumer of the ring will signal when the ring
+ * state transitions from being full to a state where
+ * there is room for the producer to send the pending packet.
+ */
+
+static bool hv_need_to_signal_on_read(u32 old_rd,
+ struct hv_ring_buffer_info *rbi)
+{
+ u32 prev_write_sz;
+ u32 cur_write_sz;
+ u32 r_size;
+ u32 write_loc = rbi->ring_buffer->write_index;
+ u32 read_loc = rbi->ring_buffer->read_index;
+ u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+
+ /*
+	 * If the other end is not blocked on write, don't bother.
+ */
+ if (pending_sz == 0)
+ return false;
+
+ r_size = rbi->ring_datasize;
+ cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ read_loc - write_loc;
+
+ prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+ old_rd - write_loc;
+
+
+ if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+ return true;
+
+ return false;
+}
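[Editor's note: to make the detected transition concrete, with illustrative numbers only: take r_size = 4096 and pending_sz = 2048, and suppose write_loc = 3000 while the read index advances from old_rd = 500 to read_loc = 1500. The writer's free space grows from 4096 - (3000 - 500) = 1596 bytes to 4096 - (3000 - 1500) = 2596 bytes, crossing pending_sz, so the reader signals the blocked producer exactly at the no-room to room transition.]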
/*
* hv_get_next_write_location()
@@ -239,19 +342,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
}
}
-
-/*
- *
- * hv_get_ringbuffer_interrupt_mask()
- *
- * Get the interrupt mask for the specified ring buffer
- *
- */
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->interrupt_mask;
-}
-
/*
*
* hv_ringbuffer_init()
@@ -298,22 +388,20 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct scatterlist *sglist, u32 sgcount)
+ struct kvec *kv_list, u32 kv_count, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 totalbytes_towrite = 0;
- struct scatterlist *sg;
u32 next_write_location;
+ u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
- for_each_sg(sglist, sg, sgcount, i)
- {
- totalbytes_towrite += sg->length;
- }
+ for (i = 0; i < kv_count; i++)
+ totalbytes_towrite += kv_list[i].iov_len;
totalbytes_towrite += sizeof(u64);
@@ -335,12 +423,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
- for_each_sg(sglist, sg, sgcount, i)
- {
+ old_write = next_write_location;
+
+ for (i = 0; i < kv_count; i++) {
next_write_location = hv_copyto_ringbuffer(outring_info,
next_write_location,
- sg_virt(sg),
- sg->length);
+ kv_list[i].iov_base,
+ kv_list[i].iov_len);
}
/* Set previous packet start */
@@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
&prev_indices,
sizeof(u64));
- /* Make sure we flush all writes before updating the writeIndex */
- smp_wmb();
+ /* Issue a full memory barrier before updating the write index */
+ mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+ *signal = hv_need_to_signal(old_write, outring_info);
return 0;
}
@@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
*
*/
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
- u32 buflen, u32 offset)
+ u32 buflen, u32 offset, bool *signal)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
unsigned long flags;
+ u32 old_read;
if (buflen <= 0)
return -EINVAL;
@@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
&bytes_avail_toread,
&bytes_avail_towrite);
+ old_read = bytes_avail_toread;
+
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -454,12 +548,14 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
/* Make sure all reads are done before we update the read index since */
/* the writer may start writing to the read area once the read index */
	/* is updated */
- smp_mb();
+ mb();
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+ *signal = hv_need_to_signal_on_read(old_read, inring_info);
+
return 0;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e1a9ec5300..4d6b26979fb 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -25,43 +25,29 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
+#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
-
static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
static struct completion probe_event;
static int irq;
-struct hv_device_info {
- u32 chn_id;
- u32 chn_state;
- uuid_le chn_type;
- uuid_le chn_instance;
-
- u32 monitor_id;
- u32 server_monitor_pending;
- u32 server_monitor_latency;
- u32 server_monitor_conn_id;
- u32 client_monitor_pending;
- u32 client_monitor_latency;
- u32 client_monitor_conn_id;
-
- struct hv_dev_port_info inbound;
- struct hv_dev_port_info outbound;
+struct resource hyperv_mmio = {
+ .name = "hyperv mmio",
+ .flags = IORESOURCE_MEM,
};
+EXPORT_SYMBOL_GPL(hyperv_mmio);
static int vmbus_exists(void)
{
@@ -71,169 +57,361 @@ static int vmbus_exists(void)
return 0;
}
+#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
+static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
+{
+ int i;
+ for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
+ sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
+}
-static void get_channel_info(struct hv_device *device,
- struct hv_device_info *info)
+static u8 channel_monitor_group(struct vmbus_channel *channel)
{
- struct vmbus_channel_debug_info debug_info;
+ return (u8)channel->offermsg.monitorid / 32;
+}
- if (!device->channel)
- return;
+static u8 channel_monitor_offset(struct vmbus_channel *channel)
+{
+ return (u8)channel->offermsg.monitorid % 32;
+}
- vmbus_get_debug_info(device->channel, &debug_info);
+static u32 channel_pending(struct vmbus_channel *channel,
+ struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ return monitor_page->trigger_group[monitor_group].pending;
+}
- info->chn_id = debug_info.relid;
- info->chn_state = debug_info.state;
- memcpy(&info->chn_type, &debug_info.interfacetype,
- sizeof(uuid_le));
- memcpy(&info->chn_instance, &debug_info.interface_instance,
- sizeof(uuid_le));
+static u32 channel_latency(struct vmbus_channel *channel,
+ struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ u8 monitor_offset = channel_monitor_offset(channel);
+ return monitor_page->latency[monitor_group][monitor_offset];
+}
- info->monitor_id = debug_info.monitorid;
+static u32 channel_conn_id(struct vmbus_channel *channel,
+ struct hv_monitor_page *monitor_page)
+{
+ u8 monitor_group = channel_monitor_group(channel);
+ u8 monitor_offset = channel_monitor_offset(channel);
+ return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
+}
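
The divide/modulo by 32 in these helpers follows the monitor page layout: four trigger groups, 32 channels per group. For reference, a sketch of the structure being indexed (per the layout in this series' headers; treat the exact reserved-field sizes as hedged):

struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u8 rsvdz2[536];

	u16 latency[4][32];
	u8 rsvdz3[256];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};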
- info->server_monitor_pending = debug_info.servermonitor_pending;
- info->server_monitor_latency = debug_info.servermonitor_latency;
- info->server_monitor_conn_id = debug_info.servermonitor_connectionid;
+static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
- info->client_monitor_pending = debug_info.clientmonitor_pending;
- info->client_monitor_latency = debug_info.clientmonitor_latency;
- info->client_monitor_conn_id = debug_info.clientmonitor_connectionid;
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+}
+static DEVICE_ATTR_RO(id);
- info->inbound.int_mask = debug_info.inbound.current_interrupt_mask;
- info->inbound.read_idx = debug_info.inbound.current_read_index;
- info->inbound.write_idx = debug_info.inbound.current_write_index;
- info->inbound.bytes_avail_toread =
- debug_info.inbound.bytes_avail_toread;
- info->inbound.bytes_avail_towrite =
- debug_info.inbound.bytes_avail_towrite;
+static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
- info->outbound.int_mask =
- debug_info.outbound.current_interrupt_mask;
- info->outbound.read_idx = debug_info.outbound.current_read_index;
- info->outbound.write_idx = debug_info.outbound.current_write_index;
- info->outbound.bytes_avail_toread =
- debug_info.outbound.bytes_avail_toread;
- info->outbound.bytes_avail_towrite =
- debug_info.outbound.bytes_avail_towrite;
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->state);
}
+static DEVICE_ATTR_RO(state);
-#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
-static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
+static ssize_t monitor_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
{
- int i;
- for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
- sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
+static DEVICE_ATTR_RO(monitor_id);
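
Each of these foo_show() routines is paired with DEVICE_ATTR_RO(foo), which emits the dev_attr_foo object collected into vmbus_attrs[] below. Roughly what the macro expands to for monitor_id, per linux/device.h of this era:

static struct device_attribute dev_attr_monitor_id = {
	.attr = { .name = "monitor_id", .mode = S_IRUGO },
	.show = monitor_id_show,
};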
-/*
- * vmbus_show_device_attr - Show the device attribute in sysfs.
- *
- * This is invoked when user does a
- * "cat /sys/bus/vmbus/devices/<busdevice>/<attr name>"
- */
-static ssize_t vmbus_show_device_attr(struct device *dev,
- struct device_attribute *dev_attr,
- char *buf)
+static ssize_t class_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "{%pUl}\n",
+ hv_dev->channel->offermsg.offer.if_type.b);
+}
+static DEVICE_ATTR_RO(class_id);
+
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "{%pUl}\n",
+ hv_dev->channel->offermsg.offer.if_instance.b);
+}
+static DEVICE_ATTR_RO(device_id);
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- struct hv_device_info *device_info;
char alias_name[VMBUS_ALIAS_LEN + 1];
- int ret = 0;
- device_info = kzalloc(sizeof(struct hv_device_info), GFP_KERNEL);
- if (!device_info)
- return ret;
+ print_alias_name(hv_dev, alias_name);
+ return sprintf(buf, "vmbus:%s\n", alias_name);
+}
+static DEVICE_ATTR_RO(modalias);
- get_channel_info(hv_dev, device_info);
-
- if (!strcmp(dev_attr->attr.name, "class_id")) {
- ret = sprintf(buf, "{%pUl}\n", device_info->chn_type.b);
- } else if (!strcmp(dev_attr->attr.name, "device_id")) {
- ret = sprintf(buf, "{%pUl}\n", device_info->chn_instance.b);
- } else if (!strcmp(dev_attr->attr.name, "modalias")) {
- print_alias_name(hv_dev, alias_name);
- ret = sprintf(buf, "vmbus:%s\n", alias_name);
- } else if (!strcmp(dev_attr->attr.name, "state")) {
- ret = sprintf(buf, "%d\n", device_info->chn_state);
- } else if (!strcmp(dev_attr->attr.name, "id")) {
- ret = sprintf(buf, "%d\n", device_info->chn_id);
- } else if (!strcmp(dev_attr->attr.name, "out_intr_mask")) {
- ret = sprintf(buf, "%d\n", device_info->outbound.int_mask);
- } else if (!strcmp(dev_attr->attr.name, "out_read_index")) {
- ret = sprintf(buf, "%d\n", device_info->outbound.read_idx);
- } else if (!strcmp(dev_attr->attr.name, "out_write_index")) {
- ret = sprintf(buf, "%d\n", device_info->outbound.write_idx);
- } else if (!strcmp(dev_attr->attr.name, "out_read_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->outbound.bytes_avail_toread);
- } else if (!strcmp(dev_attr->attr.name, "out_write_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->outbound.bytes_avail_towrite);
- } else if (!strcmp(dev_attr->attr.name, "in_intr_mask")) {
- ret = sprintf(buf, "%d\n", device_info->inbound.int_mask);
- } else if (!strcmp(dev_attr->attr.name, "in_read_index")) {
- ret = sprintf(buf, "%d\n", device_info->inbound.read_idx);
- } else if (!strcmp(dev_attr->attr.name, "in_write_index")) {
- ret = sprintf(buf, "%d\n", device_info->inbound.write_idx);
- } else if (!strcmp(dev_attr->attr.name, "in_read_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->inbound.bytes_avail_toread);
- } else if (!strcmp(dev_attr->attr.name, "in_write_bytes_avail")) {
- ret = sprintf(buf, "%d\n",
- device_info->inbound.bytes_avail_towrite);
- } else if (!strcmp(dev_attr->attr.name, "monitor_id")) {
- ret = sprintf(buf, "%d\n", device_info->monitor_id);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_pending")) {
- ret = sprintf(buf, "%d\n", device_info->server_monitor_pending);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_latency")) {
- ret = sprintf(buf, "%d\n", device_info->server_monitor_latency);
- } else if (!strcmp(dev_attr->attr.name, "server_monitor_conn_id")) {
- ret = sprintf(buf, "%d\n",
- device_info->server_monitor_conn_id);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_pending")) {
- ret = sprintf(buf, "%d\n", device_info->client_monitor_pending);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_latency")) {
- ret = sprintf(buf, "%d\n", device_info->client_monitor_latency);
- } else if (!strcmp(dev_attr->attr.name, "client_monitor_conn_id")) {
- ret = sprintf(buf, "%d\n",
- device_info->client_monitor_conn_id);
- }
+static ssize_t server_monitor_pending_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
- kfree(device_info);
- return ret;
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_pending);
+
+static ssize_t client_monitor_pending_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_pending);
+
+static ssize_t server_monitor_latency_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_latency);
+
+static ssize_t client_monitor_latency_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_latency);
+
+static ssize_t server_monitor_conn_id_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
+}
+static DEVICE_ATTR_RO(server_monitor_conn_id);
+
+static ssize_t client_monitor_conn_id_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ return sprintf(buf, "%d\n",
+ channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
+}
+static DEVICE_ATTR_RO(client_monitor_conn_id);
+
+static ssize_t out_intr_mask_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
+static DEVICE_ATTR_RO(out_intr_mask);
+
+static ssize_t out_read_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_read_index);
+}
+static DEVICE_ATTR_RO(out_read_index);
+
+static ssize_t out_write_index_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.current_write_index);
+}
+static DEVICE_ATTR_RO(out_write_index);
+
+static ssize_t out_read_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+}
+static DEVICE_ATTR_RO(out_read_bytes_avail);
+
+static ssize_t out_write_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info outbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+ return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+}
+static DEVICE_ATTR_RO(out_write_bytes_avail);
+
+static ssize_t in_intr_mask_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+}
+static DEVICE_ATTR_RO(in_intr_mask);
+
+static ssize_t in_read_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_read_index);
+}
+static DEVICE_ATTR_RO(in_read_index);
+
+static ssize_t in_write_index_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.current_write_index);
+}
+static DEVICE_ATTR_RO(in_write_index);
+
+static ssize_t in_read_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+}
+static DEVICE_ATTR_RO(in_read_bytes_avail);
+
+static ssize_t in_write_bytes_avail_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ struct hv_ring_buffer_debug_info inbound;
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+ hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+}
+static DEVICE_ATTR_RO(in_write_bytes_avail);
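
All ten ring-buffer attributes share one pattern: snapshot the ring with hv_ringbuffer_get_debuginfo() and print a single field. A sketch of that helper from ring_buffer.c, assuming the hv_ring_buffer_debug_info fields used above:

void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 avail_toread, avail_towrite;

	if (!ring_info->ring_buffer)
		return;

	hv_get_ringbuffer_availbytes(ring_info, &avail_toread,
				     &avail_towrite);

	debug_info->bytes_avail_toread = avail_toread;
	debug_info->bytes_avail_towrite = avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask =
		ring_info->ring_buffer->interrupt_mask;
}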
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
-static struct device_attribute vmbus_device_attrs[] = {
- __ATTR(id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(state, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(class_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(device_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(monitor_id, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(modalias, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(server_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(server_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(server_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(client_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(client_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(client_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(out_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(out_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
-
- __ATTR(in_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_read_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_write_index, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR(in_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL),
- __ATTR_NULL
+static struct attribute *vmbus_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_state.attr,
+ &dev_attr_monitor_id.attr,
+ &dev_attr_class_id.attr,
+ &dev_attr_device_id.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_server_monitor_pending.attr,
+ &dev_attr_client_monitor_pending.attr,
+ &dev_attr_server_monitor_latency.attr,
+ &dev_attr_client_monitor_latency.attr,
+ &dev_attr_server_monitor_conn_id.attr,
+ &dev_attr_client_monitor_conn_id.attr,
+ &dev_attr_out_intr_mask.attr,
+ &dev_attr_out_read_index.attr,
+ &dev_attr_out_write_index.attr,
+ &dev_attr_out_read_bytes_avail.attr,
+ &dev_attr_out_write_bytes_avail.attr,
+ &dev_attr_in_intr_mask.attr,
+ &dev_attr_in_read_index.attr,
+ &dev_attr_in_write_index.attr,
+ &dev_attr_in_read_bytes_avail.attr,
+ &dev_attr_in_write_bytes_avail.attr,
+ NULL,
};
-
+ATTRIBUTE_GROUPS(vmbus);
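
ATTRIBUTE_GROUPS(vmbus) generates the vmbus_groups array that hv_bus.dev_groups points at below, so the driver core creates every attribute atomically at device registration time. The expansion is approximately:

static const struct attribute_group vmbus_group = {
	.attrs = vmbus_attrs,
};

static const struct attribute_group *vmbus_groups[] = {
	&vmbus_group,
	NULL,
};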
/*
* vmbus_uevent - add uevent for our device
@@ -257,7 +435,7 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
return ret;
}
-static uuid_le null_guid;
+static const uuid_le null_guid;
static inline bool is_null_guid(const __u8 *guid)
{
@@ -272,7 +450,7 @@ static inline bool is_null_guid(const __u8 *guid)
*/
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
const struct hv_vmbus_device_id *id,
- __u8 *guid)
+ const __u8 *guid)
{
for (; !is_null_guid(id->guid); id++)
if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
@@ -382,12 +560,9 @@ static struct bus_type hv_bus = {
.remove = vmbus_remove,
.probe = vmbus_probe,
.uevent = vmbus_uevent,
- .dev_attrs = vmbus_device_attrs,
+ .dev_groups = vmbus_groups,
};
-static const char *driver_name = "hyperv";
-
-
struct onmessage_work_context {
struct work_struct work;
struct hv_message msg;
@@ -433,7 +608,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
* will not deliver any more messages since there is
* no empty slot
*/
- smp_mb();
+ mb();
if (msg->header.message_flags.msg_pending) {
/*
@@ -446,7 +621,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
}
}
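
The mb() orders the HVMSG_NONE store against the msg_pending re-check: the hypervisor only sets msg_pending while the slot is occupied, so the emptied slot must be globally visible before the flag is sampled. When the flag is set, the tail of this function (cut off by the hunk) pokes the end-of-message MSR so the parked message is redelivered; roughly:

	if (msg->header.message_flags.msg_pending) {
		/* Ask the hypervisor to re-scan its queue and redeliver. */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}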
-static irqreturn_t vmbus_isr(int irq, void *dev_id)
+static void vmbus_isr(void)
{
int cpu = smp_processor_id();
void *page_addr;
@@ -454,34 +629,46 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
union hv_synic_event_flags *event;
bool handled = false;
+ page_addr = hv_context.synic_event_page[cpu];
+ if (page_addr == NULL)
+ return;
+
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
/*
* Check for events before checking for messages. This is the order
* in which events and messages are checked in Windows guests on
* Hyper-V, and the Windows team suggested we do the same.
*/
- page_addr = hv_context.synic_event_page[cpu];
- event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
- /* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+ /* Since we are a child, we only need to check bit 0 */
+ if (sync_test_and_clear_bit(0,
+ (unsigned long *) &event->flags32[0])) {
+ handled = true;
+ }
+ } else {
+ /*
+ * Our host is win8 or above. The signaling mechanism
+ * has changed and we can directly look at the event page.
+ * If bit n is set then we have an interrupt on the channel
+ * whose id is n.
+ */
handled = true;
- tasklet_schedule(&event_dpc);
}
+ if (handled)
+ tasklet_schedule(hv_context.event_dpc[cpu]);
+
+
page_addr = hv_context.synic_message_page[cpu];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
/* Check if there are actual msgs to be processed */
- if (msg->header.message_type != HVMSG_NONE) {
- handled = true;
+ if (msg->header.message_type != HVMSG_NONE)
tasklet_schedule(&msg_dpc);
- }
-
- if (handled)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
}
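
With request_irq() gone, vmbus_isr() is wired up through hv_setup_vmbus_irq() to the dedicated hypervisor callback vector, which is why the IRQ_HANDLED/IRQ_NONE return disappears. A hedged sketch of the arch/x86 side from the companion mshyperv patches:

static void (*vmbus_handler)(void);

void hv_setup_vmbus_irq(void (*handler)(void))
{
	vmbus_handler = handler;
	/* Install the IDT entry for the hypervisor callback vector. */
	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
}

void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	if (vmbus_handler)
		vmbus_handler();
	irq_exit();

	set_irq_regs(old_regs);
}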
/*
@@ -496,7 +683,6 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
static int vmbus_bus_init(int irq)
{
int ret;
- unsigned int vector;
/* Hypervisor initialization...setup hypercall page..etc */
ret = hv_init();
@@ -506,39 +692,33 @@ static int vmbus_bus_init(int irq)
}
tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
- tasklet_init(&event_dpc, vmbus_on_event, 0);
ret = bus_register(&hv_bus);
if (ret)
goto err_cleanup;
- ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
-
- if (ret != 0) {
- pr_err("Unable to request IRQ %d\n",
- irq);
- goto err_unregister;
- }
-
- vector = IRQ0_VECTOR + irq;
+ hv_setup_vmbus_irq(vmbus_isr);
+ ret = hv_synic_alloc();
+ if (ret)
+ goto err_alloc;
/*
- * Notify the hypervisor of our irq and
+ * Initialize the per-cpu interrupt state and
* connect to the host.
*/
- on_each_cpu(hv_synic_init, (void *)&vector, 1);
+ on_each_cpu(hv_synic_init, NULL, 1);
ret = vmbus_connect();
if (ret)
- goto err_irq;
+ goto err_alloc;
vmbus_request_offers();
return 0;
-err_irq:
- free_irq(irq, hv_acpi_dev);
+err_alloc:
+ hv_synic_free();
+ hv_remove_vmbus_irq();
-err_unregister:
bus_unregister(&hv_bus);
err_cleanup:
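
hv_synic_alloc() gives each CPU its own event tasklet and SynIC pages, which is what lets vmbus_isr() schedule hv_context.event_dpc[cpu]. A hedged sketch of its core loop (the in-tree version in hv.c also logs failures and unwinds via hv_synic_free()):

int hv_synic_alloc(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		hv_context.event_dpc[cpu] =
			kmalloc(sizeof(struct tasklet_struct), GFP_ATOMIC);
		if (!hv_context.event_dpc[cpu])
			return -ENOMEM;
		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);

		hv_context.synic_message_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (!hv_context.synic_message_page[cpu])
			return -ENOMEM;

		hv_context.synic_event_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (!hv_context.synic_event_page[cpu])
			return -ENOMEM;
	}
	return 0;
}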
@@ -575,8 +755,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
ret = driver_register(&hv_driver->driver);
- vmbus_request_offers();
-
return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
@@ -601,9 +779,9 @@ EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
*/
-struct hv_device *vmbus_device_create(uuid_le *type,
- uuid_le *instance,
- struct vmbus_channel *channel)
+struct hv_device *vmbus_device_create(const uuid_le *type,
+ const uuid_le *instance,
+ struct vmbus_channel *channel)
{
struct hv_device *child_device_obj;
@@ -647,7 +825,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
if (ret)
pr_err("Unable to register child device\n");
else
- pr_info("child device %s registered\n",
+ pr_debug("child device %s registered\n",
dev_name(&child_device_obj->device));
return ret;
@@ -659,30 +837,33 @@ int vmbus_device_register(struct hv_device *child_device_obj)
*/
void vmbus_device_unregister(struct hv_device *device_obj)
{
+ pr_debug("child device %s unregistered\n",
+ dev_name(&device_obj->device));
+
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
*/
device_unregister(&device_obj->device);
-
- pr_info("child device %s unregistered\n",
- dev_name(&device_obj->device));
}
/*
- * VMBUS is an acpi enumerated device. Get the the IRQ information
- * from DSDT.
+ * VMBUS is an acpi enumerated device. Get the information we
+ * need from DSDT.
*/
-static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
+static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
-
- if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
- struct acpi_resource_irq *irqp;
- irqp = &res->data.irq;
-
- *((unsigned int *)irq) = irqp->interrupts[0];
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ irq = res->data.irq.interrupts[0];
+ break;
+
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ hyperv_mmio.start = res->data.address64.minimum;
+ hyperv_mmio.end = res->data.address64.maximum;
+ break;
}
return AE_OK;
@@ -691,18 +872,34 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
static int vmbus_acpi_add(struct acpi_device *device)
{
acpi_status result;
+ int ret_val = -ENODEV;
hv_acpi_dev = device;
result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- vmbus_walk_resources, &irq);
+ vmbus_walk_resources, NULL);
- if (ACPI_FAILURE(result)) {
- complete(&probe_event);
- return -ENODEV;
+ if (ACPI_FAILURE(result))
+ goto acpi_walk_err;
+ /*
+ * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
+ * has the mmio ranges. Get that.
+ */
+ if (device->parent) {
+ result = acpi_walk_resources(device->parent->handle,
+ METHOD_NAME__CRS,
+ vmbus_walk_resources, NULL);
+
+ if (ACPI_FAILURE(result))
+ goto acpi_walk_err;
+ if (hyperv_mmio.start && hyperv_mmio.end)
+ request_resource(&iomem_resource, &hyperv_mmio);
}
+ ret_val = 0;
+
+acpi_walk_err:
complete(&probe_event);
- return 0;
+ return ret_val;
}
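
With the VMOD's ADDRESS64 window claimed under iomem_resource, the exported hyperv_mmio becomes the allocation root for paravirtual devices that need guest physical address space (the Gen2 synthetic framebuffer is the intended first user). A hypothetical consumer sketch; the region name, size, and alignment are illustrative:

static struct resource fb_mmio = {
	.name  = "hyperv fb",		/* hypothetical child region */
	.flags = IORESOURCE_MEM,
};

static int claim_fb_mmio(resource_size_t size)
{
	/* Carve a chunk out of the window claimed in vmbus_acpi_add(). */
	return allocate_resource(&hyperv_mmio, &fb_mmio, size,
				 hyperv_mmio.start, hyperv_mmio.end,
				 PAGE_SIZE, NULL, NULL);
}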
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@@ -732,7 +929,6 @@ static int __init hv_acpi_init(void)
/*
* Get irq resources first.
*/
-
ret = acpi_bus_register_driver(&vmbus_acpi_driver);
if (ret)
@@ -763,8 +959,7 @@ cleanup:
static void __exit vmbus_exit(void)
{
-
- free_irq(irq, hv_acpi_dev);
+ hv_remove_vmbus_irq();
vmbus_free_channels();
bus_unregister(&hv_bus);
hv_cleanup();
@@ -773,7 +968,6 @@ static void __exit vmbus_exit(void)
MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);