Diffstat (limited to 'include/xen')
-rw-r--r--  include/xen/acpi.h                         59
-rw-r--r--  include/xen/balloon.h                       3
-rw-r--r--  include/xen/events.h                       19
-rw-r--r--  include/xen/gntalloc.h                     82
-rw-r--r--  include/xen/gntdev.h                      150
-rw-r--r--  include/xen/grant_table.h                  10
-rw-r--r--  include/xen/hvm.h                           4
-rw-r--r--  include/xen/interface/callback.h            2
-rw-r--r--  include/xen/interface/elfnote.h            13
-rw-r--r--  include/xen/interface/event_channel.h      68
-rw-r--r--  include/xen/interface/io/blkif.h           75
-rw-r--r--  include/xen/interface/io/netif.h          102
-rw-r--r--  include/xen/interface/io/protocols.h        5
-rw-r--r--  include/xen/interface/io/ring.h             5
-rw-r--r--  include/xen/interface/io/tpmif.h           52
-rw-r--r--  include/xen/interface/memory.h              6
-rw-r--r--  include/xen/interface/physdev.h            27
-rw-r--r--  include/xen/interface/platform.h           28
-rw-r--r--  include/xen/interface/vcpu.h                2
-rw-r--r--  include/xen/interface/xen.h                18
-rw-r--r--  include/xen/interface/xencomm.h            41
-rw-r--r--  include/xen/platform_pci.h                 25
-rw-r--r--  include/xen/swiotlb-xen.h                   3
-rw-r--r--  include/xen/tmem.h                          8
-rw-r--r--  include/xen/xen-ops.h                      15
-rw-r--r--  include/xen/xen.h                          14
-rw-r--r--  include/xen/xenbus.h                        2
-rw-r--r--  include/xen/xencomm.h                      77
28 files changed, 510 insertions, 405 deletions
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 48a9c0171b6..4ddd7dc4a61 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -40,14 +40,67 @@
#include <xen/xen.h>
#include <linux/acpi.h>
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+#define ACPI_MEMORY_DEVICE_CLASS "memory"
+#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
+#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
+
+int xen_stub_memory_device_init(void);
+void xen_stub_memory_device_exit(void);
+
+#define ACPI_PROCESSOR_CLASS "processor"
+#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
+#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
+
+int xen_stub_processor_init(void);
+void xen_stub_processor_exit(void);
+
+void xen_pcpu_hotplug_sync(void);
+int xen_pcpu_id(uint32_t acpi_id);
+
+static inline int xen_acpi_get_pxm(acpi_handle h)
+{
+ unsigned long long pxm;
+ acpi_status status;
+ acpi_handle handle;
+ acpi_handle phandle = h;
+
+ do {
+ handle = phandle;
+ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
+ if (ACPI_SUCCESS(status))
+ return pxm;
+ status = acpi_get_parent(handle, &phandle);
+ } while (ACPI_SUCCESS(status));
+
+ return -ENXIO;
+}
+
+int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
u32 pm1a_cnt, u32 pm1b_cnt);
+int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
+ u32 val_a, u32 val_b);
+
+static inline int xen_acpi_suspend_lowlevel(void)
+{
+ /*
+ * Xen will save and restore CPU context, so
+ * we can skip that and just go straight to
+ * the suspend.
+ */
+ acpi_enter_sleep_state(ACPI_STATE_S3);
+ return 0;
+}
static inline void xen_acpi_sleep_register(void)
{
- if (xen_initial_domain())
+ if (xen_initial_domain()) {
acpi_os_set_prepare_sleep(
- &xen_acpi_notify_hypervisor_state);
+ &xen_acpi_notify_hypervisor_sleep);
+ acpi_os_set_prepare_extended_sleep(
+ &xen_acpi_notify_hypervisor_extended_sleep);
+
+ acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
+ }
}
#else
static inline void xen_acpi_sleep_register(void)
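The xen_acpi_get_pxm() helper above walks up the ACPI namespace until an ancestor supplies a _PXM (proximity domain) object. A minimal caller sketch, assuming a driver already holds an acpi_handle and wants the matching NUMA node via the generic pxm_to_node() helper (the function name here is hypothetical):

	/* Sketch: resolve the NUMA node backing an ACPI device. */
	static int xen_node_for_handle(acpi_handle handle)
	{
		int pxm = xen_acpi_get_pxm(handle);

		if (pxm < 0)
			return NUMA_NO_NODE;	/* no _PXM up the tree */
		return pxm_to_node(pxm);
	}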
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index cc2e1a7e44e..a4c1c6a9369 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -29,6 +29,9 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages,
bool highmem);
void free_xenballooned_pages(int nr_pages, struct page **pages);
+struct page *get_balloon_scratch_page(void);
+void put_balloon_scratch_page(void);
+
struct device;
#ifdef CONFIG_XEN_SELFBALLOONING
extern int register_xen_selfballooning(struct device *dev);
diff --git a/include/xen/events.h b/include/xen/events.h
index c6bfe01acf6..8bee7a75e85 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -2,11 +2,16 @@
#define _XEN_EVENTS_H
#include <linux/interrupt.h>
+#ifdef CONFIG_PCI_MSI
+#include <linux/msi.h>
+#endif
#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/events.h>
+unsigned xen_evtchn_nr_channels(void);
+
int bind_evtchn_to_irq(unsigned int evtchn);
int bind_evtchn_to_irqhandler(unsigned int evtchn,
irq_handler_t handler,
@@ -37,6 +42,11 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
*/
void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+#define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX
+#define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
+#define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN
+int xen_set_irq_priority(unsigned irq, unsigned priority);
+
/*
* Allow extra references to event channels exposed to userspace by evtchn
*/
@@ -45,7 +55,6 @@ int evtchn_get(unsigned int evtchn);
void evtchn_put(unsigned int evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
-int resend_irq_on_evtchn(unsigned int irq);
void rebind_evtchn_irq(int evtchn, int irq);
static inline void notify_remote_via_evtchn(int port)
@@ -73,9 +82,14 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
/* Determine the IRQ which is bound to an event channel */
unsigned irq_from_evtchn(unsigned int evtchn);
+int irq_from_virq(unsigned int cpu, unsigned int virq);
+unsigned int evtchn_from_irq(unsigned irq);
/* Xen HVM evtchn vector callback */
void xen_hvm_callback_vector(void);
+#ifdef CONFIG_TRACING
+#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
+#endif
extern int xen_have_vector_callback;
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
@@ -90,8 +104,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
/* Bind an MSI pirq to an irq. */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
- int pirq, int vector, const char *name,
- domid_t domid);
+ int pirq, int nvec, const char *name, domid_t domid);
#endif
/* De-allocates the above mentioned physical interrupt. */
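xen_set_irq_priority() maps the XEN_IRQ_PRIORITY_* values straight onto the FIFO ABI's EVTCHN_FIFO_PRIORITY_* levels, so it only takes effect once the FIFO-based event channel ABI is in use. A hypothetical driver sketch that binds an event channel and then asks for the highest priority, treating the priority call as best effort (it is expected to fail on the 2-level ABI):

	static irqreturn_t my_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;	/* hypothetical handler body */
	}

	static int my_bind(unsigned int evtchn)
	{
		int irq = bind_evtchn_to_irqhandler(evtchn, my_handler,
						    0, "my-dev", NULL);
		if (irq < 0)
			return irq;
		/* Best effort: no effect / error on the 2-level ABI. */
		xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
		return irq;
	}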
diff --git a/include/xen/gntalloc.h b/include/xen/gntalloc.h
deleted file mode 100644
index 76bd58065f4..00000000000
--- a/include/xen/gntalloc.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/******************************************************************************
- * gntalloc.h
- *
- * Interface to /dev/xen/gntalloc.
- *
- * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
- *
- * This file is in the public domain.
- */
-
-#ifndef __LINUX_PUBLIC_GNTALLOC_H__
-#define __LINUX_PUBLIC_GNTALLOC_H__
-
-/*
- * Allocates a new page and creates a new grant reference.
- */
-#define IOCTL_GNTALLOC_ALLOC_GREF \
-_IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_gntalloc_alloc_gref))
-struct ioctl_gntalloc_alloc_gref {
- /* IN parameters */
- /* The ID of the domain to be given access to the grants. */
- uint16_t domid;
- /* Flags for this mapping */
- uint16_t flags;
- /* Number of pages to map */
- uint32_t count;
- /* OUT parameters */
- /* The offset to be used on a subsequent call to mmap(). */
- uint64_t index;
- /* The grant references of the newly created grant, one per page */
- /* Variable size, depending on count */
- uint32_t gref_ids[1];
-};
-
-#define GNTALLOC_FLAG_WRITABLE 1
-
-/*
- * Deallocates the grant reference, allowing the associated page to be freed if
- * no other domains are using it.
- */
-#define IOCTL_GNTALLOC_DEALLOC_GREF \
-_IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_gntalloc_dealloc_gref))
-struct ioctl_gntalloc_dealloc_gref {
- /* IN parameters */
- /* The offset returned in the map operation */
- uint64_t index;
- /* Number of references to unmap */
- uint32_t count;
-};
-
-/*
- * Sets up an unmap notification within the page, so that the other side can do
- * cleanup if this side crashes. Required to implement cross-domain robust
- * mutexes or close notification on communication channels.
- *
- * Each mapped page only supports one notification; multiple calls referring to
- * the same page overwrite the previous notification. You must clear the
- * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it
- * to occur.
- */
-#define IOCTL_GNTALLOC_SET_UNMAP_NOTIFY \
-_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntalloc_unmap_notify))
-struct ioctl_gntalloc_unmap_notify {
- /* IN parameters */
- /* Offset in the file descriptor for a byte within the page (same as
- * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
- * be cleared. Otherwise, it can be any byte in the page whose
- * notification we are adjusting.
- */
- uint64_t index;
- /* Action(s) to take on unmap */
- uint32_t action;
- /* Event channel to notify */
- uint32_t event_channel_port;
-};
-
-/* Clear (set to zero) the byte specified by index */
-#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
-/* Send an interrupt on the indicated event channel */
-#define UNMAP_NOTIFY_SEND_EVENT 0x2
-
-#endif /* __LINUX_PUBLIC_GNTALLOC_H__ */
diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h
deleted file mode 100644
index 5304bd3c84c..00000000000
--- a/include/xen/gntdev.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/******************************************************************************
- * gntdev.h
- *
- * Interface to /dev/xen/gntdev.
- *
- * Copyright (c) 2007, D G Murray
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __LINUX_PUBLIC_GNTDEV_H__
-#define __LINUX_PUBLIC_GNTDEV_H__
-
-struct ioctl_gntdev_grant_ref {
- /* The domain ID of the grant to be mapped. */
- uint32_t domid;
- /* The grant reference of the grant to be mapped. */
- uint32_t ref;
-};
-
-/*
- * Inserts the grant references into the mapping table of an instance
- * of gntdev. N.B. This does not perform the mapping, which is deferred
- * until mmap() is called with @index as the offset.
- */
-#define IOCTL_GNTDEV_MAP_GRANT_REF \
-_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
-struct ioctl_gntdev_map_grant_ref {
- /* IN parameters */
- /* The number of grants to be mapped. */
- uint32_t count;
- uint32_t pad;
- /* OUT parameters */
- /* The offset to be used on a subsequent call to mmap(). */
- uint64_t index;
- /* Variable IN parameter. */
- /* Array of grant references, of size @count. */
- struct ioctl_gntdev_grant_ref refs[1];
-};
-
-/*
- * Removes the grant references from the mapping table of an instance of
- * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
- * before this ioctl is called, or an error will result.
- */
-#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
-_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
-struct ioctl_gntdev_unmap_grant_ref {
- /* IN parameters */
- /* The offset was returned by the corresponding map operation. */
- uint64_t index;
- /* The number of pages to be unmapped. */
- uint32_t count;
- uint32_t pad;
-};
-
-/*
- * Returns the offset in the driver's address space that corresponds
- * to @vaddr. This can be used to perform a munmap(), followed by an
- * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
- * the caller. The number of pages that were allocated at the same time as
- * @vaddr is returned in @count.
- *
- * N.B. Where more than one page has been mapped into a contiguous range, the
- * supplied @vaddr must correspond to the start of the range; otherwise
- * an error will result. It is only possible to munmap() the entire
- * contiguously-allocated range at once, and not any subrange thereof.
- */
-#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
-_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
-struct ioctl_gntdev_get_offset_for_vaddr {
- /* IN parameters */
- /* The virtual address of the first mapped page in a range. */
- uint64_t vaddr;
- /* OUT parameters */
- /* The offset that was used in the initial mmap() operation. */
- uint64_t offset;
- /* The number of pages mapped in the VM area that begins at @vaddr. */
- uint32_t count;
- uint32_t pad;
-};
-
-/*
- * Sets the maximum number of grants that may mapped at once by this gntdev
- * instance.
- *
- * N.B. This must be called before any other ioctl is performed on the device.
- */
-#define IOCTL_GNTDEV_SET_MAX_GRANTS \
-_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
-struct ioctl_gntdev_set_max_grants {
- /* IN parameter */
- /* The maximum number of grants that may be mapped at once. */
- uint32_t count;
-};
-
-/*
- * Sets up an unmap notification within the page, so that the other side can do
- * cleanup if this side crashes. Required to implement cross-domain robust
- * mutexes or close notification on communication channels.
- *
- * Each mapped page only supports one notification; multiple calls referring to
- * the same page overwrite the previous notification. You must clear the
- * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it
- * to occur.
- */
-#define IOCTL_GNTDEV_SET_UNMAP_NOTIFY \
-_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntdev_unmap_notify))
-struct ioctl_gntdev_unmap_notify {
- /* IN parameters */
- /* Offset in the file descriptor for a byte within the page (same as
- * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
- * be cleared. Otherwise, it can be any byte in the page whose
- * notification we are adjusting.
- */
- uint64_t index;
- /* Action(s) to take on unmap */
- uint32_t action;
- /* Event channel to notify */
- uint32_t event_channel_port;
-};
-
-/* Clear (set to zero) the byte specified by index */
-#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
-/* Send an interrupt on the indicated event channel */
-#define UNMAP_NOTIFY_SEND_EVENT 0x2
-
-#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 694dcaf266e..5c1aba154b6 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
unmap->dev_bus_addr = 0;
}
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
void **__shared);
@@ -178,8 +179,15 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);
-extern unsigned long xen_hvm_resume_frames;
+struct grant_frames {
+ xen_pfn_t *pfn;
+ unsigned int count;
+ void *vaddr;
+};
+extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
+void gnttab_free_auto_xlat_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
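gnttab_setup_auto_xlat_frames() replaces the old xen_hvm_resume_frames variable for auto-translated guests: the caller hands over the physical address reserved for grant-table frames and the core fills in xen_auto_xlat_grant_frames. A hedged sketch of the expected sequence, assuming platform code has already discovered that address (from the platform PCI device, device tree, etc.):

	/* Sketch: publish the grant-frame area, then init grant tables. */
	static int setup_grants(phys_addr_t gnttab_base)
	{
		int rc = gnttab_setup_auto_xlat_frames(gnttab_base);

		if (rc)
			return rc;
		return gnttab_init();
	}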
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
index 13e43e41637..63917a8de3b 100644
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -44,8 +44,8 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
xhv.index = idx;
r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
if (r < 0) {
- printk(KERN_ERR "Cannot get hvm parameter %s (%d): %d!\n",
- param_name(idx), idx, r);
+ pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+ param_name(idx), idx, r);
return r;
}
*value = xhv.value;
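hvm_get_parameter() is a thin wrapper around the HVMOP_get_param hypercall; a typical caller fetches the event-channel callback configuration. A minimal sketch, assuming xen/interface/hvm/params.h for HVM_PARAM_CALLBACK_IRQ:

	static int get_callback_via(uint64_t *via)
	{
		return hvm_get_parameter(HVM_PARAM_CALLBACK_IRQ, via);
	}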
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
index 8c5fa0e2015..dc3193f4b58 100644
--- a/include/xen/interface/callback.h
+++ b/include/xen/interface/callback.h
@@ -36,7 +36,7 @@
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
-/* ia64, x86: Callback for event delivery. */
+/* x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 0360b15f488..6f4eae328ca 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -140,6 +140,19 @@
*/
#define XEN_ELFNOTE_SUSPEND_CANCEL 14
+/*
+ * The features supported by this kernel (numeric).
+ *
+ * Unlike XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a
+ * kernel to specify support for features that older hypervisors don't
+ * know about. The set of features 4.2 and newer hypervisors will
+ * consider supported by the kernel is the combination of the sets
+ * specified through this and the string note.
+ *
+ * LEGACY: FEATURES
+ */
+#define XEN_ELFNOTE_SUPPORTED_FEATURES 17
+
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
/*
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
index f4942921e20..7e6acef5415 100644
--- a/include/xen/interface/event_channel.h
+++ b/include/xen/interface/event_channel.h
@@ -190,6 +190,39 @@ struct evtchn_reset {
};
typedef struct evtchn_reset evtchn_reset_t;
+/*
+ * EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
+ */
+#define EVTCHNOP_init_control 11
+struct evtchn_init_control {
+ /* IN parameters. */
+ uint64_t control_gfn;
+ uint32_t offset;
+ uint32_t vcpu;
+ /* OUT parameters. */
+ uint8_t link_bits;
+ uint8_t _pad[7];
+};
+
+/*
+ * EVTCHNOP_expand_array: add an additional page to the event array.
+ */
+#define EVTCHNOP_expand_array 12
+struct evtchn_expand_array {
+ /* IN parameters. */
+ uint64_t array_gfn;
+};
+
+/*
+ * EVTCHNOP_set_priority: set the priority for an event channel.
+ */
+#define EVTCHNOP_set_priority 13
+struct evtchn_set_priority {
+ /* IN parameters. */
+ uint32_t port;
+ uint32_t priority;
+};
+
struct evtchn_op {
uint32_t cmd; /* EVTCHNOP_* */
union {
@@ -207,4 +240,39 @@ struct evtchn_op {
};
DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
+/*
+ * 2-level ABI
+ */
+
+#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
+
+/*
+ * FIFO ABI
+ */
+
+/* Events may have priorities from 0 (highest) to 15 (lowest). */
+#define EVTCHN_FIFO_PRIORITY_MAX 0
+#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
+#define EVTCHN_FIFO_PRIORITY_MIN 15
+
+#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
+
+typedef uint32_t event_word_t;
+
+#define EVTCHN_FIFO_PENDING 31
+#define EVTCHN_FIFO_MASKED 30
+#define EVTCHN_FIFO_LINKED 29
+#define EVTCHN_FIFO_BUSY 28
+
+#define EVTCHN_FIFO_LINK_BITS 17
+#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
+
+#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
+
+struct evtchn_fifo_control_block {
+ uint32_t ready;
+ uint32_t _rsvd;
+ event_word_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+
#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
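In the FIFO ABI each channel is one 32-bit event_word_t: the top four bits hold the PENDING/MASKED/LINKED/BUSY flags defined above, and the low EVTCHN_FIFO_LINK_BITS bits link to the next port in the same priority queue. A minimal decoding sketch (helper names hypothetical):

	static inline bool evtchn_fifo_pending(event_word_t w)
	{
		return w & (1U << EVTCHN_FIFO_PENDING);
	}

	static inline bool evtchn_fifo_masked(event_word_t w)
	{
		return w & (1U << EVTCHN_FIFO_MASKED);
	}

	static inline uint32_t evtchn_fifo_link(event_word_t w)
	{
		return w & EVTCHN_FIFO_LINK_MASK; /* index of next port */
	}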
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 01c3d62436e..c33e1c489eb 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -86,7 +86,7 @@ typedef uint64_t blkif_sector_t;
* Interface%20manuals/100293068c.pdf
* The backend can optionally provide three extra XenBus attributes to
* further optimize the discard functionality:
- * 'discard-aligment' - Devices that support discard functionality may
+ * 'discard-alignment' - Devices that support discard functionality may
* internally allocate space in units that are bigger than the exported
* logical block size. The discard-alignment parameter indicates how many bytes
* the beginning of the partition is offset from the internal allocation unit's
@@ -103,33 +103,61 @@ typedef uint64_t blkif_sector_t;
#define BLKIF_OP_DISCARD 5
/*
+ * Recognized if "feature-max-indirect-segments" is present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT 6
+
+/*
* Maximum scatter/gather segments per request.
* This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
* NB. This could be 12 if the ring indexes weren't stored in the same page.
*/
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
+
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_request_discard {
uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
blkif_vdev_t _pad1; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -138,11 +166,40 @@ struct blkif_request_discard {
uint8_t _pad3;
} __attribute__((__packed__));
+struct blkif_request_other {
+ uint8_t _pad1;
+ blkif_vdev_t _pad2; /* only for read/write requests */
+#ifndef CONFIG_X86_32
+ uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
+#endif
+ uint64_t id; /* private guest value, echoed in resp */
+} __attribute__((__packed__));
+
+struct blkif_request_indirect {
+ uint8_t indirect_op;
+ uint16_t nr_segments;
+#ifndef CONFIG_X86_32
+ uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+ uint64_t id;
+ blkif_sector_t sector_number;
+ blkif_vdev_t handle;
+ uint16_t _pad2;
+ grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifndef CONFIG_X86_32
+ uint32_t _pad3; /* make it 64 byte aligned */
+#else
+ uint64_t _pad3; /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
struct blkif_request {
uint8_t operation; /* BLKIF_OP_??? */
union {
struct blkif_request_rw rw;
struct blkif_request_discard discard;
+ struct blkif_request_other other;
+ struct blkif_request_indirect indirect;
} u;
} __attribute__((__packed__));
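The indirect-segment comment above pins down the page arithmetic; in kernel terms it is a single DIV_ROUND_UP. A sketch of the computation (macro and function names hypothetical, mirroring the ceil() formula in the comment):

	#define SEGS_PER_INDIRECT_FRAME \
		(PAGE_SIZE / sizeof(struct blkif_request_segment))

	/* Grant pages needed to carry nr_segments indirect segments. */
	static unsigned int indirect_pages(unsigned int nr_segments)
	{
		return DIV_ROUND_UP(nr_segments, SEGS_PER_INDIRECT_FRAME);
	}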
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 9dfc1200098..70054cc0708 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -13,6 +13,24 @@
#include <xen/interface/grant_table.h>
/*
+ * Older implementations of the Xen network frontend / backend have an
+ * implicit dependency on MAX_SKB_FRAGS as the maximum number of ring
+ * slots an skb can use. Netfront / netback may not work as expected
+ * when frontend and backend have different MAX_SKB_FRAGS.
+ *
+ * A better approach is to add a mechanism for netfront / netback to
+ * negotiate this value. However, we cannot fix all possible
+ * frontends, so we need to define a value which states the minimum
+ * number of slots a backend must support.
+ *
+ * The minimum value derives from older Linux kernels' MAX_SKB_FRAGS
+ * (18), which has proven to work with most frontends. Any new backend
+ * which doesn't negotiate with the frontend should expect the frontend
+ * to send a valid packet using slots up to this value.
+ */
+#define XEN_NETIF_NR_SLOTS_MIN 18
+
+/*
* Notifications after enqueuing any type of message should be conditional on
* the appropriate req_event or rsp_event field in the shared ring.
* If the client sends notification for rx requests then it should specify
@@ -20,6 +38,85 @@
* that it cannot safely queue packets (as it may not be kicked to send them).
*/
+ /*
+ * "feature-split-event-channels" is introduced to separate guest TX
+ * and RX notificaion. Backend either doesn't support this feature or
+ * advertise it via xenstore as 0 (disabled) or 1 (enabled).
+ *
+ * To make use of this feature, frontend should allocate two event
+ * channels for TX and RX, advertise them to backend as
+ * "event-channel-tx" and "event-channel-rx" respectively. If frontend
+ * doesn't want to use this feature, it just writes "event-channel"
+ * node as before.
+ */
+
+/*
+ * Multiple transmit and receive queues:
+ * If supported, the backend will write the key "multi-queue-max-queues" to
+ * the directory for that vif, and set its value to the maximum supported
+ * number of queues.
+ * Frontends that are aware of this feature and wish to use it can write the
+ * key "multi-queue-num-queues", set to the number they wish to use, which
+ * must be greater than zero, and no more than the value reported by the backend
+ * in "multi-queue-max-queues".
+ *
+ * Queues replicate the shared rings and event channels.
+ * "feature-split-event-channels" may optionally be used when using
+ * multiple queues, but is not mandatory.
+ *
+ * Each queue consists of one shared ring pair, i.e. there must be the same
+ * number of tx and rx rings.
+ *
+ * For frontends requesting just one queue, the usual event-channel and
+ * ring-ref keys are written as before, simplifying the backend processing
+ * to avoid distinguishing between a frontend that doesn't understand the
+ * multi-queue feature, and one that does, but requested only one queue.
+ *
+ * Frontends requesting two or more queues must not write the toplevel
+ * event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys,
+ * instead writing those keys under sub-keys having the name "queue-N" where
+ * N is the integer ID of the queue for which those keys belong. Queues
+ * are indexed from zero. For example, a frontend with two queues and split
+ * event channels must write the following set of queue-related keys:
+ *
+ * /local/domain/1/device/vif/0/multi-queue-num-queues = "2"
+ * /local/domain/1/device/vif/0/queue-0 = ""
+ * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
+ * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
+ * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>"
+ * /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>"
+ * /local/domain/1/device/vif/0/queue-1 = ""
+ * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
+ * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1>"
+ * /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
+ * /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>"
+ *
+ * If there is any inconsistency in the XenStore data, the backend may
+ * choose not to connect any queues, instead treating the request as an
+ * error. This includes scenarios where more (or fewer) queues were
+ * requested than the frontend provided details for.
+ *
+ * Mapping of packets to queues is considered to be a function of the
+ * transmitting system (backend or frontend) and is not negotiated
+ * between the two. Guests are free to transmit packets on any queue
+ * they choose, provided it has been set up correctly. Guests must be
+ * prepared to receive packets on any queue they have requested be set up.
+ */
+
+/*
+ * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
+ * offload off or on. If it is missing then the feature is assumed to be on.
+ * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
+ * offload on or off. If it is missing then the feature is assumed to be off.
+ */
+
+/*
+ * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
+ * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
+ * frontends nor backends are assumed to be capable unless the flags are
+ * present.
+ */
+
/*
* This is the 'wire' format for packets:
* Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags)
@@ -47,6 +144,7 @@
#define _XEN_NETTXF_extra_info (3)
#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
+#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
struct xen_netif_tx_request {
grant_ref_t gref; /* Reference to buffer page */
uint16_t offset; /* Offset within buffer page */
@@ -64,8 +162,10 @@ struct xen_netif_tx_request {
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
-/* GSO types - only TCPv4 currently supported. */
+/* GSO types */
+#define XEN_NETIF_GSO_TYPE_NONE (0)
#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
+#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
/*
* This structure needs to fit within both netif_tx_request and
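The multi-queue keys described earlier in this file are ordinary xenstore writes. A hedged frontend-side sketch using xenbus_printf() (helper name hypothetical; error handling and ring/event-channel allocation elided):

	static void write_queue_keys(struct xenbus_device *dev, unsigned int q,
				     grant_ref_t tx_ref, grant_ref_t rx_ref,
				     unsigned int evtchn_tx,
				     unsigned int evtchn_rx)
	{
		char dir[64];

		/* Keys land under <frontend-path>/queue-N/ as listed above. */
		snprintf(dir, sizeof(dir), "%s/queue-%u", dev->nodename, q);
		xenbus_printf(XBT_NIL, dir, "tx-ring-ref", "%u", tx_ref);
		xenbus_printf(XBT_NIL, dir, "rx-ring-ref", "%u", rx_ref);
		xenbus_printf(XBT_NIL, dir, "event-channel-tx", "%u", evtchn_tx);
		xenbus_printf(XBT_NIL, dir, "event-channel-rx", "%u", evtchn_rx);
	}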
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
index 0eafaf254ff..545a14ba0bb 100644
--- a/include/xen/interface/io/protocols.h
+++ b/include/xen/interface/io/protocols.h
@@ -3,7 +3,6 @@
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
-#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
#define XEN_IO_PROTO_ABI_ARM "arm-abi"
@@ -11,11 +10,9 @@
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
-#elif defined(__ia64__)
-# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
#elif defined(__powerpc64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
#else
# error arch fixup needed here
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 75271b9a8f6..7d28aff605c 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -188,6 +188,11 @@ struct __name##_back_ring { \
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
+ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+
#define RING_PUSH_REQUESTS(_r) do { \
wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
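RING_REQUEST_PROD_OVERFLOW() lets a backend validate the frontend's shared producer index before trusting it; it compares against rsp_prod_pvt, i.e. against requests the backend has not yet answered. A backend-side sketch, assuming ring points at a back ring declared with DEFINE_RING_TYPES():

	RING_IDX prod = ring->sring->req_prod;

	rmb();	/* read producer before touching request contents */
	if (RING_REQUEST_PROD_OVERFLOW(ring, prod)) {
		/* More requests outstanding than ring slots:
		 * misbehaving frontend, stop processing this ring. */
		return -EACCES;
	}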
diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
new file mode 100644
index 00000000000..28e7dcd75e8
--- /dev/null
+++ b/include/xen/interface/io/tpmif.h
@@ -0,0 +1,52 @@
+/******************************************************************************
+ * tpmif.h
+ *
+ * TPM I/O interface for Xen guest OSes, v2
+ *
+ * This file is in the public domain.
+ *
+ */
+
+#ifndef __XEN_PUBLIC_IO_TPMIF_H__
+#define __XEN_PUBLIC_IO_TPMIF_H__
+
+/*
+ * Xenbus state machine
+ *
+ * Device open:
+ * 1. Both ends start in XenbusStateInitialising
+ * 2. Backend transitions to InitWait (frontend does not wait on this step)
+ * 3. Frontend populates ring-ref, event-channel, feature-protocol-v2
+ * 4. Frontend transitions to Initialised
+ * 5. Backend maps grant and event channel, verifies feature-protocol-v2
+ * 6. Backend transitions to Connected
+ * 7. Frontend verifies feature-protocol-v2, transitions to Connected
+ *
+ * Device close:
+ * 1. State is changed to XenbusStateClosing
+ * 2. Frontend transitions to Closed
+ * 3. Backend unmaps grant and event, changes state to InitWait
+ */
+
+enum vtpm_shared_page_state {
+ VTPM_STATE_IDLE, /* no contents / vTPM idle / cancel complete */
+ VTPM_STATE_SUBMIT, /* request ready / vTPM working */
+ VTPM_STATE_FINISH, /* response ready / vTPM idle */
+ VTPM_STATE_CANCEL, /* cancel requested / vTPM working */
+};
+/* The backend should only change state to IDLE or FINISH, while the
+ * frontend should only change to SUBMIT or CANCEL. */
+
+
+struct vtpm_shared_page {
+ uint32_t length; /* request/response length in bytes */
+
+ uint8_t state; /* enum vtpm_shared_page_state */
+ uint8_t locality; /* for the current request */
+ uint8_t pad;
+
+ uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
+ uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */
+};
+
+#endif
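A hedged frontend sketch of one v2 exchange, assuming the command payload sits in the shared page immediately after the header (and fits there, i.e. nr_extra_pages is zero) and that the usual event channel carries the kick:

	/* Sketch: hand one TPM command to the backend. */
	static void vtpm_submit(struct vtpm_shared_page *shr,
				const void *cmd, uint32_t len, int evtchn)
	{
		memcpy(shr + 1, cmd, len);	/* payload follows header */
		shr->length = len;
		wmb();				/* publish payload first */
		shr->state = VTPM_STATE_SUBMIT;
		notify_remote_via_evtchn(evtchn);
		/* ... poll/wait until state == VTPM_STATE_FINISH ... */
	}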
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index b40a4315cb8..2ecfe4f700d 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -190,6 +190,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
#define XENMEM_add_to_physmap_range 23
struct xen_add_to_physmap_range {
+ /* IN */
/* Which domain to change the mapping for. */
domid_t domid;
uint16_t space; /* => enum phys_map_space */
@@ -203,6 +204,11 @@ struct xen_add_to_physmap_range {
/* GPFN in domid where the source mapping page should appear. */
GUEST_HANDLE(xen_pfn_t) gpfns;
+
+ /* OUT */
+
+ /* Per index error code. */
+ GUEST_HANDLE(int) errs;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
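With the new errs handle, XENMEM_add_to_physmap_range reports success or failure per index instead of only as a single return value. A hedged caller sketch, assuming idxs/gpfns/errs arrays of count entries are already populated (variable names hypothetical):

	struct xen_add_to_physmap_range xatpr = {
		.domid = DOMID_SELF,
		.space = XENMAPSPACE_gmfn_foreign,
		.size = count,
		.foreign_domid = otherend_id,
	};
	int i, rc;

	set_xen_guest_handle(xatpr.idxs, idxs);
	set_xen_guest_handle(xatpr.gpfns, gpfns);
	set_xen_guest_handle(xatpr.errs, errs);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatpr);
	for (i = 0; i < count && rc == 0; i++)
		if (errs[i])
			rc = errs[i];	/* first per-index failure wins */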
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 1844d31f455..610dba9b620 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -131,6 +131,7 @@ struct physdev_irq {
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
#define MAP_PIRQ_TYPE_MSI_SEG 0x3
+#define MAP_PIRQ_TYPE_MULTI_MSI 0x4
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -141,11 +142,16 @@ struct physdev_map_pirq {
int index;
/* IN or OUT */
int pirq;
- /* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
+ /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */
int bus;
/* IN */
int devfn;
- /* IN */
+ /* IN
+ * - For MSI-X contains entry number.
+ * - For MSI with ..._MULTI_MSI contains number of vectors.
+ * OUT (..._MULTI_MSI only)
+ * - Number of vectors allocated.
+ */
int entry_nr;
/* IN */
uint64_t table_base;
@@ -231,6 +237,17 @@ struct physdev_get_free_pirq {
#define XEN_PCI_DEV_VIRTFN 0x2
#define XEN_PCI_DEV_PXM 0x4
+#define XEN_PCI_MMCFG_RESERVED 0x1
+
+#define PHYSDEVOP_pci_mmcfg_reserved 24
+struct physdev_pci_mmcfg_reserved {
+ uint64_t address;
+ uint16_t segment;
+ uint8_t start_bus;
+ uint8_t end_bus;
+ uint32_t flags;
+};
+
#define PHYSDEVOP_pci_device_add 25
struct physdev_pci_device_add {
/* IN */
@@ -251,6 +268,12 @@ struct physdev_pci_device_add {
#define PHYSDEVOP_pci_device_remove 26
#define PHYSDEVOP_restore_msi_ext 27
+/*
+ * Dom0 should use these two to announce MMIO resources assigned to
+ * MSI-X capable devices won't (prepare) or may (release) change.
+ */
+#define PHYSDEVOP_prepare_msix 30
+#define PHYSDEVOP_release_msix 31
struct physdev_pci_device {
/* IN */
uint16_t seg;
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 5e36932ab40..f1331e3e727 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -152,10 +152,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_firmware_info_t);
#define XENPF_enter_acpi_sleep 51
struct xenpf_enter_acpi_sleep {
/* IN variables */
- uint16_t pm1a_cnt_val; /* PM1a control value. */
- uint16_t pm1b_cnt_val; /* PM1b control value. */
+ uint16_t val_a; /* PM1a control / sleep type A. */
+ uint16_t val_b; /* PM1b control / sleep type B. */
uint32_t sleep_state; /* Which state to enter (Sn). */
- uint32_t flags; /* Must be zero. */
+#define XENPF_ACPI_SLEEP_EXTENDED 0x00000001
+ uint32_t flags; /* XENPF_ACPI_SLEEP_*. */
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_enter_acpi_sleep_t);
@@ -324,10 +325,21 @@ struct xenpf_cpu_ol {
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
-/*
- * CMD 58 and 59 are reserved for cpu hotadd and memory hotadd,
- * which are already occupied at Xen hypervisor side.
- */
+#define XENPF_cpu_hotadd 58
+struct xenpf_cpu_hotadd {
+ uint32_t apic_id;
+ uint32_t acpi_id;
+ uint32_t pxm;
+};
+
+#define XENPF_mem_hotadd 59
+struct xenpf_mem_hotadd {
+ uint64_t spfn;
+ uint64_t epfn;
+ uint32_t pxm;
+ uint32_t flags;
+};
+
#define XENPF_core_parking 60
struct xenpf_core_parking {
/* IN variables */
@@ -357,6 +369,8 @@ struct xen_platform_op {
struct xenpf_set_processor_pminfo set_pminfo;
struct xenpf_pcpuinfo pcpu_info;
struct xenpf_cpu_ol cpu_ol;
+ struct xenpf_cpu_hotadd cpu_add;
+ struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
uint8_t pad[128];
} u;
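The XENPF_ACPI_SLEEP_EXTENDED flag selects between the two meanings of val_a/val_b (legacy PM1x control values vs. ACPI 5.0 sleep type A/B), matching the two notify helpers added to xen/acpi.h earlier in this diff. A dom0-side sketch of the extended form, assuming the x86 HYPERVISOR_dom0_op wrapper:

	struct xen_platform_op op = {
		.cmd = XENPF_enter_acpi_sleep,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.enter_acpi_sleep = {
			.val_a = sleep_type_a,
			.val_b = sleep_type_b,
			.sleep_state = sleep_state,
			.flags = XENPF_ACPI_SLEEP_EXTENDED,
		},
	};

	return HYPERVISOR_dom0_op(&op);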
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index 87e6f8a4866..b05288ce399 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -170,4 +170,6 @@ struct vcpu_register_vcpu_info {
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
+/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
+#define VCPUOP_send_nmi 11
#endif /* __XEN_PUBLIC_VCPU_H__ */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 886a5d80a18..de082130ba4 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -275,18 +275,12 @@ DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
* NB. The fields are natural register size for this architecture.
*/
struct multicall_entry {
- unsigned long op;
- long result;
- unsigned long args[6];
+ xen_ulong_t op;
+ xen_long_t result;
+ xen_ulong_t args[6];
};
DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
-/*
- * Event channel endpoints per domain:
- * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
- */
-#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
-
struct vcpu_time_info {
/*
* Updates to the following values are preceded and followed
@@ -341,7 +335,7 @@ struct vcpu_info {
*/
uint8_t evtchn_upcall_pending;
uint8_t evtchn_upcall_mask;
- unsigned long evtchn_pending_sel;
+ xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
@@ -384,8 +378,8 @@ struct shared_info {
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C long' in the PENDING bitfield array.
*/
- unsigned long evtchn_pending[sizeof(unsigned long) * 8];
- unsigned long evtchn_mask[sizeof(unsigned long) * 8];
+ xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
+ xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
deleted file mode 100644
index ac45e0712af..00000000000
--- a/include/xen/interface/xencomm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright (C) IBM Corp. 2006
- */
-
-#ifndef _XEN_XENCOMM_H_
-#define _XEN_XENCOMM_H_
-
-/* A xencomm descriptor is a scatter/gather list containing physical
- * addresses corresponding to a virtually contiguous memory area. The
- * hypervisor translates these physical addresses to machine addresses to copy
- * to and from the virtually contiguous area.
- */
-
-#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-#define XENCOMM_INVALID (~0UL)
-
-struct xencomm_desc {
- uint32_t magic;
- uint32_t nr_addrs; /* the number of entries in address[] */
- uint64_t address[0];
-};
-
-#endif /* _XEN_XENCOMM_H_ */
diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h
index 438c256c274..5c52b558391 100644
--- a/include/xen/platform_pci.h
+++ b/include/xen/platform_pci.h
@@ -46,6 +46,27 @@ static inline int xen_must_unplug_disks(void) {
#endif
}
-extern int xen_platform_pci_unplug;
-
+#if defined(CONFIG_XEN_PVHVM)
+extern bool xen_has_pv_devices(void);
+extern bool xen_has_pv_disk_devices(void);
+extern bool xen_has_pv_nic_devices(void);
+extern bool xen_has_pv_and_legacy_disk_devices(void);
+#else
+static inline bool xen_has_pv_devices(void)
+{
+ return IS_ENABLED(CONFIG_XEN);
+}
+static inline bool xen_has_pv_disk_devices(void)
+{
+ return IS_ENABLED(CONFIG_XEN);
+}
+static inline bool xen_has_pv_nic_devices(void)
+{
+ return IS_ENABLED(CONFIG_XEN);
+}
+static inline bool xen_has_pv_and_legacy_disk_devices(void)
+{
+ return false;
+}
+#endif
#endif /* _XEN_PLATFORM_PCI_H */
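These predicates let frontends decide at probe time whether paravirtual devices should be driven at all, e.g. in an HVM guest that was told to keep its emulated disks. A hedged sketch of the expected use in a block frontend probe path (function name hypothetical):

	static int my_blkfront_probe_check(void)
	{
		if (!xen_has_pv_disk_devices())
			return -ENODEV;	/* stay on emulated devices */
		return 0;
	}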
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index de8bcc641c4..8b2eb93ae8b 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -1,6 +1,7 @@
#ifndef __LINUX_SWIOTLB_XEN_H
#define __LINUX_SWIOTLB_XEN_H
+#include <linux/dma-direction.h>
#include <linux/swiotlb.h>
extern int xen_swiotlb_init(int verbose, bool early);
@@ -55,4 +56,6 @@ xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
extern int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
+extern int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/include/xen/tmem.h b/include/xen/tmem.h
index 591550a22ac..3930a90045f 100644
--- a/include/xen/tmem.h
+++ b/include/xen/tmem.h
@@ -3,7 +3,15 @@
#include <linux/types.h>
+#ifdef CONFIG_XEN_TMEM_MODULE
+#define tmem_enabled true
+#else
/* defined in drivers/xen/tmem.c */
extern bool tmem_enabled;
+#endif
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+extern int xen_selfballoon_init(bool, bool);
+#endif
#endif /* _XEN_TMEM_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index d6fe062cad6..0b3149ed7ea 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -2,27 +2,28 @@
#define INCLUDE_XEN_OPS_H
#include <linux/percpu.h>
+#include <linux/notifier.h>
#include <asm/xen/interface.h>
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
void xen_arch_pre_suspend(void);
void xen_arch_post_suspend(int suspend_cancelled);
-void xen_arch_hvm_post_suspend(int suspend_cancelled);
-
-void xen_mm_pin_all(void);
-void xen_mm_unpin_all(void);
void xen_timer_resume(void);
void xen_arch_resume(void);
+void xen_resume_notifier_register(struct notifier_block *nb);
+void xen_resume_notifier_unregister(struct notifier_block *nb);
+
int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
- unsigned int address_bits);
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle);
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
diff --git a/include/xen/xen.h b/include/xen/xen.h
index a74d4362c4f..0c0e3ef4c45 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,4 +29,18 @@ extern enum xen_domain_type xen_domain_type;
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
+#ifdef CONFIG_XEN_PVH
+/* This functionality exists only for x86. The XEN_PVHVM support exists
+ * only in the x86 world - hence on ARM it will always be disabled.
+ * N.B. ARM guests are neither PV nor HVM nor PVHVM.
+ * Their mode is a bit like PVH but also different (further towards
+ * the H end of the spectrum than even PVH).
+ */
+#include <xen/features.h>
+#define xen_pvh_domain() (xen_pv_domain() && \
+ xen_feature(XENFEAT_auto_translated_physmap) && \
+ xen_have_vector_callback)
+#else
+#define xen_pvh_domain() (0)
+#endif
#endif /* _XEN_XEN_H */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 0a7515c1e3a..0324c6d340c 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -70,6 +70,7 @@ struct xenbus_device {
struct device dev;
enum xenbus_state state;
struct completion down;
+ struct work_struct work;
};
static inline struct xenbus_device *to_xenbus_device(struct device *dev)
@@ -206,7 +207,6 @@ int xenbus_unmap_ring(struct xenbus_device *dev,
grant_handle_t handle, void *vaddr);
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
-int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
int xenbus_free_evtchn(struct xenbus_device *dev, int port);
enum xenbus_state xenbus_read_driver_state(const char *path);
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
deleted file mode 100644
index e43b039be11..00000000000
--- a/include/xen/xencomm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- * Jerone Young <jyoung5@us.ibm.com>
- */
-
-#ifndef _LINUX_XENCOMM_H_
-#define _LINUX_XENCOMM_H_
-
-#include <xen/interface/xencomm.h>
-
-#define XENCOMM_MINI_ADDRS 3
-struct xencomm_mini {
- struct xencomm_desc _desc;
- uint64_t address[XENCOMM_MINI_ADDRS];
-};
-
-/* To avoid additionnal virt to phys conversion, an opaque structure is
- presented. */
-struct xencomm_handle;
-
-extern void xencomm_free(struct xencomm_handle *desc);
-extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
-extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
- unsigned long bytes, struct xencomm_mini *xc_area);
-
-#if 0
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- struct xencomm_mini xc_desc ## _base[(n)] \
- __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
- struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
-#else
-/*
- * gcc bug workaround:
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
- * gcc doesn't handle properly stack variable with
- * __attribute__((__align__(sizeof(struct xencomm_mini))))
- */
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- unsigned char xc_desc ## _base[((n) + 1 ) * \
- sizeof(struct xencomm_mini)]; \
- struct xencomm_mini *xc_desc = (struct xencomm_mini *) \
- ((unsigned long)xc_desc ## _base + \
- (sizeof(struct xencomm_mini) - \
- ((unsigned long)xc_desc ## _base) % \
- sizeof(struct xencomm_mini)));
-#endif
-#define xencomm_map_no_alloc(ptr, bytes) \
- ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \
- __xencomm_map_no_alloc(ptr, bytes, xc_desc); })
-
-/* provided by architecture code: */
-extern unsigned long xencomm_vtop(unsigned long vaddr);
-
-static inline void *xencomm_pa(void *ptr)
-{
- return (void *)xencomm_vtop((unsigned long)ptr);
-}
-
-#define xen_guest_handle(hnd) ((hnd).p)
-
-#endif /* _LINUX_XENCOMM_H_ */