author    Jan Beulich <jbeulich@suse.com>  2012-12-18 16:39:47 +0100
committer Jan Beulich <jbeulich@suse.com>  2012-12-18 16:39:47 +0100
commit    fa45001b4da411723b03d5c2cea176e375e0f0e5 (patch)
tree      f9f6505bc302ac420d16858834caed0f1fa8436f
parent    88e64906fbcef710d69dd12a0bb2fc493b719c49 (diff)
- Update Xen patches to 3.7.1 and c/s 1213. (tag: rpm-3.7.1-1)
-rw-r--r--  drivers/xen/blkback/xenbus.c           64
-rw-r--r--  drivers/xen/scsiback/scsiback.c         4
-rw-r--r--  include/xen/interface/arch-x86/xen.h   24
-rw-r--r--  include/xen/interface/domctl.h         10
-rw-r--r--  include/xen/interface/io/blkif.h       60
-rw-r--r--  include/xen/interface/io/vscsiif.h     40
-rw-r--r--  include/xen/interface/memory.h         46
-rw-r--r--  include/xen/interface/sched.h          82
-rw-r--r--  include/xen/interface/xen-compat.h      2
-rw-r--r--  include/xen/interface/xen.h            39
10 files changed, 282 insertions, 89 deletions
diff --git a/drivers/xen/blkback/xenbus.c b/drivers/xen/blkback/xenbus.c
index 5cd53c65d1bf..5d4568098f0e 100644
--- a/drivers/xen/blkback/xenbus.c
+++ b/drivers/xen/blkback/xenbus.c
@@ -208,6 +208,7 @@ static int blkback_remove(struct xenbus_device *dev)
be->blkif = NULL;
}
+ kfree(be->mode);
kfree(be);
dev_set_drvdata(&dev->dev, NULL);
return 0;
@@ -342,6 +343,7 @@ static void backend_changed(struct xenbus_watch *watch,
= container_of(watch, struct backend_info, backend_watch);
struct xenbus_device *dev = be->dev;
int cdrom = 0;
+ unsigned long handle;
char *device_type;
DPRINTK("");
@@ -359,11 +361,11 @@ static void backend_changed(struct xenbus_watch *watch,
return;
}
- if ((be->major || be->minor) &&
- ((be->major != major) || (be->minor != minor))) {
- pr_warning("blkback: changing physical device (from %x:%x to"
- " %x:%x) not supported\n", be->major, be->minor,
- major, minor);
+ if (be->major | be->minor) {
+ if (be->major != major || be->minor != minor)
+ pr_warning("blkback: changing physical device"
+ " (from %x:%x to %x:%x) not supported\n",
+ be->major, be->minor, major, minor);
return;
}
@@ -381,40 +383,40 @@ static void backend_changed(struct xenbus_watch *watch,
kfree(device_type);
}
- if (be->major == 0 && be->minor == 0) {
- /* Front end dir is a number, which is used as the handle. */
-
- char *p = strrchr(dev->otherend, '/') + 1;
- long handle = simple_strtoul(p, NULL, 0);
-
- be->major = major;
- be->minor = minor;
-
- err = vbd_create(be->blkif, handle, major, minor,
- FMODE_READ
- | (strchr(be->mode, 'w') ? FMODE_WRITE : 0)
- | (strchr(be->mode, '!') ? 0 : FMODE_EXCL),
- cdrom);
- switch (err) {
- case -ENOMEDIUM:
- if (be->blkif->vbd.type
- & (VDISK_CDROM | VDISK_REMOVABLE))
- case 0:
- break;
- default:
- be->major = be->minor = 0;
+ /* Front end dir is a number, which is used as the handle. */
+ handle = simple_strtoul(strrchr(dev->otherend, '/') + 1, NULL, 0);
+
+ be->major = major;
+ be->minor = minor;
+
+ err = vbd_create(be->blkif, handle, major, minor,
+ FMODE_READ
+ | (strchr(be->mode, 'w') ? FMODE_WRITE : 0)
+ | (strchr(be->mode, '!') ? 0 : FMODE_EXCL),
+ cdrom);
+
+ switch (err) {
+ case -ENOMEDIUM:
+ if (!(be->blkif->vbd.type & (VDISK_CDROM | VDISK_REMOVABLE))) {
+ default:
xenbus_dev_fatal(dev, err, "creating vbd structure");
- return;
+ break;
}
-
+ /* fall through */
+ case 0:
err = xenvbd_sysfs_addif(dev);
if (err) {
vbd_free(&be->blkif->vbd);
- be->major = be->minor = 0;
xenbus_dev_fatal(dev, err, "creating sysfs entries");
- return;
}
+ break;
+ }
+ if (err) {
+ kfree(be->mode);
+ be->mode = NULL;
+ be->major = be->minor = 0;
+ } else {
/* We're potentially connected now */
update_blkif_status(be->blkif);
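
For reference, a minimal user-space sketch of the handle parsing consolidated in the hunk above; strtoul() stands in for the kernel's simple_strtoul(), and the xenstore path is hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_vbd_handle(const char *otherend)
{
        /* The frontend directory name (last path component) is a number
         * which becomes the VBD handle. */
        const char *p = strrchr(otherend, '/');

        /* The watch only fires on well-formed paths; guard anyway. */
        return strtoul(p ? p + 1 : otherend, NULL, 0);
}

int main(void)
{
        /* Hypothetical frontend path for virtual disk 51712 (xvda). */
        printf("%lu\n", parse_vbd_handle("/local/domain/3/device/vbd/51712"));
        return 0;
}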
diff --git a/drivers/xen/scsiback/scsiback.c b/drivers/xen/scsiback/scsiback.c
index 6a846a5ca889..9991b70457bb 100644
--- a/drivers/xen/scsiback/scsiback.c
+++ b/drivers/xen/scsiback/scsiback.c
@@ -52,8 +52,8 @@ struct list_head pending_free;
DEFINE_SPINLOCK(pending_free_lock);
DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
-int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
-module_param_named(reqs, vscsiif_reqs, int, 0);
+static unsigned int vscsiif_reqs = 128;
+module_param_named(reqs, vscsiif_reqs, uint, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
static unsigned int log_print_stat = 0;
diff --git a/include/xen/interface/arch-x86/xen.h b/include/xen/interface/arch-x86/xen.h
index c13e42a4a733..191947045f4c 100644
--- a/include/xen/interface/arch-x86/xen.h
+++ b/include/xen/interface/arch-x86/xen.h
@@ -38,12 +38,21 @@
typedef type * __guest_handle_ ## name
#endif
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument.
+ * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but
+ * they might not be on other architectures.
+ */
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
@@ -65,7 +74,7 @@ typedef unsigned long xen_pfn_t;
#endif
/*
- * SEGMENT DESCRIPTOR TABLES
+ * `incontents 200 segdesc Segment Descriptor Tables
*/
/*
* ` enum neg_errnoval
@@ -77,11 +86,24 @@ typedef unsigned long xen_pfn_t;
* start of the GDT because some stupid OSes export hard-coded selector values
* in their ABI. These hard-coded values are always near the start of the GDT,
* so Xen places itself out of the way, at the far end of the GDT.
+ *
+ * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op
*/
#define FIRST_RESERVED_GDT_PAGE 14
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_descriptor(u64 pa, u64 desc);
+ * `
+ * ` @pa The machine physical address of the descriptor to
+ * ` update. Must be either a descriptor page or writable.
+ * ` @desc The descriptor value to update, in the same format as a
+ * ` native descriptor table entry.
+ */
+
/* Maximum number of virtual CPUs in legacy multi-processor guests. */
#define XEN_LEGACY_MAX_VCPUS 32
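
As a standalone illustration of how these macros compose, a hedged sketch using the strict-ABI struct-wrapper form that set_xen_guest_handle_raw() expects (the raw-pointer fallback at the top of the hunk is the non-strict variant; the const_ handles are omitted here for brevity):

#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
        typedef struct { type *p; } __guest_handle_ ## name
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
        ___DEFINE_XEN_GUEST_HANDLE(name, type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)

typedef unsigned long xen_ulong_t;
DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);

int main(void)
{
        xen_ulong_t idxs[4] = { 0, 1, 2, 3 };
        XEN_GUEST_HANDLE(xen_ulong_t) h;    /* in-memory struct-field form */

        set_xen_guest_handle_raw(h, idxs);  /* point it at a guest buffer */
        return h.p == idxs ? 0 : 1;
}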
diff --git a/include/xen/interface/domctl.h b/include/xen/interface/domctl.h
index 026dc715422b..40f1009f79a9 100644
--- a/include/xen/interface/domctl.h
+++ b/include/xen/interface/domctl.h
@@ -136,7 +136,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
#define XEN_DOMCTL_PFINFO_XALLOC (0xeU<<28) /* allocate-only page */
-#define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
+#define XEN_DOMCTL_PFINFO_BROKEN (0xdU<<28) /* broken page */
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
struct xen_domctl_getpageframeinfo {
@@ -857,6 +857,12 @@ struct xen_domctl_set_access_required {
typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
+struct xen_domctl_set_broken_page_p2m {
+ uint64_aligned_t pfn;
+};
+typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -922,6 +928,7 @@ struct xen_domctl {
#define XEN_DOMCTL_set_access_required 64
#define XEN_DOMCTL_audit_p2m 65
#define XEN_DOMCTL_set_virq_handler 66
+#define XEN_DOMCTL_set_broken_page_p2m 67
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -978,6 +985,7 @@ struct xen_domctl {
struct xen_domctl_audit_p2m audit_p2m;
struct xen_domctl_set_virq_handler set_virq_handler;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
+ struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
uint8_t pad[128];
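
An illustrative, toolstack-style sketch of invoking the new domctl; the xen_domctl envelope fields (interface_version, domain, union u) follow the usual public header, and the domid/pfn values are hypothetical:

#include <stdint.h>
#include <string.h>
#include <xen/interface/domctl.h>

static void mark_page_broken(struct xen_domctl *d, domid_t dom, uint64_t pfn)
{
        memset(d, 0, sizeof(*d));
        d->cmd = XEN_DOMCTL_set_broken_page_p2m;   /* 67, added above */
        d->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        d->domain = dom;                           /* hypothetical domid */
        d->u.set_broken_page_p2m.pfn = pfn;        /* page to mark broken */
        /* The caller then issues the domctl hypercall, e.g. via privcmd. */
}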
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index f2a9f4dbb4fa..47d1bf83431b 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -126,6 +126,34 @@
* of this type may still be returned at any time with the
* BLKIF_RSP_EOPNOTSUPP result code.
*
+ * feature-persistent
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ * Notes: 7
+ *
+ * A value of "1" indicates that the backend can keep the grants used
+ * by the frontend driver mapped, so the same set of grants should be
+ * used in all transactions. The maximum number of grants the backend
+ * can map persistently depends on the implementation, but ideally it
+ * should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this
+ * feature the backend doesn't need to unmap each grant, preventing
+ * costly TLB flushes. The backend driver should only map grants
+ * persistently if the frontend supports it. If a backend driver chooses
+ * to use the persistent protocol when the frontend doesn't support it,
+ * it will probably hit the maximum number of persistently mapped grants
+ * (due to the fact that the frontend won't be reusing the same grants),
+ * and fall back to non-persistent mode. Backend implementations may
+ * shrink or expand the number of persistently mapped grants without
+ * notifying the frontend depending on memory constraints (this might
+ * cause a performance degradation).
+ *
+ * If a backend driver wants to limit the maximum number of persistently
+ * mapped grants to a value less than RING_SIZE *
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
+ * discard the grants that are less commonly used. Using an LRU in the
+ * backend driver paired with a LIFO queue in the frontend will
+ * allow better performance in this scenario.
+ *
*----------------------- Request Transport Parameters ------------------------
*
* max-ring-page-order
@@ -242,6 +270,27 @@
* The size of the frontend allocated request ring buffer in units of
* machine pages. The value must be a power of 2.
*
+ * feature-persistent
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ * Notes: 7, 8, 9
+ *
+ * A value of "1" indicates that the frontend will reuse the same grants
+ * for all transactions, allowing the backend to map them with write
+ * access (even when it should be read-only). If the frontend hits the
+ * maximum number of allowed persistently mapped grants, it can fall back
+ * to non-persistent mode. This will cause a performance degradation,
+ * since the backend driver will still try to map those grants
+ * persistently. Since the persistent grants protocol is compatible with
+ * the previous protocol, a frontend driver can choose to work in
+ * persistent mode even when the backend doesn't support it.
+ *
+ * It is recommended that the frontend driver stores the persistently
+ * mapped grants in a LIFO queue, so a subset of all persistently mapped
+ * grants gets used commonly. This is done in case the backend driver
+ * decides to limit the maximum number of persistently mapped grants
+ * to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
+ *
*------------------------- Virtual Device Properties -------------------------
*
* device-type
@@ -279,6 +328,17 @@
* 'ring-ref' is used to communicate the grant reference for this
* page to the backend. When using a multi-page ring, the 'ring-ref'
* node is not created. Instead 'ring-ref0' - 'ring-refN' are used.
+ * (7) When using persistent grants, data has to be copied from/to the page
+ * where the grant is currently mapped. However, the overhead of doing
+ * this copy does not outweigh the speed improvement of not having
+ * to unmap the grants.
+ * (8) The frontend driver has to allow the backend driver to map all grants
+ * with write access, even when they should be mapped read-only, since
+ * further requests may reuse these grants and require write permissions.
+ * (9) The Linux implementation doesn't have a limit on the maximum number of
+ * grants that can be persistently mapped in the frontend driver, but
+ * due to the frontend driver implementation it should never be bigger
+ * than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
*/
/*
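
The LRU policy recommended in the feature-persistent notes above is straightforward; a minimal, self-contained user-space sketch (not the in-tree implementation; the unmap step is only a comment) of a backend keeping persistently mapped grants on a recency list:

#include <stdlib.h>

struct pgrant {
        unsigned int gref;              /* grant reference kept mapped */
        struct pgrant *prev, *next;
};

struct pgrant_lru {
        struct pgrant *head, *tail;     /* head = most recently used */
        unsigned int count, limit;      /* hypothetical per-device limit */
};

static void lru_unlink(struct pgrant_lru *l, struct pgrant *g)
{
        if (g->prev) g->prev->next = g->next; else l->head = g->next;
        if (g->next) g->next->prev = g->prev; else l->tail = g->prev;
        l->count--;
}

static void lru_push_front(struct pgrant_lru *l, struct pgrant *g)
{
        g->prev = NULL;
        g->next = l->head;
        if (l->head) l->head->prev = g; else l->tail = g;
        l->head = g;
        l->count++;
}

/* Called on every request that reuses a persistently mapped grant. */
static void lru_touch(struct pgrant_lru *l, struct pgrant *g)
{
        lru_unlink(l, g);
        lru_push_front(l, g);
}

/* Discard the least recently used grants once over the limit. */
static void lru_trim(struct pgrant_lru *l)
{
        while (l->count > l->limit) {
                struct pgrant *victim = l->tail;

                lru_unlink(l, victim);
                /* A real backend would unmap victim->gref here. */
                free(victim);
        }
}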
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
index 3ce2914ff430..c84643a19c94 100644
--- a/include/xen/interface/io/vscsiif.h
+++ b/include/xen/interface/io/vscsiif.h
@@ -30,29 +30,33 @@
#include "ring.h"
#include "../grant_table.h"
-/* command between backend and frontend */
+/* commands between backend and frontend */
#define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */
#define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/
#define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/
-
-
-#define VSCSIIF_BACK_MAX_PENDING_REQS 128
+#define VSCSIIF_ACT_SCSI_SG_PRESET 4 /* Preset SG elements */
/*
* Maximum scatter/gather segments per request.
*
- * Considering balance between allocating al least 16 "vscsiif_request"
- * structures on one page (4096bytes) and number of scatter gather
- * needed, we decided to use 26 as a magic number.
+ * Considering balance between allocating at least 16 "vscsiif_request"
+ * structures on one page (4096 bytes) and the number of scatter/gather
+ * elements needed, we decided to use 26 as a magic number.
*/
#define VSCSIIF_SG_TABLESIZE 26
/*
- * base on linux kernel 2.6.18
+ * based on Linux kernel 2.6.18
*/
#define VSCSIIF_MAX_COMMAND_SIZE 16
#define VSCSIIF_SENSE_BUFFERSIZE 96
+struct scsiif_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+typedef struct scsiif_request_segment vscsiif_segment_t;
struct vscsiif_request {
uint16_t rqid; /* private guest value, echoed in resp */
@@ -69,18 +73,26 @@ struct vscsiif_request {
DMA_NONE(3) requests */
uint8_t nr_segments; /* Number of pieces of scatter-gather */
- struct scsiif_request_segment {
- grant_ref_t gref;
- uint16_t offset;
- uint16_t length;
- } seg[VSCSIIF_SG_TABLESIZE];
+ vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
uint32_t reserved[3];
};
typedef struct vscsiif_request vscsiif_request_t;
+#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) \
+ / sizeof(vscsiif_segment_t))
+
+struct vscsiif_sg_list {
+ /* First two fields must match struct vscsiif_request! */
+ uint16_t rqid; /* private guest value, must match main req */
+ uint8_t act; /* VSCSIIF_ACT_SCSI_SG_PRESET */
+ uint8_t nr_segments; /* Number of pieces of scatter-gather */
+ vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
+};
+typedef struct vscsiif_sg_list vscsiif_sg_list_t;
+
struct vscsiif_response {
uint16_t rqid;
- uint8_t padding;
+ uint8_t act; /* valid only when backend supports SG_PRESET */
uint8_t sense_len;
uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
int32_t rslt;
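
The "first two fields must match" contract above lends itself to a compile-time check; a hedged, standalone sketch (assuming the header is on the include path, with C11 _Static_assert standing in for the kernel's BUILD_BUG_ON):

#include <stddef.h>
#include <xen/interface/io/vscsiif.h>

/* The backend demultiplexes by 'act', so the shared prefix must line up. */
_Static_assert(offsetof(struct vscsiif_sg_list, rqid) ==
               offsetof(struct vscsiif_request, rqid),
               "rqid offsets must match");
_Static_assert(offsetof(struct vscsiif_sg_list, act) ==
               offsetof(struct vscsiif_request, act),
               "act offsets must match");

/* VSCSIIF_SG_LIST_SIZE is chosen so an SG_PRESET fits in one ring slot. */
_Static_assert(sizeof(struct vscsiif_sg_list) <=
               sizeof(struct vscsiif_request),
               "vscsiif_sg_list must not exceed one ring slot");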
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 5b6d032900a5..1885f8da2c92 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -208,6 +208,15 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping);
typedef struct xen_machphys_mapping xen_machphys_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
+/* Source mapping space. */
+/* ` enum phys_map_space { */
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+#define XENMAPSPACE_gmfn 2 /* GMFN */
+#define XENMAPSPACE_gmfn_range 3 /* GMFN range */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom */
+/* ` } */
+
/*
* Sets the GPFN at which a particular page appears in the specified guest's
* pseudophysical address space.
@@ -221,25 +230,40 @@ struct xen_add_to_physmap {
/* Number of pages to go through for gmfn_range */
uint16_t size;
- /* Source mapping space. */
-#define XENMAPSPACE_shared_info 0 /* shared info page */
-#define XENMAPSPACE_grant_table 1 /* grant table page */
-#define XENMAPSPACE_gmfn 2 /* GMFN */
-#define XENMAPSPACE_gmfn_range 3 /* GMFN range */
- unsigned int space;
+ unsigned int space; /* => enum phys_map_space */
#define XENMAPIDX_grant_table_status 0x80000000
- /* Index into source mapping space. */
+ /* Index into space being mapped. */
xen_ulong_t idx;
- /* GPFN where the source mapping page should appear. */
+ /* GPFN in domid where the source mapping page should appear. */
xen_pfn_t gpfn;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
+/* A batched version of add_to_physmap. */
+#define XENMEM_add_to_physmap_range 23
+struct xen_add_to_physmap_range {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+ uint16_t space; /* => enum phys_map_space */
+
+ /* Number of pages to go through */
+ uint16_t size;
+ domid_t foreign_domid; /* IFF gmfn_foreign */
+
+ /* Indexes into space being mapped. */
+ XEN_GUEST_HANDLE(xen_ulong_t) idxs;
+
+ /* GPFN in domid where the source mapping page should appear. */
+ XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
+};
+typedef struct xen_add_to_physmap_range xen_add_to_physmap_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
@@ -409,6 +433,12 @@ struct xen_mem_sharing_op {
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
+/*
+ * Reserve ops for future/out-of-tree "claim" patches (Oracle)
+ */
+#define XENMEM_claim_pages 24
+#define XENMEM_get_unclaimed_pages 25
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#ifndef CONFIG_XEN
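
An illustrative setup of the batched call added above, mapping foreign GMFNs into the caller's own physmap; the source domid is hypothetical, and set_xen_guest_handle_raw() comes from the arch header:

#include <stdint.h>
#include <xen/interface/memory.h>   /* struct and XENMAPSPACE_* from above */

static void setup_foreign_batch(struct xen_add_to_physmap_range *r,
                                xen_ulong_t *idxs, xen_pfn_t *gpfns,
                                uint16_t count)
{
        r->domid = DOMID_SELF;                 /* modify our own physmap */
        r->space = XENMAPSPACE_gmfn_foreign;   /* source GMFNs live elsewhere */
        r->size = count;                       /* pages in this batch */
        r->foreign_domid = 5;                  /* hypothetical source domain */
        set_xen_guest_handle_raw(r->idxs, idxs);   /* GMFNs in the source */
        set_xen_guest_handle_raw(r->gpfns, gpfns); /* GPFNs where they appear */
        /* Then: HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, r). */
}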
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 3531702434b5..9bf5509d76d9 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -30,10 +30,20 @@
#include "event_channel.h"
/*
+ * `incontents 150 sched Guest Scheduler Operations
+ *
+ * The SCHEDOP interface provides mechanisms for a guest to interact
+ * with the scheduler, including yield, blocking and shutting itself
+ * down.
+ */
+
+/*
* The prototype for this hypercall is:
- * long sched_op(int cmd, void *arg)
+ * ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
+ *
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == Operation-specific extra argument(s), as described below.
+ * ... == Additional Operation-specific extra arguments, described below.
*
* Versions of Xen prior to 3.0.2 provide only the following legacy version
* of this hypercall, supporting only the commands yield, block and shutdown:
@@ -41,9 +51,12 @@
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
* == SHUTDOWN_* code (SCHEDOP_shutdown)
- * This legacy version is available to new guests as sched_op_compat().
+ *
+ * This legacy version is available to new guests as:
+ * ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
*/
+/* ` enum sched_op { // SCHEDOP_* => struct sched_* */
/*
* Voluntarily yield the CPU.
* @arg == NULL.
@@ -61,61 +74,74 @@
/*
* Halt execution of this domain (all VCPUs) and notify the system controller.
- * @arg == pointer to sched_shutdown structure.
+ * @arg == pointer to sched_shutdown_t structure.
+ *
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then this
+ * hypercall takes an additional argument, which should be the
+ * MFN of the guest's start_info_t.
+ *
+ * In addition, when the reason is SHUTDOWN_suspend, this hypercall
+ * returns 1 if the suspend was cancelled or the domain was merely
+ * checkpointed, and 0 if it is resuming in a new domain.
*/
#define SCHEDOP_shutdown 2
-struct sched_shutdown {
- unsigned int reason; /* SHUTDOWN_* */
-};
-DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
-typedef struct sched_shutdown sched_shutdown_t;
-DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
/*
* Poll a set of event-channel ports. Return when one or more are pending. An
* optional timeout may be specified.
- * @arg == pointer to sched_poll structure.
+ * @arg == pointer to sched_poll_t structure.
*/
#define SCHEDOP_poll 3
-struct sched_poll {
- XEN_GUEST_HANDLE(evtchn_port_t) ports;
- unsigned int nr_ports;
- uint64_t timeout;
-};
-DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
-typedef struct sched_poll sched_poll_t;
-DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
/*
* Declare a shutdown for another domain. The main use of this function is
* in interpreting shutdown requests and reasons for fully-virtualized
* domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
- * @arg == pointer to sched_remote_shutdown structure.
+ * @arg == pointer to sched_remote_shutdown_t structure.
*/
#define SCHEDOP_remote_shutdown 4
-struct sched_remote_shutdown {
- domid_t domain_id; /* Remote domain ID */
- unsigned int reason; /* SHUTDOWN_xxx reason */
-};
-typedef struct sched_remote_shutdown sched_remote_shutdown_t;
-DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
/*
* Latch a shutdown code, so that when the domain later shuts down it
* reports this code to the control tools.
- * @arg == as for SCHEDOP_shutdown.
+ * @arg == sched_shutdown_t, as for SCHEDOP_shutdown.
*/
#define SCHEDOP_shutdown_code 5
/*
* Setup, poke and destroy a domain watchdog timer.
- * @arg == pointer to sched_watchdog structure.
+ * @arg == pointer to sched_watchdog_t structure.
* With id == 0, setup a domain watchdog timer to cause domain shutdown
* after timeout, returns watchdog id.
* With id != 0 and timeout == 0, destroy domain watchdog timer.
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
+/* ` } */
+
+struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
+typedef struct sched_shutdown sched_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
+
+struct sched_poll {
+ XEN_GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
+typedef struct sched_poll sched_poll_t;
+DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
+
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+};
+typedef struct sched_remote_shutdown sched_remote_shutdown_t;
+DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
+
struct sched_watchdog {
uint32_t id; /* watchdog ID */
uint32_t timeout; /* timeout */
@@ -128,10 +154,12 @@ DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
*/
+/* ` enum sched_shutdown_reason { */
#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
+/* ` } */
#endif /* __XEN_PUBLIC_SCHED_H__ */
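
For illustration, a kernel-style sketch of SCHEDOP_poll on a single event channel, using the structures above; the HYPERVISOR_sched_op wrapper is arch-specific and assumed here:

static int poll_one_port(evtchn_port_t port, uint64_t timeout)
{
        struct sched_poll poll;

        set_xen_guest_handle_raw(poll.ports, &port); /* one-element array */
        poll.nr_ports = 1;
        poll.timeout = timeout; /* absolute system time in ns; 0 = no timeout */

        return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}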
diff --git a/include/xen/interface/xen-compat.h b/include/xen/interface/xen-compat.h
index d8c55bfc8f63..69141c403fc5 100644
--- a/include/xen/interface/xen-compat.h
+++ b/include/xen/interface/xen-compat.h
@@ -27,7 +27,7 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040200
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040300
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 90a670abebad..fed906d4421f 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -47,12 +47,15 @@ DEFINE_XEN_GUEST_HANDLE(char);
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
DEFINE_XEN_GUEST_HANDLE(int);
__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
DEFINE_XEN_GUEST_HANDLE(long);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+#endif
DEFINE_XEN_GUEST_HANDLE(void);
DEFINE_XEN_GUEST_HANDLE(uint64_t);
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#endif
/*
@@ -322,7 +325,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
/*
* MMU EXTENDED OPERATIONS
*
- * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * ` unsigned int count,
+ * ` unsigned int *pdone,
+ * ` unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
* A foreigndom (FD) can be specified (or DOMID_SELF for none).
* Where the FD has some effect, it is described below.
*
@@ -377,6 +386,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
* cmd: MMUEXT_[UN]MARK_SUPER
* mfn: Machine frame number of head of superpage to be [un]marked.
*/
+/* ` enum mmuext_cmd { */
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
#define MMUEXT_PIN_L3_TABLE 2
@@ -397,10 +407,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#define MMUEXT_MARK_SUPER 19
#define MMUEXT_UNMARK_SUPER 20
+/* ` } */
#ifndef __ASSEMBLY__
struct mmuext_op {
- unsigned int cmd;
+ unsigned int cmd; /* => enum mmuext_cmd */
union {
/* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
* CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
@@ -426,9 +437,24 @@ typedef struct mmuext_op mmuext_op_t;
DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
#endif
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val,
+ * ` enum uvm_flags flags)
+ * `
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val,
+ * ` enum uvm_flags flags,
+ * ` domid_t domid)
+ * `
+ * ` @va: The virtual address whose mapping we want to change
+ * ` @val: The new page table entry, must contain a machine address
+ * ` @flags: Control TLB flushes
+ */
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
+/* ` enum uvm_flags { */
#define UVMF_NONE (0UL<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
@@ -436,6 +462,7 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
+/* ` } */
/*
* Commands to HYPERVISOR_console_io().
@@ -519,7 +546,10 @@ typedef struct mmu_update mmu_update_t;
DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
/*
- * Send an array of these to HYPERVISOR_multicall().
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_multicall(multicall_entry_t call_list[],
+ * ` unsigned int nr_calls);
+ *
* NB. The fields are natural register size for this architecture.
*/
struct multicall_entry {
@@ -673,7 +703,8 @@ typedef struct shared_info shared_info_t;
#endif
/*
- * Start-of-day memory layout:
+ * `incontents 200 startofday Start-of-day memory layout
+ *
1. The domain is started within a contiguous virtual-memory region.
2. The contiguous region ends on an aligned 4MB boundary.
3. This is the order of bootstrap elements in the initial virtual region:
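
Finally, a hedged sketch of the UVMF flags documented in this file's diff, following the HYPERVISOR_update_va_mapping prototype given above (64-bit PV assumed; on 32-bit the 64-bit PTE value is split across two argument slots):

static void remap_one_page(unsigned long va, uint64_t new_pte)
{
        /* Install the new PTE and flush only the affected entry on the
         * local CPU: UVMF_INVLPG selects a single-entry flush and
         * UVMF_LOCAL restricts it to the calling CPU's TLB. */
        HYPERVISOR_update_va_mapping(va, new_pte, UVMF_INVLPG | UVMF_LOCAL);
}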