author    Jan Beulich <jbeulich@novell.com>  2010-05-12 10:43:46 +0200
committer Jan Beulich <jbeulich@novell.com>  2010-05-12 10:43:46 +0200
commit    67752184536630965804102a039c7dfea49b98c2 (patch)
tree      347e1d56b4d8d87f1b3705a23a114d17648154da
parent    f99df4ec1dc218c455b04711e0a531a398d47fa5 (diff)
- Update Xen patches to 2.6.34-rc7 and c/s 1017.
-rw-r--r--  arch/x86/Kconfig                             |   2
-rw-r--r--  arch/x86/include/mach-xen/asm/io.h           |   1
-rw-r--r--  arch/x86/kernel/apic/io_apic-xen.c           |   3
-rw-r--r--  arch/x86/kernel/process_64-xen.c             |   4
-rw-r--r--  arch/x86/lib/cache-smp-xen.c                 |   2
-rw-r--r--  arch/x86/mm/ioremap-xen.c                    |  16
-rw-r--r--  arch/x86/mm/pgtable_32-xen.c                 |   1
-rw-r--r--  drivers/acpi/processor_perflib.c             |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c      |   9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c       |  12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                 |   8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c            |   4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c              |   3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                 |  18
-rw-r--r--  drivers/xen/core/domctl.c                    |  33
-rw-r--r--  drivers/xen/netback/common.h                 |  12
-rw-r--r--  drivers/xen/netback/interface.c              |  19
-rw-r--r--  drivers/xen/netback/netback.c                |  76
-rw-r--r--  include/xen/interface/arch-x86/xen-mca.h     |   3
-rw-r--r--  include/xen/interface/domctl.h               |  21
-rw-r--r--  include/xen/interface/hvm/hvm_info_table.h   |   2
-rw-r--r--  include/xen/interface/sysctl.h               | 192
-rw-r--r--  include/xen/interface/trace.h                |  19
-rw-r--r--  include/xen/interface/xen.h                  |   5
24 files changed, 348 insertions(+), 121 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 308142237478..5104c1ec0ef1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2218,7 +2218,7 @@ endif # X86_32
config K8_NB
def_bool y
- depends on CPU_SUP_AMD && PCI
+ depends on CPU_SUP_AMD && PCI && !XEN_UNPRIVILEGED_GUEST
source "drivers/pcmcia/Kconfig"
diff --git a/arch/x86/include/mach-xen/asm/io.h b/arch/x86/include/mach-xen/asm/io.h
index 74c53152f274..4cadc775bbe9 100644
--- a/arch/x86/include/mach-xen/asm/io.h
+++ b/arch/x86/include/mach-xen/asm/io.h
@@ -352,6 +352,7 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
extern void __iomem *early_memremap(resource_size_t phys_addr,
unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void fixup_early_ioremap(void);
#define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/kernel/apic/io_apic-xen.c b/arch/x86/kernel/apic/io_apic-xen.c
index a8384439e537..45fe28de9c14 100644
--- a/arch/x86/kernel/apic/io_apic-xen.c
+++ b/arch/x86/kernel/apic/io_apic-xen.c
@@ -2644,6 +2644,9 @@ void irq_force_complete_move(int irq)
struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg = desc->chip_data;
+ if (!cfg)
+ return;
+
__irq_complete_move(&desc, cfg->vector);
}
#else
diff --git a/arch/x86/kernel/process_64-xen.c b/arch/x86/kernel/process_64-xen.c
index 87a086ce73d4..b9020ec802e2 100644
--- a/arch/x86/kernel/process_64-xen.c
+++ b/arch/x86/kernel/process_64-xen.c
@@ -289,12 +289,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
set_tsk_thread_flag(p, TIF_FORK);
- p->thread.fs = me->thread.fs;
- p->thread.gs = me->thread.gs;
p->thread.io_bitmap_ptr = NULL;
savesegment(gs, p->thread.gsindex);
+ p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
savesegment(fs, p->thread.fsindex);
+ p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
diff --git a/arch/x86/lib/cache-smp-xen.c b/arch/x86/lib/cache-smp-xen.c
index 78094713c8db..48bfd376895d 100644
--- a/arch/x86/lib/cache-smp-xen.c
+++ b/arch/x86/lib/cache-smp-xen.c
@@ -17,12 +17,10 @@ EXPORT_SYMBOL(wbinvd_on_cpu);
int wbinvd_on_all_cpus(void)
{
-#ifdef MMUEXT_FLUSH_CACHE_GLOBAL /* XXX remove */
struct mmuext_op op = { .cmd = MMUEXT_FLUSH_CACHE_GLOBAL };
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) == 0)
return 0;
-#endif
/* Best effort as fallback. */
return on_each_cpu(__wbinvd, NULL, 1);
}
diff --git a/arch/x86/mm/ioremap-xen.c b/arch/x86/mm/ioremap-xen.c
index 2f2e6dc591e4..c53d1ac72df9 100644
--- a/arch/x86/mm/ioremap-xen.c
+++ b/arch/x86/mm/ioremap-xen.c
@@ -635,6 +635,22 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+#ifndef CONFIG_XEN
+void __init fixup_early_ioremap(void)
+{
+ int i;
+
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+ if (prev_map[i]) {
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ early_ioremap_init();
+}
+#endif
+
static int __init check_early_ioremap_leak(void)
{
int count = 0;
diff --git a/arch/x86/mm/pgtable_32-xen.c b/arch/x86/mm/pgtable_32-xen.c
index 6268f5aac17d..ffa7cb8fe6f1 100644
--- a/arch/x86/mm/pgtable_32-xen.c
+++ b/arch/x86/mm/pgtable_32-xen.c
@@ -141,6 +141,7 @@ static int __init parse_reservetop(char *arg)
address = memparse(arg, &arg);
reserve_top_address(address);
+ fixup_early_ioremap();
return 0;
}
early_param("reservetop", parse_reservetop);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ac2b43e6661c..ff135c94f352 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -584,6 +584,8 @@ end:
return result;
}
+#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
+
int acpi_processor_preregister_performance(
struct acpi_processor_performance __percpu *performance)
{
@@ -799,3 +801,5 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
+
+#endif /* !CONFIG_PROCESSOR_EXTERNAL_CONTROL */
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 1d6ee8b55154..f1fecc3e018f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -267,6 +267,15 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_page =
alloc_page(GFP_KERNEL|__GFP_DMA32);
+#ifdef CONFIG_XEN
+ if (!dev_priv->gart_info.sg_dummy_page)
+ ret = -ENOMEM;
+ else
+ ret = xen_limit_pages_to_max_mfn(
+ dev_priv->gart_info.sg_dummy_page, 0, 32);
+ if (ret)
+ NV_WARN(dev, "Error restricting SG dummy page: %d\n", ret);
+#endif
set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
dev_priv->gart_info.sg_dummy_bus =
pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e305560..31eb2697e39f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -345,6 +345,18 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
+#ifdef CONFIG_XEN
+ {
+ int ret = xen_limit_pages_to_max_mfn(rdev->dummy_page.page,
+ 0, 32);
+
+ if (!ret)
+ clear_page(page_address(rdev->dummy_page.page));
+ else
+ dev_warn(rdev->dev,
+ "Error restricting dummy page: %d\n", ret);
+ }
+#endif
rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (!rdev->dummy_page.addr) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e3754a3a303..767a581448f6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1404,6 +1404,14 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)
ret = -ENOMEM;
goto out_no_drp;
}
+#ifdef CONFIG_XEN
+ ret = xen_limit_pages_to_max_mfn(glob->dummy_read_page, 0, 32);
+ if (!ret)
+ clear_page(page_address(glob->dummy_read_page));
+ else
+ printk(KERN_WARNING
+ "Error restricting dummy read page: %d\n", ret);
+#endif
INIT_LIST_HEAD(&glob->swap_lru);
INIT_LIST_HEAD(&glob->device_list);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b..bce5f3a2b6d3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -519,6 +519,10 @@ int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
PAGE_SHIFT));
*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
+#if defined(CONFIG_XEN) && defined(_PAGE_IOMAP)
+ if (bus_size != 0)
+ pgprot_val(*prot) |= _PAGE_IOMAP;
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..a32e4dbc32a3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -158,6 +158,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (is_iomem) {
vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
vma->vm_page_prot);
+#if defined(CONFIG_XEN) && defined(_PAGE_IOMAP)
+ pgprot_val(vma->vm_page_prot) |= _PAGE_IOMAP;
+#endif
} else {
ttm = bo->ttm;
vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb3..afabe051d13f 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -68,7 +68,25 @@ static struct page *ttm_tt_alloc_page(unsigned page_flags)
else
gfp_flags |= __GFP_HIGHMEM;
+#ifndef CONFIG_XEN
return alloc_page(gfp_flags);
+#else
+ {
+ struct page *page = alloc_page(gfp_flags);
+
+ if (page && (page_flags & TTM_PAGE_FLAG_DMA32)) {
+ int ret = xen_limit_pages_to_max_mfn(page, 0, 32);
+
+ if (ret)
+ printk(KERN_WARNING TTM_PFX
+ "Error restricting pfn %lx: %d\n",
+ page_to_pfn(page), ret);
+ else if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ clear_page(page_address(page));
+ }
+ return page;
+ }
+#endif
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
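A note on the recurring pattern in the nouveau, radeon, ttm_bo and ttm_tt hunks above: under Xen, a page that satisfies GFP_DMA32 in pseudo-physical space may still be backed by a machine frame above 4 GiB, so the patch calls xen_limit_pages_to_max_mfn() to exchange the backing frames for ones below 2^32, and re-clears the page on success because the exchange discards its contents. Below is a minimal standalone sketch of that control flow; the helper stub, alloc_dma32_page() and clear_page() are illustrative stand-ins, not the kernel implementations.

#include <stdio.h>

struct page { unsigned long pfn; };

/* Stub of the SUSE Xen helper (an assumption for this sketch): exchange
 * the machine frames backing 2^order pages at pg so that they all lie
 * below 2^address_bits. */
static int xen_limit_pages_to_max_mfn(struct page *pg, unsigned int order,
                                      unsigned int address_bits)
{
	(void)pg; (void)order; (void)address_bits;
	return 0;	/* pretend the MFN exchange succeeded */
}

static void clear_page(struct page *pg) { (void)pg; }	/* stand-in */

static struct page *alloc_dma32_page(void)
{
	static struct page pg = { .pfn = 0x1234 };	/* stand-in for alloc_page() */
	int ret = xen_limit_pages_to_max_mfn(&pg, 0, 32);

	if (ret)	/* best effort: keep the page, but warn */
		fprintf(stderr, "Error restricting pfn %lx: %d\n", pg.pfn, ret);
	else
		clear_page(&pg);	/* the exchange destroyed the contents */
	return &pg;
}

int main(void)
{
	return alloc_dma32_page() == 0;
}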
diff --git a/drivers/xen/core/domctl.c b/drivers/xen/core/domctl.c
index 5c330e424b94..896e26611d53 100644
--- a/drivers/xen/core/domctl.c
+++ b/drivers/xen/core/domctl.c
@@ -60,7 +60,7 @@ struct xen_domctl_vcpuaffinity_v5 {
};
union xen_domctl {
- /* v4: sles10 sp1: xen 3.0.4 + 32-on-64 patches */
+ /* v4: sle10 sp1: xen 3.0.4 + 32-on-64 patches */
struct {
uint32_t cmd;
uint32_t interface_version;
@@ -74,7 +74,11 @@ union xen_domctl {
};
} v4;
- /* v5: upstream: xen 3.1, v6: upstream: xen 4.0 */
+ /*
+ * v5: upstream: xen 3.1
+ * v6: upstream: xen 4.0
+ * v7: sle11 sp1: xen 4.0 + cpupools patches
+ */
struct {
uint32_t cmd;
uint32_t interface_version;
@@ -85,7 +89,7 @@ union xen_domctl {
uint64_aligned_t dummy_align;
uint8_t dummy_pad[128];
};
- } v5, v6;
+ } v5, v6, v7;
};
/* The actual code comes here */
@@ -113,8 +117,11 @@ int xen_guest_address_size(int domid)
} \
} while (0)
- BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 6);
+ BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 7);
+ guest_address_size(7);
+#if CONFIG_XEN_COMPAT < 0x040100
guest_address_size(6);
+#endif
#if CONFIG_XEN_COMPAT < 0x040000
guest_address_size(5);
#endif
@@ -123,7 +130,7 @@ int xen_guest_address_size(int domid)
#endif
ret = BITS_PER_LONG;
- printk("v%d...6 domctls failed, assuming dom%d is native: %d\n",
+ printk("v%d...7 domctls failed, assuming dom%d is native: %d\n",
low, domid, ret);
return ret;
@@ -163,8 +170,12 @@ static inline int get_vcpuaffinity(unsigned int nr, void *mask)
union xen_domctl domctl;
int rc;
- BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 6);
- rc = vcpuaffinity(get, 6);
+ BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 7);
+ rc = vcpuaffinity(get, 7);
+#if CONFIG_XEN_COMPAT < 0x040100
+ if (rc)
+ rc = vcpuaffinity(get, 6);
+#endif
#if CONFIG_XEN_COMPAT < 0x040000
if (rc)
rc = vcpuaffinity(get, 5);
@@ -181,8 +192,12 @@ static inline int set_vcpuaffinity(unsigned int nr, void *mask)
union xen_domctl domctl;
int rc;
- BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 6);
- rc = vcpuaffinity(set, 6);
+ BUILD_BUG_ON(XEN_DOMCTL_INTERFACE_VERSION > 7);
+ rc = vcpuaffinity(set, 7);
+#if CONFIG_XEN_COMPAT < 0x040100
+ if (rc)
+ rc = vcpuaffinity(set, 6);
+#endif
#if CONFIG_XEN_COMPAT < 0x040000
if (rc)
rc = vcpuaffinity(set, 5);
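The domctl changes above extend a runtime version probe: the kernel cannot know at build time which XEN_DOMCTL_INTERFACE_VERSION the hypervisor speaks, so it issues the hypercall with the newest known version first (v7, Xen 4.0 plus the cpupools patches) and falls back to v6, v5 and v4 in turn, with CONFIG_XEN_COMPAT compiling out fallbacks older than the configured floor. A standalone sketch of that dispatch; do_domctl() and HV_VERSION are stand-ins invented for the example.

#include <stdio.h>

#define HV_VERSION 6	/* interface version this fake hypervisor accepts */

/* Stub hypercall: fails unless the interface version matches. */
static int do_domctl(unsigned int version)
{
	return version == HV_VERSION ? 0 : -13;	/* -EACCES */
}

/* Mirror of the patch's pattern: newest version first, oldest last. */
static int vcpuaffinity_any_version(void)
{
	static const unsigned int versions[] = { 7, 6, 5, 4 };
	unsigned int i;
	int rc = -1;

	for (i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		rc = do_domctl(versions[i]);
		if (rc == 0) {
			printf("hypervisor speaks domctl v%u\n", versions[i]);
			break;
		}
	}
	return rc;
}

int main(void)
{
	return vcpuaffinity_any_version() ? 1 : 0;
}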
diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
index b249892b7429..1300027e43b6 100644
--- a/drivers/xen/netback/common.h
+++ b/drivers/xen/netback/common.h
@@ -100,7 +100,6 @@ typedef struct netif_st {
/* Miscellaneous private stuff. */
struct list_head list; /* scheduling list */
- struct list_head group_list;
atomic_t refcnt;
struct net_device *dev;
struct net_device_stats stats;
@@ -225,11 +224,6 @@ struct pending_tx_info {
};
typedef unsigned int pending_ring_idx_t;
-struct page_ext {
- unsigned int group;
- unsigned int idx;
-};
-
struct netbk_rx_meta {
skb_frag_t frag;
int id;
@@ -269,19 +263,15 @@ struct xen_netbk {
struct list_head pending_inuse_head;
struct list_head net_schedule_list;
- struct list_head group_domain_list;
spinlock_t net_schedule_list_lock;
spinlock_t release_lock;
- spinlock_t group_domain_list_lock;
struct page **mmap_pages;
- unsigned int group_domain_nr;
+ atomic_t nr_groups;
unsigned int alloc_index;
- struct page_ext page_extinfo[MAX_PENDING_REQS];
-
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
index 0e5b49522e1e..467672ec1684 100644
--- a/drivers/xen/netback/interface.c
+++ b/drivers/xen/netback/interface.c
@@ -55,22 +55,20 @@ module_param_named(queue_length, netbk_queue_length, ulong, 0644);
static void __netif_up(netif_t *netif)
{
unsigned int group = 0;
- unsigned int min_domains = xen_netbk[0].group_domain_nr;
+ unsigned int min_groups = atomic_read(&xen_netbk[0].nr_groups);
unsigned int i;
/* Find the list which contains least number of domains. */
for (i = 1; i < netbk_nr_groups; i++) {
- if (xen_netbk[i].group_domain_nr < min_domains) {
+ unsigned int nr_groups = atomic_read(&xen_netbk[i].nr_groups);
+
+ if (nr_groups < min_groups) {
group = i;
- min_domains = xen_netbk[i].group_domain_nr;
+ min_groups = nr_groups;
}
}
- spin_lock(&xen_netbk[group].group_domain_list_lock);
- list_add_tail(&netif->group_list,
- &xen_netbk[group].group_domain_list);
- xen_netbk[group].group_domain_nr++;
- spin_unlock(&xen_netbk[group].group_domain_list_lock);
+ atomic_inc(&xen_netbk[group].nr_groups);
netif->group = group;
enable_irq(netif->irq);
@@ -85,10 +83,7 @@ static void __netif_down(netif_t *netif)
netif_deschedule_work(netif);
netif->group = UINT_MAX;
- spin_lock(&netbk->group_domain_list_lock);
- netbk->group_domain_nr--;
- list_del(&netif->group_list);
- spin_unlock(&netbk->group_domain_list_lock);
+ atomic_dec(&netbk->nr_groups);
}
static int net_open(struct net_device *dev)
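The interface.c hunks replace a spinlock-protected membership list with a plain counter: __netif_up() only ever needed the per-group interface count in order to pick the least-loaded group, so an atomic_t covers it and the group_domain_list plumbing can go. The unlocked reads of the other groups' counters are racy, but only a load-balancing heuristic depends on them. A standalone sketch of that selection using C11 atomics; NR_GROUPS and the function names here are illustrative.

#include <stdatomic.h>
#include <stdio.h>

#define NR_GROUPS 4

static atomic_uint nr_ifaces[NR_GROUPS];   /* one counter per backend group */

/* Pick the group currently serving the fewest interfaces and join it. */
static unsigned int join_least_loaded_group(void)
{
	unsigned int group = 0, i;
	unsigned int min = atomic_load(&nr_ifaces[0]);

	for (i = 1; i < NR_GROUPS; i++) {
		unsigned int n = atomic_load(&nr_ifaces[i]);	/* racy, benign */

		if (n < min) {
			group = i;
			min = n;
		}
	}
	atomic_fetch_add(&nr_ifaces[group], 1);	/* as in __netif_up() */
	return group;
}

static void leave_group(unsigned int group)
{
	atomic_fetch_sub(&nr_ifaces[group], 1);	/* as in __netif_down() */
}

int main(void)
{
	unsigned int g = join_least_loaded_group();

	printf("joined group %u\n", g);
	leave_group(g);
	return 0;
}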
diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
index 20f606f32546..1fb78838b4ab 100644
--- a/drivers/xen/netback/netback.c
+++ b/drivers/xen/netback/netback.c
@@ -42,9 +42,9 @@
/*define NETBE_DEBUG_INTERRUPT*/
-struct xen_netbk *xen_netbk;
-unsigned int netbk_nr_groups;
-static bool use_kthreads = true;
+struct xen_netbk *__read_mostly xen_netbk;
+unsigned int __read_mostly netbk_nr_groups;
+static bool __read_mostly use_kthreads = true;
static bool __initdata bind_threads;
#define GET_GROUP_INDEX(netif) ((netif)->group)
@@ -74,9 +74,42 @@ static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, unsigned int i
}
/* extra field used in struct page */
-static inline void netif_set_page_ext(struct page *pg, struct page_ext *ext)
+union page_ext {
+ struct {
+#if BITS_PER_LONG < 64
+#define GROUP_WIDTH (BITS_PER_LONG - CONFIG_XEN_NETDEV_TX_SHIFT)
+#define MAX_GROUPS ((1U << GROUP_WIDTH) - 1)
+ unsigned int grp:GROUP_WIDTH;
+ unsigned int idx:CONFIG_XEN_NETDEV_TX_SHIFT;
+#else
+#define MAX_GROUPS UINT_MAX
+ unsigned int grp, idx;
+#endif
+ } e;
+ void *mapping;
+};
+
+static inline void netif_set_page_ext(struct page *pg, unsigned int group,
+ unsigned int idx)
{
- pg->mapping = (void *)ext;
+ union page_ext ext = { .e = { .grp = group + 1, .idx = idx } };
+
+ BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
+ pg->mapping = ext.mapping;
+}
+
+static inline unsigned int netif_page_group(const struct page *pg)
+{
+ union page_ext ext = { .mapping = pg->mapping };
+
+ return ext.e.grp - 1;
+}
+
+static inline unsigned int netif_page_index(const struct page *pg)
+{
+ union page_ext ext = { .mapping = pg->mapping };
+
+ return ext.e.idx;
}
#define PKT_PROT_LEN 64
@@ -368,7 +401,7 @@ static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
if (netif->copying_receiver) {
- struct page_ext *ext;
+ unsigned int group, idx;
/* The fragment needs to be copied rather than
flipped. */
@@ -376,18 +409,15 @@ static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
copy_gop = npo->copy + npo->copy_prod++;
copy_gop->flags = GNTCOPY_dest_gref;
if (PageForeign(page) &&
- (ext = (void *)page->mapping) != NULL &&
- ext->idx < MAX_PENDING_REQS &&
- ext->group < netbk_nr_groups) {
+ page->mapping != NULL &&
+ (idx = netif_page_index(page)) < MAX_PENDING_REQS &&
+ (group = netif_page_group(page)) < netbk_nr_groups) {
struct pending_tx_info *src_pend;
- netbk = &xen_netbk[ext->group];
- BUG_ON(ext < netbk->page_extinfo ||
- ext >= netbk->page_extinfo +
- ARRAY_SIZE(netbk->page_extinfo));
- BUG_ON(netbk->mmap_pages[ext->idx] != page);
- src_pend = &netbk->pending_tx_info[ext->idx];
- BUG_ON(ext->group != GET_GROUP_INDEX(src_pend->netif));
+ netbk = &xen_netbk[group];
+ BUG_ON(netbk->mmap_pages[idx] != page);
+ src_pend = &netbk->pending_tx_info[idx];
+ BUG_ON(group != GET_GROUP_INDEX(src_pend->netif));
copy_gop->source.domid = src_pend->netif->domid;
copy_gop->source.u.ref = src_pend->req.gref;
copy_gop->flags |= GNTCOPY_source_gref;
@@ -1537,9 +1567,8 @@ static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx)
static void netif_page_release(struct page *page, unsigned int order)
{
- struct page_ext *ext = (void *)page->mapping;
- unsigned int idx = ext->idx;
- unsigned int group = ext->group;
+ unsigned int idx = netif_page_index(page);
+ unsigned int group = netif_page_group(page);
struct xen_netbk *netbk = &xen_netbk[group];
BUG_ON(order);
@@ -1722,6 +1751,8 @@ static int __init netback_init(void)
if (!netbk_nr_groups)
netbk_nr_groups = (num_online_cpus() + 1) / 2;
+ if (netbk_nr_groups > MAX_GROUPS)
+ netbk_nr_groups = MAX_GROUPS;
/* We can increase reservation by this much in net_rx_action(). */
balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE);
@@ -1781,19 +1812,14 @@ static int __init netback_init(void)
INIT_LIST_HEAD(&netbk->pending_inuse_head);
INIT_LIST_HEAD(&netbk->net_schedule_list);
- INIT_LIST_HEAD(&netbk->group_domain_list);
spin_lock_init(&netbk->net_schedule_list_lock);
spin_lock_init(&netbk->release_lock);
- spin_lock_init(&netbk->group_domain_list_lock);
for (i = 0; i < MAX_PENDING_REQS; i++) {
page = netbk->mmap_pages[i];
SetPageForeign(page, netif_page_release);
- netbk->page_extinfo[i].group = group;
- netbk->page_extinfo[i].idx = i;
- netif_set_page_ext(page,
- &netbk->page_extinfo[i]);
+ netif_set_page_ext(page, group, i);
netbk->pending_ring[i] = i;
INIT_LIST_HEAD(&netbk->pending_inuse[i].list);
}
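The union page_ext introduced above packs a (group, pending-request index) pair directly into the pointer-sized page->mapping field of foreign pages, replacing the out-of-line page_extinfo array. The group is stored biased by one so that an all-zero mapping (i.e. NULL) can never be mistaken for group 0, index 0, which is why the netbk_gop_frag() hunk can test page->mapping != NULL before decoding. A runnable userspace sketch of the encoding, using the 64-bit layout from the hunk (two 32-bit fields fill one pointer; 32-bit builds need the bit-field packing instead):

#include <assert.h>
#include <stdio.h>

union page_ext {
	struct {
		unsigned int grp;	/* group + 1, so 0 means "no owner" */
		unsigned int idx;	/* index into the pending ring */
	} e;
	void *mapping;	/* the value actually stored in page->mapping */
};

static void *netif_set_page_ext(unsigned int group, unsigned int idx)
{
	union page_ext ext = { .e = { .grp = group + 1, .idx = idx } };

	/* mirrors the patch's BUILD_BUG_ON; holds on 64-bit hosts */
	assert(sizeof(ext.e) <= sizeof(ext.mapping));
	return ext.mapping;
}

static unsigned int netif_page_group(void *mapping)
{
	union page_ext ext = { .mapping = mapping };

	return ext.e.grp - 1;	/* undo the +1 bias */
}

static unsigned int netif_page_index(void *mapping)
{
	union page_ext ext = { .mapping = mapping };

	return ext.e.idx;
}

int main(void)
{
	void *m = netif_set_page_ext(3, 42);

	printf("group=%u idx=%u\n", netif_page_group(m), netif_page_index(m));
	return 0;
}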
diff --git a/include/xen/interface/arch-x86/xen-mca.h b/include/xen/interface/arch-x86/xen-mca.h
index 13fc5b1866f6..127971468871 100644
--- a/include/xen/interface/arch-x86/xen-mca.h
+++ b/include/xen/interface/arch-x86/xen-mca.h
@@ -233,10 +233,11 @@ struct mcinfo_recovery
#define MCINFO_HYPERCALLSIZE 1024
#define MCINFO_MAXSIZE 768
+#define MCINFO_FLAGS_UNCOMPLETE 0x1
struct mc_info {
/* Number of mcinfo_* entries in mi_data */
uint32_t mi_nentries;
- uint32_t _pad0;
+ uint32_t flags;
uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8];
};
typedef struct mc_info mc_info_t;
diff --git a/include/xen/interface/domctl.h b/include/xen/interface/domctl.h
index af949f4fc6f6..5bae1cb406db 100644
--- a/include/xen/interface/domctl.h
+++ b/include/xen/interface/domctl.h
@@ -35,7 +35,7 @@
#include "xen.h"
#include "grant_table.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000006
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007
struct xenctl_cpumap {
XEN_GUEST_HANDLE_64(uint8) bitmap;
@@ -60,10 +60,10 @@ struct xen_domctl_createdomain {
/* Should domain memory integrity be verifed by tboot during Sx? */
#define _XEN_DOMCTL_CDF_s3_integrity 2
#define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity)
- uint32_t flags;
/* Disable out-of-sync shadow page tables? */
#define _XEN_DOMCTL_CDF_oos_off 3
#define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off)
+ uint32_t flags;
};
typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
@@ -106,6 +106,7 @@ struct xen_domctl_getdomaininfo {
uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
uint32_t ssidref;
xen_domain_handle_t handle;
+ uint32_t cpupool;
};
typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
@@ -161,6 +162,14 @@ struct xen_domctl_getpageframeinfo2 {
typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
+/* XEN_DOMCTL_getpageframeinfo3 */
+struct xen_domctl_getpageframeinfo3 {
+ /* IN variables. */
+ uint64_aligned_t num;
+ /* IN/OUT variables. */
+ XEN_GUEST_HANDLE_64(xen_pfn_t) array;
+};
+
/*
* Control shadow pagetables operation
@@ -295,6 +304,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
/* Scheduler types. */
#define XEN_SCHEDULER_SEDF 4
#define XEN_SCHEDULER_CREDIT 5
+#define XEN_SCHEDULER_CREDIT2 6
/* Set or get info? */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1
@@ -313,6 +323,9 @@ struct xen_domctl_scheduler_op {
uint16_t weight;
uint16_t cap;
} credit;
+ struct xen_domctl_sched_credit2 {
+ uint16_t weight;
+ } credit2;
} u;
};
typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
@@ -431,6 +444,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
#define XEN_DOMCTL_SENDTRIGGER_RESET 1
#define XEN_DOMCTL_SENDTRIGGER_INIT 2
#define XEN_DOMCTL_SENDTRIGGER_POWER 3
+#define XEN_DOMCTL_SENDTRIGGER_SLEEP 4
struct xen_domctl_sendtrigger {
uint32_t trigger; /* IN */
uint32_t vcpu; /* IN */
@@ -772,7 +786,6 @@ struct xen_domctl_mem_sharing_op {
typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
-
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -832,6 +845,7 @@ struct xen_domctl {
#define XEN_DOMCTL_disable_migrate 58
#define XEN_DOMCTL_gettscinfo 59
#define XEN_DOMCTL_settscinfo 60
+#define XEN_DOMCTL_getpageframeinfo3 61
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -844,6 +858,7 @@ struct xen_domctl {
struct xen_domctl_getmemlist getmemlist;
struct xen_domctl_getpageframeinfo getpageframeinfo;
struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
+ struct xen_domctl_getpageframeinfo3 getpageframeinfo3;
struct xen_domctl_vcpuaffinity vcpuaffinity;
struct xen_domctl_shadow_op shadow_op;
struct xen_domctl_max_mem max_mem;
diff --git a/include/xen/interface/hvm/hvm_info_table.h b/include/xen/interface/hvm/hvm_info_table.h
index adb3fb9041ce..bdb5995e716b 100644
--- a/include/xen/interface/hvm/hvm_info_table.h
+++ b/include/xen/interface/hvm/hvm_info_table.h
@@ -69,7 +69,7 @@ struct hvm_info_table {
uint32_t high_mem_pgend;
/* Bitmap of which CPUs are online at boot time. */
- uint8_t vcpu_online[HVM_MAX_VCPUS/8];
+ uint8_t vcpu_online[(HVM_MAX_VCPUS + 7)/8];
};
#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
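The vcpu_online fix above is the standard round-up division for bitmap sizing: HVM_MAX_VCPUS/8 silently drops the last partial byte whenever the VCPU limit is not a multiple of 8 (10 VCPUs need 2 bytes, but 10/8 yields 1). A small demonstration:

#include <stdio.h>

/* Bytes needed for an n-bit bitmap: round up, don't truncate. */
#define BITMAP_BYTES(n)	(((n) + 7) / 8)

int main(void)
{
	unsigned int n;

	for (n = 8; n <= 12; n++)
		printf("%2u vcpus: %u byte(s) (truncating gives %u)\n",
		       n, BITMAP_BYTES(n), n / 8);
	return 0;
}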
diff --git a/include/xen/interface/sysctl.h b/include/xen/interface/sysctl.h
index f869f9c5afdd..32445f5a52f6 100644
--- a/include/xen/interface/sysctl.h
+++ b/include/xen/interface/sysctl.h
@@ -34,12 +34,12 @@
#include "xen.h"
#include "domctl.h"
-#define XEN_SYSCTL_INTERFACE_VERSION 0x00000007
+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000008
/*
* Read console content from Xen buffer ring.
*/
-#define XEN_SYSCTL_readconsole 1
+/* XEN_SYSCTL_readconsole */
struct xen_sysctl_readconsole {
/* IN: Non-zero -> clear after reading. */
uint8_t clear;
@@ -60,7 +60,7 @@ typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
/* Get trace buffers machine base address */
-#define XEN_SYSCTL_tbuf_op 2
+/* XEN_SYSCTL_tbuf_op */
struct xen_sysctl_tbuf_op {
/* IN variables */
#define XEN_SYSCTL_TBUFOP_get_info 0
@@ -75,7 +75,7 @@ struct xen_sysctl_tbuf_op {
uint32_t evt_mask;
/* OUT variables */
uint64_aligned_t buffer_mfn;
- uint32_t size;
+ uint32_t size; /* Also an IN variable! */
};
typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
@@ -83,7 +83,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
/*
* Get physical information about the host machine
*/
-#define XEN_SYSCTL_physinfo 3
+/* XEN_SYSCTL_physinfo */
/* (x86) The platform supports HVM guests. */
#define _XEN_SYSCTL_PHYSCAP_hvm 0
#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
@@ -93,30 +93,14 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
struct xen_sysctl_physinfo {
uint32_t threads_per_core;
uint32_t cores_per_socket;
- uint32_t nr_cpus;
- uint32_t max_node_id;
+ uint32_t nr_cpus, max_cpu_id;
+ uint32_t nr_nodes, max_node_id;
uint32_t cpu_khz;
uint64_aligned_t total_pages;
uint64_aligned_t free_pages;
uint64_aligned_t scrub_pages;
uint32_t hw_cap[8];
- /*
- * IN: maximum addressable entry in the caller-provided cpu_to_node array.
- * OUT: largest cpu identifier in the system.
- * If OUT is greater than IN then the cpu_to_node array is truncated!
- */
- uint32_t max_cpu_id;
- /*
- * If not NULL, this array is filled with node identifier for each cpu.
- * If a cpu has no node information (e.g., cpu not present) then the
- * sentinel value ~0u is written.
- * The size of this array is specified by the caller in @max_cpu_id.
- * If the actual @max_cpu_id is smaller than the array then the trailing
- * elements of the array will not be written by the sysctl.
- */
- XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
-
/* XEN_SYSCTL_PHYSCAP_??? */
uint32_t capabilities;
};
@@ -126,7 +110,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
/*
* Get the ID of the current scheduler.
*/
-#define XEN_SYSCTL_sched_id 4
+/* XEN_SYSCTL_sched_id */
struct xen_sysctl_sched_id {
/* OUT variable */
uint32_t sched_id;
@@ -135,7 +119,7 @@ typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
/* Interface for controlling Xen software performance counters. */
-#define XEN_SYSCTL_perfc_op 5
+/* XEN_SYSCTL_perfc_op */
/* Sub-operations: */
#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */
#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */
@@ -162,7 +146,7 @@ struct xen_sysctl_perfc_op {
typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
-#define XEN_SYSCTL_getdomaininfolist 6
+/* XEN_SYSCTL_getdomaininfolist */
struct xen_sysctl_getdomaininfolist {
/* IN variables. */
domid_t first_domain;
@@ -175,7 +159,7 @@ typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
/* Inject debug keys into Xen. */
-#define XEN_SYSCTL_debug_keys 7
+/* XEN_SYSCTL_debug_keys */
struct xen_sysctl_debug_keys {
/* IN variables. */
XEN_GUEST_HANDLE_64(char) keys;
@@ -185,7 +169,7 @@ typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
/* Get physical CPU information. */
-#define XEN_SYSCTL_getcpuinfo 8
+/* XEN_SYSCTL_getcpuinfo */
struct xen_sysctl_cpuinfo {
uint64_aligned_t idletime;
};
@@ -201,7 +185,7 @@ struct xen_sysctl_getcpuinfo {
typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
-#define XEN_SYSCTL_availheap 9
+/* XEN_SYSCTL_availheap */
struct xen_sysctl_availheap {
/* IN variables. */
uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
@@ -213,7 +197,7 @@ struct xen_sysctl_availheap {
typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
-#define XEN_SYSCTL_get_pmstat 10
+/* XEN_SYSCTL_get_pmstat */
struct pm_px_val {
uint64_aligned_t freq; /* Px core frequency */
uint64_aligned_t residency; /* Px residency time */
@@ -270,7 +254,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
#define XEN_CPU_HOTPLUG_STATUS_ONLINE 2
#define XEN_CPU_HOTPLUG_STATUS_NEW 3
-#define XEN_SYSCTL_cpu_hotplug 11
+/* XEN_SYSCTL_cpu_hotplug */
struct xen_sysctl_cpu_hotplug {
/* IN variables */
uint32_t cpu; /* Physical cpu. */
@@ -286,7 +270,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
* Get/set xen power management, include
* 1. cpufreq governors and related parameters
*/
-#define XEN_SYSCTL_pm_op 12
+/* XEN_SYSCTL_pm_op */
struct xen_userspace {
uint32_t scaling_setspeed;
};
@@ -333,6 +317,8 @@ struct xen_get_cpufreq_para {
struct xen_userspace userspace;
struct xen_ondemand ondemand;
} u;
+
+ int32_t turbo_enabled;
};
struct xen_set_cpufreq_gov {
@@ -350,22 +336,6 @@ struct xen_set_cpufreq_para {
uint32_t ctrl_value;
};
-/* Get physical CPU topology information. */
-#define INVALID_TOPOLOGY_ID (~0U)
-struct xen_get_cputopo {
- /* IN: maximum addressable entry in
- * the caller-provided cpu_to_core/socket.
- */
- uint32_t max_cpus;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
-
- /* OUT: number of cpus returned
- * If OUT is greater than IN then the cpu_to_core/socket is truncated!
- */
- uint32_t nr_cpus;
-};
-
struct xen_sysctl_pm_op {
#define PM_PARA_CATEGORY_MASK 0xf0
#define CPUFREQ_PARA 0x10
@@ -376,9 +346,6 @@ struct xen_sysctl_pm_op {
#define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03)
#define GET_CPUFREQ_AVGFREQ (CPUFREQ_PARA | 0x04)
- /* get CPU topology */
- #define XEN_SYSCTL_pm_op_get_cputopo 0x20
-
/* set/reset scheduler power saving option */
#define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21
@@ -390,6 +357,10 @@ struct xen_sysctl_pm_op {
#define XEN_SYSCTL_pm_op_set_vcpu_migration_delay 0x24
#define XEN_SYSCTL_pm_op_get_vcpu_migration_delay 0x25
+ /* enable/disable turbo mode when in dbs governor */
+ #define XEN_SYSCTL_pm_op_enable_turbo 0x26
+ #define XEN_SYSCTL_pm_op_disable_turbo 0x27
+
uint32_t cmd;
uint32_t cpuid;
union {
@@ -397,7 +368,6 @@ struct xen_sysctl_pm_op {
struct xen_set_cpufreq_gov set_gov;
struct xen_set_cpufreq_para set_para;
uint64_aligned_t get_avgfreq;
- struct xen_get_cputopo get_topo;
uint32_t set_sched_opt_smt;
uint32_t get_max_cstate;
uint32_t set_max_cstate;
@@ -406,7 +376,7 @@ struct xen_sysctl_pm_op {
} u;
};
-#define XEN_SYSCTL_page_offline_op 14
+/* XEN_SYSCTL_page_offline_op */
struct xen_sysctl_page_offline_op {
/* IN: range of page to be offlined */
#define sysctl_page_offline 1
@@ -454,7 +424,7 @@ struct xen_sysctl_page_offline_op {
#define PG_OFFLINE_OWNER_SHIFT 16
-#define XEN_SYSCTL_lockprof_op 15
+/* XEN_SYSCTL_lockprof_op */
/* Sub-operations: */
#define XEN_SYSCTL_LOCKPROF_reset 1 /* Reset all profile data to zero. */
#define XEN_SYSCTL_LOCKPROF_query 2 /* Get lock profile information. */
@@ -486,13 +456,125 @@ struct xen_sysctl_lockprof_op {
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
+/* XEN_SYSCTL_topologyinfo */
+#define INVALID_TOPOLOGY_ID (~0U)
+struct xen_sysctl_topologyinfo {
+ /*
+ * IN: maximum addressable entry in the caller-provided arrays.
+ * OUT: largest cpu identifier in the system.
+ * If OUT is greater than IN then the arrays are truncated!
+ */
+ uint32_t max_cpu_index;
+
+ /*
+ * If not NULL, this array is filled with core/socket/node identifier for
+ * each cpu.
+ * If a cpu has no core/socket/node information (e.g., cpu not present)
+ * then the sentinel value ~0u is written.
+ * The size of this array is specified by the caller in @max_cpu_index.
+ * If the actual @max_cpu_index is smaller than the array then the trailing
+ * elements of the array will not be written by the sysctl.
+ */
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+};
+typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
+
+/* XEN_SYSCTL_numainfo */
+struct xen_sysctl_numainfo {
+ /*
+ * IN: maximum addressable entry in the caller-provided arrays.
+ * OUT: largest node identifier in the system.
+ * If OUT is greater than IN then the arrays are truncated!
+ */
+ uint32_t max_node_index;
+
+ /* NB. Entries are 0 if node is not present. */
+ XEN_GUEST_HANDLE_64(uint64) node_to_memsize;
+ XEN_GUEST_HANDLE_64(uint64) node_to_memfree;
+
+ /*
+ * Array, of size (max_node_index+1)^2, listing memory access distances
+ * between nodes. If an entry has no node distance information (e.g., node
+ * not present) then the value ~0u is written.
+ *
+ * Note that the array rows must be indexed by multiplying by the minimum
+ * of the caller-provided max_node_index and the returned value of
+ * max_node_index. That is, if the largest node index in the system is
+ * smaller than the caller can handle, a smaller 2-d array is constructed
+ * within the space provided by the caller. When this occurs, trailing
+ * space provided by the caller is not modified. If the largest node index
+ * in the system is larger than the caller can handle, then a 2-d array of
+ * the maximum size handleable by the caller is constructed.
+ */
+ XEN_GUEST_HANDLE_64(uint32) node_to_node_distance;
+};
+typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
+
+/* XEN_SYSCTL_cpupool_op */
+#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
+#define XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */
+#define XEN_SYSCTL_CPUPOOL_OP_INFO 3 /* I */
+#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU 4 /* A */
+#define XEN_SYSCTL_CPUPOOL_OP_RMCPU 5 /* R */
+#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */
+#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO 7 /* F */
+#define XEN_SYSCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF
+struct xen_sysctl_cpupool_op {
+ uint32_t op; /* IN */
+ uint32_t cpupool_id; /* IN: CDIARM OUT: CI */
+ uint32_t sched_id; /* IN: C OUT: I */
+ uint32_t domid; /* IN: M */
+ uint32_t cpu; /* IN: AR */
+ uint32_t n_dom; /* OUT: I */
+ struct xenctl_cpumap cpumap; /* OUT: IF */
+};
+typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
+
+/* XEN_SYSCTL_scheduler_op */
+/* Set or get info? */
+#define XEN_SYSCTL_SCHEDOP_putinfo 0
+#define XEN_SYSCTL_SCHEDOP_getinfo 1
+struct xen_sysctl_scheduler_op {
+ uint32_t sched_id; /* XEN_SCHEDULER_* (domctl.h) */
+ uint32_t cmd; /* XEN_SYSCTL_SCHEDOP_* */
+ union {
+ } u;
+};
+typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
+
struct xen_sysctl {
uint32_t cmd;
+#define XEN_SYSCTL_readconsole 1
+#define XEN_SYSCTL_tbuf_op 2
+#define XEN_SYSCTL_physinfo 3
+#define XEN_SYSCTL_sched_id 4
+#define XEN_SYSCTL_perfc_op 5
+#define XEN_SYSCTL_getdomaininfolist 6
+#define XEN_SYSCTL_debug_keys 7
+#define XEN_SYSCTL_getcpuinfo 8
+#define XEN_SYSCTL_availheap 9
+#define XEN_SYSCTL_get_pmstat 10
+#define XEN_SYSCTL_cpu_hotplug 11
+#define XEN_SYSCTL_pm_op 12
+#define XEN_SYSCTL_page_offline_op 14
+#define XEN_SYSCTL_lockprof_op 15
+#define XEN_SYSCTL_topologyinfo 16
+#define XEN_SYSCTL_numainfo 17
+#define XEN_SYSCTL_cpupool_op 18
+#define XEN_SYSCTL_scheduler_op 19
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_tbuf_op tbuf_op;
struct xen_sysctl_physinfo physinfo;
+ struct xen_sysctl_topologyinfo topologyinfo;
+ struct xen_sysctl_numainfo numainfo;
struct xen_sysctl_sched_id sched_id;
struct xen_sysctl_perfc_op perfc_op;
struct xen_sysctl_getdomaininfolist getdomaininfolist;
@@ -504,6 +586,8 @@ struct xen_sysctl {
struct xen_sysctl_pm_op pm_op;
struct xen_sysctl_page_offline_op page_offline;
struct xen_sysctl_lockprof_op lockprof_op;
+ struct xen_sysctl_cpupool_op cpupool_op;
+ struct xen_sysctl_scheduler_op scheduler_op;
uint8_t pad[128];
} u;
};
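The node_to_node_distance layout in xen_sysctl_numainfo deserves a worked reading: the row stride of the flat 2-D array is min(caller-supplied max_node_index, returned max_node_index) + 1, so a caller with a small buffer gets a densely packed sub-matrix and a caller with a large buffer is not asked to skip rows Xen never wrote. A sketch of the consumer-side indexing; the field names come from the header, but the access pattern is an interpretation of its comment.

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Distance between nodes i and j in the flat array the sysctl filled in. */
static unsigned int node_distance(const unsigned int *dist,
				  unsigned int max_in,	/* what we passed */
				  unsigned int max_out,	/* what Xen returned */
				  unsigned int i, unsigned int j)
{
	unsigned int stride = min_u32(max_in, max_out) + 1;

	return dist[i * stride + j];
}

int main(void)
{
	/* pretend we offered space for nodes 0..3 but only 0..1 exist */
	unsigned int dist[4 * 4] = { 10, 20, 20, 10 };	/* packed 2x2 */

	printf("d(0,1)=%u d(1,1)=%u\n",
	       node_distance(dist, 3, 1, 0, 1),
	       node_distance(dist, 3, 1, 1, 1));
	return 0;
}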
diff --git a/include/xen/interface/trace.h b/include/xen/interface/trace.h
index 6eb125a510af..0a8f1adfa3cd 100644
--- a/include/xen/interface/trace.h
+++ b/include/xen/interface/trace.h
@@ -53,6 +53,7 @@
#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
+#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */
#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
/* Trace events per class */
@@ -82,6 +83,12 @@
#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+#define TRC_MEM_SET_P2M_ENTRY (TRC_MEM + 4)
+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
+#define TRC_MEM_POD_POPULATE (TRC_MEM + 16)
+#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17)
+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+
#define TRC_PV_HYPERCALL (TRC_PV + 1)
#define TRC_PV_TRAP (TRC_PV + 3)
@@ -149,6 +156,8 @@
#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
#define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21)
+
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
@@ -195,6 +204,16 @@ struct t_buf {
/* Records follow immediately after the meta-data header. */
};
+/* Structure used to pass MFNs to the trace buffers back to trace consumers.
+ * Offset is an offset into the mapped structure where the mfn list will be held.
+ * MFNs will be at ((unsigned long *)(t_info))+(t_info->mfn_offset[cpu]).
+ */
+struct t_info {
+ uint16_t tbuf_size; /* Size in pages of each trace buffer */
+ uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */
+ /* MFN lists immediately after the header */
+};
+
#endif /* __XEN_PUBLIC_TRACE_H__ */
/*
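The new struct t_info above is a classic variable-length header: one fixed field, a flexible array of per-CPU offsets, and the payload (per-CPU MFN lists) packed after the header. Locations are expressed as offsets in unsigned longs from the start of the structure rather than as pointers, so the layout survives being mapped at different virtual addresses by different consumers. A userspace sketch of locating a CPU's MFN list; the buffer layout is invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct t_info {
	uint16_t tbuf_size;	/* pages per trace buffer */
	uint16_t mfn_offset[];	/* per-cpu offset, in unsigned longs */
};

/* MFN list for cpu: an offset in unsigned longs from the header start. */
static unsigned long *cpu_mfn_list(struct t_info *ti, unsigned int cpu)
{
	return (unsigned long *)ti + ti->mfn_offset[cpu];
}

int main(void)
{
	/* fake mapping: header, two offsets, then two one-entry MFN lists */
	unsigned long buf[8];
	struct t_info *ti = (struct t_info *)buf;

	memset(buf, 0, sizeof(buf));
	ti->tbuf_size = 1;
	ti->mfn_offset[0] = 2;	/* cpu0's list starts at buf[2] */
	ti->mfn_offset[1] = 3;	/* cpu1's list starts at buf[3] */
	buf[2] = 0x1000;
	buf[3] = 0x2000;

	printf("cpu0 mfn %#lx, cpu1 mfn %#lx\n",
	       cpu_mfn_list(ti, 0)[0], cpu_mfn_list(ti, 1)[0]);
	return 0;
}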
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 41e8f6e45910..c744b99e7d5e 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -247,6 +247,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
*
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
+ *
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
@@ -275,6 +279,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define MMUEXT_NEW_USER_BASEPTR 15
#define MMUEXT_CLEAR_PAGE 16
#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#ifndef __ASSEMBLY__
struct mmuext_op {