author:     Ian Delaney <idella4@gentoo.org>    2013-05-15 18:03:57 +0000
committer:  Ian Delaney <idella4@gentoo.org>    2013-05-15 18:03:57 +0000
commit:     745fe57bd4d778b388cea692b9f9758ffbe17042 (patch)
tree:       bca0151f9e26fbde49417264aab76dd303fe9891 /app-emulation/xen
parent:     Fix dependency on php's xsl extensionls (diff)
revbump 4.2.1-r3; updated security patches, bump 4.2.2; updated security patches
(Portage version: 2.1.11.62/cvs/Linux x86_64, signed Manifest commit with key 0xB8072B0D)
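The line above records a signed Manifest commit with key 0xB8072B0D. As a rough sketch (the key ID is taken from that line, but keyserver availability and the local PORTDIR path are assumptions), the clearsigned Manifest can be checked with:

    # sketch only: assumes the key is on a public keyserver and that the
    # CVS-based tree ships a clearsigned Manifest in the package directory
    gpg --recv-keys 0xB8072B0D
    cd /usr/portage/app-emulation/xen    # adjust to the local PORTDIR
    gpg --verify Manifest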
Diffstat (limited to 'app-emulation/xen')
-rw-r--r--  app-emulation/xen/ChangeLog                                |  20
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-0153-XSA-36.patch   | 323
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1917-XSA-44.patch   |  77
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_1.patch | 252
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_2.patch | 169
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_3.patch |  74
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_4.patch | 200
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_5.patch | 204
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_6.patch | 127
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_7.patch | 255
-rw-r--r--  app-emulation/xen/files/xen-4-CVE-2013-1920-XSA-47.patch   |  31
-rw-r--r--  app-emulation/xen/xen-4.2.1-r3.ebuild                      | 136
-rw-r--r--  app-emulation/xen/xen-4.2.2.ebuild                         | 131
13 files changed, 1998 insertions(+), 1 deletion(-)
diff --git a/app-emulation/xen/ChangeLog b/app-emulation/xen/ChangeLog index 3b4501784445..ae49cdcfffc7 100644 --- a/app-emulation/xen/ChangeLog +++ b/app-emulation/xen/ChangeLog @@ -1,6 +1,24 @@ # ChangeLog for app-emulation/xen # Copyright 1999-2013 Gentoo Foundation; Distributed under the GPL v2 -# $Header: /var/cvsroot/gentoo-x86/app-emulation/xen/ChangeLog,v 1.114 2013/03/08 10:33:14 idella4 Exp $ +# $Header: /var/cvsroot/gentoo-x86/app-emulation/xen/ChangeLog,v 1.115 2013/05/15 18:03:57 idella4 Exp $ + +*xen-4.2.2 (15 May 2013) +*xen-4.2.1-r3 (15 May 2013) + + 15 May 2013; Ian Delaney <idella4@gentoo.org> + +files/xen-4-CVE-2013-0153-XSA-36.patch, + +files/xen-4-CVE-2013-1917-XSA-44.patch, + +files/xen-4-CVE-2013-1918-XSA-45_1.patch, + +files/xen-4-CVE-2013-1918-XSA-45_2.patch, + +files/xen-4-CVE-2013-1918-XSA-45_3.patch, + +files/xen-4-CVE-2013-1918-XSA-45_4.patch, + +files/xen-4-CVE-2013-1918-XSA-45_5.patch, + +files/xen-4-CVE-2013-1918-XSA-45_6.patch, + +files/xen-4-CVE-2013-1918-XSA-45_7.patch, + +files/xen-4-CVE-2013-1920-XSA-47.patch, +xen-4.2.1-r3.ebuild, + +xen-4.2.2.ebuild, xen-4.2.1-r1.ebuild, xen-4.2.1-r2.ebuild: + revbump 4.2.1-r3; updated security patches, bump 4.2.2; updated security + patches 08 Mar 2013; Ian Delaney <idella4@gentoo.org> xen-4.2.0-r2.ebuild, xen-4.2.1-r1.ebuild, xen-4.2.1-r2.ebuild: diff --git a/app-emulation/xen/files/xen-4-CVE-2013-0153-XSA-36.patch b/app-emulation/xen/files/xen-4-CVE-2013-0153-XSA-36.patch new file mode 100644 index 000000000000..8477701a224c --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-0153-XSA-36.patch @@ -0,0 +1,323 @@ +ACPI: acpi_table_parse() should return handler's error code + +Currently, the error code returned by acpi_table_parse()'s handler +is ignored. This patch will propagate handler's return value to +acpi_table_parse()'s caller. + +AMD,IOMMU: Clean up old entries in remapping tables when creating new +interrupt mapping. + +When changing the affinity of an IRQ associated with a passed +through PCI device, clear previous mapping. + +In addition, because some BIOSes may incorrectly program IVRS +entries for IOAPIC try to check for entry's consistency. Specifically, +if conflicting entries are found disable IOMMU if per-device +remapping table is used. If entries refer to bogus IOAPIC IDs +disable IOMMU unconditionally + +AMD,IOMMU: Disable IOMMU if SATA Combined mode is on + +AMD's SP5100 chipset can be placed into SATA Combined mode +that may cause prevent dom0 from booting when IOMMU is +enabled and per-device interrupt remapping table is used. +While SP5100 erratum 28 requires BIOSes to disable this mode, +some may still use it. + +This patch checks whether this mode is on and, if per-device +table is in use, disables IOMMU. + +AMD,IOMMU: Make per-device interrupt remapping table default + +Using global interrupt remapping table may be insecure, as +described by XSA-36. This patch makes per-device mode default. + +This is XSA-36 / CVE-2013-0153. 
+ +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Signed-off-by: Boris Ostrovsky <boris.ostrovsky@amd.com> + +--- a/xen/arch/x86/irq.c ++++ b/xen/arch/x86/irq.c +@@ -1942,9 +1942,6 @@ int map_domain_pirq( + spin_lock_irqsave(&desc->lock, flags); + set_domain_irq_pirq(d, irq, info); + spin_unlock_irqrestore(&desc->lock, flags); +- +- if ( opt_irq_vector_map == OPT_IRQ_VECTOR_MAP_PERDEV ) +- printk(XENLOG_INFO "Per-device vector maps for GSIs not implemented yet.\n"); + } + + done: +--- a/xen/drivers/acpi/tables.c ++++ b/xen/drivers/acpi/tables.c +@@ -267,7 +267,7 @@ acpi_table_parse_madt(enum acpi_madt_typ + * @handler: handler to run + * + * Scan the ACPI System Descriptor Table (STD) for a table matching @id, +- * run @handler on it. Return 0 if table found, return on if not. ++ * run @handler on it. + */ + int __init acpi_table_parse(char *id, acpi_table_handler handler) + { +@@ -282,8 +282,7 @@ int __init acpi_table_parse(char *id, ac + acpi_get_table(id, 0, &table); + + if (table) { +- handler(table); +- return 0; ++ return handler(table); + } else + return 1; + } +--- a/xen/drivers/passthrough/amd/iommu_acpi.c ++++ b/xen/drivers/passthrough/amd/iommu_acpi.c +@@ -22,6 +22,7 @@ + #include <xen/errno.h> + #include <xen/acpi.h> + #include <asm/apicdef.h> ++#include <asm/io_apic.h> + #include <asm/amd-iommu.h> + #include <asm/hvm/svm/amd-iommu-proto.h> + +@@ -635,6 +636,7 @@ static u16 __init parse_ivhd_device_spec + u16 header_length, u16 block_length, struct amd_iommu *iommu) + { + u16 dev_length, bdf; ++ int apic; + + dev_length = sizeof(*special); + if ( header_length < (block_length + dev_length) ) +@@ -651,10 +653,59 @@ static u16 __init parse_ivhd_device_spec + } + + add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu); +- /* set device id of ioapic */ +- ioapic_sbdf[special->handle].bdf = bdf; +- ioapic_sbdf[special->handle].seg = seg; +- return dev_length; ++ ++ if ( special->variety != ACPI_IVHD_IOAPIC ) ++ { ++ if ( special->variety != ACPI_IVHD_HPET ) ++ printk(XENLOG_ERR "Unrecognized IVHD special variety %#x\n", ++ special->variety); ++ return dev_length; ++ } ++ ++ /* ++ * Some BIOSes have IOAPIC broken entries so we check for IVRS ++ * consistency here --- whether entry's IOAPIC ID is valid and ++ * whether there are conflicting/duplicated entries. 
++ */ ++ for ( apic = 0; apic < nr_ioapics; apic++ ) ++ { ++ if ( IO_APIC_ID(apic) != special->handle ) ++ continue; ++ ++ if ( ioapic_sbdf[special->handle].pin_setup ) ++ { ++ if ( ioapic_sbdf[special->handle].bdf == bdf && ++ ioapic_sbdf[special->handle].seg == seg ) ++ AMD_IOMMU_DEBUG("IVHD Warning: Duplicate IO-APIC %#x entries\n", ++ special->handle); ++ else ++ { ++ printk(XENLOG_ERR "IVHD Error: Conflicting IO-APIC %#x entries\n", ++ special->handle); ++ if ( amd_iommu_perdev_intremap ) ++ return 0; ++ } ++ } ++ else ++ { ++ /* set device id of ioapic */ ++ ioapic_sbdf[special->handle].bdf = bdf; ++ ioapic_sbdf[special->handle].seg = seg; ++ ++ ioapic_sbdf[special->handle].pin_setup = xzalloc_array( ++ unsigned long, BITS_TO_LONGS(nr_ioapic_entries[apic])); ++ if ( nr_ioapic_entries[apic] && ++ !ioapic_sbdf[IO_APIC_ID(apic)].pin_setup ) ++ { ++ printk(XENLOG_ERR "IVHD Error: Out of memory\n"); ++ return 0; ++ } ++ } ++ return dev_length; ++ } ++ ++ printk(XENLOG_ERR "IVHD Error: Invalid IO-APIC %#x\n", special->handle); ++ return 0; + } + + static int __init parse_ivhd_block(const struct acpi_ivrs_hardware *ivhd_block) +--- a/xen/drivers/passthrough/amd/iommu_init.c ++++ b/xen/drivers/passthrough/amd/iommu_init.c +@@ -1126,12 +1126,45 @@ static int __init amd_iommu_setup_device + return 0; + } + ++/* Check whether SP5100 SATA Combined mode is on */ ++static bool_t __init amd_sp5100_erratum28(void) ++{ ++ u32 bus, id; ++ u16 vendor_id, dev_id; ++ u8 byte; ++ ++ for (bus = 0; bus < 256; bus++) ++ { ++ id = pci_conf_read32(0, bus, 0x14, 0, PCI_VENDOR_ID); ++ ++ vendor_id = id & 0xffff; ++ dev_id = (id >> 16) & 0xffff; ++ ++ /* SP5100 SMBus module sets Combined mode on */ ++ if (vendor_id != 0x1002 || dev_id != 0x4385) ++ continue; ++ ++ byte = pci_conf_read8(0, bus, 0x14, 0, 0xad); ++ if ( (byte >> 3) & 1 ) ++ { ++ printk(XENLOG_WARNING "AMD-Vi: SP5100 erratum 28 detected, disabling IOMMU.\n" ++ "If possible, disable SATA Combined mode in BIOS or contact your vendor for BIOS update.\n"); ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ + int __init amd_iommu_init(void) + { + struct amd_iommu *iommu; + + BUG_ON( !iommu_found() ); + ++ if ( amd_iommu_perdev_intremap && amd_sp5100_erratum28() ) ++ goto error_out; ++ + ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries(); + + if ( !ivrs_bdf_entries ) +--- a/xen/drivers/passthrough/amd/iommu_intr.c ++++ b/xen/drivers/passthrough/amd/iommu_intr.c +@@ -99,12 +99,12 @@ static void update_intremap_entry(u32* e + static void update_intremap_entry_from_ioapic( + int bdf, + struct amd_iommu *iommu, +- struct IO_APIC_route_entry *ioapic_rte) ++ const struct IO_APIC_route_entry *rte, ++ const struct IO_APIC_route_entry *old_rte) + { + unsigned long flags; + u32* entry; + u8 delivery_mode, dest, vector, dest_mode; +- struct IO_APIC_route_entry *rte = ioapic_rte; + int req_id; + spinlock_t *lock; + int offset; +@@ -120,6 +120,14 @@ static void update_intremap_entry_from_i + spin_lock_irqsave(lock, flags); + + offset = get_intremap_offset(vector, delivery_mode); ++ if ( old_rte ) ++ { ++ int old_offset = get_intremap_offset(old_rte->vector, ++ old_rte->delivery_mode); ++ ++ if ( offset != old_offset ) ++ free_intremap_entry(iommu->seg, bdf, old_offset); ++ } + entry = (u32*)get_intremap_entry(iommu->seg, req_id, offset); + update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest); + +@@ -188,6 +196,7 @@ int __init amd_iommu_setup_ioapic_remapp + amd_iommu_flush_intremap(iommu, req_id); + spin_unlock_irqrestore(&iommu->lock, flags); + } ++ 
set_bit(pin, ioapic_sbdf[IO_APIC_ID(apic)].pin_setup); + } + } + return 0; +@@ -199,6 +208,7 @@ void amd_iommu_ioapic_update_ire( + struct IO_APIC_route_entry old_rte = { 0 }; + struct IO_APIC_route_entry new_rte = { 0 }; + unsigned int rte_lo = (reg & 1) ? reg - 1 : reg; ++ unsigned int pin = (reg - 0x10) / 2; + int saved_mask, seg, bdf; + struct amd_iommu *iommu; + +@@ -236,6 +246,14 @@ void amd_iommu_ioapic_update_ire( + *(((u32 *)&new_rte) + 1) = value; + } + ++ if ( new_rte.mask && ++ !test_bit(pin, ioapic_sbdf[IO_APIC_ID(apic)].pin_setup) ) ++ { ++ ASSERT(saved_mask); ++ __io_apic_write(apic, reg, value); ++ return; ++ } ++ + /* mask the interrupt while we change the intremap table */ + if ( !saved_mask ) + { +@@ -244,7 +262,11 @@ void amd_iommu_ioapic_update_ire( + } + + /* Update interrupt remapping entry */ +- update_intremap_entry_from_ioapic(bdf, iommu, &new_rte); ++ update_intremap_entry_from_ioapic( ++ bdf, iommu, &new_rte, ++ test_and_set_bit(pin, ++ ioapic_sbdf[IO_APIC_ID(apic)].pin_setup) ? &old_rte ++ : NULL); + + /* Forward write access to IO-APIC RTE */ + __io_apic_write(apic, reg, value); +@@ -354,6 +376,12 @@ void amd_iommu_msi_msg_update_ire( + return; + } + ++ if ( msi_desc->remap_index >= 0 ) ++ update_intremap_entry_from_msi_msg(iommu, pdev, msi_desc, NULL); ++ ++ if ( !msg ) ++ return; ++ + update_intremap_entry_from_msi_msg(iommu, pdev, msi_desc, msg); + } + +--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c ++++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c +@@ -205,6 +205,8 @@ int __init amd_iov_detect(void) + { + printk("AMD-Vi: Not overriding irq_vector_map setting\n"); + } ++ if ( !amd_iommu_perdev_intremap ) ++ printk(XENLOG_WARNING "AMD-Vi: Using global interrupt remap table is not recommended (see XSA-36)!\n"); + return scan_pci_devices(); + } + +--- a/xen/drivers/passthrough/iommu.c ++++ b/xen/drivers/passthrough/iommu.c +@@ -52,7 +52,7 @@ bool_t __read_mostly iommu_qinval = 1; + bool_t __read_mostly iommu_intremap = 1; + bool_t __read_mostly iommu_hap_pt_share = 1; + bool_t __read_mostly iommu_debug; +-bool_t __read_mostly amd_iommu_perdev_intremap; ++bool_t __read_mostly amd_iommu_perdev_intremap = 1; + + DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb); + +--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h ++++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h +@@ -100,6 +100,7 @@ void amd_iommu_read_msi_from_ire( + + extern struct ioapic_sbdf { + u16 bdf, seg; ++ unsigned long *pin_setup; + } ioapic_sbdf[MAX_IO_APICS]; + extern void *shared_intremap_table; + diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1917-XSA-44.patch b/app-emulation/xen/files/xen-4-CVE-2013-1917-XSA-44.patch new file mode 100644 index 000000000000..07ed9386f6f9 --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1917-XSA-44.patch @@ -0,0 +1,77 @@ +x86: clear EFLAGS.NT in SYSENTER entry path + +... as it causes problems if we happen to exit back via IRET: In the +course of trying to handle the fault, the hypervisor creates a stack +frame by hand, and uses PUSHFQ to set the respective EFLAGS field, but +expects to be able to IRET through that stack frame to the second +portion of the fixup code (which causes a #GP due to the stored EFLAGS +having NT set). + +And even if this worked (e.g if we cleared NT in that path), it would +then (through the fail safe callback) cause a #GP in the guest with the +SYSENTER handler's first instruction as the source, which in turn would +allow guest user mode code to crash the guest kernel. 
+ +Inject a #GP on the fake (NULL) address of the SYSENTER instruction +instead, just like in the case where the guest kernel didn't register +a corresponding entry point. + +On 32-bit we also need to make sure we clear SYSENTER_CS for all CPUs +(neither #RESET nor #INIT guarantee this). + +This is CVE-2013-1917 / XSA-44. + +Reported-by: Andrew Cooper <andrew.cooper3@citirx.com> +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Tested-by: Andrew Cooper <andrew.cooper3@citrix.com> +Acked-by: Andrew Cooper <andrew.cooper3@citrix.com> + +--- a/xen/arch/x86/acpi/suspend.c ++++ b/xen/arch/x86/acpi/suspend.c +@@ -81,8 +81,12 @@ void restore_rest_processor_state(void) + } + + #else /* !defined(CONFIG_X86_64) */ +- if ( supervisor_mode_kernel && cpu_has_sep ) +- wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0); ++ if ( cpu_has_sep ) ++ { ++ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); ++ if ( supervisor_mode_kernel ) ++ wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0); ++ } + #endif + + /* Maybe load the debug registers. */ +--- a/xen/arch/x86/cpu/common.c ++++ b/xen/arch/x86/cpu/common.c +@@ -655,8 +655,11 @@ void __cpuinit cpu_init(void) + #if defined(CONFIG_X86_32) + t->ss0 = __HYPERVISOR_DS; + t->esp0 = get_stack_bottom(); +- if ( supervisor_mode_kernel && cpu_has_sep ) ++ if ( cpu_has_sep ) { ++ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); ++ if ( supervisor_mode_kernel ) + wrmsr(MSR_IA32_SYSENTER_ESP, &t->esp1, 0); ++ } + #elif defined(CONFIG_X86_64) + /* Bottom-of-stack must be 16-byte aligned! */ + BUG_ON((get_stack_bottom() & 15) != 0); +--- a/xen/arch/x86/x86_64/entry.S ++++ b/xen/arch/x86/x86_64/entry.S +@@ -284,7 +284,14 @@ sysenter_eflags_saved: + cmpb $0,VCPU_sysenter_disables_events(%rbx) + movq VCPU_sysenter_addr(%rbx),%rax + setne %cl ++ testl $X86_EFLAGS_NT,UREGS_eflags(%rsp) + leaq VCPU_trap_bounce(%rbx),%rdx ++UNLIKELY_START(nz, sysenter_nt_set) ++ pushfq ++ andl $~X86_EFLAGS_NT,(%rsp) ++ popfq ++ xorl %eax,%eax ++UNLIKELY_END(sysenter_nt_set) + testq %rax,%rax + leal (,%rcx,TBF_INTERRUPT),%ecx + UNLIKELY_START(z, sysenter_gpf) diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_1.patch b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_1.patch new file mode 100644 index 000000000000..f5ded0a1a564 --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_1.patch @@ -0,0 +1,252 @@ +x86: make vcpu_destroy_pagetables() preemptible + +... as it may take significant amounts of time. + +The function, being moved to mm.c as the better home for it anyway, and +to avoid having to make a new helper function there non-static, is +given a "preemptible" parameter temporarily (until, in a subsequent +patch, its other caller is also being made capable of dealing with +preemption). + +This is part of CVE-2013-1918 / XSA-45. 
+ +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Acked-by: Tim Deegan <tim@xen.org> + +--- a/xen/arch/x86/domain.c ++++ b/xen/arch/x86/domain.c +@@ -73,8 +73,6 @@ void (*dead_idle) (void) __read_mostly = + static void paravirt_ctxt_switch_from(struct vcpu *v); + static void paravirt_ctxt_switch_to(struct vcpu *v); + +-static void vcpu_destroy_pagetables(struct vcpu *v); +- + static void default_idle(void) + { + local_irq_disable(); +@@ -1058,7 +1056,7 @@ void arch_vcpu_reset(struct vcpu *v) + if ( !is_hvm_vcpu(v) ) + { + destroy_gdt(v); +- vcpu_destroy_pagetables(v); ++ vcpu_destroy_pagetables(v, 0); + } + else + { +@@ -2069,63 +2067,6 @@ static int relinquish_memory( + return ret; + } + +-static void vcpu_destroy_pagetables(struct vcpu *v) +-{ +- struct domain *d = v->domain; +- unsigned long pfn; +- +-#ifdef __x86_64__ +- if ( is_pv_32on64_vcpu(v) ) +- { +- pfn = l4e_get_pfn(*(l4_pgentry_t *) +- __va(pagetable_get_paddr(v->arch.guest_table))); +- +- if ( pfn != 0 ) +- { +- if ( paging_mode_refcounts(d) ) +- put_page(mfn_to_page(pfn)); +- else +- put_page_and_type(mfn_to_page(pfn)); +- } +- +- l4e_write( +- (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)), +- l4e_empty()); +- +- v->arch.cr3 = 0; +- return; +- } +-#endif +- +- pfn = pagetable_get_pfn(v->arch.guest_table); +- if ( pfn != 0 ) +- { +- if ( paging_mode_refcounts(d) ) +- put_page(mfn_to_page(pfn)); +- else +- put_page_and_type(mfn_to_page(pfn)); +- v->arch.guest_table = pagetable_null(); +- } +- +-#ifdef __x86_64__ +- /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */ +- pfn = pagetable_get_pfn(v->arch.guest_table_user); +- if ( pfn != 0 ) +- { +- if ( !is_pv_32bit_vcpu(v) ) +- { +- if ( paging_mode_refcounts(d) ) +- put_page(mfn_to_page(pfn)); +- else +- put_page_and_type(mfn_to_page(pfn)); +- } +- v->arch.guest_table_user = pagetable_null(); +- } +-#endif +- +- v->arch.cr3 = 0; +-} +- + int domain_relinquish_resources(struct domain *d) + { + int ret; +@@ -2143,7 +2084,11 @@ int domain_relinquish_resources(struct d + + /* Drop the in-use references to page-table bases. 
*/ + for_each_vcpu ( d, v ) +- vcpu_destroy_pagetables(v); ++ { ++ ret = vcpu_destroy_pagetables(v, 1); ++ if ( ret ) ++ return ret; ++ } + + if ( !is_hvm_domain(d) ) + { +--- a/xen/arch/x86/mm.c ++++ b/xen/arch/x86/mm.c +@@ -2808,6 +2808,82 @@ static void put_superpage(unsigned long + + #endif + ++static int put_old_guest_table(struct vcpu *v) ++{ ++ int rc; ++ ++ if ( !v->arch.old_guest_table ) ++ return 0; ++ ++ switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table, 1) ) ++ { ++ case -EINTR: ++ case -EAGAIN: ++ return -EAGAIN; ++ } ++ ++ v->arch.old_guest_table = NULL; ++ ++ return rc; ++} ++ ++int vcpu_destroy_pagetables(struct vcpu *v, bool_t preemptible) ++{ ++ unsigned long mfn = pagetable_get_pfn(v->arch.guest_table); ++ struct page_info *page; ++ int rc = put_old_guest_table(v); ++ ++ if ( rc ) ++ return rc; ++ ++#ifdef __x86_64__ ++ if ( is_pv_32on64_vcpu(v) ) ++ mfn = l4e_get_pfn(*(l4_pgentry_t *)mfn_to_virt(mfn)); ++#endif ++ ++ if ( mfn ) ++ { ++ page = mfn_to_page(mfn); ++ if ( paging_mode_refcounts(v->domain) ) ++ put_page(page); ++ else ++ rc = put_page_and_type_preemptible(page, preemptible); ++ } ++ ++#ifdef __x86_64__ ++ if ( is_pv_32on64_vcpu(v) ) ++ { ++ if ( !rc ) ++ l4e_write( ++ (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)), ++ l4e_empty()); ++ } ++ else ++#endif ++ if ( !rc ) ++ { ++ v->arch.guest_table = pagetable_null(); ++ ++#ifdef __x86_64__ ++ /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */ ++ mfn = pagetable_get_pfn(v->arch.guest_table_user); ++ if ( mfn ) ++ { ++ page = mfn_to_page(mfn); ++ if ( paging_mode_refcounts(v->domain) ) ++ put_page(page); ++ else ++ rc = put_page_and_type_preemptible(page, preemptible); ++ } ++ if ( !rc ) ++ v->arch.guest_table_user = pagetable_null(); ++#endif ++ } ++ ++ v->arch.cr3 = 0; ++ ++ return rc; ++} + + int new_guest_cr3(unsigned long mfn) + { +@@ -2994,12 +3070,21 @@ long do_mmuext_op( + unsigned int foreigndom) + { + struct mmuext_op op; +- int rc = 0, i = 0, okay; + unsigned long type; +- unsigned int done = 0; ++ unsigned int i = 0, done = 0; + struct vcpu *curr = current; + struct domain *d = curr->domain; + struct domain *pg_owner; ++ int okay, rc = put_old_guest_table(curr); ++ ++ if ( unlikely(rc) ) ++ { ++ if ( likely(rc == -EAGAIN) ) ++ rc = hypercall_create_continuation( ++ __HYPERVISOR_mmuext_op, "hihi", uops, count, pdone, ++ foreigndom); ++ return rc; ++ } + + if ( unlikely(count & MMU_UPDATE_PREEMPTED) ) + { +--- a/xen/arch/x86/x86_64/compat/mm.c ++++ b/xen/arch/x86/x86_64/compat/mm.c +@@ -365,7 +365,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm + : mcs->call.args[1]; + unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED; + +- BUG_ON(left == arg1); ++ BUG_ON(left == arg1 && left != i); + BUG_ON(left > count); + guest_handle_add_offset(nat_ops, i - left); + guest_handle_subtract_offset(cmp_uops, left); +--- a/xen/include/asm-x86/domain.h ++++ b/xen/include/asm-x86/domain.h +@@ -464,6 +464,7 @@ struct arch_vcpu + pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */ + #endif + pagetable_t guest_table; /* (MFN) guest notion of cr3 */ ++ struct page_info *old_guest_table; /* partially destructed pagetable */ + /* guest_table holds a ref to the page, and also a type-count unless + * shadow refcounts are in use */ + pagetable_t shadow_table[4]; /* (MFN) shadow(s) of guest */ +--- a/xen/include/asm-x86/mm.h ++++ b/xen/include/asm-x86/mm.h +@@ -605,6 +605,7 @@ void audit_domains(void); + int new_guest_cr3(unsigned long pfn); + void make_cr3(struct vcpu 
*v, unsigned long mfn); + void update_cr3(struct vcpu *v); ++int vcpu_destroy_pagetables(struct vcpu *, bool_t preemptible); + void propagate_page_fault(unsigned long addr, u16 error_code); + void *do_page_walk(struct vcpu *v, unsigned long addr); + diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_2.patch b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_2.patch new file mode 100644 index 000000000000..f730b67eca4c --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_2.patch @@ -0,0 +1,169 @@ +x86: make new_guest_cr3() preemptible + +... as it may take significant amounts of time. + +This is part of CVE-2013-1918 / XSA-45. + +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Acked-by: Tim Deegan <tim@xen.org> + +--- a/xen/arch/x86/mm.c ++++ b/xen/arch/x86/mm.c +@@ -2889,44 +2889,69 @@ int new_guest_cr3(unsigned long mfn) + { + struct vcpu *curr = current; + struct domain *d = curr->domain; +- int okay; ++ int rc; + unsigned long old_base_mfn; + + #ifdef __x86_64__ + if ( is_pv_32on64_domain(d) ) + { +- okay = paging_mode_refcounts(d) +- ? 0 /* Old code was broken, but what should it be? */ +- : mod_l4_entry( ++ rc = paging_mode_refcounts(d) ++ ? -EINVAL /* Old code was broken, but what should it be? */ ++ : mod_l4_entry( + __va(pagetable_get_paddr(curr->arch.guest_table)), + l4e_from_pfn( + mfn, + (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)), +- pagetable_get_pfn(curr->arch.guest_table), 0, 0, curr) == 0; +- if ( unlikely(!okay) ) ++ pagetable_get_pfn(curr->arch.guest_table), 0, 1, curr); ++ switch ( rc ) + { ++ case 0: ++ break; ++ case -EINTR: ++ case -EAGAIN: ++ return -EAGAIN; ++ default: + MEM_LOG("Error while installing new compat baseptr %lx", mfn); +- return 0; ++ return rc; + } + + invalidate_shadow_ldt(curr, 0); + write_ptbase(curr); + +- return 1; ++ return 0; + } + #endif +- okay = paging_mode_refcounts(d) +- ? get_page_from_pagenr(mfn, d) +- : !get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 0); +- if ( unlikely(!okay) ) ++ rc = put_old_guest_table(curr); ++ if ( unlikely(rc) ) ++ return rc; ++ ++ old_base_mfn = pagetable_get_pfn(curr->arch.guest_table); ++ /* ++ * This is particularly important when getting restarted after the ++ * previous attempt got preempted in the put-old-MFN phase. ++ */ ++ if ( old_base_mfn == mfn ) + { +- MEM_LOG("Error while installing new baseptr %lx", mfn); ++ write_ptbase(curr); + return 0; + } + +- invalidate_shadow_ldt(curr, 0); ++ rc = paging_mode_refcounts(d) ++ ? (get_page_from_pagenr(mfn, d) ? 
0 : -EINVAL) ++ : get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 1); ++ switch ( rc ) ++ { ++ case 0: ++ break; ++ case -EINTR: ++ case -EAGAIN: ++ return -EAGAIN; ++ default: ++ MEM_LOG("Error while installing new baseptr %lx", mfn); ++ return rc; ++ } + +- old_base_mfn = pagetable_get_pfn(curr->arch.guest_table); ++ invalidate_shadow_ldt(curr, 0); + + curr->arch.guest_table = pagetable_from_pfn(mfn); + update_cr3(curr); +@@ -2935,13 +2960,25 @@ int new_guest_cr3(unsigned long mfn) + + if ( likely(old_base_mfn != 0) ) + { ++ struct page_info *page = mfn_to_page(old_base_mfn); ++ + if ( paging_mode_refcounts(d) ) +- put_page(mfn_to_page(old_base_mfn)); ++ put_page(page); + else +- put_page_and_type(mfn_to_page(old_base_mfn)); ++ switch ( rc = put_page_and_type_preemptible(page, 1) ) ++ { ++ case -EINTR: ++ rc = -EAGAIN; ++ case -EAGAIN: ++ curr->arch.old_guest_table = page; ++ break; ++ default: ++ BUG_ON(rc); ++ break; ++ } + } + +- return 1; ++ return rc; + } + + static struct domain *get_pg_owner(domid_t domid) +@@ -3239,8 +3276,13 @@ long do_mmuext_op( + } + + case MMUEXT_NEW_BASEPTR: +- okay = (!paging_mode_translate(d) +- && new_guest_cr3(op.arg1.mfn)); ++ if ( paging_mode_translate(d) ) ++ okay = 0; ++ else ++ { ++ rc = new_guest_cr3(op.arg1.mfn); ++ okay = !rc; ++ } + break; + + +--- a/xen/arch/x86/traps.c ++++ b/xen/arch/x86/traps.c +@@ -2407,12 +2407,23 @@ static int emulate_privileged_op(struct + #endif + } + page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC); +- rc = page ? new_guest_cr3(page_to_mfn(page)) : 0; + if ( page ) ++ { ++ rc = new_guest_cr3(page_to_mfn(page)); + put_page(page); ++ } ++ else ++ rc = -EINVAL; + domain_unlock(v->domain); +- if ( rc == 0 ) /* not okay */ ++ switch ( rc ) ++ { ++ case 0: ++ break; ++ case -EAGAIN: /* retry after preemption */ ++ goto skip; ++ default: /* not okay */ + goto fail; ++ } + break; + } + diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_3.patch b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_3.patch new file mode 100644 index 000000000000..9d5679e7853e --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_3.patch @@ -0,0 +1,74 @@ +x86: make MMUEXT_NEW_USER_BASEPTR preemptible + +... as it may take significant amounts of time. + +This is part of CVE-2013-1918 / XSA-45. + +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Acked-by: Tim Deegan <tim@xen.org> + +--- a/xen/arch/x86/mm.c ++++ b/xen/arch/x86/mm.c +@@ -3296,29 +3296,56 @@ long do_mmuext_op( + break; + } + ++ old_mfn = pagetable_get_pfn(curr->arch.guest_table_user); ++ /* ++ * This is particularly important when getting restarted after the ++ * previous attempt got preempted in the put-old-MFN phase. 
++ */ ++ if ( old_mfn == op.arg1.mfn ) ++ break; ++ + if ( op.arg1.mfn != 0 ) + { + if ( paging_mode_refcounts(d) ) + okay = get_page_from_pagenr(op.arg1.mfn, d); + else +- okay = !get_page_and_type_from_pagenr( +- op.arg1.mfn, PGT_root_page_table, d, 0, 0); ++ { ++ rc = get_page_and_type_from_pagenr( ++ op.arg1.mfn, PGT_root_page_table, d, 0, 1); ++ okay = !rc; ++ } + if ( unlikely(!okay) ) + { +- MEM_LOG("Error while installing new mfn %lx", op.arg1.mfn); ++ if ( rc == -EINTR ) ++ rc = -EAGAIN; ++ else if ( rc != -EAGAIN ) ++ MEM_LOG("Error while installing new mfn %lx", ++ op.arg1.mfn); + break; + } + } + +- old_mfn = pagetable_get_pfn(curr->arch.guest_table_user); + curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn); + + if ( old_mfn != 0 ) + { ++ struct page_info *page = mfn_to_page(old_mfn); ++ + if ( paging_mode_refcounts(d) ) +- put_page(mfn_to_page(old_mfn)); ++ put_page(page); + else +- put_page_and_type(mfn_to_page(old_mfn)); ++ switch ( rc = put_page_and_type_preemptible(page, 1) ) ++ { ++ case -EINTR: ++ rc = -EAGAIN; ++ case -EAGAIN: ++ curr->arch.old_guest_table = page; ++ okay = 0; ++ break; ++ default: ++ BUG_ON(rc); ++ break; ++ } + } + + break; diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_4.patch b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_4.patch new file mode 100644 index 000000000000..bbce56789c0c --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_4.patch @@ -0,0 +1,200 @@ +x86: make vcpu_reset() preemptible + +... as dropping the old page tables may take significant amounts of +time. + +This is part of CVE-2013-1918 / XSA-45. + +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Acked-by: Tim Deegan <tim@xen.org> + +--- a/xen/arch/x86/domain.c ++++ b/xen/arch/x86/domain.c +@@ -1051,17 +1051,16 @@ int arch_set_info_guest( + #undef c + } + +-void arch_vcpu_reset(struct vcpu *v) ++int arch_vcpu_reset(struct vcpu *v) + { + if ( !is_hvm_vcpu(v) ) + { + destroy_gdt(v); +- vcpu_destroy_pagetables(v, 0); +- } +- else +- { +- vcpu_end_shutdown_deferral(v); ++ return vcpu_destroy_pagetables(v); + } ++ ++ vcpu_end_shutdown_deferral(v); ++ return 0; + } + + /* +@@ -2085,7 +2084,7 @@ int domain_relinquish_resources(struct d + /* Drop the in-use references to page-table bases. */ + for_each_vcpu ( d, v ) + { +- ret = vcpu_destroy_pagetables(v, 1); ++ ret = vcpu_destroy_pagetables(v); + if ( ret ) + return ret; + } +--- a/xen/arch/x86/hvm/hvm.c ++++ b/xen/arch/x86/hvm/hvm.c +@@ -3509,8 +3509,11 @@ static void hvm_s3_suspend(struct domain + + for_each_vcpu ( d, v ) + { ++ int rc; ++ + vlapic_reset(vcpu_vlapic(v)); +- vcpu_reset(v); ++ rc = vcpu_reset(v); ++ ASSERT(!rc); + } + + vpic_reset(d); +--- a/xen/arch/x86/hvm/vlapic.c ++++ b/xen/arch/x86/hvm/vlapic.c +@@ -252,10 +252,13 @@ static void vlapic_init_sipi_action(unsi + { + case APIC_DM_INIT: { + bool_t fpu_initialised; ++ int rc; ++ + domain_lock(target->domain); + /* Reset necessary VCPU state. This does not include FPU state. 
*/ + fpu_initialised = target->fpu_initialised; +- vcpu_reset(target); ++ rc = vcpu_reset(target); ++ ASSERT(!rc); + target->fpu_initialised = fpu_initialised; + vlapic_reset(vcpu_vlapic(target)); + domain_unlock(target->domain); +--- a/xen/arch/x86/mm.c ++++ b/xen/arch/x86/mm.c +@@ -2827,7 +2827,7 @@ static int put_old_guest_table(struct vc + return rc; + } + +-int vcpu_destroy_pagetables(struct vcpu *v, bool_t preemptible) ++int vcpu_destroy_pagetables(struct vcpu *v) + { + unsigned long mfn = pagetable_get_pfn(v->arch.guest_table); + struct page_info *page; +@@ -2847,7 +2847,7 @@ int vcpu_destroy_pagetables(struct vcpu + if ( paging_mode_refcounts(v->domain) ) + put_page(page); + else +- rc = put_page_and_type_preemptible(page, preemptible); ++ rc = put_page_and_type_preemptible(page, 1); + } + + #ifdef __x86_64__ +@@ -2873,7 +2873,7 @@ int vcpu_destroy_pagetables(struct vcpu + if ( paging_mode_refcounts(v->domain) ) + put_page(page); + else +- rc = put_page_and_type_preemptible(page, preemptible); ++ rc = put_page_and_type_preemptible(page, 1); + } + if ( !rc ) + v->arch.guest_table_user = pagetable_null(); +--- a/xen/common/domain.c ++++ b/xen/common/domain.c +@@ -779,14 +779,18 @@ void domain_unpause_by_systemcontroller( + domain_unpause(d); + } + +-void vcpu_reset(struct vcpu *v) ++int vcpu_reset(struct vcpu *v) + { + struct domain *d = v->domain; ++ int rc; + + vcpu_pause(v); + domain_lock(d); + +- arch_vcpu_reset(v); ++ set_bit(_VPF_in_reset, &v->pause_flags); ++ rc = arch_vcpu_reset(v); ++ if ( rc ) ++ goto out_unlock; + + set_bit(_VPF_down, &v->pause_flags); + +@@ -802,9 +806,13 @@ void vcpu_reset(struct vcpu *v) + #endif + cpumask_clear(v->cpu_affinity_tmp); + clear_bit(_VPF_blocked, &v->pause_flags); ++ clear_bit(_VPF_in_reset, &v->pause_flags); + ++ out_unlock: + domain_unlock(v->domain); + vcpu_unpause(v); ++ ++ return rc; + } + + +--- a/xen/common/domctl.c ++++ b/xen/common/domctl.c +@@ -306,8 +306,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc + + if ( guest_handle_is_null(op->u.vcpucontext.ctxt) ) + { +- vcpu_reset(v); +- ret = 0; ++ ret = vcpu_reset(v); ++ if ( ret == -EAGAIN ) ++ ret = hypercall_create_continuation( ++ __HYPERVISOR_domctl, "h", u_domctl); + goto svc_out; + } + +--- a/xen/include/asm-x86/mm.h ++++ b/xen/include/asm-x86/mm.h +@@ -605,7 +605,7 @@ void audit_domains(void); + int new_guest_cr3(unsigned long pfn); + void make_cr3(struct vcpu *v, unsigned long mfn); + void update_cr3(struct vcpu *v); +-int vcpu_destroy_pagetables(struct vcpu *, bool_t preemptible); ++int vcpu_destroy_pagetables(struct vcpu *); + void propagate_page_fault(unsigned long addr, u16 error_code); + void *do_page_walk(struct vcpu *v, unsigned long addr); + +--- a/xen/include/xen/domain.h ++++ b/xen/include/xen/domain.h +@@ -13,7 +13,7 @@ typedef union { + struct vcpu *alloc_vcpu( + struct domain *d, unsigned int vcpu_id, unsigned int cpu_id); + struct vcpu *alloc_dom0_vcpu0(void); +-void vcpu_reset(struct vcpu *v); ++int vcpu_reset(struct vcpu *); + + struct xen_domctl_getdomaininfo; + void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info); +@@ -67,7 +67,7 @@ void arch_dump_vcpu_info(struct vcpu *v) + + void arch_dump_domain_info(struct domain *d); + +-void arch_vcpu_reset(struct vcpu *v); ++int arch_vcpu_reset(struct vcpu *); + + extern spinlock_t vcpu_alloc_lock; + bool_t domctl_lock_acquire(void); +--- a/xen/include/xen/sched.h ++++ b/xen/include/xen/sched.h +@@ -644,6 +644,9 @@ static inline struct domain *next_domain + /* VCPU is blocked due to missing 
mem_sharing ring. */ + #define _VPF_mem_sharing 6 + #define VPF_mem_sharing (1UL<<_VPF_mem_sharing) ++ /* VCPU is being reset. */ ++#define _VPF_in_reset 7 ++#define VPF_in_reset (1UL<<_VPF_in_reset) + + static inline int vcpu_runnable(struct vcpu *v) + { diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_5.patch b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_5.patch new file mode 100644 index 000000000000..b584b382c0af --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_5.patch @@ -0,0 +1,204 @@ +x86: make arch_set_info_guest() preemptible + +.. as the root page table validation (and the dropping of an eventual +old one) can require meaningful amounts of time. + +This is part of CVE-2013-1918 / XSA-45. + +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Acked-by: Tim Deegan <tim@xen.org> + +--- a/xen/arch/x86/domain.c ++++ b/xen/arch/x86/domain.c +@@ -858,6 +858,9 @@ int arch_set_info_guest( + + if ( !v->is_initialised ) + { ++ if ( !compat && !(flags & VGCF_in_kernel) && !c.nat->ctrlreg[1] ) ++ return -EINVAL; ++ + v->arch.pv_vcpu.ldt_base = c(ldt_base); + v->arch.pv_vcpu.ldt_ents = c(ldt_ents); + } +@@ -955,24 +958,44 @@ int arch_set_info_guest( + if ( rc != 0 ) + return rc; + ++ set_bit(_VPF_in_reset, &v->pause_flags); ++ + if ( !compat ) +- { + cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]); +- cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC); +- +- if ( !cr3_page ) +- { +- destroy_gdt(v); +- return -EINVAL; +- } +- if ( !paging_mode_refcounts(d) +- && !get_page_type(cr3_page, PGT_base_page_table) ) +- { +- put_page(cr3_page); +- destroy_gdt(v); +- return -EINVAL; +- } ++#ifdef CONFIG_COMPAT ++ else ++ cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]); ++#endif ++ cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC); + ++ if ( !cr3_page ) ++ rc = -EINVAL; ++ else if ( paging_mode_refcounts(d) ) ++ /* nothing */; ++ else if ( cr3_page == v->arch.old_guest_table ) ++ { ++ v->arch.old_guest_table = NULL; ++ put_page(cr3_page); ++ } ++ else ++ { ++ /* ++ * Since v->arch.guest_table{,_user} are both NULL, this effectively ++ * is just a call to put_old_guest_table(). ++ */ ++ if ( !compat ) ++ rc = vcpu_destroy_pagetables(v); ++ if ( !rc ) ++ rc = get_page_type_preemptible(cr3_page, ++ !compat ? 
PGT_root_page_table ++ : PGT_l3_page_table); ++ if ( rc == -EINTR ) ++ rc = -EAGAIN; ++ } ++ if ( rc ) ++ /* handled below */; ++ else if ( !compat ) ++ { + v->arch.guest_table = pagetable_from_page(cr3_page); + #ifdef __x86_64__ + if ( c.nat->ctrlreg[1] ) +@@ -980,56 +1003,44 @@ int arch_set_info_guest( + cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]); + cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC); + +- if ( !cr3_page || +- (!paging_mode_refcounts(d) +- && !get_page_type(cr3_page, PGT_base_page_table)) ) ++ if ( !cr3_page ) ++ rc = -EINVAL; ++ else if ( !paging_mode_refcounts(d) ) + { +- if (cr3_page) +- put_page(cr3_page); +- cr3_page = pagetable_get_page(v->arch.guest_table); +- v->arch.guest_table = pagetable_null(); +- if ( paging_mode_refcounts(d) ) +- put_page(cr3_page); +- else +- put_page_and_type(cr3_page); +- destroy_gdt(v); +- return -EINVAL; ++ rc = get_page_type_preemptible(cr3_page, PGT_root_page_table); ++ switch ( rc ) ++ { ++ case -EINTR: ++ rc = -EAGAIN; ++ case -EAGAIN: ++ v->arch.old_guest_table = ++ pagetable_get_page(v->arch.guest_table); ++ v->arch.guest_table = pagetable_null(); ++ break; ++ } + } +- +- v->arch.guest_table_user = pagetable_from_page(cr3_page); +- } +- else if ( !(flags & VGCF_in_kernel) ) +- { +- destroy_gdt(v); +- return -EINVAL; ++ if ( !rc ) ++ v->arch.guest_table_user = pagetable_from_page(cr3_page); + } + } + else + { + l4_pgentry_t *l4tab; + +- cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]); +- cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC); +- +- if ( !cr3_page) +- { +- destroy_gdt(v); +- return -EINVAL; +- } +- +- if (!paging_mode_refcounts(d) +- && !get_page_type(cr3_page, PGT_l3_page_table) ) +- { +- put_page(cr3_page); +- destroy_gdt(v); +- return -EINVAL; +- } +- + l4tab = __va(pagetable_get_paddr(v->arch.guest_table)); + *l4tab = l4e_from_pfn(page_to_mfn(cr3_page), + _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED); + #endif + } ++ if ( rc ) ++ { ++ if ( cr3_page ) ++ put_page(cr3_page); ++ destroy_gdt(v); ++ return rc; ++ } ++ ++ clear_bit(_VPF_in_reset, &v->pause_flags); + + if ( v->vcpu_id == 0 ) + update_domain_wallclock_time(d); +--- a/xen/common/compat/domain.c ++++ b/xen/common/compat/domain.c +@@ -50,6 +50,10 @@ int compat_vcpu_op(int cmd, int vcpuid, + rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, cmp_ctxt); + domain_unlock(d); + ++ if ( rc == -EAGAIN ) ++ rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih", ++ cmd, vcpuid, arg); ++ + xfree(cmp_ctxt); + break; + } +--- a/xen/common/domain.c ++++ b/xen/common/domain.c +@@ -849,6 +849,11 @@ long do_vcpu_op(int cmd, int vcpuid, XEN + domain_unlock(d); + + free_vcpu_guest_context(ctxt); ++ ++ if ( rc == -EAGAIN ) ++ rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih", ++ cmd, vcpuid, arg); ++ + break; + + case VCPUOP_up: { +--- a/xen/common/domctl.c ++++ b/xen/common/domctl.c +@@ -338,6 +338,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc + domain_pause(d); + ret = arch_set_info_guest(v, c); + domain_unpause(d); ++ ++ if ( ret == -EAGAIN ) ++ ret = hypercall_create_continuation( ++ __HYPERVISOR_domctl, "h", u_domctl); + } + + svc_out: diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_6.patch b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_6.patch new file mode 100644 index 000000000000..a18f3ddec493 --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_6.patch @@ -0,0 +1,127 @@ +x86: make page table unpinning preemptible + +... as it may take significant amounts of time. 
+ +Since we can't re-invoke the operation in a second attempt, the +continuation logic must be slightly tweaked so that we make sure +do_mmuext_op() gets run one more time even when the preempted unpin +operation was the last one in a batch. + +This is part of CVE-2013-1918 / XSA-45. + +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Acked-by: Tim Deegan <tim@xen.org> + +--- a/xen/arch/x86/mm.c ++++ b/xen/arch/x86/mm.c +@@ -3123,6 +3123,14 @@ long do_mmuext_op( + return rc; + } + ++ if ( unlikely(count == MMU_UPDATE_PREEMPTED) && ++ likely(guest_handle_is_null(uops)) ) ++ { ++ /* See the curr->arch.old_guest_table related ++ * hypercall_create_continuation() below. */ ++ return (int)foreigndom; ++ } ++ + if ( unlikely(count & MMU_UPDATE_PREEMPTED) ) + { + count &= ~MMU_UPDATE_PREEMPTED; +@@ -3146,7 +3154,7 @@ long do_mmuext_op( + + for ( i = 0; i < count; i++ ) + { +- if ( hypercall_preempt_check() ) ++ if ( curr->arch.old_guest_table || hypercall_preempt_check() ) + { + rc = -EAGAIN; + break; +@@ -3266,7 +3274,17 @@ long do_mmuext_op( + break; + } + +- put_page_and_type(page); ++ switch ( rc = put_page_and_type_preemptible(page, 1) ) ++ { ++ case -EINTR: ++ case -EAGAIN: ++ curr->arch.old_guest_table = page; ++ rc = 0; ++ break; ++ default: ++ BUG_ON(rc); ++ break; ++ } + put_page(page); + + /* A page is dirtied when its pin status is cleared. */ +@@ -3587,9 +3605,27 @@ long do_mmuext_op( + } + + if ( rc == -EAGAIN ) ++ { ++ ASSERT(i < count); + rc = hypercall_create_continuation( + __HYPERVISOR_mmuext_op, "hihi", + uops, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom); ++ } ++ else if ( curr->arch.old_guest_table ) ++ { ++ XEN_GUEST_HANDLE(void) null; ++ ++ ASSERT(rc || i == count); ++ set_xen_guest_handle(null, NULL); ++ /* ++ * In order to have a way to communicate the final return value to ++ * our continuation, we pass this in place of "foreigndom", building ++ * on the fact that this argument isn't needed anymore. 
++ */ ++ rc = hypercall_create_continuation( ++ __HYPERVISOR_mmuext_op, "hihi", null, ++ MMU_UPDATE_PREEMPTED, null, rc); ++ } + + put_pg_owner(pg_owner); + +--- a/xen/arch/x86/x86_64/compat/mm.c ++++ b/xen/arch/x86/x86_64/compat/mm.c +@@ -268,6 +268,13 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm + int rc = 0; + XEN_GUEST_HANDLE(mmuext_op_t) nat_ops; + ++ if ( unlikely(count == MMU_UPDATE_PREEMPTED) && ++ likely(guest_handle_is_null(cmp_uops)) ) ++ { ++ set_xen_guest_handle(nat_ops, NULL); ++ return do_mmuext_op(nat_ops, count, pdone, foreigndom); ++ } ++ + preempt_mask = count & MMU_UPDATE_PREEMPTED; + count ^= preempt_mask; + +@@ -370,12 +377,18 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm + guest_handle_add_offset(nat_ops, i - left); + guest_handle_subtract_offset(cmp_uops, left); + left = 1; +- BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, cmp_uops)); +- BUG_ON(left != arg1); +- if (!test_bit(_MCSF_in_multicall, &mcs->flags)) +- regs->_ecx += count - i; ++ if ( arg1 != MMU_UPDATE_PREEMPTED ) ++ { ++ BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, ++ cmp_uops)); ++ if ( !test_bit(_MCSF_in_multicall, &mcs->flags) ) ++ regs->_ecx += count - i; ++ else ++ mcs->compat_call.args[1] += count - i; ++ } + else +- mcs->compat_call.args[1] += count - i; ++ BUG_ON(hypercall_xlat_continuation(&left, 0)); ++ BUG_ON(left != arg1); + } + else + BUG_ON(err > 0); diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_7.patch b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_7.patch new file mode 100644 index 000000000000..0b7ce18d09cb --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1918-XSA-45_7.patch @@ -0,0 +1,255 @@ +x86: make page table handling error paths preemptible + +... as they may take significant amounts of time. + +This requires cloning the tweaked continuation logic from +do_mmuext_op() to do_mmu_update(). + +Note that in mod_l[34]_entry() a negative "preemptible" value gets +passed to put_page_from_l[34]e() now, telling the callee to store the +respective page in current->arch.old_guest_table (for a hypercall +continuation to pick up), rather than carrying out the put right away. +This is going to be made a little more explicit by a subsequent cleanup +patch. + +This is part of CVE-2013-1918 / XSA-45. 
+ +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Acked-by: Tim Deegan <tim@xen.org> + +--- a/xen/arch/x86/mm.c ++++ b/xen/arch/x86/mm.c +@@ -1241,7 +1241,16 @@ static int put_page_from_l3e(l3_pgentry_ + #endif + + if ( unlikely(partial > 0) ) ++ { ++ ASSERT(preemptible >= 0); + return __put_page_type(l3e_get_page(l3e), preemptible); ++ } ++ ++ if ( preemptible < 0 ) ++ { ++ current->arch.old_guest_table = l3e_get_page(l3e); ++ return 0; ++ } + + return put_page_and_type_preemptible(l3e_get_page(l3e), preemptible); + } +@@ -1254,7 +1263,17 @@ static int put_page_from_l4e(l4_pgentry_ + (l4e_get_pfn(l4e) != pfn) ) + { + if ( unlikely(partial > 0) ) ++ { ++ ASSERT(preemptible >= 0); + return __put_page_type(l4e_get_page(l4e), preemptible); ++ } ++ ++ if ( preemptible < 0 ) ++ { ++ current->arch.old_guest_table = l4e_get_page(l4e); ++ return 0; ++ } ++ + return put_page_and_type_preemptible(l4e_get_page(l4e), preemptible); + } + return 1; +@@ -1549,12 +1568,17 @@ static int alloc_l3_table(struct page_in + if ( rc < 0 && rc != -EAGAIN && rc != -EINTR ) + { + MEM_LOG("Failure in alloc_l3_table: entry %d", i); ++ if ( i ) ++ { ++ page->nr_validated_ptes = i; ++ page->partial_pte = 0; ++ current->arch.old_guest_table = page; ++ } + while ( i-- > 0 ) + { + if ( !is_guest_l3_slot(i) ) + continue; + unadjust_guest_l3e(pl3e[i], d); +- put_page_from_l3e(pl3e[i], pfn, 0, 0); + } + } + +@@ -1584,22 +1608,24 @@ static int alloc_l4_table(struct page_in + page->nr_validated_ptes = i; + page->partial_pte = partial ?: 1; + } +- else if ( rc == -EINTR ) ++ else if ( rc < 0 ) + { ++ if ( rc != -EINTR ) ++ MEM_LOG("Failure in alloc_l4_table: entry %d", i); + if ( i ) + { + page->nr_validated_ptes = i; + page->partial_pte = 0; +- rc = -EAGAIN; ++ if ( rc == -EINTR ) ++ rc = -EAGAIN; ++ else ++ { ++ if ( current->arch.old_guest_table ) ++ page->nr_validated_ptes++; ++ current->arch.old_guest_table = page; ++ } + } + } +- else if ( rc < 0 ) +- { +- MEM_LOG("Failure in alloc_l4_table: entry %d", i); +- while ( i-- > 0 ) +- if ( is_guest_l4_slot(d, i) ) +- put_page_from_l4e(pl4e[i], pfn, 0, 0); +- } + if ( rc < 0 ) + return rc; + +@@ -2047,7 +2073,7 @@ static int mod_l3_entry(l3_pgentry_t *pl + pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e); + } + +- put_page_from_l3e(ol3e, pfn, 0, 0); ++ put_page_from_l3e(ol3e, pfn, 0, -preemptible); + return rc; + } + +@@ -2110,7 +2136,7 @@ static int mod_l4_entry(l4_pgentry_t *pl + return -EFAULT; + } + +- put_page_from_l4e(ol4e, pfn, 0, 0); ++ put_page_from_l4e(ol4e, pfn, 0, -preemptible); + return rc; + } + +@@ -2268,7 +2294,15 @@ static int alloc_page_type(struct page_i + PRtype_info ": caf=%08lx taf=%" PRtype_info, + page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)), + type, page->count_info, page->u.inuse.type_info); +- page->u.inuse.type_info = 0; ++ if ( page != current->arch.old_guest_table ) ++ page->u.inuse.type_info = 0; ++ else ++ { ++ ASSERT((page->u.inuse.type_info & ++ (PGT_count_mask | PGT_validated)) == 1); ++ get_page_light(page); ++ page->u.inuse.type_info |= PGT_partial; ++ } + } + else + { +@@ -3218,21 +3252,17 @@ long do_mmuext_op( + } + + if ( (rc = xsm_memory_pin_page(d, pg_owner, page)) != 0 ) +- { +- put_page_and_type(page); + okay = 0; +- break; +- } +- +- if ( unlikely(test_and_set_bit(_PGT_pinned, +- &page->u.inuse.type_info)) ) ++ else if ( unlikely(test_and_set_bit(_PGT_pinned, ++ &page->u.inuse.type_info)) ) + { + MEM_LOG("Mfn %lx already pinned", page_to_mfn(page)); +- put_page_and_type(page); + okay = 0; +- break; + } + ++ if ( 
unlikely(!okay) ) ++ goto pin_drop; ++ + /* A page is dirtied when its pin status is set. */ + paging_mark_dirty(pg_owner, page_to_mfn(page)); + +@@ -3246,7 +3276,13 @@ long do_mmuext_op( + &page->u.inuse.type_info)); + spin_unlock(&pg_owner->page_alloc_lock); + if ( drop_ref ) +- put_page_and_type(page); ++ { ++ pin_drop: ++ if ( type == PGT_l1_page_table ) ++ put_page_and_type(page); ++ else ++ curr->arch.old_guest_table = page; ++ } + } + + break; +@@ -3652,11 +3688,28 @@ long do_mmu_update( + void *va; + unsigned long gpfn, gmfn, mfn; + struct page_info *page; +- int rc = 0, i = 0; +- unsigned int cmd, done = 0, pt_dom; +- struct vcpu *v = current; ++ unsigned int cmd, i = 0, done = 0, pt_dom; ++ struct vcpu *curr = current, *v = curr; + struct domain *d = v->domain, *pt_owner = d, *pg_owner; + struct domain_mmap_cache mapcache; ++ int rc = put_old_guest_table(curr); ++ ++ if ( unlikely(rc) ) ++ { ++ if ( likely(rc == -EAGAIN) ) ++ rc = hypercall_create_continuation( ++ __HYPERVISOR_mmu_update, "hihi", ureqs, count, pdone, ++ foreigndom); ++ return rc; ++ } ++ ++ if ( unlikely(count == MMU_UPDATE_PREEMPTED) && ++ likely(guest_handle_is_null(ureqs)) ) ++ { ++ /* See the curr->arch.old_guest_table related ++ * hypercall_create_continuation() below. */ ++ return (int)foreigndom; ++ } + + if ( unlikely(count & MMU_UPDATE_PREEMPTED) ) + { +@@ -3705,7 +3758,7 @@ long do_mmu_update( + + for ( i = 0; i < count; i++ ) + { +- if ( hypercall_preempt_check() ) ++ if ( curr->arch.old_guest_table || hypercall_preempt_check() ) + { + rc = -EAGAIN; + break; +@@ -3886,9 +3939,27 @@ long do_mmu_update( + } + + if ( rc == -EAGAIN ) ++ { ++ ASSERT(i < count); + rc = hypercall_create_continuation( + __HYPERVISOR_mmu_update, "hihi", + ureqs, (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom); ++ } ++ else if ( curr->arch.old_guest_table ) ++ { ++ XEN_GUEST_HANDLE(void) null; ++ ++ ASSERT(rc || i == count); ++ set_xen_guest_handle(null, NULL); ++ /* ++ * In order to have a way to communicate the final return value to ++ * our continuation, we pass this in place of "foreigndom", building ++ * on the fact that this argument isn't needed anymore. ++ */ ++ rc = hypercall_create_continuation( ++ __HYPERVISOR_mmu_update, "hihi", null, ++ MMU_UPDATE_PREEMPTED, null, rc); ++ } + + put_pg_owner(pg_owner); + diff --git a/app-emulation/xen/files/xen-4-CVE-2013-1920-XSA-47.patch b/app-emulation/xen/files/xen-4-CVE-2013-1920-XSA-47.patch new file mode 100644 index 000000000000..7ebb8c8a3178 --- /dev/null +++ b/app-emulation/xen/files/xen-4-CVE-2013-1920-XSA-47.patch @@ -0,0 +1,31 @@ +defer event channel bucket pointer store until after XSM checks + +Otherwise a dangling pointer can be left, which would cause subsequent +memory corruption as soon as the space got re-allocated for some other +purpose. + +This is CVE-2013-1920 / XSA-47. 
+ +Reported-by: Wei Liu <wei.liu2@citrix.com> +Signed-off-by: Jan Beulich <jbeulich@suse.com> +Reviewed-by: Tim Deegan <tim@xen.org> + +--- a/xen/common/event_channel.c ++++ b/xen/common/event_channel.c +@@ -140,7 +140,6 @@ static int get_free_port(struct domain * + chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET); + if ( unlikely(chn == NULL) ) + return -ENOMEM; +- bucket_from_port(d, port) = chn; + + for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ ) + { +@@ -153,6 +152,8 @@ static int get_free_port(struct domain * + } + } + ++ bucket_from_port(d, port) = chn; ++ + return port; + } + diff --git a/app-emulation/xen/xen-4.2.1-r3.ebuild b/app-emulation/xen/xen-4.2.1-r3.ebuild new file mode 100644 index 000000000000..7e25f172ea7e --- /dev/null +++ b/app-emulation/xen/xen-4.2.1-r3.ebuild @@ -0,0 +1,136 @@ +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: /var/cvsroot/gentoo-x86/app-emulation/xen/xen-4.2.1-r3.ebuild,v 1.1 2013/05/15 18:03:57 idella4 Exp $ + +EAPI=5 + +PYTHON_COMPAT=( python{2_6,2_7} ) + +if [[ $PV == *9999 ]]; then + KEYWORDS="" + REPO="xen-unstable.hg" + EHG_REPO_URI="http://xenbits.xensource.com/${REPO}" + S="${WORKDIR}/${REPO}" + live_eclass="mercurial" +else + KEYWORDS="~amd64 ~x86" + SRC_URI="http://bits.xensource.com/oss-xen/release/${PV}/xen-${PV}.tar.gz" +fi + +inherit mount-boot flag-o-matic python-single-r1 toolchain-funcs ${live_eclass} + +DESCRIPTION="The Xen virtual machine monitor" +HOMEPAGE="http://xen.org/" +LICENSE="GPL-2" +SLOT="0" +IUSE="custom-cflags debug efi flask pae xsm" + +DEPEND="efi? ( >=sys-devel/binutils-2.22[multitarget] ) + !efi? ( >=sys-devel/binutils-2.22[-multitarget] )" +RDEPEND="" +PDEPEND="~app-emulation/xen-tools-${PV}" + +RESTRICT="test" + +# Approved by QA team in bug #144032 +QA_WX_LOAD="boot/xen-syms-${PV}" + +REQUIRED_USE=" + flask? ( xsm ) + " + +pkg_setup() { + python-single-r1_pkg_setup + if [[ -z ${XEN_TARGET_ARCH} ]]; then + if use x86 && use amd64; then + die "Confusion! Both x86 and amd64 are set in your use flags!" + elif use x86; then + export XEN_TARGET_ARCH="x86_32" + elif use amd64; then + export XEN_TARGET_ARCH="x86_64" + else + die "Unsupported architecture!" 
+ fi + fi + + if use flask ; then + export "XSM_ENABLE=y" + export "FLASK_ENABLE=y" + elif use xsm ; then + export "XSM_ENABLE=y" + fi +} + +src_prepare() { + # Drop .config and fix gcc-4.6 + epatch "${FILESDIR}"/${PN/-pvgrub/}-4-fix_dotconfig-gcc.patch + + if use efi; then + epatch "${FILESDIR}"/${PN}-4.2-efi.patch + export EFI_VENDOR="gentoo" + export EFI_MOUNTPOINT="boot" + fi + + # if the user *really* wants to use their own custom-cflags, let them + if use custom-cflags; then + einfo "User wants their own CFLAGS - removing defaults" + # try and remove all the default custom-cflags + find "${S}" -name Makefile -o -name Rules.mk -o -name Config.mk -exec sed \ + -e 's/CFLAGS\(.*\)=\(.*\)-O3\(.*\)/CFLAGS\1=\2\3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-march=i686\(.*\)/CFLAGS\1=\2\3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-fomit-frame-pointer\(.*\)/CFLAGS\1=\2\3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-g3*\s\(.*\)/CFLAGS\1=\2 \3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-O2\(.*\)/CFLAGS\1=\2\3/' \ + -i {} \; || die "failed to re-set custom-cflags" + fi + + # not strictly necessary to fix this + sed -i 's/, "-Werror"//' "${S}/tools/python/setup.py" || die "failed to re-set setup.py" + + #Security patches + epatch "${FILESDIR}"/${PN}-4-CVE-2012-5634-XSA-33.patch \ + "${FILESDIR}"/${PN}-4-CVE-2013-0151-XSA-34_35.patch \ + "${FILESDIR}"/${PN}-4-CVE-2013-0154-XSA-37.patch \ + "${FILESDIR}"/${PN}-4-CVE-2013-0153-XSA-36.patch \ + "${FILESDIR}"/${PN}-4-CVE-2013-1917-XSA-44.patch \ + "${FILESDIR}"/${PN}-4-CVE-2013-1918-XSA-45_[1-7].patch +} + +src_configure() { + use debug && myopt="${myopt} debug=y" + use pae && myopt="${myopt} pae=y" + + if use custom-cflags; then + filter-flags -fPIE -fstack-protector + replace-flags -O3 -O2 + else + unset CFLAGS + fi +} + +src_compile() { + # Send raw LDFLAGS so that --as-needed works + emake CC="$(tc-getCC)" LDFLAGS="$(raw-ldflags)" LD="$(tc-getLD)" -C xen ${myopt} +} + +src_install() { + local myopt + use debug && myopt="${myopt} debug=y" + use pae && myopt="${myopt} pae=y" + + # The 'make install' doesn't 'mkdir -p' the subdirs + if use efi; then + mkdir -p "${D}"${EFI_MOUNTPOINT}/efi/${EFI_VENDOR} || die + fi + + emake LDFLAGS="$(raw-ldflags)" DESTDIR="${D}" -C xen ${myopt} install +} + +pkg_postinst() { + elog "Official Xen Guide and the unoffical wiki page:" + elog " http://www.gentoo.org/doc/en/xen-guide.xml" + elog " http://en.gentoo-wiki.com/wiki/Xen/" + + use pae && ewarn "This is a PAE build of Xen. It will *only* boot PAE kernels!" 
+ use efi && einfo "The efi executable is installed in boot/efi/gentoo" +} diff --git a/app-emulation/xen/xen-4.2.2.ebuild b/app-emulation/xen/xen-4.2.2.ebuild new file mode 100644 index 000000000000..b8dcda756a27 --- /dev/null +++ b/app-emulation/xen/xen-4.2.2.ebuild @@ -0,0 +1,131 @@ +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: /var/cvsroot/gentoo-x86/app-emulation/xen/xen-4.2.2.ebuild,v 1.1 2013/05/15 18:03:57 idella4 Exp $ + +EAPI=5 + +PYTHON_COMPAT=( python{2_6,2_7} ) + +if [[ $PV == *9999 ]]; then + KEYWORDS="" + REPO="xen-unstable.hg" + EHG_REPO_URI="http://xenbits.xensource.com/${REPO}" + S="${WORKDIR}/${REPO}" + live_eclass="mercurial" +else + KEYWORDS="~amd64 ~x86" + SRC_URI="http://bits.xensource.com/oss-xen/release/${PV}/xen-${PV}.tar.gz" +fi + +inherit mount-boot flag-o-matic python-single-r1 toolchain-funcs ${live_eclass} + +DESCRIPTION="The Xen virtual machine monitor" +HOMEPAGE="http://xen.org/" +LICENSE="GPL-2" +SLOT="0" +IUSE="custom-cflags debug efi flask pae xsm" + +DEPEND="efi? ( >=sys-devel/binutils-2.22[multitarget] ) + !efi? ( >=sys-devel/binutils-2.22[-multitarget] )" +RDEPEND="" +PDEPEND="~app-emulation/xen-tools-${PV}" + +RESTRICT="test" + +# Approved by QA team in bug #144032 +QA_WX_LOAD="boot/xen-syms-${PV}" + +REQUIRED_USE=" + flask? ( xsm ) + " + +pkg_setup() { + python-single-r1_pkg_setup + if [[ -z ${XEN_TARGET_ARCH} ]]; then + if use x86 && use amd64; then + die "Confusion! Both x86 and amd64 are set in your use flags!" + elif use x86; then + export XEN_TARGET_ARCH="x86_32" + elif use amd64; then + export XEN_TARGET_ARCH="x86_64" + else + die "Unsupported architecture!" + fi + fi + + if use flask ; then + export "XSM_ENABLE=y" + export "FLASK_ENABLE=y" + elif use xsm ; then + export "XSM_ENABLE=y" + fi +} + +src_prepare() { + # Drop .config and fix gcc-4.6 + epatch "${FILESDIR}"/${PN/-pvgrub/}-4-fix_dotconfig-gcc.patch + + if use efi; then + epatch "${FILESDIR}"/${PN}-4.2-efi.patch + export EFI_VENDOR="gentoo" + export EFI_MOUNTPOINT="boot" + fi + + # if the user *really* wants to use their own custom-cflags, let them + if use custom-cflags; then + einfo "User wants their own CFLAGS - removing defaults" + # try and remove all the default custom-cflags + find "${S}" -name Makefile -o -name Rules.mk -o -name Config.mk -exec sed \ + -e 's/CFLAGS\(.*\)=\(.*\)-O3\(.*\)/CFLAGS\1=\2\3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-march=i686\(.*\)/CFLAGS\1=\2\3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-fomit-frame-pointer\(.*\)/CFLAGS\1=\2\3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-g3*\s\(.*\)/CFLAGS\1=\2 \3/' \ + -e 's/CFLAGS\(.*\)=\(.*\)-O2\(.*\)/CFLAGS\1=\2\3/' \ + -i {} \; || die "failed to re-set custom-cflags" + fi + + # not strictly necessary to fix this + sed -i 's/, "-Werror"//' "${S}/tools/python/setup.py" || die "failed to re-set setup.py" + + #Security patches + epatch "${FILESDIR}"/${PN}-4-CVE-2013-1918-XSA-45_[1-7].patch +} + +src_configure() { + use debug && myopt="${myopt} debug=y" + use pae && myopt="${myopt} pae=y" + + if use custom-cflags; then + filter-flags -fPIE -fstack-protector + replace-flags -O3 -O2 + else + unset CFLAGS + fi +} + +src_compile() { + # Send raw LDFLAGS so that --as-needed works + emake CC="$(tc-getCC)" LDFLAGS="$(raw-ldflags)" LD="$(tc-getLD)" -C xen ${myopt} +} + +src_install() { + local myopt + use debug && myopt="${myopt} debug=y" + use pae && myopt="${myopt} pae=y" + + # The 'make install' doesn't 'mkdir -p' the subdirs + if use efi; then + mkdir -p 
"${D}"${EFI_MOUNTPOINT}/efi/${EFI_VENDOR} || die + fi + + emake LDFLAGS="$(raw-ldflags)" DESTDIR="${D}" -C xen ${myopt} install +} + +pkg_postinst() { + elog "Official Xen Guide and the unoffical wiki page:" + elog " http://www.gentoo.org/doc/en/xen-guide.xml" + elog " http://en.gentoo-wiki.com/wiki/Xen/" + + use pae && ewarn "This is a PAE build of Xen. It will *only* boot PAE kernels!" + use efi && einfo "The efi executable is installed in boot/efi/gentoo" +} |