author | 2014-12-22 21:40:19 -0500
---|---
committer | 2014-12-22 21:40:19 -0500
commit | 3e8f0d6dc3f29abd2b355664d40489e025ba86e0 (patch)
tree | a914991c1cd00556fd13b6f6c48eca3094972c76
parent | Grsec/PaX: 3.0-{3.2.65,3.14.27,3.17.7}-201412170700 (diff)
Grsec/PaX: 3.0-{3.2.65,3.14.27,3.17.7}-201412211910
-rw-r--r-- | 3.14.27/0000_README | 2
-rw-r--r-- | 3.14.27/4420_grsecurity-3.0-3.14.27-201412211908.patch (renamed from 3.14.27/4420_grsecurity-3.0-3.14.27-201412170659.patch) | 358
-rw-r--r-- | 3.17.7/0000_README | 2
-rw-r--r-- | 3.17.7/4420_grsecurity-3.0-3.17.7-201412211910.patch (renamed from 3.17.7/4420_grsecurity-3.0-3.17.7-201412170700.patch) | 668
-rw-r--r-- | 3.2.65/0000_README | 2
-rw-r--r-- | 3.2.65/4420_grsecurity-3.0-3.2.65-201412211905.patch (renamed from 3.2.65/4420_grsecurity-3.0-3.2.65-201412170654.patch) | 297
6 files changed, 1241 insertions, 88 deletions
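
Most of the added lines below are upstream stable and security fixes folded into the three grsecurity patches. One representative change is the new tls_desc_okay() check in arch/x86/kernel/tls.c, visible in the hunks further down, which rejects TLS descriptors that espfix cannot protect. The following standalone sketch restates that validation outside the kernel so its intent is easier to follow; the struct layout and the desc_empty() emptiness test are simplified assumptions here, not the kernel's actual user_desc and LDT_empty() definitions.

```c
/*
 * Minimal sketch of the TLS descriptor validation added by this update
 * (see the arch/x86/kernel/tls.c hunks below). The struct layout and the
 * emptiness test are simplified assumptions, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct user_desc_sketch {
	unsigned int entry_number;
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

/* Stand-in for the kernel's LDT_empty(): an all-clear descriptor is allowed. */
static bool desc_empty(const struct user_desc_sketch *info)
{
	return !info->base_addr && !info->limit && !info->seg_32bit &&
	       !info->contents && !info->seg_not_present;
}

static bool tls_desc_okay(const struct user_desc_sketch *info)
{
	if (desc_empty(info))
		return true;

	/*
	 * espfix is required for 16-bit data segments, but espfix only
	 * works for LDT segments, so 16-bit TLS entries are rejected.
	 */
	if (!info->seg_32bit)
		return false;

	/* Only data segments (contents 0 or 1) are allowed in the TLS array. */
	if (info->contents > 1)
		return false;

	/*
	 * Non-present DPL-3 segments are an attack surface; userspace must
	 * delete an entry outright instead of marking it not-present.
	 */
	if (info->seg_not_present)
		return false;

	return true;
}

int main(void)
{
	/* A 16-bit data segment: rejected by the new check. */
	struct user_desc_sketch sixteen_bit = {
		.base_addr = 0x1000, .limit = 0xffff, .seg_32bit = 0,
	};

	printf("16-bit TLS entry accepted? %s\n",
	       tls_desc_okay(&sixteen_bit) ? "yes" : "no");
	return 0;
}
```

The same predicate is applied both in do_set_thread_area() and, per entry, in regset_tls_set(), so every path that installs a TLS descriptor is covered.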
diff --git a/3.14.27/0000_README b/3.14.27/0000_README index 373bdca..21a4163 100644 --- a/3.14.27/0000_README +++ b/3.14.27/0000_README @@ -2,7 +2,7 @@ README ----------------------------------------------------------------------------- Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 4420_grsecurity-3.0-3.14.27-201412170659.patch +Patch: 4420_grsecurity-3.0-3.14.27-201412211908.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.14.27/4420_grsecurity-3.0-3.14.27-201412170659.patch b/3.14.27/4420_grsecurity-3.0-3.14.27-201412211908.patch index f606d8d..9c6d797 100644 --- a/3.14.27/4420_grsecurity-3.0-3.14.27-201412170659.patch +++ b/3.14.27/4420_grsecurity-3.0-3.14.27-201412211908.patch @@ -12350,7 +12350,7 @@ index ad8f795..2c7eec6 100644 /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 98aa930..d2cef74 100644 +index 98aa930..9cfc3c7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -22,6 +22,7 @@ config X86_64 @@ -12387,7 +12387,15 @@ index 98aa930..d2cef74 100644 ---help--- Say Y here to enable options for running Linux under various hyper- visors. This option enables basic hypervisor detection and platform -@@ -1129,7 +1131,7 @@ choice +@@ -973,6 +975,7 @@ config VM86 + + config X86_16BIT + bool "Enable support for 16-bit segments" if EXPERT ++ depends on !GRKERNSEC + default y + ---help--- + This option is required by programs like Wine to run 16-bit +@@ -1129,7 +1132,7 @@ choice config NOHIGHMEM bool "off" @@ -12396,7 +12404,7 @@ index 98aa930..d2cef74 100644 ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 -@@ -1166,7 +1168,7 @@ config NOHIGHMEM +@@ -1166,7 +1169,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" @@ -12405,7 +12413,7 @@ index 98aa930..d2cef74 100644 ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. -@@ -1219,7 +1221,7 @@ config PAGE_OFFSET +@@ -1219,7 +1222,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G @@ -12414,7 +12422,7 @@ index 98aa930..d2cef74 100644 default 0x40000000 if VMSPLIT_1G default 0xC0000000 depends on X86_32 -@@ -1624,6 +1626,7 @@ source kernel/Kconfig.hz +@@ -1624,6 +1627,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" @@ -12422,7 +12430,7 @@ index 98aa930..d2cef74 100644 ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot -@@ -1775,7 +1778,9 @@ config X86_NEED_RELOCS +@@ -1775,7 +1779,9 @@ config X86_NEED_RELOCS config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" @@ -12433,7 +12441,7 @@ index 98aa930..d2cef74 100644 range 0x2000 0x1000000 if X86_32 range 0x200000 0x1000000 if X86_64 ---help--- -@@ -1855,9 +1860,10 @@ config DEBUG_HOTPLUG_CPU0 +@@ -1855,9 +1861,10 @@ config DEBUG_HOTPLUG_CPU0 If unsure, say N. 
config COMPAT_VDSO @@ -16959,6 +16967,18 @@ index 9454c16..e4100e3 100644 #define flush_insn_slot(p) do { } while (0) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index e9dc029..468a823 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -55,6 +55,7 @@ + #define CR3_PCID_ENABLED_RESERVED_BITS 0xFFFFFF0000000000ULL + #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ + 0xFFFFFF0000000000ULL) ++#define CR3_PCID_INVD (1UL << 63) + #define CR4_RESERVED_BITS \ + (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ + | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 4ad6560..75c7bdd 100644 --- a/arch/x86/include/asm/local.h @@ -20502,6 +20522,24 @@ index bbae024..e1528f9 100644 #define BIOS_END 0x00100000 #define BIOS_ROM_BASE 0xffe00000 +diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h +index 46727eb..6e1aaf7 100644 +--- a/arch/x86/include/uapi/asm/ldt.h ++++ b/arch/x86/include/uapi/asm/ldt.h +@@ -28,6 +28,13 @@ struct user_desc { + unsigned int seg_not_present:1; + unsigned int useable:1; + #ifdef __x86_64__ ++ /* ++ * Because this bit is not present in 32-bit user code, user ++ * programs can pass uninitialized values here. Therefore, in ++ * any context in which a user_desc comes from a 32-bit program, ++ * the kernel must act as though lm == 0, regardless of the ++ * actual value. ++ */ + unsigned int lm:1; + #endif + }; diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h index 7b0a55a..ad115bf 100644 --- a/arch/x86/include/uapi/asm/ptrace-abi.h @@ -24296,10 +24334,42 @@ index 02553d6..54e9bd5 100644 /* * End of kprobes section diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c -index 94d857f..bf1f0bf 100644 +index 94d857f..5bce89c 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c -@@ -197,7 +197,7 @@ void init_espfix_ap(void) +@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex); + #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE) + static void *espfix_pages[ESPFIX_MAX_PAGES]; + +-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD] +- __aligned(PAGE_SIZE); ++static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata; + + static unsigned int page_random, slot_random; + +@@ -122,14 +121,17 @@ static void init_espfix_random(void) + void __init init_espfix_bsp(void) + { + pgd_t *pgd_p; +- pteval_t ptemask; +- +- ptemask = __supported_pte_mask; ++ unsigned long index = pgd_index(ESPFIX_BASE_ADDR); + + /* Install the espfix pud into the kernel page directory */ +- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; ++ pgd_p = &init_level4_pgt[index]; + pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1); ++ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1); ++#endif ++ + /* Randomize the locations */ + init_espfix_random(); + +@@ -197,7 +199,7 @@ void init_espfix_ap(void) set_pte(&pte_p[n*PTE_STRIDE], pte); /* Job is done for this CPU and any CPU which shares this page */ @@ -25793,6 +25863,38 @@ index c2bedae..25e7ab60 100644 .attr = { .name = "data", .mode = S_IRUGO, +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index 713f1b3..0b1e1d5 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ 
-280,7 +280,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) + static void __init paravirt_ops_setup(void) + { + pv_info.name = "KVM"; +- pv_info.paravirt_enabled = 1; ++ ++ /* ++ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM ++ * guest kernel works like a bare metal kernel with additional ++ * features, and paravirt_enabled is about features that are ++ * missing. ++ */ ++ pv_info.paravirt_enabled = 0; + + if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) + pv_cpu_ops.io_delay = kvm_io_delay; +diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c +index e604109..c8e98cd 100644 +--- a/arch/x86/kernel/kvmclock.c ++++ b/arch/x86/kernel/kvmclock.c +@@ -263,7 +263,6 @@ void __init kvmclock_init(void) + #endif + kvm_get_preset_lpj(); + clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); +- pv_info.paravirt_enabled = 1; + pv_info.name = "KVM"; + + if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index c37886d..d851d32 100644 --- a/arch/x86/kernel/ldt.c @@ -26297,6 +26399,30 @@ index 1b10af8..45bfbec 100644 }; EXPORT_SYMBOL_GPL(pv_time_ops); +diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c +index a1da673..2c72d5b 100644 +--- a/arch/x86/kernel/paravirt_patch_64.c ++++ b/arch/x86/kernel/paravirt_patch_64.c +@@ -9,7 +9,9 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); + DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); + DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); + DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); ++#ifndef CONFIG_PAX_MEMORY_UDEREF + DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); ++#endif + DEF_NATIVE(pv_cpu_ops, clts, "clts"); + DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); + +@@ -57,7 +59,9 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, + PATCH_SITE(pv_mmu_ops, read_cr3); + PATCH_SITE(pv_mmu_ops, write_cr3); + PATCH_SITE(pv_cpu_ops, clts); ++#ifndef CONFIG_PAX_MEMORY_UDEREF + PATCH_SITE(pv_mmu_ops, flush_tlb_single); ++#endif + PATCH_SITE(pv_cpu_ops, wbinvd); + + patch_site: diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 299d493..2ccb0ee 100644 --- a/arch/x86/kernel/pci-calgary_64.c @@ -27733,10 +27859,58 @@ index 24d3c91..d06b473 100644 return pc; } diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c -index f7fec09..9991981 100644 +index f7fec09..d0f623f 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c -@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx, +@@ -27,6 +27,37 @@ static int get_free_idx(void) + return -ESRCH; + } + ++static bool tls_desc_okay(const struct user_desc *info) ++{ ++ if (LDT_empty(info)) ++ return true; ++ ++ /* ++ * espfix is required for 16-bit data segments, but espfix ++ * only works for LDT segments. ++ */ ++ if (!info->seg_32bit) ++ return false; ++ ++ /* Only allow data segments in the TLS array. */ ++ if (info->contents > 1) ++ return false; ++ ++ /* ++ * Non-present segments with DPL 3 present an interesting attack ++ * surface. The kernel should handle such segments correctly, ++ * but TLS is very difficult to protect in a sandbox, so prevent ++ * such segments from being created. ++ * ++ * If userspace needs to remove a TLS entry, it can still delete ++ * it outright. 
++ */ ++ if (info->seg_not_present) ++ return false; ++ ++ return true; ++} ++ + static void set_tls_desc(struct task_struct *p, int idx, + const struct user_desc *info, int n) + { +@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx, + if (copy_from_user(&info, u_info, sizeof(info))) + return -EFAULT; + ++ if (!tls_desc_okay(&info)) ++ return -EINVAL; ++ + if (idx == -1) + idx = info.entry_number; + +@@ -84,6 +118,11 @@ int do_set_thread_area(struct task_struct *p, int idx, if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; @@ -27748,7 +27922,15 @@ index f7fec09..9991981 100644 set_tls_desc(p, idx, &info, 1); return 0; -@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, +@@ -192,6 +231,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, + { + struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; + const struct user_desc *info; ++ int i; + + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || + (pos % sizeof(struct user_desc)) != 0 || +@@ -200,11 +240,15 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, if (kbuf) info = kbuf; @@ -27757,6 +27939,14 @@ index f7fec09..9991981 100644 return -EFAULT; else info = infobuf; + ++ for (i = 0; i < count / sizeof(struct user_desc); i++) ++ if (!tls_desc_okay(info + i)) ++ return -EINVAL; ++ + set_tls_desc(target, + GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), + info, count / sizeof(struct user_desc)); diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c index 1c113db..287b42e 100644 --- a/arch/x86/kernel/tracepoint.c @@ -28542,6 +28732,19 @@ index c697625..a032162 100644 return 0; out: +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 38d3751..e6fcffb 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -3436,7 +3436,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) + + ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); + if (efer & EFER_LMA) +- rsvd = CR3_L_MODE_RESERVED_BITS; ++ rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD; + else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE) + rsvd = CR3_PAE_RESERVED_BITS; + else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 453e5fb..214168f 100644 --- a/arch/x86/kvm/lapic.c @@ -28794,10 +28997,19 @@ index 0c90f4b..9fca4d7 100644 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index fab97ad..394306f 100644 +index fab97ad..bb69607 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -1806,8 +1806,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) +@@ -688,6 +688,8 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4); + + int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) + { ++ cr3 &= ~CR3_PCID_INVD; ++ + if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { + kvm_mmu_sync_roots(vcpu); + kvm_mmu_flush_tlb(vcpu); +@@ -1806,8 +1808,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); @@ -28808,7 +29020,7 @@ index fab97ad..394306f 100644 u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; -@@ -2718,6 +2718,8 @@ long kvm_arch_dev_ioctl(struct file *filp, +@@ -2718,6 +2720,8 @@ long kvm_arch_dev_ioctl(struct file *filp, if (n < msr_list.nmsrs) goto out; r = -EFAULT; @@ -28817,7 +29029,7 @@ index fab97ad..394306f 100644 if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; -@@ -5532,7 +5534,7 @@ static struct notifier_block pvclock_gtod_notifier = { +@@ -5532,7 +5536,7 @@ static struct notifier_block pvclock_gtod_notifier = { }; #endif @@ -63431,6 +63643,37 @@ index e846a32..bb06bd0 100644 put_cpu_var(last_ino); return res; } +diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c +index f488bba..bb63254 100644 +--- a/fs/isofs/rock.c ++++ b/fs/isofs/rock.c +@@ -30,6 +30,7 @@ struct rock_state { + int cont_size; + int cont_extent; + int cont_offset; ++ int cont_loops; + struct inode *inode; + }; + +@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode) + rs->inode = inode; + } + ++/* Maximum number of Rock Ridge continuation entries */ ++#define RR_MAX_CE_ENTRIES 32 ++ + /* + * Returns 0 if the caller should continue scanning, 1 if the scan must end + * and -ve on error. +@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs) + goto out; + } + ret = -EIO; ++ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES) ++ goto out; + bh = sb_bread(rs->inode->i_sb, rs->cont_extent); + if (bh) { + memcpy(rs->buffer, bh->b_data + rs->cont_offset, diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 4a6cf28..d3a29d3 100644 --- a/fs/jffs2/erase.c @@ -64282,10 +64525,19 @@ index 0dd72c8..34dd17d 100644 out: return len; diff --git a/fs/namespace.c b/fs/namespace.c -index d9bf3ef..93207ab 100644 +index d9bf3ef..359b08c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c -@@ -1371,6 +1371,9 @@ static int do_umount(struct mount *mnt, int flags) +@@ -1295,6 +1295,8 @@ void umount_tree(struct mount *mnt, int how) + } + if (last) { + last->mnt_hash.next = unmounted.first; ++ if (unmounted.first) ++ unmounted.first->pprev = &last->mnt_hash.next; + unmounted.first = tmp_list.first; + unmounted.first->pprev = &unmounted.first; + } +@@ -1371,6 +1373,9 @@ static int do_umount(struct mount *mnt, int flags) if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); @@ -64295,7 +64547,7 @@ index d9bf3ef..93207ab 100644 return retval; } -@@ -1393,6 +1396,9 @@ static int do_umount(struct mount *mnt, int flags) +@@ -1393,6 +1398,9 @@ static int do_umount(struct mount *mnt, int flags) } unlock_mount_hash(); namespace_unlock(); @@ -64305,7 +64557,7 @@ index d9bf3ef..93207ab 100644 return retval; } -@@ -1412,7 +1418,7 @@ static inline bool may_mount(void) +@@ -1412,7 +1420,7 @@ static inline bool may_mount(void) * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD */ @@ -64314,7 +64566,7 @@ index d9bf3ef..93207ab 100644 { struct path path; struct mount *mnt; -@@ -1454,7 +1460,7 @@ out: +@@ -1454,7 +1462,7 @@ out: /* * The 2.0 compatible umount. No flags. 
*/ @@ -64323,7 +64575,7 @@ index d9bf3ef..93207ab 100644 { return sys_umount(name, 0); } -@@ -2503,6 +2509,16 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2503,6 +2511,16 @@ long do_mount(const char *dev_name, const char *dir_name, MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); @@ -64340,7 +64592,7 @@ index d9bf3ef..93207ab 100644 if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); -@@ -2517,6 +2533,9 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2517,6 +2535,9 @@ long do_mount(const char *dev_name, const char *dir_name, dev_name, data_page); dput_out: path_put(&path); @@ -64350,7 +64602,7 @@ index d9bf3ef..93207ab 100644 return retval; } -@@ -2534,7 +2553,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) +@@ -2534,7 +2555,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) * number incrementing at 10Ghz will take 12,427 years to wrap which * is effectively never, so we can ignore the possibility. */ @@ -64359,7 +64611,7 @@ index d9bf3ef..93207ab 100644 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { -@@ -2549,7 +2568,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2549,7 +2570,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) kfree(new_ns); return ERR_PTR(ret); } @@ -64368,7 +64620,7 @@ index d9bf3ef..93207ab 100644 atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); -@@ -2559,7 +2578,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2559,7 +2580,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) return new_ns; } @@ -64377,7 +64629,7 @@ index d9bf3ef..93207ab 100644 struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; -@@ -2680,8 +2699,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) +@@ -2680,8 +2701,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) } EXPORT_SYMBOL(mount_subtree); @@ -64388,7 +64640,7 @@ index d9bf3ef..93207ab 100644 { int ret; char *kernel_type; -@@ -2794,6 +2813,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, +@@ -2794,6 +2815,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, if (error) goto out2; @@ -64400,7 +64652,7 @@ index d9bf3ef..93207ab 100644 get_fs_root(current->fs, &root); old_mp = lock_mount(&old); error = PTR_ERR(old_mp); -@@ -3065,7 +3089,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) +@@ -3065,7 +3091,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; @@ -100401,6 +100653,34 @@ index 78370ab..1cb3614 100644 atomic_t bcast_queue_left; atomic_t batman_queue_left; char num_ifaces; +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c +index a841d3e..c7a19a1 100644 +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) + + BT_DBG(""); + ++ if (!l2cap_is_socket(sock)) ++ return -EBADFD; ++ + baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); + baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); + +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c +index 67fe5e8..278a194 100644 +--- a/net/bluetooth/cmtp/core.c ++++ b/net/bluetooth/cmtp/core.c +@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req 
*req, struct socket *sock) + + BT_DBG(""); + ++ if (!l2cap_is_socket(sock)) ++ return -EBADFD; ++ + session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); + if (!session) + return -ENOMEM; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 7552f9e..074ce29 100644 --- a/net/bluetooth/hci_sock.c @@ -100414,6 +100694,26 @@ index 7552f9e..074ce29 100644 if (copy_from_user(&uf, optval, len)) { err = -EFAULT; break; +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index d9fb934..6134618 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -1332,13 +1332,14 @@ int hidp_connection_add(struct hidp_connadd_req *req, + { + struct hidp_session *session; + struct l2cap_conn *conn; +- struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan; ++ struct l2cap_chan *chan; + int ret; + + ret = hidp_verify_sockets(ctrl_sock, intr_sock); + if (ret) + return ret; + ++ chan = l2cap_pi(ctrl_sock->sk)->chan; + conn = NULL; + l2cap_chan_lock(chan); + if (chan->conn) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 0007c9e..f11541b 100644 --- a/net/bluetooth/l2cap_core.c diff --git a/3.17.7/0000_README b/3.17.7/0000_README index 202e6df..0970663 100644 --- a/3.17.7/0000_README +++ b/3.17.7/0000_README @@ -2,7 +2,7 @@ README ----------------------------------------------------------------------------- Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 4420_grsecurity-3.0-3.17.7-201412170700.patch +Patch: 4420_grsecurity-3.0-3.17.7-201412211910.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.17.7/4420_grsecurity-3.0-3.17.7-201412170700.patch b/3.17.7/4420_grsecurity-3.0-3.17.7-201412211910.patch index f3e2d34..cdfe8b5 100644 --- a/3.17.7/4420_grsecurity-3.0-3.17.7-201412170700.patch +++ b/3.17.7/4420_grsecurity-3.0-3.17.7-201412211910.patch @@ -13308,7 +13308,7 @@ index ad8f795..2c7eec6 100644 /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 3632743..630a8bb 100644 +index 3632743..5cf7bff 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -130,7 +130,7 @@ config X86 @@ -13337,7 +13337,15 @@ index 3632743..630a8bb 100644 ---help--- Say Y here to enable options for running Linux under various hyper- visors. This option enables basic hypervisor detection and platform -@@ -1083,6 +1084,7 @@ choice +@@ -928,6 +929,7 @@ config VM86 + + config X86_16BIT + bool "Enable support for 16-bit segments" if EXPERT ++ depends on !GRKERNSEC + default y + ---help--- + This option is required by programs like Wine to run 16-bit +@@ -1083,6 +1085,7 @@ choice config NOHIGHMEM bool "off" @@ -13345,7 +13353,7 @@ index 3632743..630a8bb 100644 ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 -@@ -1119,6 +1121,7 @@ config NOHIGHMEM +@@ -1119,6 +1122,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" @@ -13353,7 +13361,7 @@ index 3632743..630a8bb 100644 ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. 
-@@ -1171,7 +1174,7 @@ config PAGE_OFFSET +@@ -1171,7 +1175,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G @@ -13362,7 +13370,7 @@ index 3632743..630a8bb 100644 default 0x40000000 if VMSPLIT_1G default 0xC0000000 depends on X86_32 -@@ -1586,6 +1589,7 @@ source kernel/Kconfig.hz +@@ -1586,6 +1590,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" @@ -13370,7 +13378,7 @@ index 3632743..630a8bb 100644 ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot -@@ -1771,7 +1775,9 @@ config X86_NEED_RELOCS +@@ -1771,7 +1776,9 @@ config X86_NEED_RELOCS config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" @@ -13381,7 +13389,7 @@ index 3632743..630a8bb 100644 range 0x2000 0x1000000 if X86_32 range 0x200000 0x1000000 if X86_64 ---help--- -@@ -1854,6 +1860,7 @@ config COMPAT_VDSO +@@ -1854,6 +1861,7 @@ config COMPAT_VDSO def_bool n prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)" depends on X86_32 || IA32_EMULATION @@ -17911,6 +17919,18 @@ index 53cdfb2..d1369e6 100644 #define flush_insn_slot(p) do { } while (0) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 0d47ae1..b0e3ee1 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -51,6 +51,7 @@ + | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) + + #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL ++#define CR3_PCID_INVD (1UL << 63) + #define CR4_RESERVED_BITS \ + (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ + | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 4ad6560..75c7bdd 100644 --- a/arch/x86/include/asm/local.h @@ -21382,6 +21402,24 @@ index bbae024..e1528f9 100644 #define BIOS_END 0x00100000 #define BIOS_ROM_BASE 0xffe00000 +diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h +index 46727eb..6e1aaf7 100644 +--- a/arch/x86/include/uapi/asm/ldt.h ++++ b/arch/x86/include/uapi/asm/ldt.h +@@ -28,6 +28,13 @@ struct user_desc { + unsigned int seg_not_present:1; + unsigned int useable:1; + #ifdef __x86_64__ ++ /* ++ * Because this bit is not present in 32-bit user code, user ++ * programs can pass uninitialized values here. Therefore, in ++ * any context in which a user_desc comes from a 32-bit program, ++ * the kernel must act as though lm == 0, regardless of the ++ * actual value. 
++ */ + unsigned int lm:1; + #endif + }; diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h index 7b0a55a..ad115bf 100644 --- a/arch/x86/include/uapi/asm/ptrace-abi.h @@ -25030,10 +25068,42 @@ index b9dde27..6e9dc4e 100644 +ENDPROC(ignore_sysret) diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c -index 94d857f..bf1f0bf 100644 +index 94d857f..5bce89c 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c -@@ -197,7 +197,7 @@ void init_espfix_ap(void) +@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex); + #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE) + static void *espfix_pages[ESPFIX_MAX_PAGES]; + +-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD] +- __aligned(PAGE_SIZE); ++static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata; + + static unsigned int page_random, slot_random; + +@@ -122,14 +121,17 @@ static void init_espfix_random(void) + void __init init_espfix_bsp(void) + { + pgd_t *pgd_p; +- pteval_t ptemask; +- +- ptemask = __supported_pte_mask; ++ unsigned long index = pgd_index(ESPFIX_BASE_ADDR); + + /* Install the espfix pud into the kernel page directory */ +- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; ++ pgd_p = &init_level4_pgt[index]; + pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1); ++ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1); ++#endif ++ + /* Randomize the locations */ + init_espfix_random(); + +@@ -197,7 +199,7 @@ void init_espfix_ap(void) set_pte(&pte_p[n*PTE_STRIDE], pte); /* Job is done for this CPU and any CPU which shares this page */ @@ -26507,6 +26577,38 @@ index c2bedae..25e7ab60 100644 .attr = { .name = "data", .mode = S_IRUGO, +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index 3dd8e2c..07de51f 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -282,7 +282,14 @@ NOKPROBE_SYMBOL(do_async_page_fault); + static void __init paravirt_ops_setup(void) + { + pv_info.name = "KVM"; +- pv_info.paravirt_enabled = 1; ++ ++ /* ++ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM ++ * guest kernel works like a bare metal kernel with additional ++ * features, and paravirt_enabled is about features that are ++ * missing. 
++ */ ++ pv_info.paravirt_enabled = 0; + + if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) + pv_cpu_ops.io_delay = kvm_io_delay; +diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c +index d9156ce..a2de9bc 100644 +--- a/arch/x86/kernel/kvmclock.c ++++ b/arch/x86/kernel/kvmclock.c +@@ -263,7 +263,6 @@ void __init kvmclock_init(void) + #endif + kvm_get_preset_lpj(); + clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); +- pv_info.paravirt_enabled = 1; + pv_info.name = "KVM"; + + if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index c37886d..d851d32 100644 --- a/arch/x86/kernel/ldt.c @@ -27145,6 +27247,30 @@ index 548d25f..f8fb99c 100644 }; EXPORT_SYMBOL_GPL(pv_time_ops); +diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c +index a1da673..2c72d5b 100644 +--- a/arch/x86/kernel/paravirt_patch_64.c ++++ b/arch/x86/kernel/paravirt_patch_64.c +@@ -9,7 +9,9 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); + DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); + DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); + DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); ++#ifndef CONFIG_PAX_MEMORY_UDEREF + DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); ++#endif + DEF_NATIVE(pv_cpu_ops, clts, "clts"); + DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); + +@@ -57,7 +59,9 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, + PATCH_SITE(pv_mmu_ops, read_cr3); + PATCH_SITE(pv_mmu_ops, write_cr3); + PATCH_SITE(pv_cpu_ops, clts); ++#ifndef CONFIG_PAX_MEMORY_UDEREF + PATCH_SITE(pv_mmu_ops, flush_tlb_single); ++#endif + PATCH_SITE(pv_cpu_ops, wbinvd); + + patch_site: diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 0497f71..7186c0d 100644 --- a/arch/x86/kernel/pci-calgary_64.c @@ -28577,10 +28703,58 @@ index 0fa2960..91eabbe 100644 return pc; } diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c -index f7fec09..9991981 100644 +index f7fec09..d0f623f 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c -@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx, +@@ -27,6 +27,37 @@ static int get_free_idx(void) + return -ESRCH; + } + ++static bool tls_desc_okay(const struct user_desc *info) ++{ ++ if (LDT_empty(info)) ++ return true; ++ ++ /* ++ * espfix is required for 16-bit data segments, but espfix ++ * only works for LDT segments. ++ */ ++ if (!info->seg_32bit) ++ return false; ++ ++ /* Only allow data segments in the TLS array. */ ++ if (info->contents > 1) ++ return false; ++ ++ /* ++ * Non-present segments with DPL 3 present an interesting attack ++ * surface. The kernel should handle such segments correctly, ++ * but TLS is very difficult to protect in a sandbox, so prevent ++ * such segments from being created. ++ * ++ * If userspace needs to remove a TLS entry, it can still delete ++ * it outright. 
++ */ ++ if (info->seg_not_present) ++ return false; ++ ++ return true; ++} ++ + static void set_tls_desc(struct task_struct *p, int idx, + const struct user_desc *info, int n) + { +@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx, + if (copy_from_user(&info, u_info, sizeof(info))) + return -EFAULT; + ++ if (!tls_desc_okay(&info)) ++ return -EINVAL; ++ + if (idx == -1) + idx = info.entry_number; + +@@ -84,6 +118,11 @@ int do_set_thread_area(struct task_struct *p, int idx, if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; @@ -28592,7 +28766,15 @@ index f7fec09..9991981 100644 set_tls_desc(p, idx, &info, 1); return 0; -@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, +@@ -192,6 +231,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, + { + struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; + const struct user_desc *info; ++ int i; + + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || + (pos % sizeof(struct user_desc)) != 0 || +@@ -200,11 +240,15 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, if (kbuf) info = kbuf; @@ -28601,6 +28783,14 @@ index f7fec09..9991981 100644 return -EFAULT; else info = infobuf; + ++ for (i = 0; i < count / sizeof(struct user_desc); i++) ++ if (!tls_desc_okay(info + i)) ++ return -EINVAL; ++ + set_tls_desc(target, + GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), + info, count / sizeof(struct user_desc)); diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c index 1c113db..287b42e 100644 --- a/arch/x86/kernel/tracepoint.c @@ -29377,6 +29567,19 @@ index 38a0afe..94421a9 100644 return 0; out: +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 9254069..bf25b97 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -3550,7 +3550,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) + + ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); + if (efer & EFER_LMA) +- rsvd = CR3_L_MODE_RESERVED_BITS; ++ rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD; + + if (new_val & rsvd) + return emulate_gp(ctxt, 0); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 08e8a89..0e9183e 100644 --- a/arch/x86/kvm/lapic.c @@ -29591,10 +29794,19 @@ index 41a5426..c0b3c00 100644 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index d6aeccf..cea125a 100644 +index d6aeccf..f0f150c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -1857,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) +@@ -729,6 +729,8 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4); + + int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) + { ++ cr3 &= ~CR3_PCID_INVD; ++ + if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { + kvm_mmu_sync_roots(vcpu); + kvm_mmu_flush_tlb(vcpu); +@@ -1857,8 +1859,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); @@ -29605,7 +29817,7 @@ index d6aeccf..cea125a 100644 u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; -@@ -2779,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp, +@@ -2779,6 +2781,8 @@ long kvm_arch_dev_ioctl(struct file *filp, if (n < msr_list.nmsrs) goto out; r = -EFAULT; @@ -29614,7 +29826,7 @@ index d6aeccf..cea125a 100644 if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; -@@ -5639,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = { +@@ -5639,7 +5643,7 @@ static struct notifier_block pvclock_gtod_notifier = { }; #endif @@ -42727,6 +42939,304 @@ index 9bf8637..f462416 100644 /* case 1) */ if (data[0] != REPORT_ID_DJ_SHORT) +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c +index c372368..ecb9d90 100644 +--- a/drivers/hid/hid-sony.c ++++ b/drivers/hid/hid-sony.c +@@ -797,6 +797,12 @@ union sixaxis_output_report_01 { + __u8 buf[36]; + }; + ++#define DS4_REPORT_0x02_SIZE 37 ++#define DS4_REPORT_0x05_SIZE 32 ++#define DS4_REPORT_0x11_SIZE 78 ++#define DS4_REPORT_0x81_SIZE 7 ++#define SIXAXIS_REPORT_0xF2_SIZE 18 ++ + static spinlock_t sony_dev_list_lock; + static LIST_HEAD(sony_device_list); + static DEFINE_IDA(sony_device_id_allocator); +@@ -810,6 +816,7 @@ struct sony_sc { + struct work_struct state_worker; + struct power_supply battery; + int device_id; ++ __u8 *output_report_dmabuf; + + #ifdef CONFIG_SONY_FF + __u8 left; +@@ -1108,9 +1115,20 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev) + + static int sixaxis_set_operational_bt(struct hid_device *hdev) + { +- unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; +- return hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), ++ static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; ++ __u8 *buf; ++ int ret; ++ ++ buf = kmemdup(report, sizeof(report), GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(report), + HID_FEATURE_REPORT, HID_REQ_SET_REPORT); ++ ++ kfree(buf); ++ ++ return ret; + } + + /* +@@ -1119,10 +1137,19 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev) + */ + static int dualshock4_set_operational_bt(struct hid_device *hdev) + { +- __u8 buf[37] = { 0 }; ++ __u8 *buf; ++ int ret; + +- return hid_hw_raw_request(hdev, 0x02, buf, sizeof(buf), ++ buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE, + HID_FEATURE_REPORT, HID_REQ_GET_REPORT); ++ ++ kfree(buf); ++ ++ return ret; + } + + static void sixaxis_set_leds_from_id(int id, __u8 values[MAX_LEDS]) +@@ -1437,9 +1464,7 @@ error_leds: + + static void sixaxis_state_worker(struct work_struct *work) + { +- struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); +- int n; +- union sixaxis_output_report_01 report = { ++ static const union sixaxis_output_report_01 default_report = { + .buf = { + 0x01, + 0x00, 0xff, 0x00, 0xff, 0x00, +@@ -1451,20 +1476,27 @@ static void sixaxis_state_worker(struct work_struct *work) + 0x00, 0x00, 0x00, 0x00, 0x00 + } + }; ++ struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); ++ struct sixaxis_output_report *report = ++ (struct sixaxis_output_report *)sc->output_report_dmabuf; ++ int n; ++ ++ /* Initialize the report with default values */ ++ memcpy(report, &default_report, sizeof(struct sixaxis_output_report)); + + #ifdef CONFIG_SONY_FF +- report.data.rumble.right_motor_on = sc->right ? 
1 : 0; +- report.data.rumble.left_motor_force = sc->left; ++ report->rumble.right_motor_on = sc->right ? 1 : 0; ++ report->rumble.left_motor_force = sc->left; + #endif + +- report.data.leds_bitmap |= sc->led_state[0] << 1; +- report.data.leds_bitmap |= sc->led_state[1] << 2; +- report.data.leds_bitmap |= sc->led_state[2] << 3; +- report.data.leds_bitmap |= sc->led_state[3] << 4; ++ report->leds_bitmap |= sc->led_state[0] << 1; ++ report->leds_bitmap |= sc->led_state[1] << 2; ++ report->leds_bitmap |= sc->led_state[2] << 3; ++ report->leds_bitmap |= sc->led_state[3] << 4; + + /* Set flag for all leds off, required for 3rd party INTEC controller */ +- if ((report.data.leds_bitmap & 0x1E) == 0) +- report.data.leds_bitmap |= 0x20; ++ if ((report->leds_bitmap & 0x1E) == 0) ++ report->leds_bitmap |= 0x20; + + /* + * The LEDs in the report are indexed in reverse order to their +@@ -1477,28 +1509,30 @@ static void sixaxis_state_worker(struct work_struct *work) + */ + for (n = 0; n < 4; n++) { + if (sc->led_delay_on[n] || sc->led_delay_off[n]) { +- report.data.led[3 - n].duty_off = sc->led_delay_off[n]; +- report.data.led[3 - n].duty_on = sc->led_delay_on[n]; ++ report->led[3 - n].duty_off = sc->led_delay_off[n]; ++ report->led[3 - n].duty_on = sc->led_delay_on[n]; + } + } + +- hid_hw_raw_request(sc->hdev, report.data.report_id, report.buf, +- sizeof(report), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); ++ hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report, ++ sizeof(struct sixaxis_output_report), ++ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); + } + + static void dualshock4_state_worker(struct work_struct *work) + { + struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); + struct hid_device *hdev = sc->hdev; ++ __u8 *buf = sc->output_report_dmabuf; + int offset; + +- __u8 buf[78] = { 0 }; +- + if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) { ++ memset(buf, 0, DS4_REPORT_0x05_SIZE); + buf[0] = 0x05; + buf[1] = 0xFF; + offset = 4; + } else { ++ memset(buf, 0, DS4_REPORT_0x11_SIZE); + buf[0] = 0x11; + buf[1] = 0xB0; + buf[3] = 0x0F; +@@ -1526,12 +1560,33 @@ static void dualshock4_state_worker(struct work_struct *work) + buf[offset++] = sc->led_delay_off[3]; + + if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) +- hid_hw_output_report(hdev, buf, 32); ++ hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE); + else +- hid_hw_raw_request(hdev, 0x11, buf, 78, ++ hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE, + HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); + } + ++static int sony_allocate_output_report(struct sony_sc *sc) ++{ ++ if (sc->quirks & SIXAXIS_CONTROLLER) ++ sc->output_report_dmabuf = ++ kmalloc(sizeof(union sixaxis_output_report_01), ++ GFP_KERNEL); ++ else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) ++ sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE, ++ GFP_KERNEL); ++ else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ++ sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE, ++ GFP_KERNEL); ++ else ++ return 0; ++ ++ if (!sc->output_report_dmabuf) ++ return -ENOMEM; ++ ++ return 0; ++} ++ + #ifdef CONFIG_SONY_FF + static int sony_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +@@ -1740,6 +1795,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc) + + static int sony_check_add(struct sony_sc *sc) + { ++ __u8 *buf = NULL; + int n, ret; + + if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) || +@@ -1755,36 +1811,44 @@ static int sony_check_add(struct sony_sc *sc) + return 0; + } + } else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) { +- __u8 buf[7]; ++ 
buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; + + /* + * The MAC address of a DS4 controller connected via USB can be + * retrieved with feature report 0x81. The address begins at + * offset 1. + */ +- ret = hid_hw_raw_request(sc->hdev, 0x81, buf, sizeof(buf), +- HID_FEATURE_REPORT, HID_REQ_GET_REPORT); ++ ret = hid_hw_raw_request(sc->hdev, 0x81, buf, ++ DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT, ++ HID_REQ_GET_REPORT); + +- if (ret != 7) { ++ if (ret != DS4_REPORT_0x81_SIZE) { + hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n"); +- return ret < 0 ? ret : -EINVAL; ++ ret = ret < 0 ? ret : -EINVAL; ++ goto out_free; + } + + memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address)); + } else if (sc->quirks & SIXAXIS_CONTROLLER_USB) { +- __u8 buf[18]; ++ buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; + + /* + * The MAC address of a Sixaxis controller connected via USB can + * be retrieved with feature report 0xf2. The address begins at + * offset 4. + */ +- ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, sizeof(buf), +- HID_FEATURE_REPORT, HID_REQ_GET_REPORT); ++ ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, ++ SIXAXIS_REPORT_0xF2_SIZE, HID_FEATURE_REPORT, ++ HID_REQ_GET_REPORT); + +- if (ret != 18) { ++ if (ret != SIXAXIS_REPORT_0xF2_SIZE) { + hid_err(sc->hdev, "failed to retrieve feature report 0xf2 with the Sixaxis MAC address\n"); +- return ret < 0 ? ret : -EINVAL; ++ ret = ret < 0 ? ret : -EINVAL; ++ goto out_free; + } + + /* +@@ -1797,7 +1861,13 @@ static int sony_check_add(struct sony_sc *sc) + return 0; + } + +- return sony_check_add_dev_list(sc); ++ ret = sony_check_add_dev_list(sc); ++ ++out_free: ++ ++ kfree(buf); ++ ++ return ret; + } + + static int sony_set_device_id(struct sony_sc *sc) +@@ -1881,6 +1951,12 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) + return ret; + } + ++ ret = sony_allocate_output_report(sc); ++ if (ret < 0) { ++ hid_err(hdev, "failed to allocate the output report buffer\n"); ++ goto err_stop; ++ } ++ + ret = sony_set_device_id(sc); + if (ret < 0) { + hid_err(hdev, "failed to allocate the device id\n"); +@@ -1977,6 +2053,7 @@ err_stop: + if (sc->quirks & SONY_BATTERY_SUPPORT) + sony_battery_remove(sc); + sony_cancel_work_sync(sc); ++ kfree(sc->output_report_dmabuf); + sony_remove_dev_list(sc); + sony_release_device_id(sc); + hid_hw_stop(hdev); +@@ -1997,6 +2074,8 @@ static void sony_remove(struct hid_device *hdev) + + sony_cancel_work_sync(sc); + ++ kfree(sc->output_report_dmabuf); ++ + sony_remove_dev_list(sc); + + sony_release_device_id(sc); diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c index c13fb5b..55a3802 100644 --- a/drivers/hid/hid-wiimote-debug.c @@ -64218,6 +64728,37 @@ index 26753ba..d19eb34 100644 put_cpu_var(last_ino); return res; } +diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c +index f488bba..bb63254 100644 +--- a/fs/isofs/rock.c ++++ b/fs/isofs/rock.c +@@ -30,6 +30,7 @@ struct rock_state { + int cont_size; + int cont_extent; + int cont_offset; ++ int cont_loops; + struct inode *inode; + }; + +@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode) + rs->inode = inode; + } + ++/* Maximum number of Rock Ridge continuation entries */ ++#define RR_MAX_CE_ENTRIES 32 ++ + /* + * Returns 0 if the caller should continue scanning, 1 if the scan must end + * and -ve on error. 
+@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs) + goto out; + } + ret = -EIO; ++ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES) ++ goto out; + bh = sb_bread(rs->inode->i_sb, rs->cont_extent); + if (bh) { + memcpy(rs->buffer, bh->b_data + rs->cont_offset, diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 4a6cf28..d3a29d3 100644 --- a/fs/jffs2/erase.c @@ -65016,10 +65557,19 @@ index bb02687..79cba2c 100644 out: return len; diff --git a/fs/namespace.c b/fs/namespace.c -index 550dbff..c4ad324 100644 +index 550dbff..6c24d43 100644 --- a/fs/namespace.c +++ b/fs/namespace.c -@@ -1362,6 +1362,9 @@ static int do_umount(struct mount *mnt, int flags) +@@ -1286,6 +1286,8 @@ void umount_tree(struct mount *mnt, int how) + } + if (last) { + last->mnt_hash.next = unmounted.first; ++ if (unmounted.first) ++ unmounted.first->pprev = &last->mnt_hash.next; + unmounted.first = tmp_list.first; + unmounted.first->pprev = &unmounted.first; + } +@@ -1362,6 +1364,9 @@ static int do_umount(struct mount *mnt, int flags) if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); @@ -65029,7 +65579,7 @@ index 550dbff..c4ad324 100644 return retval; } -@@ -1384,6 +1387,9 @@ static int do_umount(struct mount *mnt, int flags) +@@ -1384,6 +1389,9 @@ static int do_umount(struct mount *mnt, int flags) } unlock_mount_hash(); namespace_unlock(); @@ -65039,7 +65589,7 @@ index 550dbff..c4ad324 100644 return retval; } -@@ -1403,7 +1409,7 @@ static inline bool may_mount(void) +@@ -1403,7 +1411,7 @@ static inline bool may_mount(void) * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD */ @@ -65048,7 +65598,7 @@ index 550dbff..c4ad324 100644 { struct path path; struct mount *mnt; -@@ -1445,7 +1451,7 @@ out: +@@ -1445,7 +1453,7 @@ out: /* * The 2.0 compatible umount. No flags. */ @@ -65057,7 +65607,7 @@ index 550dbff..c4ad324 100644 { return sys_umount(name, 0); } -@@ -2494,6 +2500,16 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2494,6 +2502,16 @@ long do_mount(const char *dev_name, const char *dir_name, MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); @@ -65074,7 +65624,7 @@ index 550dbff..c4ad324 100644 if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); -@@ -2508,6 +2524,9 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2508,6 +2526,9 @@ long do_mount(const char *dev_name, const char *dir_name, dev_name, data_page); dput_out: path_put(&path); @@ -65084,7 +65634,7 @@ index 550dbff..c4ad324 100644 return retval; } -@@ -2525,7 +2544,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) +@@ -2525,7 +2546,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) * number incrementing at 10Ghz will take 12,427 years to wrap which * is effectively never, so we can ignore the possibility. 
*/ @@ -65093,7 +65643,7 @@ index 550dbff..c4ad324 100644 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { -@@ -2540,7 +2559,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2540,7 +2561,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) kfree(new_ns); return ERR_PTR(ret); } @@ -65102,7 +65652,7 @@ index 550dbff..c4ad324 100644 atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); -@@ -2550,7 +2569,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2550,7 +2571,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) return new_ns; } @@ -65111,7 +65661,7 @@ index 550dbff..c4ad324 100644 struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; -@@ -2671,8 +2690,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) +@@ -2671,8 +2692,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) } EXPORT_SYMBOL(mount_subtree); @@ -65122,7 +65672,7 @@ index 550dbff..c4ad324 100644 { int ret; char *kernel_type; -@@ -2785,6 +2804,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, +@@ -2785,6 +2806,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, if (error) goto out2; @@ -65134,7 +65684,7 @@ index 550dbff..c4ad324 100644 get_fs_root(current->fs, &root); old_mp = lock_mount(&old); error = PTR_ERR(old_mp); -@@ -3056,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) +@@ -3056,7 +3082,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; @@ -101242,6 +101792,46 @@ index 8854c05..ee5d5497 100644 atomic_t bcast_queue_left; atomic_t batman_queue_left; char num_ifaces; +diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c +index 075f20d..62972c9 100644 +--- a/net/bluetooth/6lowpan.c ++++ b/net/bluetooth/6lowpan.c +@@ -367,7 +367,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, + + drop: + dev->stats.rx_dropped++; +- kfree_skb(skb); + return NET_RX_DROP; + } + +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c +index 85bcc21..ce82722d 100644 +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) + + BT_DBG(""); + ++ if (!l2cap_is_socket(sock)) ++ return -EBADFD; ++ + baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); + baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); + +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c +index 67fe5e8..278a194 100644 +--- a/net/bluetooth/cmtp/core.c ++++ b/net/bluetooth/cmtp/core.c +@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) + + BT_DBG(""); + ++ if (!l2cap_is_socket(sock)) ++ return -EBADFD; ++ + session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); + if (!session) + return -ENOMEM; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 115f149..f0ba286 100644 --- a/net/bluetooth/hci_sock.c @@ -101255,6 +101845,26 @@ index 115f149..f0ba286 100644 if (copy_from_user(&uf, optval, len)) { err = -EFAULT; break; +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index 6c7ecf1..cf765c7 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -1318,13 +1318,14 @@ int hidp_connection_add(struct hidp_connadd_req *req, + { + struct 
hidp_session *session; + struct l2cap_conn *conn; +- struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan; ++ struct l2cap_chan *chan; + int ret; + + ret = hidp_verify_sockets(ctrl_sock, intr_sock); + if (ret) + return ret; + ++ chan = l2cap_pi(ctrl_sock->sk)->chan; + conn = NULL; + l2cap_chan_lock(chan); + if (chan->conn) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 14ca8ae..262d49a 100644 --- a/net/bluetooth/l2cap_core.c diff --git a/3.2.65/0000_README b/3.2.65/0000_README index a5e4133..f781dcc 100644 --- a/3.2.65/0000_README +++ b/3.2.65/0000_README @@ -178,7 +178,7 @@ Patch: 1064_linux-3.2.65.patch From: http://www.kernel.org Desc: Linux 3.2.65 -Patch: 4420_grsecurity-3.0-3.2.65-201412170654.patch +Patch: 4420_grsecurity-3.0-3.2.65-201412211905.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.2.65/4420_grsecurity-3.0-3.2.65-201412170654.patch b/3.2.65/4420_grsecurity-3.0-3.2.65-201412211905.patch index 6097404..b1bebf4 100644 --- a/3.2.65/4420_grsecurity-3.0-3.2.65-201412170654.patch +++ b/3.2.65/4420_grsecurity-3.0-3.2.65-201412211905.patch @@ -10373,7 +10373,7 @@ index ad8f795..2c7eec6 100644 /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 28a1bca..5ebf70a 100644 +index 28a1bca..6eebf04 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -75,6 +75,7 @@ config X86 @@ -10401,7 +10401,15 @@ index 28a1bca..5ebf70a 100644 ---help--- Say Y here to get to see options related to running Linux under various hypervisors. This option alone does not add any kernel code. -@@ -1040,7 +1042,7 @@ choice +@@ -903,6 +905,7 @@ config VM86 + + config X86_16BIT + bool "Enable support for 16-bit segments" if EXPERT ++ depends on !GRKERNSEC + default y + ---help--- + This option is required by programs like Wine to run 16-bit +@@ -1040,7 +1043,7 @@ choice config NOHIGHMEM bool "off" @@ -10410,7 +10418,7 @@ index 28a1bca..5ebf70a 100644 ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 -@@ -1077,7 +1079,7 @@ config NOHIGHMEM +@@ -1077,7 +1080,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" @@ -10419,7 +10427,7 @@ index 28a1bca..5ebf70a 100644 ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. -@@ -1131,7 +1133,7 @@ config PAGE_OFFSET +@@ -1131,7 +1134,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G @@ -10428,7 +10436,7 @@ index 28a1bca..5ebf70a 100644 default 0x40000000 if VMSPLIT_1G default 0xC0000000 depends on X86_32 -@@ -1514,6 +1516,7 @@ config SECCOMP +@@ -1514,6 +1517,7 @@ config SECCOMP config CC_STACKPROTECTOR bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" @@ -10436,7 +10444,7 @@ index 28a1bca..5ebf70a 100644 ---help--- This option turns on the -fstack-protector GCC feature. This feature puts, at the beginning of functions, a canary value on -@@ -1532,6 +1535,7 @@ source kernel/Kconfig.hz +@@ -1532,6 +1536,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" @@ -10444,7 +10452,7 @@ index 28a1bca..5ebf70a 100644 ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot -@@ -1634,6 +1638,8 @@ config X86_NEED_RELOCS +@@ -1634,6 +1639,8 @@ config X86_NEED_RELOCS config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" if X86_32 default "0x1000000" @@ -10453,7 +10461,7 @@ index 28a1bca..5ebf70a 100644 range 0x2000 0x1000000 ---help--- This value puts the alignment restrictions on physical address -@@ -1665,9 +1671,10 @@ config HOTPLUG_CPU +@@ -1665,9 +1672,10 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. config COMPAT_VDSO @@ -14151,10 +14159,18 @@ index 5478825..839e88c 100644 #define flush_insn_slot(p) do { } while (0) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index 9171618..481a636 100644 +index 9171618..fe2b1da 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h -@@ -460,7 +460,7 @@ struct kvm_arch { +@@ -45,6 +45,7 @@ + #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) + #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ + 0xFFFFFF0000000000ULL) ++#define CR3_PCID_INVD (1UL << 63) + #define CR4_RESERVED_BITS \ + (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ + | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ +@@ -460,7 +461,7 @@ struct kvm_arch { unsigned int n_requested_mmu_pages; unsigned int n_max_mmu_pages; unsigned int indirect_shadow_pages; @@ -14163,6 +14179,24 @@ index 9171618..481a636 100644 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; /* * Hash table of struct kvm_mmu_page. +diff --git a/arch/x86/include/asm/ldt.h b/arch/x86/include/asm/ldt.h +index 46727eb..6e1aaf7 100644 +--- a/arch/x86/include/asm/ldt.h ++++ b/arch/x86/include/asm/ldt.h +@@ -28,6 +28,13 @@ struct user_desc { + unsigned int seg_not_present:1; + unsigned int useable:1; + #ifdef __x86_64__ ++ /* ++ * Because this bit is not present in 32-bit user code, user ++ * programs can pass uninitialized values here. Therefore, in ++ * any context in which a user_desc comes from a 32-bit program, ++ * the kernel must act as though lm == 0, regardless of the ++ * actual value. 
++ */ + unsigned int lm:1; + #endif + }; diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 9cdae5d..3534f04 100644 --- a/arch/x86/include/asm/local.h @@ -20634,10 +20668,42 @@ index 9d28dbac..9d8b7a4 100644 /* * End of kprobes section diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c -index 94d857f..bf1f0bf 100644 +index 94d857f..5bce89c 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c -@@ -197,7 +197,7 @@ void init_espfix_ap(void) +@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex); + #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE) + static void *espfix_pages[ESPFIX_MAX_PAGES]; + +-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD] +- __aligned(PAGE_SIZE); ++static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata; + + static unsigned int page_random, slot_random; + +@@ -122,14 +121,17 @@ static void init_espfix_random(void) + void __init init_espfix_bsp(void) + { + pgd_t *pgd_p; +- pteval_t ptemask; +- +- ptemask = __supported_pte_mask; ++ unsigned long index = pgd_index(ESPFIX_BASE_ADDR); + + /* Install the espfix pud into the kernel page directory */ +- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; ++ pgd_p = &init_level4_pgt[index]; + pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1); ++ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1); ++#endif ++ + /* Randomize the locations */ + init_espfix_random(); + +@@ -197,7 +199,7 @@ void init_espfix_ap(void) set_pte(&pte_p[n*PTE_STRIDE], pte); /* Job is done for this CPU and any CPU which shares this page */ @@ -22165,10 +22231,26 @@ index 7da647d..6e9fab5 100644 reset_current_kprobe(); preempt_enable_no_resched(); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c -index a9c2116..94c1e1a 100644 +index a9c2116..1a3dcdb 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c -@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void) +@@ -419,7 +419,14 @@ static void kvm_leave_lazy_mmu(void) + static void __init paravirt_ops_setup(void) + { + pv_info.name = "KVM"; +- pv_info.paravirt_enabled = 1; ++ ++ /* ++ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM ++ * guest kernel works like a bare metal kernel with additional ++ * features, and paravirt_enabled is about features that are ++ * missing. 
++ */ ++ pv_info.paravirt_enabled = 0; + + if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) + pv_cpu_ops.io_delay = kvm_io_delay; +@@ -437,6 +444,7 @@ static void __init paravirt_ops_setup(void) pv_mmu_ops.set_pud = kvm_set_pud; #if PAGETABLE_LEVELS == 4 pv_mmu_ops.set_pgd = kvm_set_pgd; @@ -22176,7 +22258,7 @@ index a9c2116..94c1e1a 100644 #endif #endif pv_mmu_ops.flush_tlb_user = kvm_flush_tlb; -@@ -579,7 +580,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self, +@@ -579,7 +587,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } @@ -22185,6 +22267,18 @@ index a9c2116..94c1e1a 100644 .notifier_call = kvm_cpu_notify, }; #endif +diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c +index 44842d7..e90eca0 100644 +--- a/arch/x86/kernel/kvmclock.c ++++ b/arch/x86/kernel/kvmclock.c +@@ -203,7 +203,6 @@ void __init kvmclock_init(void) + #endif + kvm_get_preset_lpj(); + clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); +- pv_info.paravirt_enabled = 1; + pv_info.name = "KVM"; + + if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 0a8e65e..288a4b0 100644 --- a/arch/x86/kernel/ldt.c @@ -22693,6 +22787,30 @@ index 84c938f..fa25421 100644 }; EXPORT_SYMBOL_GPL(pv_time_ops); +diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c +index a1da673..2c72d5b 100644 +--- a/arch/x86/kernel/paravirt_patch_64.c ++++ b/arch/x86/kernel/paravirt_patch_64.c +@@ -9,7 +9,9 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); + DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); + DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); + DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); ++#ifndef CONFIG_PAX_MEMORY_UDEREF + DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); ++#endif + DEF_NATIVE(pv_cpu_ops, clts, "clts"); + DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); + +@@ -57,7 +59,9 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, + PATCH_SITE(pv_mmu_ops, read_cr3); + PATCH_SITE(pv_mmu_ops, write_cr3); + PATCH_SITE(pv_cpu_ops, clts); ++#ifndef CONFIG_PAX_MEMORY_UDEREF + PATCH_SITE(pv_mmu_ops, flush_tlb_single); ++#endif + PATCH_SITE(pv_cpu_ops, wbinvd); + + patch_site: diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 726494b..5d942a3 100644 --- a/arch/x86/kernel/pci-calgary_64.c @@ -24236,10 +24354,58 @@ index dd5fbf4..b7f2232 100644 return pc; } diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c -index bcfec2d..aeb81c2 100644 +index bcfec2d..36ed955 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c -@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx, +@@ -28,6 +28,37 @@ static int get_free_idx(void) + return -ESRCH; + } + ++static bool tls_desc_okay(const struct user_desc *info) ++{ ++ if (LDT_empty(info)) ++ return true; ++ ++ /* ++ * espfix is required for 16-bit data segments, but espfix ++ * only works for LDT segments. ++ */ ++ if (!info->seg_32bit) ++ return false; ++ ++ /* Only allow data segments in the TLS array. */ ++ if (info->contents > 1) ++ return false; ++ ++ /* ++ * Non-present segments with DPL 3 present an interesting attack ++ * surface. The kernel should handle such segments correctly, ++ * but TLS is very difficult to protect in a sandbox, so prevent ++ * such segments from being created. ++ * ++ * If userspace needs to remove a TLS entry, it can still delete ++ * it outright. 
++ */ ++ if (info->seg_not_present) ++ return false; ++ ++ return true; ++} ++ + static void set_tls_desc(struct task_struct *p, int idx, + const struct user_desc *info, int n) + { +@@ -67,6 +98,9 @@ int do_set_thread_area(struct task_struct *p, int idx, + if (copy_from_user(&info, u_info, sizeof(info))) + return -EFAULT; + ++ if (!tls_desc_okay(&info)) ++ return -EINVAL; ++ + if (idx == -1) + idx = info.entry_number; + +@@ -85,6 +119,11 @@ int do_set_thread_area(struct task_struct *p, int idx, if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; @@ -24251,7 +24417,15 @@ index bcfec2d..aeb81c2 100644 set_tls_desc(p, idx, &info, 1); return 0; -@@ -205,7 +210,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, +@@ -197,6 +236,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, + { + struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; + const struct user_desc *info; ++ int i; + + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || + (pos % sizeof(struct user_desc)) != 0 || +@@ -205,11 +245,15 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, if (kbuf) info = kbuf; @@ -24260,6 +24434,14 @@ index bcfec2d..aeb81c2 100644 return -EFAULT; else info = infobuf; + ++ for (i = 0; i < count / sizeof(struct user_desc); i++) ++ if (!tls_desc_okay(info + i)) ++ return -EINVAL; ++ + set_tls_desc(target, + GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), + info, count / sizeof(struct user_desc)); diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index 451c0a7..e57f551 100644 --- a/arch/x86/kernel/trampoline_32.S @@ -24959,7 +25141,7 @@ index 7110911..069da9c 100644 /* * Encountered an error while doing the restore from the diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index f0ac042..f6e5b65 100644 +index f0ac042..0ca3004 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -249,6 +249,7 @@ struct gprefix { @@ -24997,6 +25179,15 @@ index f0ac042..f6e5b65 100644 } while (0) /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */ +@@ -3038,7 +3035,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) + + ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); + if (efer & EFER_LMA) +- rsvd = CR3_L_MODE_RESERVED_BITS; ++ rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD; + else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE) + rsvd = CR3_PAE_RESERVED_BITS; + else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 176205a..920cd58 100644 --- a/arch/x86/kvm/lapic.c @@ -25201,10 +25392,19 @@ index 8831c43..98f1a3e 100644 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 2d7d0df..1c1bd67 100644 +index 2d7d0df..2de279c0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c -@@ -1369,8 +1369,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) +@@ -668,6 +668,8 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4); + + int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) + { ++ cr3 &= ~CR3_PCID_INVD; ++ + if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { + kvm_mmu_sync_roots(vcpu); + kvm_mmu_flush_tlb(vcpu); +@@ -1369,8 +1371,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; int lm = is_long_mode(vcpu); @@ -25215,7 +25415,7 @@ index 2d7d0df..1c1bd67 100644 u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; -@@ -2187,6 +2187,8 @@ long kvm_arch_dev_ioctl(struct file *filp, +@@ -2187,6 +2189,8 @@ long kvm_arch_dev_ioctl(struct file *filp, if (n < msr_list.nmsrs) goto out; r = -EFAULT; @@ -25224,7 +25424,7 @@ index 2d7d0df..1c1bd67 100644 if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; -@@ -2362,15 +2364,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, +@@ -2362,15 +2366,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { @@ -25248,7 +25448,7 @@ index 2d7d0df..1c1bd67 100644 vcpu->arch.cpuid_nent = cpuid->nent; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); -@@ -2385,15 +2392,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, +@@ -2385,15 +2394,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { @@ -25271,7 +25471,7 @@ index 2d7d0df..1c1bd67 100644 return 0; out: -@@ -2768,7 +2779,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, +@@ -2768,7 +2781,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { @@ -25280,7 +25480,7 @@ index 2d7d0df..1c1bd67 100644 return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; -@@ -4846,7 +4857,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) +@@ -4846,7 +4859,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); @@ -25289,7 +25489,7 @@ index 2d7d0df..1c1bd67 100644 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; -@@ -5209,7 +5220,7 @@ static void kvm_set_mmio_spte_mask(void) +@@ -5209,7 +5222,7 @@ static void kvm_set_mmio_spte_mask(void) kvm_mmu_set_mmio_spte_mask(mask); } @@ -61589,6 +61789,37 @@ index 2f9197f..e2f03bf 100644 MODULE_LICENSE("GPL"); -/* Actual filesystem name is iso9660, as requested in filesystems.c */ -MODULE_ALIAS("iso9660"); +diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c +index ee62cc0..26859de 100644 +--- a/fs/isofs/rock.c ++++ b/fs/isofs/rock.c +@@ -30,6 +30,7 @@ struct rock_state { + int cont_size; + int cont_extent; + int cont_offset; ++ int cont_loops; + struct inode *inode; + }; + +@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode) + rs->inode = inode; + } + ++/* Maximum number of Rock Ridge continuation entries */ ++#define RR_MAX_CE_ENTRIES 32 ++ + /* + * Returns 0 if the caller should continue scanning, 1 if the scan must end + * and -ve on error. 
+@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs) + goto out; + } + ret = -EIO; ++ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES) ++ goto out; + bh = sb_bread(rs->inode->i_sb, rs->cont_extent); + if (bh) { + memcpy(rs->buffer, bh->b_data + rs->cont_offset, diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index e513f19..2ab1351 100644 --- a/fs/jffs2/erase.c @@ -81627,6 +81858,18 @@ index d773b21..95a0913 100644 struct ata_port_info { unsigned long flags; +diff --git a/include/linux/linkage.h b/include/linux/linkage.h +index 3f46aed..dee75fe 100644 +--- a/include/linux/linkage.h ++++ b/include/linux/linkage.h +@@ -15,6 +15,7 @@ + #endif + + #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) ++#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE) + #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) + + /* diff --git a/include/linux/list.h b/include/linux/list.h index cc6d2aa..c10ee83 100644 --- a/include/linux/list.h |