public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/hardened-patchset:master commit in: 3.14.4/
@ 2014-05-28 16:26 Anthony G. Basile
  0 siblings, 0 replies; only message in thread
From: Anthony G. Basile @ 2014-05-28 16:26 UTC (permalink / raw)
  To: gentoo-commits

commit:     9a41f2d531a27a9cbbf7071595929f89f79ab809
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Wed May 28 16:28:07 2014 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Wed May 28 16:28:07 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=9a41f2d5

Grsec/PaX: 3.0-3.14.4-201405271114

---
 3.14.4/0000_README                                 |   2 +-
 ... 4420_grsecurity-3.0-3.14.4-201405271114.patch} | 283 ++++++++++++++++-----
 3.14.4/4450_grsec-kconfig-default-gids.patch       |  12 +-
 3.14.4/4465_selinux-avc_audit-log-curr_ip.patch    |   2 +-
 4 files changed, 228 insertions(+), 71 deletions(-)

diff --git a/3.14.4/0000_README b/3.14.4/0000_README
index 1ddd194..4203555 100644
--- a/3.14.4/0000_README
+++ b/3.14.4/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-3.0-3.14.4-201405252047.patch
+Patch:	4420_grsecurity-3.0-3.14.4-201405271114.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.14.4/4420_grsecurity-3.0-3.14.4-201405252047.patch b/3.14.4/4420_grsecurity-3.0-3.14.4-201405271114.patch
similarity index 99%
rename from 3.14.4/4420_grsecurity-3.0-3.14.4-201405252047.patch
rename to 3.14.4/4420_grsecurity-3.0-3.14.4-201405271114.patch
index f294dbc..3537db8 100644
--- a/3.14.4/4420_grsecurity-3.0-3.14.4-201405252047.patch
+++ b/3.14.4/4420_grsecurity-3.0-3.14.4-201405271114.patch
@@ -27813,7 +27813,7 @@ index 1c113db..287b42e 100644
  static int trace_irq_vector_refcount;
  static DEFINE_MUTEX(irq_vector_mutex);
 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index 57409f6..e2c17e1 100644
+index 57409f6..b505597 100644
 --- a/arch/x86/kernel/traps.c
 +++ b/arch/x86/kernel/traps.c
 @@ -66,7 +66,7 @@
@@ -27892,7 +27892,19 @@ index 57409f6..e2c17e1 100644
  			regs->ip, regs->sp, error_code);
  		print_vma_addr(" in ", regs->ip);
  		pr_cont("\n");
-@@ -273,7 +285,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -251,6 +263,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+ 	tsk->thread.error_code = error_code;
+ 	tsk->thread.trap_nr = X86_TRAP_DF;
+ 
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++	if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
++		die("grsec: kernel stack overflow detected", regs, error_code);	
++#endif
++
+ #ifdef CONFIG_DOUBLEFAULT
+ 	df_debug(regs, error_code);
+ #endif
+@@ -273,7 +290,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
  	conditional_sti(regs);
  
  #ifdef CONFIG_X86_32
@@ -27901,7 +27913,7 @@ index 57409f6..e2c17e1 100644
  		local_irq_enable();
  		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
  		goto exit;
-@@ -281,18 +293,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -281,18 +298,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
  #endif
  
  	tsk = current;
@@ -27946,7 +27958,7 @@ index 57409f6..e2c17e1 100644
  	tsk->thread.error_code = error_code;
  	tsk->thread.trap_nr = X86_TRAP_GP;
  
-@@ -453,7 +489,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -453,7 +494,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
  	/* It's safe to allow irq's after DR6 has been saved */
  	preempt_conditional_sti(regs);
  
@@ -27955,7 +27967,7 @@ index 57409f6..e2c17e1 100644
  		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
  					X86_TRAP_DB);
  		preempt_conditional_cli(regs);
-@@ -468,7 +504,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -468,7 +509,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
  	 * We already checked v86 mode above, so we can check for kernel mode
  	 * by just checking the CPL of CS.
  	 */
@@ -27964,7 +27976,7 @@ index 57409f6..e2c17e1 100644
  		tsk->thread.debugreg6 &= ~DR_STEP;
  		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
  		regs->flags &= ~X86_EFLAGS_TF;
-@@ -500,7 +536,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+@@ -500,7 +541,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
  		return;
  	conditional_sti(regs);
  
@@ -58826,7 +58838,7 @@ index e4141f2..d8263e8 100644
  		i += packet_length_size;
  		if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
 diff --git a/fs/exec.c b/fs/exec.c
-index 3d78fcc..460e2a0 100644
+index 3d78fcc..cd4f983 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -55,8 +55,20 @@
@@ -59135,7 +59147,15 @@ index 3d78fcc..460e2a0 100644
  	set_fs(old_fs);
  	return result;
  }
-@@ -1258,7 +1336,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+@@ -846,6 +924,7 @@ static int exec_mmap(struct mm_struct *mm)
+ 	tsk->mm = mm;
+ 	tsk->active_mm = mm;
+ 	activate_mm(active_mm, mm);
++	populate_stack();
+ 	task_unlock(tsk);
+ 	if (old_mm) {
+ 		up_read(&old_mm->mmap_sem);
+@@ -1258,7 +1337,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
  	}
  	rcu_read_unlock();
  
@@ -59144,7 +59164,7 @@ index 3d78fcc..460e2a0 100644
  		bprm->unsafe |= LSM_UNSAFE_SHARE;
  	else
  		p->fs->in_exec = 1;
-@@ -1434,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
+@@ -1434,6 +1513,31 @@ static int exec_binprm(struct linux_binprm *bprm)
  	return ret;
  }
  
@@ -59176,7 +59196,7 @@ index 3d78fcc..460e2a0 100644
  /*
   * sys_execve() executes a new program.
   */
-@@ -1441,6 +1544,11 @@ static int do_execve_common(struct filename *filename,
+@@ -1441,6 +1545,11 @@ static int do_execve_common(struct filename *filename,
  				struct user_arg_ptr argv,
  				struct user_arg_ptr envp)
  {
@@ -59188,7 +59208,7 @@ index 3d78fcc..460e2a0 100644
  	struct linux_binprm *bprm;
  	struct file *file;
  	struct files_struct *displaced;
-@@ -1449,6 +1557,8 @@ static int do_execve_common(struct filename *filename,
+@@ -1449,6 +1558,8 @@ static int do_execve_common(struct filename *filename,
  	if (IS_ERR(filename))
  		return PTR_ERR(filename);
  
@@ -59197,7 +59217,7 @@ index 3d78fcc..460e2a0 100644
  	/*
  	 * We move the actual failure in case of RLIMIT_NPROC excess from
  	 * set*uid() to execve() because too many poorly written programs
-@@ -1486,11 +1596,21 @@ static int do_execve_common(struct filename *filename,
+@@ -1486,11 +1597,21 @@ static int do_execve_common(struct filename *filename,
  	if (IS_ERR(file))
  		goto out_unmark;
  
@@ -59219,7 +59239,7 @@ index 3d78fcc..460e2a0 100644
  	retval = bprm_mm_init(bprm);
  	if (retval)
  		goto out_unmark;
-@@ -1507,24 +1627,70 @@ static int do_execve_common(struct filename *filename,
+@@ -1507,24 +1628,70 @@ static int do_execve_common(struct filename *filename,
  	if (retval < 0)
  		goto out;
  
@@ -59294,7 +59314,7 @@ index 3d78fcc..460e2a0 100644
  	current->fs->in_exec = 0;
  	current->in_execve = 0;
  	acct_update_integrals(current);
-@@ -1535,6 +1701,14 @@ static int do_execve_common(struct filename *filename,
+@@ -1535,6 +1702,14 @@ static int do_execve_common(struct filename *filename,
  		put_files_struct(displaced);
  	return retval;
  
@@ -59309,7 +59329,7 @@ index 3d78fcc..460e2a0 100644
  out:
  	if (bprm->mm) {
  		acct_arg_size(bprm, 0);
-@@ -1626,3 +1800,296 @@ asmlinkage long compat_sys_execve(const char __user * filename,
+@@ -1626,3 +1801,296 @@ asmlinkage long compat_sys_execve(const char __user * filename,
  	return compat_do_execve(getname(filename), argv, envp);
  }
  #endif
@@ -65953,10 +65973,10 @@ index bcfe612..aa399c0 100644
  
 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
 new file mode 100644
-index 0000000..3abaf02
+index 0000000..a14eb52
 --- /dev/null
 +++ b/grsecurity/Kconfig
-@@ -0,0 +1,1161 @@
+@@ -0,0 +1,1174 @@
 +#
 +# grecurity configuration
 +#
@@ -66095,6 +66115,19 @@ index 0000000..3abaf02
 +	  If you use PaX it is essential that you say Y here as it closes up
 +	  several holes that make full ASLR useless locally.
 +
++
++config GRKERNSEC_KSTACKOVERFLOW
++	bool "Prevent kernel stack overflows"
++	default y if GRKERNSEC_CONFIG_AUTO
++	depends on !IA64 && 64BIT
++	help
++	  If you say Y here, the kernel's process stacks will be allocated
++	  with vmalloc instead of the kernel's default allocator.  This
++	  introduces guard pages that in combination with the alloca checking
++	  of the STACKLEAK feature prevents all forms of kernel process stack
++	  overflow abuse.  Note that this is different from kernel stack
++	  buffer overflows.
++
 +config GRKERNSEC_BRUTE
 +	bool "Deter exploit bruteforcing"
 +	default y if GRKERNSEC_CONFIG_AUTO
@@ -82073,7 +82106,7 @@ index b66c211..13d2915 100644
  static inline void anon_vma_merge(struct vm_area_struct *vma,
  				  struct vm_area_struct *next)
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index a781dec..be1d2a3 100644
+index a781dec..2c03225 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -129,6 +129,7 @@ struct fs_struct;
@@ -82309,7 +82342,33 @@ index a781dec..be1d2a3 100644
  {
  	return tsk->pid;
  }
-@@ -2112,7 +2223,9 @@ void yield(void);
+@@ -1988,6 +2099,25 @@ extern u64 sched_clock_cpu(int cpu);
+ 
+ extern void sched_clock_init(void);
+ 
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static inline void populate_stack(void)
++{
++	struct task_struct *curtask = current;
++	int c;
++	int *ptr = curtask->stack;
++	int *end = curtask->stack + THREAD_SIZE;
++
++	while (ptr < end) {
++		c = *(volatile int *)ptr;
++		ptr += PAGE_SIZE/sizeof(int);
++	}
++}
++#else
++static inline void populate_stack(void)
++{
++}
++#endif
++
+ #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+ static inline void sched_clock_tick(void)
+ {
+@@ -2112,7 +2242,9 @@ void yield(void);
  extern struct exec_domain	default_exec_domain;
  
  union thread_union {
@@ -82319,7 +82378,7 @@ index a781dec..be1d2a3 100644
  	unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -2145,6 +2258,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2145,6 +2277,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -82327,7 +82386,7 @@ index a781dec..be1d2a3 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
  		struct pid_namespace *ns);
  
-@@ -2307,7 +2421,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2307,7 +2440,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -82336,7 +82395,7 @@ index a781dec..be1d2a3 100644
  
  extern int allow_signal(int);
  extern int disallow_signal(int);
-@@ -2508,9 +2622,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2508,9 +2641,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  
  #endif
  
@@ -83302,7 +83361,7 @@ index 502073a..a7de024 100644
  #endif
  #endif /* _LINUX_VGA_SWITCHEROO_H_ */
 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
-index 4b8a891..cb8df6e 100644
+index 4b8a891..05f2361 100644
 --- a/include/linux/vmalloc.h
 +++ b/include/linux/vmalloc.h
 @@ -16,6 +16,11 @@ struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
@@ -83317,7 +83376,15 @@ index 4b8a891..cb8df6e 100644
  /* bits [20..32] reserved for arch specific ioremap internals */
  
  /*
-@@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struct *area);
+@@ -72,6 +77,7 @@ extern void *vzalloc_node(unsigned long size, int node);
+ extern void *vmalloc_exec(unsigned long size);
+ extern void *vmalloc_32(unsigned long size);
+ extern void *vmalloc_32_user(unsigned long size);
++extern void *vmalloc_stack(int node);
+ extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ 			unsigned long start, unsigned long end, gfp_t gfp_mask,
+@@ -142,7 +148,7 @@ extern void free_vm_area(struct vm_struct *area);
  
  /* for /dev/kmem */
  extern long vread(char *buf, char *addr, unsigned long count);
@@ -86418,10 +86485,49 @@ index 81b3d67..ef189a4 100644
  {
  	struct signal_struct *sig = current->signal;
 diff --git a/kernel/fork.c b/kernel/fork.c
-index a17621c..b77fef8 100644
+index a17621c..d9e4b37 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -137,6 +137,18 @@ void __weak arch_release_thread_info(struct thread_info *ti)
+ {
+ }
+ 
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
++						  int node)
++{
++	return vmalloc_stack(node);
++}
++
++static inline void free_thread_info(struct thread_info *ti)
++{
++	vfree(ti);
++}
++#else
+ #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+ 
+ /*
+@@ -179,6 +191,7 @@ void thread_info_cache_init(void)
+ }
+ # endif
+ #endif
++#endif
+ 
+ /* SLAB cache for signal_struct structures (tsk->signal) */
+ static struct kmem_cache *signal_cachep;
+@@ -200,9 +213,11 @@ static struct kmem_cache *mm_cachep;
+ 
+ static void account_kernel_stack(struct thread_info *ti, int account)
+ {
++#ifndef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+ 	struct zone *zone = page_zone(virt_to_page(ti));
+ 
+ 	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
++#endif
+ }
+ 
+ void free_task(struct task_struct *tsk)
+@@ -319,7 +334,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
  	*stackend = STACK_END_MAGIC;	/* for overflow detection */
  
  #ifdef CONFIG_CC_STACKPROTECTOR
@@ -86430,7 +86536,7 @@ index a17621c..b77fef8 100644
  #endif
  
  	/*
-@@ -345,12 +345,80 @@ free_tsk:
+@@ -345,12 +360,80 @@ free_tsk:
  }
  
  #ifdef CONFIG_MMU
@@ -86513,7 +86619,7 @@ index a17621c..b77fef8 100644
  
  	uprobe_start_dup_mmap();
  	down_write(&oldmm->mmap_sem);
-@@ -379,55 +447,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -379,55 +462,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
  
  	prev = NULL;
  	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -86573,7 +86679,7 @@ index a17621c..b77fef8 100644
  		}
  
  		/*
-@@ -459,6 +487,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -459,6 +502,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
  		if (retval)
  			goto out;
  	}
@@ -86605,7 +86711,7 @@ index a17621c..b77fef8 100644
  	/* a new mm has just been created */
  	arch_dup_mmap(oldmm, mm);
  	retval = 0;
-@@ -468,14 +521,6 @@ out:
+@@ -468,14 +536,6 @@ out:
  	up_write(&oldmm->mmap_sem);
  	uprobe_end_dup_mmap();
  	return retval;
@@ -86620,7 +86726,7 @@ index a17621c..b77fef8 100644
  }
  
  static inline int mm_alloc_pgd(struct mm_struct *mm)
-@@ -689,8 +734,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+@@ -689,8 +749,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  		return ERR_PTR(err);
  
  	mm = get_task_mm(task);
@@ -86631,7 +86737,7 @@ index a17621c..b77fef8 100644
  		mmput(mm);
  		mm = ERR_PTR(-EACCES);
  	}
-@@ -906,13 +951,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+@@ -906,13 +966,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
  			spin_unlock(&fs->lock);
  			return -EAGAIN;
  		}
@@ -86653,7 +86759,7 @@ index a17621c..b77fef8 100644
  	return 0;
  }
  
-@@ -1130,7 +1182,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
+@@ -1130,7 +1197,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
   * parts of the process environment (as per the clone
   * flags). The actual kick-off is left to the caller.
   */
@@ -86662,7 +86768,7 @@ index a17621c..b77fef8 100644
  					unsigned long stack_start,
  					unsigned long stack_size,
  					int __user *child_tidptr,
-@@ -1202,6 +1254,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1202,6 +1269,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
  #endif
  	retval = -EAGAIN;
@@ -86672,7 +86778,7 @@ index a17621c..b77fef8 100644
  	if (atomic_read(&p->real_cred->user->processes) >=
  			task_rlimit(p, RLIMIT_NPROC)) {
  		if (p->real_cred->user != INIT_USER &&
-@@ -1449,6 +1504,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1449,6 +1519,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  		goto bad_fork_free_pid;
  	}
  
@@ -86684,7 +86790,7 @@ index a17621c..b77fef8 100644
  	if (likely(p->pid)) {
  		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
  
-@@ -1537,6 +1597,8 @@ bad_fork_cleanup_count:
+@@ -1537,6 +1612,8 @@ bad_fork_cleanup_count:
  bad_fork_free:
  	free_task(p);
  fork_out:
@@ -86693,7 +86799,7 @@ index a17621c..b77fef8 100644
  	return ERR_PTR(retval);
  }
  
-@@ -1598,6 +1660,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1598,6 +1675,7 @@ long do_fork(unsigned long clone_flags,
  
  	p = copy_process(clone_flags, stack_start, stack_size,
  			 child_tidptr, NULL, trace);
@@ -86701,7 +86807,7 @@ index a17621c..b77fef8 100644
  	/*
  	 * Do this prior waking up the new thread - the thread pointer
  	 * might get invalid after that point, if the thread exits quickly.
-@@ -1612,6 +1675,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1612,6 +1690,8 @@ long do_fork(unsigned long clone_flags,
  		if (clone_flags & CLONE_PARENT_SETTID)
  			put_user(nr, parent_tidptr);
  
@@ -86710,7 +86816,7 @@ index a17621c..b77fef8 100644
  		if (clone_flags & CLONE_VFORK) {
  			p->vfork_done = &vfork;
  			init_completion(&vfork);
-@@ -1728,7 +1793,7 @@ void __init proc_caches_init(void)
+@@ -1728,7 +1808,7 @@ void __init proc_caches_init(void)
  	mm_cachep = kmem_cache_create("mm_struct",
  			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -86719,7 +86825,7 @@ index a17621c..b77fef8 100644
  	mmap_init();
  	nsproxy_cache_init();
  }
-@@ -1768,7 +1833,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1768,7 +1848,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
  		return 0;
  
  	/* don't need lock here; in the worst case we'll do useless copy */
@@ -86728,7 +86834,7 @@ index a17621c..b77fef8 100644
  		return 0;
  
  	*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1940,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1875,7 +1955,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
  			fs = current->fs;
  			spin_lock(&fs->lock);
  			current->fs = new_fs;
@@ -89806,7 +89912,7 @@ index a63f4dc..349bbb0 100644
  				     unsigned long timeout)
  {
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index f5c6635..ab9f223 100644
+index f5c6635..7133356 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -1775,7 +1775,7 @@ void set_numabalancing_state(bool enabled)
@@ -89818,7 +89924,19 @@ index f5c6635..ab9f223 100644
  	int err;
  	int state = numabalancing_enabled;
  
-@@ -3049,6 +3049,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -2251,8 +2251,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ 		next->active_mm = oldmm;
+ 		atomic_inc(&oldmm->mm_count);
+ 		enter_lazy_tlb(oldmm, next);
+-	} else
++	} else {
+ 		switch_mm(oldmm, mm, next);
++		populate_stack();
++	}
+ 
+ 	if (!prev->mm) {
+ 		prev->active_mm = NULL;
+@@ -3049,6 +3051,8 @@ int can_nice(const struct task_struct *p, const int nice)
  	/* convert nice value [19,-20] to rlimit style value [1,40] */
  	int nice_rlim = 20 - nice;
  
@@ -89827,7 +89945,7 @@ index f5c6635..ab9f223 100644
  	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
  		capable(CAP_SYS_NICE));
  }
-@@ -3082,7 +3084,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -3082,7 +3086,8 @@ SYSCALL_DEFINE1(nice, int, increment)
  	if (nice > 19)
  		nice = 19;
  
@@ -89837,7 +89955,7 @@ index f5c6635..ab9f223 100644
  		return -EPERM;
  
  	retval = security_task_setnice(current, nice);
-@@ -3332,6 +3335,7 @@ recheck:
+@@ -3332,6 +3337,7 @@ recheck:
  			if (policy != p->policy && !rlim_rtprio)
  				return -EPERM;
  
@@ -89845,7 +89963,19 @@ index f5c6635..ab9f223 100644
  			/* can't increase priority */
  			if (attr->sched_priority > p->rt_priority &&
  			    attr->sched_priority > rlim_rtprio)
-@@ -4781,7 +4785,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+@@ -4702,8 +4708,10 @@ void idle_task_exit(void)
+ 
+ 	BUG_ON(cpu_online(smp_processor_id()));
+ 
+-	if (mm != &init_mm)
++	if (mm != &init_mm) {
+ 		switch_mm(mm, &init_mm, current);
++		populate_stack();
++	}
+ 	mmdrop(mm);
+ }
+ 
+@@ -4781,7 +4789,7 @@ static void migrate_tasks(unsigned int dead_cpu)
  
  #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
  
@@ -89854,7 +89984,7 @@ index f5c6635..ab9f223 100644
  	{
  		.procname	= "sched_domain",
  		.mode		= 0555,
-@@ -4798,17 +4802,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -4798,17 +4806,17 @@ static struct ctl_table sd_ctl_root[] = {
  	{}
  };
  
@@ -89876,7 +90006,7 @@ index f5c6635..ab9f223 100644
  
  	/*
  	 * In the intermediate directories, both the child directory and
-@@ -4816,22 +4820,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -4816,22 +4824,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
  	 * will always be set. In the lowest directory the names are
  	 * static strings and all have proc handlers.
  	 */
@@ -89908,7 +90038,7 @@ index f5c6635..ab9f223 100644
  		const char *procname, void *data, int maxlen,
  		umode_t mode, proc_handler *proc_handler,
  		bool load_idx)
-@@ -4851,7 +4858,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -4851,7 +4862,7 @@ set_table_entry(struct ctl_table *entry,
  static struct ctl_table *
  sd_alloc_ctl_domain_table(struct sched_domain *sd)
  {
@@ -89917,7 +90047,7 @@ index f5c6635..ab9f223 100644
  
  	if (table == NULL)
  		return NULL;
-@@ -4886,9 +4893,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -4886,9 +4897,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
  	return table;
  }
  
@@ -89929,7 +90059,7 @@ index f5c6635..ab9f223 100644
  	struct sched_domain *sd;
  	int domain_num = 0, i;
  	char buf[32];
-@@ -4915,11 +4922,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -4915,11 +4926,13 @@ static struct ctl_table_header *sd_sysctl_header;
  static void register_sched_domain_sysctl(void)
  {
  	int i, cpu_num = num_possible_cpus();
@@ -89944,7 +90074,7 @@ index f5c6635..ab9f223 100644
  
  	if (entry == NULL)
  		return;
-@@ -4942,8 +4951,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -4942,8 +4955,12 @@ static void unregister_sched_domain_sysctl(void)
  	if (sd_sysctl_header)
  		unregister_sysctl_table(sd_sysctl_header);
  	sd_sysctl_header = NULL;
@@ -92241,10 +92371,24 @@ index 09d9591..165bb75 100644
  		bdi_destroy(bdi);
  		return err;
 diff --git a/mm/filemap.c b/mm/filemap.c
-index 7a13f6a..e52e841 100644
+index 7a13f6a..e31738b 100644
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
-@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
+@@ -192,9 +192,11 @@ static int filemap_check_errors(struct address_space *mapping)
+ {
+ 	int ret = 0;
+ 	/* Check for outstanding write errors */
+-	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
++	if (test_bit(AS_ENOSPC, &mapping->flags) &&
++	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+ 		ret = -ENOSPC;
+-	if (test_and_clear_bit(AS_EIO, &mapping->flags))
++	if (test_bit(AS_EIO, &mapping->flags) &&
++	    test_and_clear_bit(AS_EIO, &mapping->flags))
+ 		ret = -EIO;
+ 	return ret;
+ }
+@@ -1766,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
  	struct address_space *mapping = file->f_mapping;
  
  	if (!mapping->a_ops->readpage)
@@ -92253,7 +92397,7 @@ index 7a13f6a..e52e841 100644
  	file_accessed(file);
  	vma->vm_ops = &generic_file_vm_ops;
  	return 0;
-@@ -1948,7 +1948,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+@@ -1948,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
  
  	while (bytes) {
  		char __user *buf = iov->iov_base + base;
@@ -92262,7 +92406,7 @@ index 7a13f6a..e52e841 100644
  
  		base = 0;
  		left = __copy_from_user_inatomic(vaddr, buf, copy);
-@@ -1977,7 +1977,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
+@@ -1977,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
  	BUG_ON(!in_atomic());
  	kaddr = kmap_atomic(page);
  	if (likely(i->nr_segs == 1)) {
@@ -92271,7 +92415,7 @@ index 7a13f6a..e52e841 100644
  		char __user *buf = i->iov->iov_base + i->iov_offset;
  		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
  		copied = bytes - left;
-@@ -2005,7 +2005,7 @@ size_t iov_iter_copy_from_user(struct page *page,
+@@ -2005,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
  
  	kaddr = kmap(page);
  	if (likely(i->nr_segs == 1)) {
@@ -92280,7 +92424,7 @@ index 7a13f6a..e52e841 100644
  		char __user *buf = i->iov->iov_base + i->iov_offset;
  		left = __copy_from_user(kaddr + offset, buf, bytes);
  		copied = bytes - left;
-@@ -2035,7 +2035,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
+@@ -2035,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
  		 * zero-length segments (without overruning the iovec).
  		 */
  		while (bytes || unlikely(i->count && !iov->iov_len)) {
@@ -92289,7 +92433,7 @@ index 7a13f6a..e52e841 100644
  
  			copy = min(bytes, iov->iov_len - base);
  			BUG_ON(!i->count || i->count < copy);
-@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
+@@ -2106,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
                          *pos = i_size_read(inode);
  
  		if (limit != RLIM_INFINITY) {
@@ -96786,7 +96930,7 @@ index a24aa22..a0d41ae 100644
  }
  #endif
 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 0fdf968..d6686e8 100644
+index 0fdf968..2183ba3 100644
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
 @@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -96932,7 +97076,20 @@ index 0fdf968..d6686e8 100644
  	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
  				  start, end, node, gfp_mask, caller);
  	if (!area)
-@@ -1810,10 +1868,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1701,6 +1759,12 @@ static inline void *__vmalloc_node_flags(unsigned long size,
+ 					node, __builtin_return_address(0));
+ }
+ 
++void *vmalloc_stack(int node)
++{
++	return __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP, PAGE_KERNEL,
++				node, __builtin_return_address(0));
++}
++
+ /**
+  *	vmalloc  -  allocate virtually contiguous memory
+  *	@size:		allocation size
+@@ -1810,10 +1874,9 @@ EXPORT_SYMBOL(vzalloc_node);
   *	For tight control over page level allocator and protection flags
   *	use __vmalloc() instead.
   */
@@ -96944,7 +97101,7 @@ index 0fdf968..d6686e8 100644
  			      NUMA_NO_NODE, __builtin_return_address(0));
  }
  
-@@ -2120,6 +2177,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+@@ -2120,6 +2183,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
  {
  	struct vm_struct *area;
  
@@ -96953,7 +97110,7 @@ index 0fdf968..d6686e8 100644
  	size = PAGE_ALIGN(size);
  
  	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
-@@ -2602,7 +2661,11 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2602,7 +2667,11 @@ static int s_show(struct seq_file *m, void *p)
  		v->addr, v->addr + v->size, v->size);
  
  	if (v->caller)

diff --git a/3.14.4/4450_grsec-kconfig-default-gids.patch b/3.14.4/4450_grsec-kconfig-default-gids.patch
index ed2968f..a965a27 100644
--- a/3.14.4/4450_grsec-kconfig-default-gids.patch
+++ b/3.14.4/4450_grsec-kconfig-default-gids.patch
@@ -16,7 +16,7 @@ from shooting themselves in the foot.
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2012-10-13 09:51:35.000000000 -0400
 +++ b/grsecurity/Kconfig	2012-10-13 09:52:32.000000000 -0400
-@@ -665,7 +665,7 @@
+@@ -678,7 +678,7 @@
  config GRKERNSEC_AUDIT_GID
  	int "GID for auditing"
  	depends on GRKERNSEC_AUDIT_GROUP
@@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  
  config GRKERNSEC_EXECLOG
  	bool "Exec logging"
-@@ -896,7 +896,7 @@
+@@ -909,7 +909,7 @@
  config GRKERNSEC_TPE_UNTRUSTED_GID
  	int "GID for TPE-untrusted users"
  	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *enabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -905,7 +905,7 @@
+@@ -918,7 +918,7 @@
  config GRKERNSEC_TPE_TRUSTED_GID
  	int "GID for TPE-trusted users"
  	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *disabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -998,7 +998,7 @@
+@@ -1011,7 +1011,7 @@
  config GRKERNSEC_SOCKET_ALL_GID
  	int "GID to deny all sockets for"
  	depends on GRKERNSEC_SOCKET_ALL
@@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable socket access for. Remember to
  	  add the users you want socket access disabled for to the GID
-@@ -1019,7 +1019,7 @@
+@@ -1032,7 +1032,7 @@
  config GRKERNSEC_SOCKET_CLIENT_GID
  	int "GID to deny client sockets for"
  	depends on GRKERNSEC_SOCKET_CLIENT
@@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable client socket access for.
  	  Remember to add the users you want client socket access disabled for to
-@@ -1037,7 +1037,7 @@
+@@ -1050,7 +1050,7 @@
  config GRKERNSEC_SOCKET_SERVER_GID
  	int "GID to deny server sockets for"
  	depends on GRKERNSEC_SOCKET_SERVER

diff --git a/3.14.4/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.4/4465_selinux-avc_audit-log-curr_ip.patch
index aa90a6f..2765cdc 100644
--- a/3.14.4/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.14.4/4465_selinux-avc_audit-log-curr_ip.patch
@@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2011-04-17 19:25:54.000000000 -0400
 +++ b/grsecurity/Kconfig	2011-04-17 19:32:53.000000000 -0400
-@@ -1132,6 +1132,27 @@
+@@ -1145,6 +1145,27 @@
  menu "Logging Options"
  depends on GRKERNSEC
  


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2014-05-28 16:26 UTC | newest]

Thread overview: (only message) (download: mbox.gz; follow: Atom feed)
-- links below jump to the message on this page --
2014-05-28 16:26 [gentoo-commits] proj/hardened-patchset:master commit in: 3.14.4/ Anthony G. Basile

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox