* [gentoo-commits] proj/hardened-patchset:master commit in: 4.3.4/
From: Anthony G. Basile @ 2016-01-30 12:29 UTC
To: gentoo-commits
commit: 92b230adb84942fe6bf8d05cc6012ce0f98050a7
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 30 12:37:58 2016 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sat Jan 30 12:37:58 2016 +0000
URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=92b230ad
grsecurity-3.1-4.3.4-201601292206
4.3.4/0000_README | 2 +-
...> 4420_grsecurity-3.1-4.3.4-201601292206.patch} | 204 ++++++++++++++++++---
2 files changed, 179 insertions(+), 27 deletions(-)
diff --git a/4.3.4/0000_README b/4.3.4/0000_README
index 158f0b1..ce73e44 100644
--- a/4.3.4/0000_README
+++ b/4.3.4/0000_README
@@ -6,7 +6,7 @@ Patch: 1003_linux-4.3.4.patch
From: http://www.kernel.org
Desc: Linux 4.3.4
-Patch: 4420_grsecurity-3.1-4.3.4-201601261954.patch
+Patch: 4420_grsecurity-3.1-4.3.4-201601292206.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.3.4/4420_grsecurity-3.1-4.3.4-201601261954.patch b/4.3.4/4420_grsecurity-3.1-4.3.4-201601292206.patch
similarity index 99%
rename from 4.3.4/4420_grsecurity-3.1-4.3.4-201601261954.patch
rename to 4.3.4/4420_grsecurity-3.1-4.3.4-201601292206.patch
index f866bc7..92cf754 100644
--- a/4.3.4/4420_grsecurity-3.1-4.3.4-201601261954.patch
+++ b/4.3.4/4420_grsecurity-3.1-4.3.4-201601292206.patch
@@ -75860,14 +75860,17 @@ index 155f842..89922d8 100644
file = aio_private_file(ctx, nr_pages);
diff --git a/fs/attr.c b/fs/attr.c
-index 6530ced..4a827e2 100644
+index 6530ced..559e5e6 100644
--- a/fs/attr.c
+++ b/fs/attr.c
-@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
+@@ -102,6 +102,10 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
unsigned long limit;
limit = rlimit(RLIMIT_FSIZE);
-+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
++ if (offset > ULONG_MAX)
++ gr_learn_resource(current, RLIMIT_FSIZE, ULONG_MAX, 1);
++ else if (offset > 0)
++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
if (limit != RLIM_INFINITY && offset > limit)
goto out_sig;
if (offset > inode->i_sb->s_maxbytes)
@@ -77631,7 +77634,7 @@ index b406a32..243eb1c 100644
GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
-index 62203c3..93267bf 100644
+index 62203c3..fa2d9b3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2054,10 +2054,14 @@ static int cifs_writepages(struct address_space *mapping,
@@ -77652,6 +77655,15 @@ index 62203c3..93267bf 100644
scanned = true;
}
server = cifs_sb_master_tcon(cifs_sb)->ses->server;
+@@ -2531,7 +2535,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
+ wdata->pid = pid;
+ wdata->bytes = cur_len;
+ wdata->pagesz = PAGE_SIZE;
+- wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
++ wdata->tailsz = cur_len - nr_pages * PAGE_SIZE + PAGE_SIZE;
+ wdata->credits = credits;
+
+ if (!wdata->cfile->invalidHandle ||
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 8442b8b..ea6986f 100644
--- a/fs/cifs/misc.c
@@ -82037,6 +82049,19 @@ index ebb5e37..beae05b 100644
do_wakeup = 1;
}
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index cf4ab89..5a00960 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -781,7 +781,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
+ {
+ loff_t max = *len;
+ const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+- unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
++ unsigned int tmp, max_data = max_blocks - 3 * sdp->sd_max_height + 3;
+
+ for (tmp = max_data; tmp > sdp->sd_diptrs;) {
+ tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9bd1244..b8c82d9 100644
--- a/fs/gfs2/glock.c
@@ -112918,6 +112943,46 @@ index 6d2a119..ac24f34 100644
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 5bd4779..6bf906d 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
+ put_seccomp_filter(thread);
+ smp_store_release(&thread->seccomp.filter,
+ caller->seccomp.filter);
++
++ /*
++ * Don't let an unprivileged task work around
++ * the no_new_privs restriction by creating
++ * a thread that sets it up, enters seccomp,
++ * then dies.
++ */
++ if (task_no_new_privs(caller))
++ task_set_no_new_privs(thread);
++
+ /*
+ * Opt the other thread into seccomp if needed.
+ * As threads are considered to be trust-realm
+ * equivalent (see ptrace_may_access), it is safe to
+ * allow one thread to transition the other.
+ */
+- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
+- /*
+- * Don't let an unprivileged task work around
+- * the no_new_privs restriction by creating
+- * a thread that sets it up, enters seccomp,
+- * then dies.
+- */
+- if (task_no_new_privs(caller))
+- task_set_no_new_privs(thread);
+-
++ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+- }
+ }
+ }
+
diff --git a/kernel/signal.c b/kernel/signal.c
index 0f6bbbe..4791c7d 100644
--- a/kernel/signal.c
@@ -150284,10 +150349,10 @@ index 0000000..cc20d48
+#endif
diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c
new file mode 100644
-index 0000000..7d9135d
+index 0000000..bd18a67
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c
-@@ -0,0 +1,1032 @@
+@@ -0,0 +1,1116 @@
+/*
+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -150495,9 +150560,6 @@ index 0000000..7d9135d
+ switch (TREE_CODE(node)) {
+ case COMPONENT_REF:
+ cur_decl = search_field_decl(node);
-+ // !!! temporarily ignore bitfield types
-+ if (DECL_BIT_FIELD_TYPE(cur_decl))
-+ return MARK_YES;
+ if (is_turn_off_intentional_attr(cur_decl))
+ return MARK_TURN_OFF;
+ if (is_end_intentional_intentional_attr(cur_decl))
@@ -150526,9 +150588,6 @@ index 0000000..7d9135d
+ break;
+ }
+ case FIELD_DECL:
-+ // !!! temporarily ignore bitfield types
-+ if (DECL_BIT_FIELD_TYPE(node))
-+ return MARK_YES;
+ case VAR_DECL:
+ if (is_end_intentional_intentional_attr(node))
+ return MARK_END_INTENTIONAL;
@@ -151320,6 +151379,96 @@ index 0000000..7d9135d
+ add_rhs2 = gimple_assign_rhs2(add_stmt);
+ return check_add_stmt(add_rhs2);
+}
++
++/* True:
++ * _25 = (<unnamed-unsigned:1>) _24;
++ * r_5(D)->stereo = _25;
++ */
++bool is_bitfield_unnamed_cast(const_tree decl, gassign *assign)
++{
++ const_tree rhs, type;
++ gimple def_stmt;
++
++ if (TREE_CODE(decl) != FIELD_DECL)
++ return false;
++ if (!DECL_BIT_FIELD_TYPE(decl))
++ return false;
++ if (gimple_num_ops(assign) != 2)
++ return false;
++
++ rhs = gimple_assign_rhs1(assign);
++ if (is_gimple_constant(rhs))
++ return false;
++ type = TREE_TYPE(rhs);
++ if (TREE_CODE(type) == BOOLEAN_TYPE)
++ return false;
++
++ def_stmt = get_def_stmt(rhs);
++ if (!gimple_assign_cast_p(def_stmt))
++ return false;
++ return TYPE_PRECISION(type) < CHAR_TYPE_SIZE;
++}
++
++static bool is_mult_const(const_tree lhs)
++{
++ const_gimple def_stmt;
++ const_tree rhs1, rhs2;
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt || gimple_assign_rhs_code(def_stmt) != MULT_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ if (is_gimple_constant(rhs1))
++ return !is_lt_signed_type_max(rhs1);
++ else if (is_gimple_constant(rhs2))
++ return !is_lt_signed_type_max(rhs2);
++ return false;
++}
++
++/* True:
++ * fs/cifs/file.c cifs_write_from_iter()
++ * u32 = u64 - (u64 - constant) * constant
++ * wdata->tailsz = cur_len - (nr_pages - 1) * PAGE_SIZE;
++ *
++ * _51 = _50 * 4294963200;
++ * _52 = _49 + _51;
++ * _53 = _52 + 4096;
++ */
++
++bool uconst_neg_intentional_overflow(struct visited *visited, const gassign *stmt)
++{
++ const_gimple def_stmt;
++ const_tree noconst_rhs;
++ tree rhs1, rhs2;
++
++ // _53 = _52 + const;
++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
++ return false;
++ rhs1 = gimple_assign_rhs1(stmt);
++ rhs2 = gimple_assign_rhs2(stmt);
++ if (is_gimple_constant(rhs1))
++ noconst_rhs = rhs2;
++ else if (is_gimple_constant(rhs2))
++ noconst_rhs = rhs1;
++ else
++ return false;
++ def_stmt = get_def_stmt(noconst_rhs);
++
++ // _52 = _49 + _51;
++ if (!def_stmt)
++ return false;
++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
++ return false;
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++ return false;
++
++ // _51 = _50 * gt signed type max;
++ return is_mult_const(rhs1) || is_mult_const(rhs2);
++}
diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
new file mode 100644
index 0000000..5622b51
@@ -151465,10 +151614,10 @@ index 0000000..5622b51
+
diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h
new file mode 100644
-index 0000000..5fd6c28
+index 0000000..ee57a00
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow.h
-@@ -0,0 +1,323 @@
+@@ -0,0 +1,325 @@
+#ifndef SIZE_OVERFLOW_H
+#define SIZE_OVERFLOW_H
+
@@ -151673,6 +151822,8 @@ index 0000000..5fd6c28
+extern enum intentional_overflow_type add_mul_intentional_overflow(const gassign *stmt);
+extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gassign *stmt);
+extern bool neg_short_add_intentional_overflow(gassign *stmt);
++extern bool is_bitfield_unnamed_cast(const_tree decl, gassign *assign);
++extern bool uconst_neg_intentional_overflow(struct visited *visited, const gassign *stmt);
+
+
+// insert_size_overflow_asm.c
@@ -175586,12 +175737,12 @@ index 0000000..6075e8f
+
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
new file mode 100644
-index 0000000..f1cc040
+index 0000000..f50c635
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
@@ -0,0 +1,318 @@
+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2016 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
@@ -175621,7 +175772,7 @@ index 0000000..f1cc040
+tree size_overflow_type_TI;
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20151201",
++ .version = "20160128",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -176268,10 +176419,10 @@ index 0000000..317cd6c
+
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_transform.c b/tools/gcc/size_overflow_plugin/size_overflow_transform.c
new file mode 100644
-index 0000000..8f42c7e
+index 0000000..f9de78e
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_transform.c
-@@ -0,0 +1,749 @@
+@@ -0,0 +1,745 @@
+/*
+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -176457,9 +176608,6 @@ index 0000000..8f42c7e
+
+ if (skip_types(orig_node))
+ return head;
-+ // !!! temporarily ignore bitfield types
-+ if (orig_code == FIELD_DECL && DECL_BIT_FIELD_TYPE(orig_node))
-+ return head;
+
+ // find a defining marked caller argument or struct field for arg
+ if (check_intentional_size_overflow_asm_and_attribute(orig_node) != MARK_NO)
@@ -176818,8 +176966,7 @@ index 0000000..8f42c7e
+ if (DECL_NAME(decl) == NULL_TREE)
+ return head;
+
-+ // !!! temporarily ignore bitfield types
-+ if (TREE_CODE(decl) == FIELD_DECL && DECL_BIT_FIELD_TYPE(decl))
++ if (is_bitfield_unnamed_cast(decl, assign))
+ return head;
+
+ next_node = get_interesting_function_next_node(decl, 0);
@@ -177023,10 +177170,10 @@ index 0000000..8f42c7e
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c b/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c
new file mode 100644
-index 0000000..8a30b3b
+index 0000000..2ab3b9e
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c
-@@ -0,0 +1,1010 @@
+@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -177945,6 +178092,11 @@ index 0000000..8a30b3b
+ if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
+ return handle_comparison_code_class(visited, expand_from, def_stmt, new_rhs1, new_rhs2);
+
++ if (uconst_neg_intentional_overflow(visited, def_stmt)) {
++ inform(gimple_location(def_stmt), "%s: gcc intentional overflow", __func__);
++ gcc_unreachable();
++ }
++
+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
+}
+
* [gentoo-commits] proj/hardened-patchset:master commit in: 4.3.4/
From: Anthony G. Basile @ 2016-01-28 05:23 UTC
To: gentoo-commits
commit: 8f65a787591f7fdc93c18637c2d33210e0cd738d
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 28 05:31:20 2016 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Thu Jan 28 05:31:20 2016 +0000
URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=8f65a787
grsecurity-3.1-4.3.4-201601261954
4.3.4/0000_README | 2 +-
...> 4420_grsecurity-3.1-4.3.4-201601261954.patch} | 444 +++++++++++++++++++--
4.3.4/4427_force_XATTR_PAX_tmpfs.patch | 4 +-
4.3.4/4450_grsec-kconfig-default-gids.patch | 12 +-
4.3.4/4465_selinux-avc_audit-log-curr_ip.patch | 2 +-
5 files changed, 411 insertions(+), 53 deletions(-)
diff --git a/4.3.4/0000_README b/4.3.4/0000_README
index f0bdee5..158f0b1 100644
--- a/4.3.4/0000_README
+++ b/4.3.4/0000_README
@@ -6,7 +6,7 @@ Patch: 1003_linux-4.3.4.patch
From: http://www.kernel.org
Desc: Linux 4.3.4
-Patch: 4420_grsecurity-3.1-4.3.4-201601231215.patch
+Patch: 4420_grsecurity-3.1-4.3.4-201601261954.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.3.4/4420_grsecurity-3.1-4.3.4-201601231215.patch b/4.3.4/4420_grsecurity-3.1-4.3.4-201601261954.patch
similarity index 99%
rename from 4.3.4/4420_grsecurity-3.1-4.3.4-201601231215.patch
rename to 4.3.4/4420_grsecurity-3.1-4.3.4-201601261954.patch
index db01d7f..f866bc7 100644
--- a/4.3.4/4420_grsecurity-3.1-4.3.4-201601231215.patch
+++ b/4.3.4/4420_grsecurity-3.1-4.3.4-201601261954.patch
@@ -12575,7 +12575,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 96d058a..b581500 100644
+index 96d058a..92a8d5b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -36,14 +36,13 @@ config X86
@@ -12661,6 +12661,15 @@ index 96d058a..b581500 100644
config ARCH_DMA_ADDR_T_64BIT
def_bool y
+@@ -1448,7 +1450,7 @@ config ARCH_PROC_KCORE_TEXT
+
+ config ILLEGAL_POINTER_VALUE
+ hex
+- default 0 if X86_32
++ default 0xfffff000 if X86_32
+ default 0xdead000000000000 if X86_64
+
+ source "mm/Kconfig"
@@ -1757,6 +1759,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
@@ -19780,7 +19789,7 @@ index 55234d5..7e3c4bf 100644
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index 379cd36..25f4ba2 100644
+index 379cd36..8ef26be 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -46,7 +46,7 @@ struct ldt_struct {
@@ -19792,7 +19801,31 @@ index 379cd36..25f4ba2 100644
};
/*
-@@ -98,26 +98,95 @@ static inline void load_mm_ldt(struct mm_struct *mm)
+@@ -58,6 +58,23 @@ void destroy_context(struct mm_struct *mm);
+ static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+ {
++ if (tsk == current) {
++ mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_clear(&mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++ }
++
+ return 0;
+ }
+ static inline void destroy_context(struct mm_struct *mm) {}
+@@ -98,26 +115,95 @@ static inline void load_mm_ldt(struct mm_struct *mm)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -19888,7 +19921,7 @@ index 379cd36..25f4ba2 100644
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
-@@ -142,9 +211,31 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -142,9 +228,31 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
#endif
@@ -19921,7 +19954,7 @@ index 379cd36..25f4ba2 100644
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-@@ -161,13 +252,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -161,13 +269,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
@@ -52619,6 +52652,86 @@ index ed00446..943fe2c 100644
break;
err = 0;
break;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index b910cae..f55670b 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+ return i < MAX_CALLID;
+ }
+
+-static int add_chan(struct pppox_sock *sock)
++static int add_chan(struct pppox_sock *sock,
++ struct pptp_addr *sa)
+ {
+ static int call_id;
+
+ spin_lock(&chan_lock);
+- if (!sock->proto.pptp.src_addr.call_id) {
++ if (!sa->call_id) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ if (call_id == MAX_CALLID) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ if (call_id == MAX_CALLID)
+ goto out_err;
+ }
+- sock->proto.pptp.src_addr.call_id = call_id;
+- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
++ sa->call_id = call_id;
++ } else if (test_bit(sa->call_id, callid_bitmap)) {
+ goto out_err;
++ }
+
+- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
++ sock->proto.pptp.src_addr = *sa;
++ set_bit(sa->call_id, callid_bitmap);
++ rcu_assign_pointer(callid_sock[sa->call_id], sock);
+ spin_unlock(&chan_lock);
+
+ return 0;
+@@ -415,7 +418,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+- struct pptp_opt *opt = &po->proto.pptp;
+ int error = 0;
+
+ if (sockaddr_len < sizeof(struct sockaddr_pppox))
+@@ -423,10 +425,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+
+ lock_sock(sk);
+
+- opt->src_addr = sp->sa_addr.pptp;
+- if (add_chan(po))
++ if (sk->sk_state & PPPOX_DEAD) {
++ error = -EALREADY;
++ goto out;
++ }
++
++ if (sk->sk_state & PPPOX_BOUND) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ if (add_chan(po, &sp->sa_addr.pptp))
+ error = -EBUSY;
++ else
++ sk->sk_state |= PPPOX_BOUND;
+
++out:
+ release_sock(sk);
+ return error;
+ }
+@@ -497,7 +511,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ }
+
+ opt->dst_addr = sp->sa_addr.pptp;
+- sk->sk_state = PPPOX_CONNECTED;
++ sk->sk_state |= PPPOX_CONNECTED;
+
+ end:
+ release_sock(sk);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 079f7ad..7e59810 100644
--- a/drivers/net/slip/slhc.c
@@ -87041,10 +87154,10 @@ index 85f883d..db6eecc 100644
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..31f8fe4
+index 0000000..0841273
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1182 @@
+@@ -0,0 +1,1185 @@
+#
+# grecurity configuration
+#
@@ -87068,6 +87181,9 @@ index 0000000..31f8fe4
+ you use the RBAC system, as it is still possible for an attacker to
+ modify the running kernel through other more obscure methods.
+
++ Enabling this feature will prevent the "cpupower" and "powertop" tools
++ from working and excludes debugfs from being compiled into the kernel.
++
+ It is highly recommended that you say Y here if you meet all the
+ conditions above.
+
@@ -104592,6 +104708,22 @@ index 6fb8016..2cf60e7 100644
/* shm_mode upper byte flags */
#define SHM_DEST 01000 /* segment will be destroyed on last detach */
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 50777b5..92d112a 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -15,10 +15,7 @@ struct shmem_inode_info {
+ unsigned int seals; /* shmem seals */
+ unsigned long flags;
+ unsigned long alloced; /* data pages alloced to file */
+- union {
+- unsigned long swapped; /* subtotal assigned to swap */
+- char *symlink; /* unswappable short symlink */
+- };
++ unsigned long swapped; /* subtotal assigned to swap */
+ struct shared_policy policy; /* NUMA memory alloc policy */
+ struct list_head swaplist; /* chain of maybes on swap */
+ struct simple_xattrs xattrs; /* list of xattrs */
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ab1e039..ad4229e 100644
--- a/include/linux/signal.h
@@ -106575,6 +106707,49 @@ index 495c87e..5b327ff 100644
/* Structure to track chunk fragments that have been acked, but peer
+diff --git a/include/net/snmp.h b/include/net/snmp.h
+index 35512ac..edbd85b 100644
+--- a/include/net/snmp.h
++++ b/include/net/snmp.h
+@@ -67,7 +67,7 @@ struct icmp_mib {
+
+ #define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
+ struct icmpmsg_mib {
+- atomic_long_t mibs[ICMPMSG_MIB_MAX];
++ atomic_long_unchecked_t mibs[ICMPMSG_MIB_MAX];
+ };
+
+ /* ICMP6 (IPv6-ICMP) */
+@@ -78,17 +78,17 @@ struct icmpv6_mib {
+ };
+ /* per device counters, (shared on all cpus) */
+ struct icmpv6_mib_device {
+- atomic_long_t mibs[ICMP6_MIB_MAX];
++ atomic_long_unchecked_t mibs[ICMP6_MIB_MAX];
+ };
+
+ #define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX
+ /* per network ns counters */
+ struct icmpv6msg_mib {
+- atomic_long_t mibs[ICMP6MSG_MIB_MAX];
++ atomic_long_unchecked_t mibs[ICMP6MSG_MIB_MAX];
+ };
+ /* per device counters, (shared on all cpus) */
+ struct icmpv6msg_mib_device {
+- atomic_long_t mibs[ICMP6MSG_MIB_MAX];
++ atomic_long_unchecked_t mibs[ICMP6MSG_MIB_MAX];
+ };
+
+
+@@ -130,7 +130,7 @@ struct linux_xfrm_mib {
+ this_cpu_inc(mib->mibs[field])
+
+ #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
+- atomic_long_inc(&mib->mibs[field])
++ atomic_long_inc_unchecked(&mib->mibs[field])
+
+ #define SNMP_INC_STATS(mib, field) \
+ this_cpu_inc(mib->mibs[field])
diff --git a/include/net/sock.h b/include/net/sock.h
index bca709a..75776c9 100644
--- a/include/net/sock.h
@@ -109116,7 +109291,7 @@ index ea95ee1..27177a8 100644
if (wo->wo_flags & __WNOTHREAD)
break;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 2845623..4b46ab9 100644
+index 2845623..baaf316 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -188,12 +188,54 @@ static void free_thread_info(struct thread_info *ti)
@@ -109398,7 +109573,7 @@ index 2845623..4b46ab9 100644
}
/*
-@@ -505,6 +584,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -505,6 +584,38 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
}
@@ -109407,11 +109582,16 @@ index 2845623..4b46ab9 100644
+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
+ struct vm_area_struct *mpnt_m;
+
-+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next) {
++ if (mpnt->vm_flags & VM_DONTCOPY)
++ continue;
++
+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
+
-+ if (!mpnt->vm_mirror)
++ if (!mpnt->vm_mirror) {
++ mpnt_m = mpnt_m->vm_next;
+ continue;
++ }
+
+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
@@ -109422,6 +109602,8 @@ index 2845623..4b46ab9 100644
+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
+ mpnt->vm_mirror->vm_mirror = mpnt;
+ }
++
++ mpnt_m = mpnt_m->vm_next;
+ }
+ BUG_ON(mpnt_m);
+ }
@@ -109430,7 +109612,7 @@ index 2845623..4b46ab9 100644
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
-@@ -514,14 +618,6 @@ out:
+@@ -514,14 +625,6 @@ out:
up_write(&oldmm->mmap_sem);
uprobe_end_dup_mmap();
return retval;
@@ -109445,7 +109627,7 @@ index 2845623..4b46ab9 100644
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
-@@ -796,8 +892,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+@@ -796,8 +899,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
return ERR_PTR(err);
mm = get_task_mm(task);
@@ -109456,7 +109638,7 @@ index 2845623..4b46ab9 100644
mmput(mm);
mm = ERR_PTR(-EACCES);
}
-@@ -998,13 +1094,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+@@ -998,13 +1101,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
spin_unlock(&fs->lock);
return -EAGAIN;
}
@@ -109478,7 +109660,7 @@ index 2845623..4b46ab9 100644
return 0;
}
-@@ -1241,7 +1344,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
+@@ -1241,7 +1351,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
@@ -109487,7 +109669,7 @@ index 2845623..4b46ab9 100644
unsigned long stack_start,
unsigned long stack_size,
int __user *child_tidptr,
-@@ -1313,6 +1416,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1313,6 +1423,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
@@ -109497,7 +109679,7 @@ index 2845623..4b46ab9 100644
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (p->real_cred->user != INIT_USER &&
-@@ -1572,6 +1678,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1572,6 +1685,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cancel_cgroup;
}
@@ -109509,7 +109691,7 @@ index 2845623..4b46ab9 100644
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
-@@ -1663,6 +1774,8 @@ bad_fork_cleanup_count:
+@@ -1663,6 +1781,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -109518,7 +109700,7 @@ index 2845623..4b46ab9 100644
return ERR_PTR(retval);
}
-@@ -1725,6 +1838,7 @@ long _do_fork(unsigned long clone_flags,
+@@ -1725,6 +1845,7 @@ long _do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace, tls);
@@ -109526,7 +109708,7 @@ index 2845623..4b46ab9 100644
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
-@@ -1741,6 +1855,8 @@ long _do_fork(unsigned long clone_flags,
+@@ -1741,6 +1862,8 @@ long _do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -109535,7 +109717,7 @@ index 2845623..4b46ab9 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1873,7 +1989,7 @@ void __init proc_caches_init(void)
+@@ -1873,7 +1996,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -109544,7 +109726,7 @@ index 2845623..4b46ab9 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1921,7 +2037,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1921,7 +2044,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -109553,7 +109735,7 @@ index 2845623..4b46ab9 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -2034,7 +2150,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -2034,7 +2157,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -109563,7 +109745,7 @@ index 2845623..4b46ab9 100644
new_fs = NULL;
else
new_fs = fs;
-@@ -2098,7 +2215,7 @@ int unshare_files(struct files_struct **displaced)
+@@ -2098,7 +2222,7 @@ int unshare_files(struct files_struct **displaced)
int sysctl_max_threads(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -119093,7 +119275,7 @@ index f5b5c1f..289c3dcb 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index 48ce829..4c30cd3 100644
+index 48ce829..a5a01a2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -33,7 +33,7 @@
@@ -119114,7 +119296,17 @@ index 48ce829..4c30cd3 100644
/*
* shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -835,14 +835,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+@@ -612,8 +612,7 @@ static void shmem_evict_inode(struct inode *inode)
+ list_del_init(&info->swaplist);
+ mutex_unlock(&shmem_swaplist_mutex);
+ }
+- } else
+- kfree(info->symlink);
++ }
+
+ simple_xattrs_free(&info->xattrs);
+ WARN_ON(inode->i_blocks);
+@@ -835,14 +834,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
list_add_tail(&info->swaplist, &shmem_swaplist);
if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
@@ -119134,7 +119326,7 @@ index 48ce829..4c30cd3 100644
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
-@@ -1070,7 +1070,7 @@ repeat:
+@@ -1070,7 +1069,7 @@ repeat:
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
@@ -119143,7 +119335,7 @@ index 48ce829..4c30cd3 100644
}
if (page && sgp == SGP_WRITE)
-@@ -1238,11 +1238,15 @@ clear:
+@@ -1238,11 +1237,15 @@ clear:
/* Perhaps the file has been truncated since we checked */
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
@@ -119163,7 +119355,7 @@ index 48ce829..4c30cd3 100644
}
*pagep = page;
return 0;
-@@ -1250,23 +1254,13 @@ clear:
+@@ -1250,23 +1253,13 @@ clear:
/*
* Error recovery.
*/
@@ -119188,7 +119380,23 @@ index 48ce829..4c30cd3 100644
error = -EEXIST;
unlock:
if (page) {
-@@ -2564,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2460,13 +2453,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
+ info = SHMEM_I(inode);
+ inode->i_size = len-1;
+ if (len <= SHORT_SYMLINK_LEN) {
+- info->symlink = kmemdup(symname, len, GFP_KERNEL);
+- if (!info->symlink) {
++ inode->i_link = kmemdup(symname, len, GFP_KERNEL);
++ if (!inode->i_link) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ inode->i_op = &shmem_short_symlink_operations;
+- inode->i_link = info->symlink;
+ } else {
+ error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
+ if (error) {
+@@ -2564,6 +2556,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -119200,7 +119408,7 @@ index 48ce829..4c30cd3 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2619,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2619,6 +2616,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -119216,7 +119424,7 @@ index 48ce829..4c30cd3 100644
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
-@@ -3002,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -3002,8 +3008,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -119226,6 +119434,14 @@ index 48ce829..4c30cd3 100644
if (!sbinfo)
return -ENOMEM;
+@@ -3081,6 +3086,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
+ static void shmem_destroy_callback(struct rcu_head *head)
+ {
+ struct inode *inode = container_of(head, struct inode, i_rcu);
++ kfree(inode->i_link);
+ kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
+ }
+
diff --git a/mm/slab.c b/mm/slab.c
index 4fcc5dd..8fb1a86 100644
--- a/mm/slab.c
@@ -122824,6 +123040,18 @@ index 214d44a..dcb7f86 100644
err_alloc:
return -ENOMEM;
}
+diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
+index 416dfa0..6e41f17 100644
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -353,6 +353,7 @@ config INET_ESP
+ select CRYPTO_CBC
+ select CRYPTO_SHA1
+ select CRYPTO_DES
++ select CRYPTO_ECHAINIV
+ ---help---
+ Support for IPsec ESP.
+
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 4b16cf3..443b1d4 100644
--- a/net/ipv4/af_inet.c
@@ -123430,6 +123658,44 @@ index e89094a..bd431045 100644
}
static int ping_v4_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
+index 3abd9d7..c5e4052 100644
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -333,7 +333,7 @@ static void icmpmsg_put(struct seq_file *seq)
+
+ count = 0;
+ for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
+- val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
++ val = atomic_long_read_unchecked(&net->mib.icmpmsg_statistics->mibs[i]);
+ if (val) {
+ type[count] = i;
+ vals[count++] = val;
+@@ -352,7 +352,7 @@ static void icmp_put(struct seq_file *seq)
+ {
+ int i;
+ struct net *net = seq->private;
+- atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
++ atomic_long_unchecked_t *ptr = net->mib.icmpmsg_statistics->mibs;
+
+ seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors");
+ for (i = 0; icmpmibmap[i].name != NULL; i++)
+@@ -366,13 +366,13 @@ static void icmp_put(struct seq_file *seq)
+ snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
+ for (i = 0; icmpmibmap[i].name != NULL; i++)
+ seq_printf(seq, " %lu",
+- atomic_long_read(ptr + icmpmibmap[i].index));
++ atomic_long_read_unchecked(ptr + icmpmibmap[i].index));
+ seq_printf(seq, " %lu %lu",
+ snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
+ snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+ for (i = 0; icmpmibmap[i].name != NULL; i++)
+ seq_printf(seq, " %lu",
+- atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
++ atomic_long_read_unchecked(ptr + (icmpmibmap[i].index | 0x100)));
+ }
+
+ /*
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 561cd4b..a32a155 100644
--- a/net/ipv4/raw.c
@@ -123697,7 +123963,7 @@ index ade7737..70ed9be 100644
goto err_reg;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 0a2b61d..563a1d2 100644
+index 0a2b61d..e6e7d27 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -786,7 +786,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
@@ -123709,7 +123975,17 @@ index 0a2b61d..563a1d2 100644
sk->sk_max_pacing_rate);
}
-@@ -4647,7 +4647,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+@@ -2525,6 +2525,9 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
+ int newly_acked_sacked = prior_unsacked -
+ (tp->packets_out - tp->sacked_out);
+
++ if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
++ return;
++
+ tp->prr_delivered += newly_acked_sacked;
+ if (delta < 0) {
+ u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+@@ -4647,7 +4650,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
* simplifies code)
*/
static void
@@ -123718,7 +123994,7 @@ index 0a2b61d..563a1d2 100644
struct sk_buff *head, struct sk_buff *tail,
u32 start, u32 end)
{
-@@ -5642,6 +5642,7 @@ discard:
+@@ -5642,6 +5645,7 @@ discard:
tcp_paws_reject(&tp->rx_opt, 0))
goto discard_and_undo;
@@ -123726,7 +124002,7 @@ index 0a2b61d..563a1d2 100644
if (th->syn) {
/* We see SYN without ACK. It is attempt of
* simultaneous connect with crossed SYNs.
-@@ -5693,6 +5694,7 @@ discard:
+@@ -5693,6 +5697,7 @@ discard:
goto discard;
#endif
}
@@ -123734,7 +124010,7 @@ index 0a2b61d..563a1d2 100644
/* "fifth, if neither of the SYN or RST bits is set then
* drop the segment and return."
*/
-@@ -5739,7 +5741,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+@@ -5739,7 +5744,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
goto discard;
if (th->syn) {
@@ -123743,7 +124019,7 @@ index 0a2b61d..563a1d2 100644
goto discard;
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
-@@ -6069,7 +6071,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+@@ -6069,7 +6074,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
kmemcheck_annotate_bitfield(ireq, flags);
ireq->opt = NULL;
@@ -124064,8 +124340,20 @@ index c10a9ee..c621a01 100644
err_alloc:
return -ENOMEM;
}
+diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
+index 983bb99..ebc39e1 100644
+--- a/net/ipv6/Kconfig
++++ b/net/ipv6/Kconfig
+@@ -69,6 +69,7 @@ config INET6_ESP
+ select CRYPTO_CBC
+ select CRYPTO_SHA1
+ select CRYPTO_DES
++ select CRYPTO_ECHAINIV
+ ---help---
+ Support for IPsec ESP.
+
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index ddd3511..9cad64b 100644
+index ddd3511..22c903e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -178,7 +178,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
@@ -124138,6 +124426,24 @@ index ddd3511..9cad64b 100644
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
+@@ -4688,7 +4695,7 @@ static inline size_t inet6_if_nlmsg_size(void)
+ + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
+ }
+
+-static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
++static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_unchecked_t *mib,
+ int items, int bytes)
+ {
+ int i;
+@@ -4698,7 +4705,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
+ /* Use put_unaligned() because stats may not be aligned for u64. */
+ put_unaligned(items, &stats[0]);
+ for (i = 1; i < items; i++)
+- put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
++ put_unaligned(atomic_long_read_unchecked(&mib[i]), &stats[i]);
+
+ memset(&stats[items], 0, pad);
+ }
@@ -5146,7 +5153,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
rt_genid_bump_ipv6(net);
break;
@@ -124514,9 +124820,53 @@ index 263a516..692f738 100644
inet6_unregister_protosw(&pingv6_protosw);
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
-index 679253d0..70b653c 100644
+index 679253d0..d85dd97 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
+@@ -151,7 +151,7 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
+ SNMP_MIB_SENTINEL
+ };
+
+-static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
++static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_unchecked_t *smib)
+ {
+ char name[32];
+ int i;
+@@ -168,14 +168,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
+ snprintf(name, sizeof(name), "Icmp6%s%s",
+ i & 0x100 ? "Out" : "In", p);
+ seq_printf(seq, "%-32s\t%lu\n", name,
+- atomic_long_read(smib + i));
++ atomic_long_read_unchecked(smib + i));
+ }
+
+ /* print by number (nonzero only) - ICMPMsgStat format */
+ for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
+ unsigned long val;
+
+- val = atomic_long_read(smib + i);
++ val = atomic_long_read_unchecked(smib + i);
+ if (!val)
+ continue;
+ snprintf(name, sizeof(name), "Icmp6%sType%u",
+@@ -188,7 +188,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
+ * or shared one (smib != NULL)
+ */
+ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
+- atomic_long_t *smib,
++ atomic_long_unchecked_t *smib,
+ const struct snmp_mib *itemlist)
+ {
+ int i;
+@@ -197,7 +197,7 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
+ for (i = 0; itemlist[i].name; i++) {
+ val = pcpumib ?
+ snmp_fold_field(pcpumib, itemlist[i].entry) :
+- atomic_long_read(smib + itemlist[i].entry);
++ atomic_long_read_unchecked(smib + itemlist[i].entry);
+ seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, val);
+ }
+ }
@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
goto proc_snmp6_fail;
@@ -127806,7 +128156,7 @@ index 350cca3..a108fc5 100644
sub->evt.event = htohl(event, sub->swap);
sub->evt.found_lower = htohl(found_lower, sub->swap);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
-index 0fc6dba..adadbef 100644
+index 0fc6dba..8355d2c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -918,6 +918,12 @@ static struct sock *unix_find_other(struct net *net,
@@ -128015,7 +128365,15 @@ index 0fc6dba..adadbef 100644
return max_level;
}
-@@ -2765,9 +2814,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+@@ -2301,6 +2350,7 @@ again:
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
++ scm_destroy(&scm);
+ goto out;
+ }
+
+@@ -2765,9 +2815,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "Num RefCount Protocol Flags Type St "
"Inode Path\n");
else {
@@ -128030,7 +128388,7 @@ index 0fc6dba..adadbef 100644
seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
s,
-@@ -2792,10 +2845,29 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+@@ -2792,10 +2846,29 @@ static int unix_seq_show(struct seq_file *seq, void *v)
seq_putc(seq, '@');
i++;
}
diff --git a/4.3.4/4427_force_XATTR_PAX_tmpfs.patch b/4.3.4/4427_force_XATTR_PAX_tmpfs.patch
index 22c9273..d03130d 100644
--- a/4.3.4/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.3.4/4427_force_XATTR_PAX_tmpfs.patch
@@ -6,7 +6,7 @@ namespace supported on tmpfs so that the PaX markings survive emerge.
diff -Naur a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c 2013-06-11 21:00:18.000000000 -0400
+++ b/mm/shmem.c 2013-06-11 21:08:18.000000000 -0400
-@@ -2558,11 +2558,7 @@
+@@ -2556,11 +2556,7 @@
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -18,7 +18,7 @@ diff -Naur a/mm/shmem.c b/mm/shmem.c
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2618,14 +2614,12 @@
+@@ -2616,14 +2612,12 @@
if (err)
return err;
diff --git a/4.3.4/4450_grsec-kconfig-default-gids.patch b/4.3.4/4450_grsec-kconfig-default-gids.patch
index 9524b1f..c56ca90 100644
--- a/4.3.4/4450_grsec-kconfig-default-gids.patch
+++ b/4.3.4/4450_grsec-kconfig-default-gids.patch
@@ -16,7 +16,7 @@ from shooting themselves in the foot.
diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
--- a/grsecurity/Kconfig 2012-10-13 09:51:35.000000000 -0400
+++ b/grsecurity/Kconfig 2012-10-13 09:52:32.000000000 -0400
-@@ -694,7 +694,7 @@
+@@ -697,7 +697,7 @@
config GRKERNSEC_AUDIT_GID
int "GID for auditing"
depends on GRKERNSEC_AUDIT_GROUP
@@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
config GRKERNSEC_EXECLOG
bool "Exec logging"
-@@ -925,7 +925,7 @@
+@@ -928,7 +928,7 @@
config GRKERNSEC_TPE_UNTRUSTED_GID
int "GID for TPE-untrusted users"
depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*enabled* for. If the sysctl option is enabled, a sysctl option
-@@ -934,7 +934,7 @@
+@@ -937,7 +937,7 @@
config GRKERNSEC_TPE_TRUSTED_GID
int "GID for TPE-trusted users"
depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -1019,7 +1019,7 @@
+@@ -1022,7 +1022,7 @@
config GRKERNSEC_SOCKET_ALL_GID
int "GID to deny all sockets for"
depends on GRKERNSEC_SOCKET_ALL
@@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Here you can choose the GID to disable socket access for. Remember to
add the users you want socket access disabled for to the GID
-@@ -1040,7 +1040,7 @@
+@@ -1043,7 +1043,7 @@
config GRKERNSEC_SOCKET_CLIENT_GID
int "GID to deny client sockets for"
depends on GRKERNSEC_SOCKET_CLIENT
@@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Here you can choose the GID to disable client socket access for.
Remember to add the users you want client socket access disabled for to
-@@ -1058,7 +1058,7 @@
+@@ -1061,7 +1061,7 @@
config GRKERNSEC_SOCKET_SERVER_GID
int "GID to deny server sockets for"
depends on GRKERNSEC_SOCKET_SERVER
diff --git a/4.3.4/4465_selinux-avc_audit-log-curr_ip.patch b/4.3.4/4465_selinux-avc_audit-log-curr_ip.patch
index 28f2163..d2e466f 100644
--- a/4.3.4/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.3.4/4465_selinux-avc_audit-log-curr_ip.patch
@@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
--- a/grsecurity/Kconfig 2011-04-17 19:25:54.000000000 -0400
+++ b/grsecurity/Kconfig 2011-04-17 19:32:53.000000000 -0400
-@@ -1153,6 +1153,27 @@
+@@ -1156,6 +1156,27 @@
menu "Logging Options"
depends on GRKERNSEC