public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/hardened-patchset:master commit in: 4.3.5/
@ 2016-02-04  9:24 Anthony G. Basile
From: Anthony G. Basile @ 2016-02-04  9:24 UTC (permalink / raw)
  To: gentoo-commits

commit:     00b3360960db057b3b23483f8d6536502a8d41c6
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  4 09:31:45 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Thu Feb  4 09:31:45 2016 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=00b33609

grsecurity-3.1-4.3.5-201602032209

 4.3.5/0000_README                                  |    2 +-
 ...> 4420_grsecurity-3.1-4.3.5-201602032209.patch} | 3149 +++++++++++++++++++-
 2 files changed, 3083 insertions(+), 68 deletions(-)

diff --git a/4.3.5/0000_README b/4.3.5/0000_README
index e49fbae..3ec071f 100644
--- a/4.3.5/0000_README
+++ b/4.3.5/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-3.1-4.3.5-201601311611.patch
+Patch:	4420_grsecurity-3.1-4.3.5-201602032209.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.3.5/4420_grsecurity-3.1-4.3.5-201601311611.patch b/4.3.5/4420_grsecurity-3.1-4.3.5-201602032209.patch
similarity index 98%
rename from 4.3.5/4420_grsecurity-3.1-4.3.5-201601311611.patch
rename to 4.3.5/4420_grsecurity-3.1-4.3.5-201602032209.patch
index 63ba087..562e55c 100644
--- a/4.3.5/4420_grsecurity-3.1-4.3.5-201601311611.patch
+++ b/4.3.5/4420_grsecurity-3.1-4.3.5-201602032209.patch
@@ -19145,18 +19145,10 @@ index 3c3550c..ca9e4c3 100644
  
  /*
 diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
-index c49c517..0a6e089 100644
+index c49c517..224eeea 100644
 --- a/arch/x86/include/asm/fpu/types.h
 +++ b/arch/x86/include/asm/fpu/types.h
-@@ -189,7 +189,6 @@ union fpregs_state {
- 	struct fxregs_state		fxsave;
- 	struct swregs_state		soft;
- 	struct xregs_state		xsave;
--	u8 __padding[PAGE_SIZE];
- };
- 
- /*
-@@ -199,6 +198,39 @@ union fpregs_state {
+@@ -199,6 +199,39 @@ union fpregs_state {
   */
  struct fpu {
  	/*
@@ -19196,7 +19188,7 @@ index c49c517..0a6e089 100644
  	 * @last_cpu:
  	 *
  	 * Records the last CPU on which this context was loaded into
-@@ -255,43 +287,6 @@ struct fpu {
+@@ -255,43 +288,6 @@ struct fpu {
  	 * deal with bursty apps that only use the FPU for a short time:
  	 */
  	unsigned char			counter;
@@ -19662,7 +19654,7 @@ index 55234d5..7e3c4bf 100644
  	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
  } mm_context_t;
 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index bfd9b2a..a931fef 100644
+index bfd9b2a..0d64fc2 100644
 --- a/arch/x86/include/asm/mmu_context.h
 +++ b/arch/x86/include/asm/mmu_context.h
 @@ -46,7 +46,7 @@ struct ldt_struct {
@@ -19783,6 +19775,15 @@ index bfd9b2a..a931fef 100644
  		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
  		this_cpu_write(cpu_tlbstate.active_mm, next);
  #endif
+@@ -129,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 		 * We need to prevent an outcome in which CPU 1 observes
+ 		 * the new PTE value and CPU 0 observes bit 1 clear in
+ 		 * mm_cpumask.  (If that occurs, then the IPI will never
+-		 * be sent, and CPU 0's TLB will contain a stale entry.)
++		 * be sent, and CPU 1's TLB will contain a stale entry.)
+ 		 *
+ 		 * The bad outcome can occur if either CPU's load is
+ 		 * reordered before that CPU's store, so both CPUs must
 @@ -144,7 +226,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  		 * ordering guarantee we need.
  		 *
@@ -20714,7 +20715,7 @@ index e6844df..432b56e 100644
  
  #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
-index 13f310b..f0ef42e 100644
+index 13f310b..6273ff2 100644
 --- a/arch/x86/include/asm/pgtable_types.h
 +++ b/arch/x86/include/asm/pgtable_types.h
 @@ -85,8 +85,10 @@
@@ -20793,7 +20794,30 @@ index 13f310b..f0ef42e 100644
  static inline pmdval_t native_pmd_val(pmd_t pmd)
  {
  	return native_pgd_val(pmd.pud.pgd);
-@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
+@@ -337,20 +348,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+ }
+ static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
+ {
++	pgprotval_t val = pgprot_val(pgprot);
+ 	pgprot_t new;
+-	unsigned long val;
+ 
+-	val = pgprot_val(pgprot);
+ 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ 		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+ 	return new;
+ }
+ static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
+ {
++	pgprotval_t val = pgprot_val(pgprot);
+ 	pgprot_t new;
+-	unsigned long val;
+ 
+-	val = pgprot_val(pgprot);
+ 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ 			  ((val & _PAGE_PAT_LARGE) >>
+ 			   (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+@@ -362,7 +371,6 @@ typedef struct page *pgtable_t;
  
  extern pteval_t __supported_pte_mask;
  extern void set_nx(void);
@@ -34821,9 +34845,18 @@ index 78e47ff..17c3093 100644
  err_out:
  	/* info might be NULL, but kfree() handles that */
 diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
-index c3b3f65..8919a28 100644
+index c3b3f65..5bfe5dc 100644
 --- a/arch/x86/mm/numa.c
 +++ b/arch/x86/mm/numa.c
+@@ -469,7 +469,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
+ {
+ 	int i, nid;
+ 	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
+-	unsigned long start, end;
++	u64 start, end;
+ 	struct memblock_region *r;
+ 
+ 	/*
 @@ -508,7 +508,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
  	}
  }
@@ -34834,9 +34867,18 @@ index c3b3f65..8919a28 100644
  	unsigned long uninitialized_var(pfn_align);
  	int i, nid;
 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index 2c44c07..5c5e457 100644
+index 2c44c07..318be75 100644
 --- a/arch/x86/mm/pageattr.c
 +++ b/arch/x86/mm/pageattr.c
+@@ -33,7 +33,7 @@ struct cpa_data {
+ 	pgd_t		*pgd;
+ 	pgprot_t	mask_set;
+ 	pgprot_t	mask_clr;
+-	int		numpages;
++	unsigned long	numpages;
+ 	int		flags;
+ 	unsigned long	pfn;
+ 	unsigned	force_split : 1;
 @@ -259,7 +259,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
  	 */
  #ifdef CONFIG_PCI_BIOS
@@ -34962,6 +35004,15 @@ index 2c44c07..5c5e457 100644
  			cpa->flags |= CPA_FLUSHTLB;
  		}
  		cpa->numpages = 1;
+@@ -1321,7 +1354,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
+ 		 * CPA operation. Either a large page has been
+ 		 * preserved or a single page update happened.
+ 		 */
+-		BUG_ON(cpa->numpages > numpages);
++		BUG_ON(cpa->numpages > numpages || !cpa->numpages);
+ 		numpages -= cpa->numpages;
+ 		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+ 			cpa->curpage++;
 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
 index 188e3e0..5c75446 100644
 --- a/arch/x86/mm/pat.c
@@ -37314,6 +37365,60 @@ index ad3f276..bef6d50 100644
  			return ERR_PTR(-EINVAL);
  
  		nr_pages += end - start;
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 55512dd..8ad1614 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -561,10 +561,10 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ 
+ 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ 		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+-			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
++			   (unsigned long long)atomic64_read_unchecked(&rwstat->aux_cnt[i]));
+ 
+-	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
+-		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
++	v = atomic64_read_unchecked(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
++		atomic64_read_unchecked(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
+ 	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+ 	return v;
+ }
+@@ -716,7 +716,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
+ 		else
+ 			stat = (void *)blkg + off;
+ 
+-		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
++		sum += blkg_stat_read(stat) + atomic64_read_unchecked(&stat->aux_cnt);
+ 	}
+ 	rcu_read_unlock();
+ 
+@@ -760,7 +760,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
+ 			rwstat = (void *)pos_blkg + off;
+ 
+ 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
+-			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
++			atomic64_add_unchecked(atomic64_read_unchecked(&rwstat->aux_cnt[i]) +
+ 				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
+ 				&sum.aux_cnt[i]);
+ 	}
+@@ -877,13 +877,13 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
+ 
+ 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
+ 					offsetof(struct blkcg_gq, stat_bytes));
+-		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
+-		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
++		rbytes = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
++		wbytes = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+ 
+ 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
+ 					offsetof(struct blkcg_gq, stat_ios));
+-		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
+-		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
++		rios = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
++		wios = atomic64_read_unchecked(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
+ 
+ 		spin_unlock_irq(blkg->q->queue_lock);
+ 
 diff --git a/block/blk-core.c b/block/blk-core.c
 index 18e92a6..1834d7c 100644
 --- a/block/blk-core.c
@@ -37406,6 +37511,21 @@ index d214e92..9649863 100644
  	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
  		if (blk_verify_command(rq->cmd, has_write_perm))
  			return -EPERM;
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 04de884..b740cb4 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -1941,8 +1941,8 @@ static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
+ {
+ 	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
+ 					offsetof(struct blkcg_gq, stat_bytes));
+-	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+-		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
++	u64 sum = atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
++		atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+ 
+ 	return __blkg_prfill_u64(sf, pd, sum >> 9);
+ }
 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
 index f678c73..f35aa18 100644
 --- a/block/compat_ioctl.c
@@ -37607,6 +37727,34 @@ index c81861b..dbf894f 100644
  
  static void cryptd_queue_worker(struct work_struct *work);
  
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 237f379..215a174 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -499,17 +499,21 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		if (link->dump == NULL)
+ 			return -EINVAL;
+ 
++		down_read(&crypto_alg_sem);
+ 		list_for_each_entry(alg, &crypto_alg_list, cra_list)
+ 			dump_alloc += CRYPTO_REPORT_MAXSIZE;
+ 
+ 		{
+-			struct netlink_dump_control c = {
++			netlink_dump_control_no_const c = {
+ 				.dump = link->dump,
+ 				.done = link->done,
+ 				.min_dump_alloc = dump_alloc,
+ 			};
+-			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
++			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ 		}
++		up_read(&crypto_alg_sem);
++
++		return err;
+ 	}
+ 
+ 	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
 index ee9cfb9..30b36ed 100644
 --- a/crypto/pcrypt.c
@@ -37741,7 +37889,7 @@ index 16129c7..8b675cd 100644
  struct apei_exec_context {
  	u32 ip;
 diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
-index 23981ac..35eb27e 100644
+index 23981ac..fa41a4b 100644
 --- a/drivers/acpi/apei/ghes.c
 +++ b/drivers/acpi/apei/ghes.c
 @@ -474,7 +474,7 @@ static void __ghes_print_estatus(const char *pfx,
@@ -37762,6 +37910,33 @@ index 23981ac..35eb27e 100644
  	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
  	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
  	       pfx_seq, generic->header.source_id);
+@@ -535,7 +535,7 @@ static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+ 		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ 		if (memcmp(estatus, cache_estatus, len))
+ 			continue;
+-		atomic_inc(&cache->count);
++		atomic_inc_unchecked(&cache->count);
+ 		now = sched_clock();
+ 		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
+ 			cached = 1;
+@@ -569,7 +569,7 @@ static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
+ 	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ 	memcpy(cache_estatus, estatus, len);
+ 	cache->estatus_len = len;
+-	atomic_set(&cache->count, 0);
++	atomic_set_unchecked(&cache->count, 0);
+ 	cache->generic = generic;
+ 	cache->time_in = sched_clock();
+ 	return cache;
+@@ -619,7 +619,7 @@ static void ghes_estatus_cache_add(
+ 			slot_cache = cache;
+ 			break;
+ 		}
+-		count = atomic_read(&cache->count);
++		count = atomic_read_unchecked(&cache->count);
+ 		period = duration;
+ 		do_div(period, (count + 1));
+ 		if (period > max_period) {
 diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
 index a83e3c6..c3d617f 100644
 --- a/drivers/acpi/bgrt.c
@@ -40448,6 +40623,50 @@ index 54bccf7..f7a4fa9 100644
  
  	new_smi->interrupt_disabled = true;
  	atomic_set(&new_smi->need_watch, 0);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 877205d..b098b7e 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -283,17 +283,17 @@ struct ssif_info {
+ 	unsigned int  multi_len;
+ 	unsigned int  multi_pos;
+ 
+-	atomic_t stats[SSIF_NUM_STATS];
++	atomic_unchecked_t stats[SSIF_NUM_STATS];
+ };
+ 
+ #define ssif_inc_stat(ssif, stat) \
+-	atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
++	atomic_inc_unchecked(&(ssif)->stats[SSIF_STAT_ ## stat])
+ #define ssif_get_stat(ssif, stat) \
+-	((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
++	((unsigned int) atomic_read_unchecked(&(ssif)->stats[SSIF_STAT_ ## stat]))
+ 
+ static bool initialized;
+ 
+-static atomic_t next_intf = ATOMIC_INIT(0);
++static atomic_unchecked_t next_intf = ATOMIC_INIT(0);
+ 
+ static void return_hosed_msg(struct ssif_info *ssif_info,
+ 			     struct ipmi_smi_msg *msg);
+@@ -1608,7 +1608,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ 	}
+ 
+  found:
+-	ssif_info->intf_num = atomic_inc_return(&next_intf);
++	ssif_info->intf_num = atomic_inc_return_unchecked(&next_intf);
+ 
+ 	if (ssif_dbg_probe) {
+ 		pr_info("ssif_probe: i2c_probe found device at i2c address %x\n",
+@@ -1622,7 +1622,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ 	ssif_info->retry_timer.function = retry_timeout;
+ 
+ 	for (i = 0; i < SSIF_NUM_STATS; i++)
+-		atomic_set(&ssif_info->stats[i], 0);
++		atomic_set_unchecked(&ssif_info->stats[i], 0);
+ 
+ 	if (ssif_info->supports_pec)
+ 		ssif_info->client->flags |= I2C_CLIENT_PEC;
 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
 index 6b1721f..fda9398 100644
 --- a/drivers/char/mem.c
@@ -42776,6 +42995,45 @@ index 7b69070..d7bd78b 100644
  							pqn->q);
  	if (retval != 0)
  		return retval;
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+index 3697eee..7147654 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+@@ -133,7 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ 	if (r)
+ 		return r;
+ 
+-	atomic_set(&entity->fence_seq, 0);
++	atomic_set_unchecked(&entity->fence_seq, 0);
+ 	entity->fence_context = fence_context_alloc(1);
+ 
+ 	/* Add the entity to the run queue */
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+index 80b64dc..fa4b4b4 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+@@ -44,7 +44,7 @@ struct amd_sched_entity {
+ 	spinlock_t			queue_lock;
+ 	struct kfifo                    job_queue;
+ 
+-	atomic_t			fence_seq;
++	atomic_unchecked_t		fence_seq;
+ 	uint64_t                        fence_context;
+ 
+ 	struct fence			*dependency;
+diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+index d802638..c78a893 100644
+--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
++++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+@@ -39,7 +39,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
+ 	fence->sched = s_entity->sched;
+ 	spin_lock_init(&fence->lock);
+ 
+-	seq = atomic_inc_return(&s_entity->fence_seq);
++	seq = atomic_inc_return_unchecked(&s_entity->fence_seq);
+ 	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+ 		   s_entity->fence_context, seq);
+ 
 diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
 index 225034b..4bb9696 100644
 --- a/drivers/gpu/drm/armada/armada_drv.c
@@ -45900,6 +46158,24 @@ index 9f5ad7c..588cd84 100644
  				wake_up_process(pool->thread);
  			}
  		}
+diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
+index d47df93..93e4cfd 100644
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -177,11 +177,10 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 			}
+ 
+ 			{
+-				struct netlink_dump_control c = {
++				netlink_dump_control_no_const c = {
+ 					.dump = client->cb_table[op].dump,
+-					.module = client->cb_table[op].module,
+ 				};
+-				return netlink_dump_start(nls, skb, nlh, &c);
++				return __netlink_dump_start(nls, skb, nlh, &c, NULL, client->cb_table[op].module);
+ 			}
+ 		}
+ 	}
 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
 index be4cb9f..a2bd947 100644
 --- a/drivers/infiniband/core/uverbs_cmd.c
@@ -45914,6 +46190,50 @@ index be4cb9f..a2bd947 100644
  	INIT_UDATA(&udata, buf + sizeof cmd,
  		   (unsigned long) cmd.response + sizeof resp,
  		   in_len - sizeof cmd, out_len - sizeof resp);
+diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
+index 1a29739..154f025 100644
+--- a/drivers/infiniband/hw/cxgb4/device.c
++++ b/drivers/infiniband/hw/cxgb4/device.c
+@@ -122,7 +122,7 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
+ 	if (!wq->rdev->wr_log)
+ 		return;
+ 
+-	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
++	idx = (atomic_inc_return_unchecked(&wq->rdev->wr_log_idx) - 1) &
+ 		(wq->rdev->wr_log_size - 1);
+ 	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
+ 	getnstimeofday(&le.poll_host_ts);
+@@ -154,7 +154,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
+ 
+ #define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
+ 
+-	idx = atomic_read(&dev->rdev.wr_log_idx) &
++	idx = atomic_read_unchecked(&dev->rdev.wr_log_idx) &
+ 		(dev->rdev.wr_log_size - 1);
+ 	end = idx - 1;
+ 	if (end < 0)
+@@ -860,7 +860,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
+ 				       sizeof(*rdev->wr_log), GFP_KERNEL);
+ 		if (rdev->wr_log) {
+ 			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
+-			atomic_set(&rdev->wr_log_idx, 0);
++			atomic_set_unchecked(&rdev->wr_log_idx, 0);
+ 		} else {
+ 			pr_err(MOD "error allocating wr_log. Logging disabled\n");
+ 		}
+diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+index c7bb38c..46059b4 100644
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -180,7 +180,7 @@ struct c4iw_rdev {
+ 	struct c4iw_stats stats;
+ 	struct c4iw_hw_queue hw_queue;
+ 	struct t4_dev_status_page *status_page;
+-	atomic_t wr_log_idx;
++	atomic_unchecked_t wr_log_idx;
+ 	struct wr_log_entry *wr_log;
+ 	int wr_log_size;
+ };
 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
 index 026b91e..47c3bdb 100644
 --- a/drivers/infiniband/hw/cxgb4/mem.c
@@ -47990,6 +48310,54 @@ index 4d20088..de60cb2 100644
  	---help---
  	Keeps all active closures in a linked list and provides a debugfs
  	interface to list them, which makes it possible to see asynchronous
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 8eeab72..292b65d 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -633,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+ 	for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ 		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+ 
+-		atomic_long_add(sectors,
++		atomic_long_add_unchecked(sectors,
+ 				&PTR_CACHE(c, &b->key, i)->sectors_written);
+ 	}
+ 
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 6b420a5..d5acb8f 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -433,12 +433,12 @@ struct cache {
+ 
+ 	/* The rest of this all shows up in sysfs */
+ #define IO_ERROR_SHIFT		20
+-	atomic_t		io_errors;
+-	atomic_t		io_count;
++	atomic_unchecked_t	io_errors;
++	atomic_unchecked_t	io_count;
+ 
+-	atomic_long_t		meta_sectors_written;
+-	atomic_long_t		btree_sectors_written;
+-	atomic_long_t		sectors_written;
++	atomic_long_unchecked_t	meta_sectors_written;
++	atomic_long_unchecked_t	btree_sectors_written;
++	atomic_long_unchecked_t	sectors_written;
+ };
+ 
+ struct gc_stat {
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 83392f8..fc8f340 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -468,7 +468,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
+ 
+ 	do_btree_node_write(b);
+ 
+-	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
++	atomic_long_add_unchecked(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
+ 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+ 
+ 	b->written += set_blocks(i, block_bytes(b->c));
 diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
 index 782cc2c..4fdd593 100644
 --- a/drivers/md/bcache/closure.h
@@ -48003,6 +48371,229 @@ index 782cc2c..4fdd593 100644
  	closure_set_ip(cl);
  	cl->fn = fn;
  	cl->wq = wq;
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index 86a0bb8..0832b32 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -61,7 +61,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
+ 	 */
+ 
+ 	if (ca->set->error_decay) {
+-		unsigned count = atomic_inc_return(&ca->io_count);
++		unsigned count = atomic_inc_return_unchecked(&ca->io_count);
+ 
+ 		while (count > ca->set->error_decay) {
+ 			unsigned errors;
+@@ -73,16 +73,16 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
+ 			 * succesfully do so, we rescale the errors once:
+ 			 */
+ 
+-			count = atomic_cmpxchg(&ca->io_count, old, new);
++			count = atomic_cmpxchg_unchecked(&ca->io_count, old, new);
+ 
+ 			if (count == old) {
+ 				count = new;
+ 
+-				errors = atomic_read(&ca->io_errors);
++				errors = atomic_read_unchecked(&ca->io_errors);
+ 				do {
+ 					old = errors;
+ 					new = ((uint64_t) errors * 127) / 128;
+-					errors = atomic_cmpxchg(&ca->io_errors,
++					errors = atomic_cmpxchg_unchecked(&ca->io_errors,
+ 								old, new);
+ 				} while (old != errors);
+ 			}
+@@ -91,7 +91,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
+ 
+ 	if (error) {
+ 		char buf[BDEVNAME_SIZE];
+-		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
++		unsigned errors = atomic_add_return_unchecked(1 << IO_ERROR_SHIFT,
+ 						    &ca->io_errors);
+ 		errors >>= IO_ERROR_SHIFT;
+ 
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 29eba72..348efc9 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -621,7 +621,7 @@ static void journal_write_unlocked(struct closure *cl)
+ 		ca = PTR_CACHE(c, k, i);
+ 		bio = &ca->journal.bio;
+ 
+-		atomic_long_add(sectors, &ca->meta_sectors_written);
++		atomic_long_add_unchecked(sectors, &ca->meta_sectors_written);
+ 
+ 		bio_reset(bio);
+ 		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
+diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
+index 0ca072c..5e6e5c3 100644
+--- a/drivers/md/bcache/stats.c
++++ b/drivers/md/bcache/stats.c
+@@ -120,7 +120,7 @@ void bch_cache_accounting_destroy(struct cache_accounting *acc)
+ 	kobject_put(&acc->hour.kobj);
+ 	kobject_put(&acc->day.kobj);
+ 
+-	atomic_set(&acc->closing, 1);
++	atomic_set_unchecked(&acc->closing, 1);
+ 	if (del_timer_sync(&acc->timer))
+ 		closure_return(&acc->cl);
+ }
+@@ -151,7 +151,7 @@ static void scale_accounting(unsigned long data)
+ 	struct cache_accounting *acc = (struct cache_accounting *) data;
+ 
+ #define move_stat(name) do {						\
+-	unsigned t = atomic_xchg(&acc->collector.name, 0);		\
++	unsigned t = atomic_xchg_unchecked(&acc->collector.name, 0);	\
+ 	t <<= 16;							\
+ 	acc->five_minute.name += t;					\
+ 	acc->hour.name += t;						\
+@@ -174,7 +174,7 @@ static void scale_accounting(unsigned long data)
+ 
+ 	acc->timer.expires += accounting_delay;
+ 
+-	if (!atomic_read(&acc->closing))
++	if (!atomic_read_unchecked(&acc->closing))
+ 		add_timer(&acc->timer);
+ 	else
+ 		closure_return(&acc->cl);
+@@ -185,14 +185,14 @@ static void mark_cache_stats(struct cache_stat_collector *stats,
+ {
+ 	if (!bypass)
+ 		if (hit)
+-			atomic_inc(&stats->cache_hits);
++			atomic_inc_unchecked(&stats->cache_hits);
+ 		else
+-			atomic_inc(&stats->cache_misses);
++			atomic_inc_unchecked(&stats->cache_misses);
+ 	else
+ 		if (hit)
+-			atomic_inc(&stats->cache_bypass_hits);
++			atomic_inc_unchecked(&stats->cache_bypass_hits);
+ 		else
+-			atomic_inc(&stats->cache_bypass_misses);
++			atomic_inc_unchecked(&stats->cache_bypass_misses);
+ }
+ 
+ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+@@ -206,22 +206,22 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
+ {
+ 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+-	atomic_inc(&dc->accounting.collector.cache_readaheads);
+-	atomic_inc(&c->accounting.collector.cache_readaheads);
++	atomic_inc_unchecked(&dc->accounting.collector.cache_readaheads);
++	atomic_inc_unchecked(&c->accounting.collector.cache_readaheads);
+ }
+ 
+ void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
+ {
+ 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+-	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
+-	atomic_inc(&c->accounting.collector.cache_miss_collisions);
++	atomic_inc_unchecked(&dc->accounting.collector.cache_miss_collisions);
++	atomic_inc_unchecked(&c->accounting.collector.cache_miss_collisions);
+ }
+ 
+ void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
+ 			       int sectors)
+ {
+-	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
+-	atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
++	atomic_add_unchecked(sectors, &dc->accounting.collector.sectors_bypassed);
++	atomic_add_unchecked(sectors, &c->accounting.collector.sectors_bypassed);
+ }
+ 
+ void bch_cache_accounting_init(struct cache_accounting *acc,
+diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
+index adbff14..018c2d2 100644
+--- a/drivers/md/bcache/stats.h
++++ b/drivers/md/bcache/stats.h
+@@ -2,13 +2,13 @@
+ #define _BCACHE_STATS_H_
+ 
+ struct cache_stat_collector {
+-	atomic_t cache_hits;
+-	atomic_t cache_misses;
+-	atomic_t cache_bypass_hits;
+-	atomic_t cache_bypass_misses;
+-	atomic_t cache_readaheads;
+-	atomic_t cache_miss_collisions;
+-	atomic_t sectors_bypassed;
++	atomic_unchecked_t cache_hits;
++	atomic_unchecked_t cache_misses;
++	atomic_unchecked_t cache_bypass_hits;
++	atomic_unchecked_t cache_bypass_misses;
++	atomic_unchecked_t cache_readaheads;
++	atomic_unchecked_t cache_miss_collisions;
++	atomic_unchecked_t sectors_bypassed;
+ };
+ 
+ struct cache_stats {
+@@ -28,7 +28,7 @@ struct cache_stats {
+ struct cache_accounting {
+ 	struct closure		cl;
+ 	struct timer_list	timer;
+-	atomic_t		closing;
++	atomic_unchecked_t	closing;
+ 
+ 	struct cache_stat_collector collector;
+ 
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 679a093..b4dd03d 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -530,7 +530,7 @@ void bch_prio_write(struct cache *ca)
+ 
+ 	ca->disk_buckets->seq++;
+ 
+-	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
++	atomic_long_add_unchecked(ca->sb.bucket_size * prio_buckets(ca),
+ 			&ca->meta_sectors_written);
+ 
+ 	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index b3ff57d..b2e30fb 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -739,15 +739,15 @@ SHOW(__bch_cache)
+ 	sysfs_hprint(block_size,	block_bytes(ca));
+ 	sysfs_print(nbuckets,		ca->sb.nbuckets);
+ 	sysfs_print(discard,		ca->discard);
+-	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
++	sysfs_hprint(written, atomic_long_read_unchecked(&ca->sectors_written) << 9);
+ 	sysfs_hprint(btree_written,
+-		     atomic_long_read(&ca->btree_sectors_written) << 9);
++		     atomic_long_read_unchecked(&ca->btree_sectors_written) << 9);
+ 	sysfs_hprint(metadata_written,
+-		     (atomic_long_read(&ca->meta_sectors_written) +
+-		      atomic_long_read(&ca->btree_sectors_written)) << 9);
++		     (atomic_long_read_unchecked(&ca->meta_sectors_written) +
++		      atomic_long_read_unchecked(&ca->btree_sectors_written)) << 9);
+ 
+ 	sysfs_print(io_errors,
+-		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
++		    atomic_read_unchecked(&ca->io_errors) >> IO_ERROR_SHIFT);
+ 
+ 	if (attr == &sysfs_cache_replacement_policy)
+ 		return bch_snprint_string_list(buf, PAGE_SIZE,
+@@ -870,11 +870,11 @@ STORE(__bch_cache)
+ 	}
+ 
+ 	if (attr == &sysfs_clear_stats) {
+-		atomic_long_set(&ca->sectors_written, 0);
+-		atomic_long_set(&ca->btree_sectors_written, 0);
+-		atomic_long_set(&ca->meta_sectors_written, 0);
+-		atomic_set(&ca->io_count, 0);
+-		atomic_set(&ca->io_errors, 0);
++		atomic_long_set_unchecked(&ca->sectors_written, 0);
++		atomic_long_set_unchecked(&ca->btree_sectors_written, 0);
++		atomic_long_set_unchecked(&ca->meta_sectors_written, 0);
++		atomic_set_unchecked(&ca->io_count, 0);
++		atomic_set_unchecked(&ca->io_errors, 0);
+ 	}
+ 
+ 	return size;
 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
 index 48b5890..b0af0ca 100644
 --- a/drivers/md/bitmap.c
@@ -48016,6 +48607,236 @@ index 48b5890..b0af0ca 100644
  	}
  
  	seq_printf(seq, "\n");
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index dd90d12..10c9377 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -185,16 +185,16 @@ struct cache_features {
+ };
+ 
+ struct cache_stats {
+-	atomic_t read_hit;
+-	atomic_t read_miss;
+-	atomic_t write_hit;
+-	atomic_t write_miss;
+-	atomic_t demotion;
+-	atomic_t promotion;
+-	atomic_t copies_avoided;
+-	atomic_t cache_cell_clash;
+-	atomic_t commit_count;
+-	atomic_t discard_count;
++	atomic_unchecked_t read_hit;
++	atomic_unchecked_t read_miss;
++	atomic_unchecked_t write_hit;
++	atomic_unchecked_t write_miss;
++	atomic_unchecked_t demotion;
++	atomic_unchecked_t promotion;
++	atomic_unchecked_t copies_avoided;
++	atomic_unchecked_t cache_cell_clash;
++	atomic_unchecked_t commit_count;
++	atomic_unchecked_t discard_count;
+ };
+ 
+ /*
+@@ -273,8 +273,8 @@ struct cache {
+ 	atomic_t nr_io_migrations;
+ 
+ 	wait_queue_head_t quiescing_wait;
+-	atomic_t quiescing;
+-	atomic_t quiescing_ack;
++	atomic_unchecked_t quiescing;
++	atomic_unchecked_t quiescing_ack;
+ 
+ 	/*
+ 	 * cache_size entries, dirty if set
+@@ -640,7 +640,7 @@ static void set_discard(struct cache *cache, dm_dblock_t b)
+ 	unsigned long flags;
+ 
+ 	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
+-	atomic_inc(&cache->stats.discard_count);
++	atomic_inc_unchecked(&cache->stats.discard_count);
+ 
+ 	spin_lock_irqsave(&cache->lock, flags);
+ 	set_bit(from_dblock(b), cache->discard_bitset);
+@@ -688,10 +688,10 @@ static void load_stats(struct cache *cache)
+ 	struct dm_cache_statistics stats;
+ 
+ 	dm_cache_metadata_get_stats(cache->cmd, &stats);
+-	atomic_set(&cache->stats.read_hit, stats.read_hits);
+-	atomic_set(&cache->stats.read_miss, stats.read_misses);
+-	atomic_set(&cache->stats.write_hit, stats.write_hits);
+-	atomic_set(&cache->stats.write_miss, stats.write_misses);
++	atomic_set_unchecked(&cache->stats.read_hit, stats.read_hits);
++	atomic_set_unchecked(&cache->stats.read_miss, stats.read_misses);
++	atomic_set_unchecked(&cache->stats.write_hit, stats.write_hits);
++	atomic_set_unchecked(&cache->stats.write_miss, stats.write_misses);
+ }
+ 
+ static void save_stats(struct cache *cache)
+@@ -701,10 +701,10 @@ static void save_stats(struct cache *cache)
+ 	if (get_cache_mode(cache) >= CM_READ_ONLY)
+ 		return;
+ 
+-	stats.read_hits = atomic_read(&cache->stats.read_hit);
+-	stats.read_misses = atomic_read(&cache->stats.read_miss);
+-	stats.write_hits = atomic_read(&cache->stats.write_hit);
+-	stats.write_misses = atomic_read(&cache->stats.write_miss);
++	stats.read_hits = atomic_read_unchecked(&cache->stats.read_hit);
++	stats.read_misses = atomic_read_unchecked(&cache->stats.read_miss);
++	stats.write_hits = atomic_read_unchecked(&cache->stats.write_hit);
++	stats.write_misses = atomic_read_unchecked(&cache->stats.write_miss);
+ 
+ 	dm_cache_metadata_set_stats(cache->cmd, &stats);
+ }
+@@ -1322,7 +1322,7 @@ static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+ 
+ static void avoid_copy(struct dm_cache_migration *mg)
+ {
+-	atomic_inc(&mg->cache->stats.copies_avoided);
++	atomic_inc_unchecked(&mg->cache->stats.copies_avoided);
+ 	migration_success_pre_commit(mg);
+ }
+ 
+@@ -1649,13 +1649,13 @@ static bool spare_migration_bandwidth(struct cache *cache)
+ 
+ static void inc_hit_counter(struct cache *cache, struct bio *bio)
+ {
+-	atomic_inc(bio_data_dir(bio) == READ ?
++	atomic_inc_unchecked(bio_data_dir(bio) == READ ?
+ 		   &cache->stats.read_hit : &cache->stats.write_hit);
+ }
+ 
+ static void inc_miss_counter(struct cache *cache, struct bio *bio)
+ {
+-	atomic_inc(bio_data_dir(bio) == READ ?
++	atomic_inc_unchecked(bio_data_dir(bio) == READ ?
+ 		   &cache->stats.read_miss : &cache->stats.write_miss);
+ }
+ 
+@@ -1828,7 +1828,7 @@ static void process_cell(struct cache *cache, struct prealloc *structs,
+ 			 */
+ 
+ 			if (bio_data_dir(bio) == WRITE) {
+-				atomic_inc(&cache->stats.demotion);
++				atomic_inc_unchecked(&cache->stats.demotion);
+ 				invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
+ 				release_cell = false;
+ 
+@@ -1861,14 +1861,14 @@ static void process_cell(struct cache *cache, struct prealloc *structs,
+ 		break;
+ 
+ 	case POLICY_NEW:
+-		atomic_inc(&cache->stats.promotion);
++		atomic_inc_unchecked(&cache->stats.promotion);
+ 		promote(cache, structs, block, lookup_result.cblock, new_ocell);
+ 		release_cell = false;
+ 		break;
+ 
+ 	case POLICY_REPLACE:
+-		atomic_inc(&cache->stats.demotion);
+-		atomic_inc(&cache->stats.promotion);
++		atomic_inc_unchecked(&cache->stats.demotion);
++		atomic_inc_unchecked(&cache->stats.promotion);
+ 		demote_then_promote(cache, structs, lookup_result.old_oblock,
+ 				    block, lookup_result.cblock,
+ 				    ool.cell, new_ocell);
+@@ -1922,7 +1922,7 @@ static int commit(struct cache *cache, bool clean_shutdown)
+ 	if (get_cache_mode(cache) >= CM_READ_ONLY)
+ 		return -EINVAL;
+ 
+-	atomic_inc(&cache->stats.commit_count);
++	atomic_inc_unchecked(&cache->stats.commit_count);
+ 	r = dm_cache_commit(cache->cmd, clean_shutdown);
+ 	if (r)
+ 		metadata_operation_failed(cache, "dm_cache_commit", r);
+@@ -2153,32 +2153,32 @@ static void process_invalidation_requests(struct cache *cache)
+  *--------------------------------------------------------------*/
+ static bool is_quiescing(struct cache *cache)
+ {
+-	return atomic_read(&cache->quiescing);
++	return atomic_read_unchecked(&cache->quiescing);
+ }
+ 
+ static void ack_quiescing(struct cache *cache)
+ {
+ 	if (is_quiescing(cache)) {
+-		atomic_inc(&cache->quiescing_ack);
++		atomic_inc_unchecked(&cache->quiescing_ack);
+ 		wake_up(&cache->quiescing_wait);
+ 	}
+ }
+ 
+ static void wait_for_quiescing_ack(struct cache *cache)
+ {
+-	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
++	wait_event(cache->quiescing_wait, atomic_read_unchecked(&cache->quiescing_ack));
+ }
+ 
+ static void start_quiescing(struct cache *cache)
+ {
+-	atomic_inc(&cache->quiescing);
++	atomic_inc_unchecked(&cache->quiescing);
+ 	wait_for_quiescing_ack(cache);
+ }
+ 
+ static void stop_quiescing(struct cache *cache)
+ {
+-	atomic_set(&cache->quiescing, 0);
+-	atomic_set(&cache->quiescing_ack, 0);
++	atomic_set_unchecked(&cache->quiescing, 0);
++	atomic_set_unchecked(&cache->quiescing_ack, 0);
+ }
+ 
+ static void wait_for_migrations(struct cache *cache)
+@@ -2866,8 +2866,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ 	init_waitqueue_head(&cache->migration_wait);
+ 
+ 	init_waitqueue_head(&cache->quiescing_wait);
+-	atomic_set(&cache->quiescing, 0);
+-	atomic_set(&cache->quiescing_ack, 0);
++	atomic_set_unchecked(&cache->quiescing, 0);
++	atomic_set_unchecked(&cache->quiescing_ack, 0);
+ 
+ 	r = -ENOMEM;
+ 	atomic_set(&cache->nr_dirty, 0);
+@@ -2934,12 +2934,12 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+ 
+ 	load_stats(cache);
+ 
+-	atomic_set(&cache->stats.demotion, 0);
+-	atomic_set(&cache->stats.promotion, 0);
+-	atomic_set(&cache->stats.copies_avoided, 0);
+-	atomic_set(&cache->stats.cache_cell_clash, 0);
+-	atomic_set(&cache->stats.commit_count, 0);
+-	atomic_set(&cache->stats.discard_count, 0);
++	atomic_set_unchecked(&cache->stats.demotion, 0);
++	atomic_set_unchecked(&cache->stats.promotion, 0);
++	atomic_set_unchecked(&cache->stats.copies_avoided, 0);
++	atomic_set_unchecked(&cache->stats.cache_cell_clash, 0);
++	atomic_set_unchecked(&cache->stats.commit_count, 0);
++	atomic_set_unchecked(&cache->stats.discard_count, 0);
+ 
+ 	spin_lock_init(&cache->invalidation_lock);
+ 	INIT_LIST_HEAD(&cache->invalidation_requests);
+@@ -3549,12 +3549,12 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 		       cache->sectors_per_block,
+ 		       (unsigned long long) from_cblock(residency),
+ 		       (unsigned long long) from_cblock(cache->cache_size),
+-		       (unsigned) atomic_read(&cache->stats.read_hit),
+-		       (unsigned) atomic_read(&cache->stats.read_miss),
+-		       (unsigned) atomic_read(&cache->stats.write_hit),
+-		       (unsigned) atomic_read(&cache->stats.write_miss),
+-		       (unsigned) atomic_read(&cache->stats.demotion),
+-		       (unsigned) atomic_read(&cache->stats.promotion),
++		       (unsigned) atomic_read_unchecked(&cache->stats.read_hit),
++		       (unsigned) atomic_read_unchecked(&cache->stats.read_miss),
++		       (unsigned) atomic_read_unchecked(&cache->stats.write_hit),
++		       (unsigned) atomic_read_unchecked(&cache->stats.write_miss),
++		       (unsigned) atomic_read_unchecked(&cache->stats.demotion),
++		       (unsigned) atomic_read_unchecked(&cache->stats.promotion),
+ 		       (unsigned long) atomic_read(&cache->nr_dirty));
+ 
+ 		if (writethrough_mode(&cache->features))
 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
 index 80a4395..a5a8981 100644
 --- a/drivers/md/dm-ioctl.c
@@ -48029,6 +48850,19 @@ index 80a4395..a5a8981 100644
  		if (!*param->name) {
  			DMWARN("name not supplied when creating device");
  			return -EINVAL;
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index a090121..9f14702 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1452,7 +1452,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+ 		DMEMIT(" %llu",
+ 		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
+ 		       (unsigned long long)
+-		       atomic64_read(&rs->md.resync_mismatches));
++		       atomic64_read_unchecked(&rs->md.resync_mismatches));
+ 		break;
+ 	case STATUSTYPE_TABLE:
+ 		/* The string you would use to construct this array */
 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
 index f2a363a..9f4763b 100644
 --- a/drivers/md/dm-raid1.c
@@ -48311,7 +49145,7 @@ index 1b5c604..cd00629 100644
  
  void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
 diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 3fe3d04..e7adf83 100644
+index 3fe3d04..e9cfcd1 100644
 --- a/drivers/md/md.c
 +++ b/drivers/md/md.c
 @@ -197,10 +197,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
@@ -48383,6 +49217,24 @@ index 3fe3d04..e7adf83 100644
  
  	INIT_LIST_HEAD(&rdev->same_set);
  	init_waitqueue_head(&rdev->blocked_wait);
+@@ -4276,7 +4276,7 @@ mismatch_cnt_show(struct mddev *mddev, char *page)
+ {
+ 	return sprintf(page, "%llu\n",
+ 		       (unsigned long long)
+-		       atomic64_read(&mddev->resync_mismatches));
++		       atomic64_read_unchecked(&mddev->resync_mismatches));
+ }
+ 
+ static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
+@@ -5290,7 +5290,7 @@ static void md_clean(struct mddev *mddev)
+ 	mddev->new_layout = 0;
+ 	mddev->new_chunk_sectors = 0;
+ 	mddev->curr_resync = 0;
+-	atomic64_set(&mddev->resync_mismatches, 0);
++	atomic64_set_unchecked(&mddev->resync_mismatches, 0);
+ 	mddev->suspend_lo = mddev->suspend_hi = 0;
+ 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
+ 	mddev->recovery = 0;
 @@ -5709,9 +5709,10 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
  	info.patch_version = MD_PATCHLEVEL_VERSION;
  	info.ctime         = mddev->ctime;
@@ -48432,8 +49284,17 @@ index 3fe3d04..e7adf83 100644
  		/* sync IO will cause sync_io to increase before the disk_stats
  		 * as sync_io is counted when a request starts, and
  		 * disk_stats is counted when it completes.
+@@ -7726,7 +7727,7 @@ void md_do_sync(struct md_thread *thread)
+ 		 * which defaults to physical size, but can be virtual size
+ 		 */
+ 		max_sectors = mddev->resync_max_sectors;
+-		atomic64_set(&mddev->resync_mismatches, 0);
++		atomic64_set_unchecked(&mddev->resync_mismatches, 0);
+ 		/* we don't use the checkpoint if there's a bitmap */
+ 		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ 			j = mddev->resync_min;
 diff --git a/drivers/md/md.h b/drivers/md/md.h
-index ab33957..126a644 100644
+index ab33957..0d4a801 100644
 --- a/drivers/md/md.h
 +++ b/drivers/md/md.h
 @@ -96,13 +96,13 @@ struct md_rdev {
@@ -48452,6 +49313,15 @@ index ab33957..126a644 100644
  					   * for reporting to userspace and storing
  					   * in superblock.
  					   */
+@@ -301,7 +301,7 @@ struct mddev {
+ 
+ 	sector_t			resync_max_sectors; /* may be set by personality */
+ 
+-	atomic64_t			resync_mismatches; /* count of sectors where
++	atomic64_unchecked_t		resync_mismatches; /* count of sectors where
+ 							    * parity/replica mismatch found
+ 							    */
+ 
 @@ -479,7 +479,7 @@ extern void mddev_unlock(struct mddev *mddev);
  
  static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
@@ -48496,7 +49366,7 @@ index 3e6d115..ffecdeb 100644
  /*----------------------------------------------------------------*/
  
 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index d9d031e..38e5356 100644
+index d9d031e..11e6fd1 100644
 --- a/drivers/md/raid1.c
 +++ b/drivers/md/raid1.c
 @@ -1061,7 +1061,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
@@ -48517,6 +49387,15 @@ index d9d031e..38e5356 100644
  		}
  		sectors -= s;
  		sect += s;
+@@ -1975,7 +1975,7 @@ static void process_checks(struct r1bio *r1_bio)
+ 		} else
+ 			j = 0;
+ 		if (j >= 0)
+-			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
++			atomic64_add_unchecked(r1_bio->sectors, &mddev->resync_mismatches);
+ 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
+ 			      && !error)) {
+ 			/* No need to write to this device. */
 @@ -2116,7 +2116,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
  			    !test_bit(Faulty, &rdev->flags)) {
  				if (r1_sync_page_io(rdev, sect, s,
@@ -48527,7 +49406,7 @@ index d9d031e..38e5356 100644
  					       "md/raid1:%s: read error corrected "
  					       "(%d sectors at %llu on %s)\n",
 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index 96f3659..38437f5 100644
+index 96f3659..548d342 100644
 --- a/drivers/md/raid10.c
 +++ b/drivers/md/raid10.c
 @@ -1068,7 +1068,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
@@ -48548,6 +49427,15 @@ index 96f3659..38437f5 100644
  			   &conf->mirrors[d].rdev->corrected_errors);
  
  	/* for reconstruct, we always reschedule after a read.
+@@ -1974,7 +1974,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+ 			}
+ 			if (j == vcnt)
+ 				continue;
+-			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
++			atomic64_add_unchecked(r10_bio->sectors, &mddev->resync_mismatches);
+ 			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ 				/* Don't fix anything. */
+ 				continue;
 @@ -2173,7 +2173,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
  {
  	struct timespec cur_time_mon;
@@ -48599,7 +49487,7 @@ index 96f3659..38437f5 100644
  
  			rdev_dec_pending(rdev, mddev);
 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index 45933c1..831da5f 100644
+index 45933c1..0f1bc4f 100644
 --- a/drivers/md/raid5.c
 +++ b/drivers/md/raid5.c
 @@ -1115,23 +1115,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
@@ -48690,6 +49578,24 @@ index 45933c1..831da5f 100644
  			 > conf->max_nr_stripes)
  			printk(KERN_WARNING
  			       "md/raid:%s: Too many read errors, failing device %s.\n",
+@@ -3733,7 +3741,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
+ 			 */
+ 			set_bit(STRIPE_INSYNC, &sh->state);
+ 		else {
+-			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++			atomic64_add_unchecked(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
+ 			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ 				/* don't try to repair!! */
+ 				set_bit(STRIPE_INSYNC, &sh->state);
+@@ -3885,7 +3893,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
+ 				 */
+ 			}
+ 		} else {
+-			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++			atomic64_add_unchecked(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
+ 			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ 				/* don't try to repair!! */
+ 				set_bit(STRIPE_INSYNC, &sh->state);
 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
 index 13bb57f..0ca21b2 100644
 --- a/drivers/media/dvb-core/dvbdev.c
@@ -48849,6 +49755,39 @@ index 04706cc..23ae0ae 100644
  
  /* ------------------------------------------------------------------ */
  
+diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
+index 4e7db89..bd7ef95 100644
+--- a/drivers/media/pci/zoran/zoran.h
++++ b/drivers/media/pci/zoran/zoran.h
+@@ -178,7 +178,6 @@ struct zoran_fh;
+ 
+ struct zoran_mapping {
+ 	struct zoran_fh *fh;
+-	atomic_t count;
+ };
+ 
+ struct zoran_buffer {
+diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
+index 80caa70..d076ecf 100644
+--- a/drivers/media/pci/zoran/zoran_driver.c
++++ b/drivers/media/pci/zoran/zoran_driver.c
+@@ -2607,8 +2607,6 @@ zoran_poll (struct file *file,
+ static void
+ zoran_vm_open (struct vm_area_struct *vma)
+ {
+-	struct zoran_mapping *map = vma->vm_private_data;
+-	atomic_inc(&map->count);
+ }
+ 
+ static void
+@@ -2736,7 +2734,6 @@ zoran_mmap (struct file           *file,
+ 		return res;
+ 	}
+ 	map->fh = fh;
+-	atomic_set(&map->count, 1);
+ 
+ 	vma->vm_ops = &zoran_vm_ops;
+ 	vma->vm_flags |= VM_DONTEXPAND;
 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
 index 70c28d1..ff21b13 100644
 --- a/drivers/media/platform/omap/omap_vout.c
@@ -52087,6 +53026,76 @@ index ad62615..a4c124d 100644
  	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
  				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
  	EFX_BUG_ON_PARANOID(rc);
+diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
+index 9d78830..74fc649 100644
+--- a/drivers/net/ethernet/sfc/selftest.c
++++ b/drivers/net/ethernet/sfc/selftest.c
+@@ -82,8 +82,8 @@ struct efx_loopback_state {
+ 	int packet_count;
+ 	struct sk_buff **skbs;
+ 	bool offload_csum;
+-	atomic_t rx_good;
+-	atomic_t rx_bad;
++	atomic_unchecked_t rx_good;
++	atomic_unchecked_t rx_bad;
+ 	struct efx_loopback_payload payload;
+ };
+ 
+@@ -349,12 +349,12 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
+ 	netif_vdbg(efx, drv, efx->net_dev,
+ 		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
+ 
+-	atomic_inc(&state->rx_good);
++	atomic_inc_unchecked(&state->rx_good);
+ 	return;
+ 
+  err:
+ #ifdef DEBUG
+-	if (atomic_read(&state->rx_bad) == 0) {
++	if (atomic_read_unchecked(&state->rx_bad) == 0) {
+ 		netif_err(efx, drv, efx->net_dev, "received packet:\n");
+ 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ 			       buf_ptr, pkt_len, 0);
+@@ -363,7 +363,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
+ 			       &state->payload, sizeof(state->payload), 0);
+ 	}
+ #endif
+-	atomic_inc(&state->rx_bad);
++	atomic_inc_unchecked(&state->rx_bad);
+ }
+ 
+ /* Initialise an efx_selftest_state for a new iteration */
+@@ -397,8 +397,8 @@ static void efx_iterate_state(struct efx_nic *efx)
+ 	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+ 
+ 	/* Fill out remaining state members */
+-	atomic_set(&state->rx_good, 0);
+-	atomic_set(&state->rx_bad, 0);
++	atomic_set_unchecked(&state->rx_good, 0);
++	atomic_set_unchecked(&state->rx_bad, 0);
+ 	smp_wmb();
+ }
+ 
+@@ -456,7 +456,7 @@ static int efx_poll_loopback(struct efx_nic *efx)
+ {
+ 	struct efx_loopback_state *state = efx->loopback_selftest;
+ 
+-	return atomic_read(&state->rx_good) == state->packet_count;
++	return atomic_read_unchecked(&state->rx_good) == state->packet_count;
+ }
+ 
+ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+@@ -482,8 +482,8 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+ 	netif_tx_unlock_bh(efx->net_dev);
+ 
+ 	/* Check TX completion and received packet counts */
+-	rx_good = atomic_read(&state->rx_good);
+-	rx_bad = atomic_read(&state->rx_bad);
++	rx_good = atomic_read_unchecked(&state->rx_good);
++	rx_bad = atomic_read_unchecked(&state->rx_bad);
+ 	if (tx_done != state->packet_count) {
+ 		/* Don't free the skbs; they will be picked up on TX
+ 		 * overflow or channel teardown.
 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
 index 3f20bb1..59add41 100644
 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -52253,6 +53262,103 @@ index 207f62e..af3f5e5 100644
  		kfree_skb(skb);
  	}
  }
+diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
+index a0849f4..147a4a6 100644
+--- a/drivers/net/irda/vlsi_ir.c
++++ b/drivers/net/irda/vlsi_ir.c
+@@ -142,7 +142,7 @@ static void vlsi_ring_debug(struct vlsi_ring *r)
+ 	printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ 		__func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ 	printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
+-		atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
++		atomic_read_unchecked(&r->head) & r->mask, atomic_read_unchecked(&r->tail) & r->mask);
+ 	for (i = 0; i < r->size; i++) {
+ 		rd = &r->rd[i];
+ 		printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
+@@ -301,8 +301,8 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
+ 
+ 	seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ 		r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+-	h = atomic_read(&r->head) & r->mask;
+-	t = atomic_read(&r->tail) & r->mask;
++	h = atomic_read_unchecked(&r->head) & r->mask;
++	t = atomic_read_unchecked(&r->tail) & r->mask;
+ 	seq_printf(seq, "head = %d / tail = %d ", h, t);
+ 	if (h == t)
+ 		seq_printf(seq, "(empty)\n");
+@@ -410,8 +410,8 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
+ 	r->rd = (struct ring_descr *)(r+1);
+ 	r->mask = size - 1;
+ 	r->size = size;
+-	atomic_set(&r->head, 0);
+-	atomic_set(&r->tail, 0);
++	atomic_set_unchecked(&r->head, 0);
++	atomic_set_unchecked(&r->tail, 0);
+ 
+ 	for (i = 0; i < size; i++) {
+ 		rd = r->rd + i;
+@@ -1268,10 +1268,10 @@ static int vlsi_init_chip(struct pci_dev *pdev)
+ 		iobase+VLSI_PIO_RINGSIZE);	
+ 
+ 	ptr = inw(iobase+VLSI_PIO_RINGPTR);
+-	atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
+-	atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
+-	atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
+-	atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
++	atomic_set_unchecked(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
++	atomic_set_unchecked(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
++	atomic_set_unchecked(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
++	atomic_set_unchecked(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
+ 
+ 	vlsi_set_baud(idev, iobase);	/* idev->new_baud used as provided by caller */
+ 
+diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
+index f9db2ce..6cd460c 100644
+--- a/drivers/net/irda/vlsi_ir.h
++++ b/drivers/net/irda/vlsi_ir.h
+@@ -671,7 +671,7 @@ struct vlsi_ring {
+ 	unsigned		len;
+ 	unsigned		size;
+ 	unsigned		mask;
+-	atomic_t		head, tail;
++	atomic_unchecked_t	head, tail;
+ 	struct ring_descr	*rd;
+ };
+ 
+@@ -681,13 +681,13 @@ static inline struct ring_descr *ring_last(struct vlsi_ring *r)
+ {
+ 	int t;
+ 
+-	t = atomic_read(&r->tail) & r->mask;
+-	return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
++	t = atomic_read_unchecked(&r->tail) & r->mask;
++	return (((t+1) & r->mask) == (atomic_read_unchecked(&r->head) & r->mask)) ? NULL : &r->rd[t];
+ }
+ 
+ static inline struct ring_descr *ring_put(struct vlsi_ring *r)
+ {
+-	atomic_inc(&r->tail);
++	atomic_inc_unchecked(&r->tail);
+ 	return ring_last(r);
+ }
+ 
+@@ -695,13 +695,13 @@ static inline struct ring_descr *ring_first(struct vlsi_ring *r)
+ {
+ 	int h;
+ 
+-	h = atomic_read(&r->head) & r->mask;
+-	return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
++	h = atomic_read_unchecked(&r->head) & r->mask;
++	return (h == (atomic_read_unchecked(&r->tail) & r->mask)) ? NULL : &r->rd[h];
+ }
+ 
+ static inline struct ring_descr *ring_get(struct vlsi_ring *r)
+ {
+-	atomic_inc(&r->head);
++	atomic_inc_unchecked(&r->head);
+ 	return ring_first(r);
+ }
+ 
 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
 index 47da435..a4a7bd8 100644
 --- a/drivers/net/macvlan.c
@@ -53333,6 +54439,139 @@ index c27143b..bd90160 100644
  }
  
  #endif
+diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
+index 237d0cd..6c094fd 100644
+--- a/drivers/net/wireless/ath/carl9170/carl9170.h
++++ b/drivers/net/wireless/ath/carl9170/carl9170.h
+@@ -297,7 +297,7 @@ struct ar9170 {
+ 	unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
+ 	bool needs_full_reset;
+ 	bool force_usb_reset;
+-	atomic_t pending_restarts;
++	atomic_unchecked_t pending_restarts;
+ 
+ 	/* interface mode settings */
+ 	struct list_head vif_list;
+@@ -400,7 +400,7 @@ struct ar9170 {
+ 	struct carl9170_sta_tid __rcu *tx_ampdu_iter;
+ 	struct list_head tx_ampdu_list;
+ 	atomic_t tx_ampdu_upload;
+-	atomic_t tx_ampdu_scheduler;
++	atomic_unchecked_t tx_ampdu_scheduler;
+ 	atomic_t tx_total_pending;
+ 	atomic_t tx_total_queued;
+ 	unsigned int tx_ampdu_list_len;
+@@ -412,7 +412,7 @@ struct ar9170 {
+ 	spinlock_t mem_lock;
+ 	unsigned long *mem_bitmap;
+ 	atomic_t mem_free_blocks;
+-	atomic_t mem_allocs;
++	atomic_unchecked_t mem_allocs;
+ 
+ 	/* rxstream mpdu merge */
+ 	struct ar9170_rx_head rx_plcp;
+diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
+index 6808db4..3a5df05 100644
+--- a/drivers/net/wireless/ath/carl9170/debug.c
++++ b/drivers/net/wireless/ath/carl9170/debug.c
+@@ -221,7 +221,7 @@ static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
+ 
+ 	ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
+ 	    bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
+-	    ar->fw.mem_blocks, atomic_read(&ar->mem_allocs));
++	    ar->fw.mem_blocks, atomic_read_unchecked(&ar->mem_allocs));
+ 
+ 	ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n",
+ 	    atomic_read(&ar->mem_free_blocks),
+@@ -672,7 +672,7 @@ static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
+ 	ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n",
+ 		ar->fw.bug_counter);
+ 	ADD(buf, *ret, bufsize, "pending restart requests:%d\n",
+-		atomic_read(&ar->pending_restarts));
++		atomic_read_unchecked(&ar->pending_restarts));
+ 	return buf;
+ }
+ __DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
+@@ -779,7 +779,7 @@ DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d",
+ DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d",
+ 		      atomic_read(&ar->tx_total_queued));
+ DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d",
+-		      atomic_read(&ar->tx_ampdu_scheduler));
++		      atomic_read_unchecked(&ar->tx_ampdu_scheduler));
+ 
+ DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d",
+ 		      atomic_read(&ar->tx_total_pending));
+diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
+index 170c209..1e10745 100644
+--- a/drivers/net/wireless/ath/carl9170/main.c
++++ b/drivers/net/wireless/ath/carl9170/main.c
+@@ -320,7 +320,7 @@ static void carl9170_zap_queues(struct ar9170 *ar)
+ 	rcu_read_unlock();
+ 
+ 	atomic_set(&ar->tx_ampdu_upload, 0);
+-	atomic_set(&ar->tx_ampdu_scheduler, 0);
++	atomic_set_unchecked(&ar->tx_ampdu_scheduler, 0);
+ 	atomic_set(&ar->tx_total_pending, 0);
+ 	atomic_set(&ar->tx_total_queued, 0);
+ 	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
+@@ -370,7 +370,7 @@ static int carl9170_op_start(struct ieee80211_hw *hw)
+ 		ar->max_queue_stop_timeout[i] = 0;
+ 	}
+ 
+-	atomic_set(&ar->mem_allocs, 0);
++	atomic_set_unchecked(&ar->mem_allocs, 0);
+ 
+ 	err = carl9170_usb_open(ar);
+ 	if (err)
+@@ -490,7 +490,7 @@ static void carl9170_restart_work(struct work_struct *work)
+ 
+ 	if (!err && !ar->force_usb_reset) {
+ 		ar->restart_counter++;
+-		atomic_set(&ar->pending_restarts, 0);
++		atomic_set_unchecked(&ar->pending_restarts, 0);
+ 
+ 		ieee80211_restart_hw(ar->hw);
+ 	} else {
+@@ -513,7 +513,7 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
+ 	 * By ignoring these *surplus* reset events, the device won't be
+ 	 * killed again, right after it has recovered.
+ 	 */
+-	if (atomic_inc_return(&ar->pending_restarts) > 1) {
++	if (atomic_inc_return_unchecked(&ar->pending_restarts) > 1) {
+ 		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
+ 		return;
+ 	}
+@@ -1818,7 +1818,7 @@ void *carl9170_alloc(size_t priv_size)
+ 	spin_lock_init(&ar->tx_ampdu_list_lock);
+ 	spin_lock_init(&ar->mem_lock);
+ 	spin_lock_init(&ar->state_lock);
+-	atomic_set(&ar->pending_restarts, 0);
++	atomic_set_unchecked(&ar->pending_restarts, 0);
+ 	ar->vifs = 0;
+ 	for (i = 0; i < ar->hw->queues; i++) {
+ 		skb_queue_head_init(&ar->tx_status[i]);
+diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
+index ae86a600..28696b2 100644
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -193,7 +193,7 @@ static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
+ 	unsigned int chunks;
+ 	int cookie = -1;
+ 
+-	atomic_inc(&ar->mem_allocs);
++	atomic_inc_unchecked(&ar->mem_allocs);
+ 
+ 	chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
+ 	if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
+@@ -1130,7 +1130,7 @@ static void carl9170_tx_ampdu(struct ar9170 *ar)
+ 	unsigned int i = 0, done_ampdus = 0;
+ 	u16 seq, queue, tmpssn;
+ 
+-	atomic_inc(&ar->tx_ampdu_scheduler);
++	atomic_inc_unchecked(&ar->tx_ampdu_scheduler);
+ 	ar->tx_ampdu_schedule = false;
+ 
+ 	if (atomic_read(&ar->tx_ampdu_upload))
 diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
 index d7fa19b..6d84263 100644
 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -53510,6 +54749,41 @@ index b15e44f..8658af2 100644
  
  	memset(buf, 0, sizeof(buf));
  	buf_size = min(count, sizeof(buf) - 1);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
+index 5761876..fdd6a8a 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
+@@ -279,7 +279,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+ 			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ 			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+ 
+-			pn64 = atomic64_read(&key->tx_pn);
++			pn64 = atomic64_read_unchecked(&key->tx_pn);
+ 			aes_tx_sc->pn = cpu_to_le64(pn64);
+ 		} else {
+ 			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+@@ -1449,7 +1449,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
+ 		switch (key->cipher) {
+ 		case WLAN_CIPHER_SUITE_CCMP:
+ 			iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+-			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
++			atomic64_set_unchecked(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
+ 			break;
+ 		case WLAN_CIPHER_SUITE_TKIP:
+ 			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index 6df5aad..421d8db 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -284,7 +284,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ 	case WLAN_CIPHER_SUITE_CCMP:
+ 	case WLAN_CIPHER_SUITE_CCMP_256:
+ 		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+-		pn = atomic64_inc_return(&keyconf->tx_pn);
++		pn = atomic64_inc_return_unchecked(&keyconf->tx_pn);
+ 		crypto_hdr[0] = pn;
+ 		crypto_hdr[2] = 0;
+ 		crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
 index 9028345..5b66ca3 100644
 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -55634,6 +56908,76 @@ index 27debb3..394d998 100644
  };
  
  struct board_type {
+diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
+index a83f705..b40c5e6 100644
+--- a/drivers/scsi/hptiop.c
++++ b/drivers/scsi/hptiop.c
+@@ -1082,7 +1082,6 @@ static const char *hptiop_info(struct Scsi_Host *host)
+ static int hptiop_reset_hba(struct hptiop_hba *hba)
+ {
+ 	if (atomic_xchg(&hba->resetting, 1) == 0) {
+-		atomic_inc(&hba->reset_count);
+ 		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
+ 	}
+ 
+@@ -1340,7 +1339,6 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+ 	hba->iopintf_v2 = 0;
+ 
+ 	atomic_set(&hba->resetting, 0);
+-	atomic_set(&hba->reset_count, 0);
+ 
+ 	init_waitqueue_head(&hba->reset_wq);
+ 	init_waitqueue_head(&hba->ioctl_wq);
+diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
+index 4d1c511..d5744cb 100644
+--- a/drivers/scsi/hptiop.h
++++ b/drivers/scsi/hptiop.h
+@@ -330,7 +330,6 @@ struct hptiop_hba {
+ 	void        *dma_coherent[HPTIOP_MAX_REQUESTS];
+ 	dma_addr_t  dma_coherent_handle[HPTIOP_MAX_REQUESTS];
+ 
+-	atomic_t    reset_count;
+ 	atomic_t    resetting;
+ 
+ 	wait_queue_head_t reset_wq;
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index b62836d..fe79a06 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -1057,7 +1057,7 @@ static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+ 	if (ioa_cfg->hrrq_num == 1)
+ 		hrrq = 0;
+ 	else {
+-		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
++		hrrq = atomic_add_return_unchecked(1, &ioa_cfg->hrrq_index);
+ 		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+ 	}
+ 	return hrrq;
+@@ -8010,9 +8010,9 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
+ 
+ 	ioa_cfg->identify_hrrq_index = 0;
+ 	if (ioa_cfg->hrrq_num == 1)
+-		atomic_set(&ioa_cfg->hrrq_index, 0);
++		atomic_set_unchecked(&ioa_cfg->hrrq_index, 0);
+ 	else
+-		atomic_set(&ioa_cfg->hrrq_index, 1);
++		atomic_set_unchecked(&ioa_cfg->hrrq_index, 1);
+ 
+ 	/* Zero out config table */
+ 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index e4fb17a..0243f99 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -1517,7 +1517,7 @@ struct ipr_ioa_cfg {
+ 
+ 	struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+ 	u32 hrrq_num;
+-	atomic_t  hrrq_index;
++	atomic_unchecked_t  hrrq_index;
+ 	u16 identify_hrrq_index;
+ 
+ 	struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
 index 30f9ef0..a1e29ac 100644
 --- a/drivers/scsi/libfc/fc_exch.c
@@ -56174,6 +57518,65 @@ index c2dd17b..92bc0e0 100644
  			return;
  		}
  	}
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 75514a1..8526741 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -506,7 +506,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+ 	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+ 	if (loop_id == 0xFFFF) {
+ 		/* Global event */
+-		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
++		atomic_inc_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+ 		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+ #if 0 /* FIXME: do we need to choose a session here? */
+ 		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+@@ -5536,7 +5536,7 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+ 
+ retry:
+ 	global_resets =
+-	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
++	    atomic_read_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+ 
+ 	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+ 	if (rc != 0) {
+@@ -5563,12 +5563,12 @@ retry:
+ 		return NULL;
+ 
+ 	if (global_resets !=
+-	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
++	    atomic_read_unchecked(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
+ 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+ 		    "qla_target(%d): global reset during session discovery "
+ 		    "(counter was %d, new %d), retrying", vha->vp_idx,
+ 		    global_resets,
+-		    atomic_read(&vha->vha_tgt.
++		    atomic_read_unchecked(&vha->vha_tgt.
+ 			qla_tgt->tgt_global_resets_count));
+ 		goto retry;
+ 	}
+@@ -5784,7 +5784,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+ 	INIT_LIST_HEAD(&tgt->srr_ctio_list);
+ 	INIT_LIST_HEAD(&tgt->srr_imm_list);
+ 	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+-	atomic_set(&tgt->tgt_global_resets_count, 0);
++	atomic_set_unchecked(&tgt->tgt_global_resets_count, 0);
+ 
+ 	base_vha->vha_tgt.qla_tgt = tgt;
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
+index bca584a..9cb3ed9 100644
+--- a/drivers/scsi/qla2xxx/qla_target.h
++++ b/drivers/scsi/qla2xxx/qla_target.h
+@@ -876,7 +876,7 @@ struct qla_tgt {
+ 	struct list_head srr_imm_list;
+ 	struct work_struct srr_work;
+ 
+-	atomic_t tgt_global_resets_count;
++	atomic_unchecked_t tgt_global_resets_count;
+ 
+ 	struct list_head tgt_list_entry;
+ };
 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
 index a7cfc27..151f483 100644
 --- a/drivers/scsi/qla4xxx/ql4_def.h
@@ -76768,9 +78171,18 @@ index 073bb57..7f95749 100644
  	else if (whole->bd_holder != NULL)
  		return false;	 /* is a partition of a held device */
 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
-index 5f745ea..b8d8e35 100644
+index 5f745ea..d7ab2ab 100644
 --- a/fs/btrfs/ctree.c
 +++ b/fs/btrfs/ctree.c
+@@ -357,7 +357,7 @@ static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
+  */
+ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
+ {
+-	return atomic64_inc_return(&fs_info->tree_mod_seq);
++	return atomic64_inc_return_unchecked(&fs_info->tree_mod_seq);
+ }
+ 
+ /*
 @@ -1182,9 +1182,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
  		free_extent_buffer(buf);
  		add_root_to_dirty_list(root);
@@ -76787,6 +78199,28 @@ index 5f745ea..b8d8e35 100644
  			parent_start = 0;
  
  		WARN_ON(trans->transid != btrfs_header_generation(parent));
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 94eea1f..4bfa5d6 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1542,7 +1542,7 @@ struct btrfs_fs_info {
+ 
+ 	/* this protects tree_mod_seq_list */
+ 	spinlock_t tree_mod_seq_lock;
+-	atomic64_t tree_mod_seq;
++	atomic64_unchecked_t tree_mod_seq;
+ 	struct list_head tree_mod_seq_list;
+ 
+ 	/* this protects tree_mod_log */
+@@ -1847,7 +1847,7 @@ struct btrfs_root {
+ 	struct list_head log_ctxs[2];
+ 	atomic_t log_writers;
+ 	atomic_t log_commit[2];
+-	atomic_t log_batch;
++	atomic_unchecked_t log_batch;
+ 	int log_transid;
+ 	/* No matter the commit succeeds or not*/
+ 	int log_transid_committed;
 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
 index a2ae427..d028233 100644
 --- a/fs/btrfs/delayed-inode.c
@@ -76866,6 +78300,50 @@ index f70119f..b7d2bb4 100644
  
  /* for init */
  int __init btrfs_delayed_inode_init(void);
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 7832031..28a7d67 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -647,7 +647,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ 		action = BTRFS_ADD_DELAYED_REF;
+ 
+ 	if (is_fstree(ref_root))
+-		seq = atomic64_read(&fs_info->tree_mod_seq);
++		seq = atomic64_read_unchecked(&fs_info->tree_mod_seq);
+ 	delayed_refs = &trans->transaction->delayed_refs;
+ 
+ 	/* first set the basic ref node struct up */
+@@ -703,7 +703,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ 	delayed_refs = &trans->transaction->delayed_refs;
+ 
+ 	if (is_fstree(ref_root))
+-		seq = atomic64_read(&fs_info->tree_mod_seq);
++		seq = atomic64_read_unchecked(&fs_info->tree_mod_seq);
+ 
+ 	/* first set the basic ref node struct up */
+ 	atomic_set(&ref->refs, 1);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 1e60d00..bc9c6ce 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1261,7 +1261,7 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
+ 	atomic_set(&root->log_commit[0], 0);
+ 	atomic_set(&root->log_commit[1], 0);
+ 	atomic_set(&root->log_writers, 0);
+-	atomic_set(&root->log_batch, 0);
++	atomic_set_unchecked(&root->log_batch, 0);
+ 	atomic_set(&root->orphan_inodes, 0);
+ 	atomic_set(&root->refs, 1);
+ 	atomic_set(&root->will_be_snapshoted, 0);
+@@ -2562,7 +2562,7 @@ int open_ctree(struct super_block *sb,
+ 	atomic_set(&fs_info->nr_async_bios, 0);
+ 	atomic_set(&fs_info->defrag_running, 0);
+ 	atomic_set(&fs_info->qgroup_op_seq, 0);
+-	atomic64_set(&fs_info->tree_mod_seq, 0);
++	atomic64_set_unchecked(&fs_info->tree_mod_seq, 0);
+ 	fs_info->sb = sb;
+ 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
+ 	fs_info->metadata_ratio = 0;
 diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
 index 6a98bdd..fed3da6 100644
 --- a/fs/btrfs/extent_map.c
@@ -76892,6 +78370,28 @@ index 6a98bdd..fed3da6 100644
  		rb_erase(&merge->rb_node, &tree->map);
  		RB_CLEAR_NODE(&merge->rb_node);
  		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index e27ea7a..2cac1d6 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1896,7 +1896,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 		return ret;
+ 
+ 	mutex_lock(&inode->i_mutex);
+-	atomic_inc(&root->log_batch);
++	atomic_inc_unchecked(&root->log_batch);
+ 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ 			     &BTRFS_I(inode)->runtime_flags);
+ 	/*
+@@ -1950,7 +1950,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 		mutex_unlock(&inode->i_mutex);
+ 		goto out;
+ 	}
+-	atomic_inc(&root->log_batch);
++	atomic_inc_unchecked(&root->log_batch);
+ 
+ 	/*
+ 	 * If the last transaction that changed this file was before the current
 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
 index 396e3d5..e752d29 100644
 --- a/fs/btrfs/inode.c
@@ -76942,6 +78442,150 @@ index 396e3d5..e752d29 100644
  	/* Reached end of directory/root. Bump pos past the last item. */
  	ctx->pos++;
  
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index fcf7265..275b222 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -153,7 +153,7 @@ struct btrfs_raid_bio {
+ 
+ 	atomic_t stripes_pending;
+ 
+-	atomic_t error;
++	atomic_unchecked_t error;
+ 	/*
+ 	 * these are two arrays of pointers.  We allocate the
+ 	 * rbio big enough to hold them both and setup their
+@@ -898,7 +898,7 @@ static void raid_write_end_io(struct bio *bio)
+ 	err = 0;
+ 
+ 	/* OK, we have read all the stripes we need to. */
+-	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
++	if (atomic_read_unchecked(&rbio->error) > rbio->bbio->max_errors)
+ 		err = -EIO;
+ 
+ 	rbio_orig_end_io(rbio, err);
+@@ -983,7 +983,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
+ 	rbio->faila = -1;
+ 	rbio->failb = -1;
+ 	atomic_set(&rbio->refs, 1);
+-	atomic_set(&rbio->error, 0);
++	atomic_set_unchecked(&rbio->error, 0);
+ 	atomic_set(&rbio->stripes_pending, 0);
+ 
+ 	/*
+@@ -1207,7 +1207,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
+ 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ 	spin_unlock_irq(&rbio->bio_list_lock);
+ 
+-	atomic_set(&rbio->error, 0);
++	atomic_set_unchecked(&rbio->error, 0);
+ 
+ 	/*
+ 	 * now that we've set rmw_locked, run through the
+@@ -1395,11 +1395,11 @@ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
+ 	if (rbio->faila == -1) {
+ 		/* first failure on this rbio */
+ 		rbio->faila = failed;
+-		atomic_inc(&rbio->error);
++		atomic_inc_unchecked(&rbio->error);
+ 	} else if (rbio->failb == -1) {
+ 		/* second failure on this rbio */
+ 		rbio->failb = failed;
+-		atomic_inc(&rbio->error);
++		atomic_inc_unchecked(&rbio->error);
+ 	} else {
+ 		ret = -EIO;
+ 	}
+@@ -1461,7 +1461,7 @@ static void raid_rmw_end_io(struct bio *bio)
+ 	if (!atomic_dec_and_test(&rbio->stripes_pending))
+ 		return;
+ 
+-	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
++	if (atomic_read_unchecked(&rbio->error) > rbio->bbio->max_errors)
+ 		goto cleanup;
+ 
+ 	/*
+@@ -1517,7 +1517,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
+ 
+ 	index_rbio_pages(rbio);
+ 
+-	atomic_set(&rbio->error, 0);
++	atomic_set_unchecked(&rbio->error, 0);
+ 	/*
+ 	 * build a list of bios to read all the missing parts of this
+ 	 * stripe
+@@ -2010,7 +2010,7 @@ static void raid_recover_end_io(struct bio *bio)
+ 	if (!atomic_dec_and_test(&rbio->stripes_pending))
+ 		return;
+ 
+-	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
++	if (atomic_read_unchecked(&rbio->error) > rbio->bbio->max_errors)
+ 		rbio_orig_end_io(rbio, -EIO);
+ 	else
+ 		__raid_recover_end_io(rbio);
+@@ -2040,7 +2040,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+ 	if (ret)
+ 		goto cleanup;
+ 
+-	atomic_set(&rbio->error, 0);
++	atomic_set_unchecked(&rbio->error, 0);
+ 
+ 	/*
+ 	 * read everything that hasn't failed.  Thanks to the
+@@ -2049,7 +2049,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+ 	 */
+ 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
+ 		if (rbio->faila == stripe || rbio->failb == stripe) {
+-			atomic_inc(&rbio->error);
++			atomic_inc_unchecked(&rbio->error);
+ 			continue;
+ 		}
+ 
+@@ -2079,7 +2079,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+ 		 * were up to date, or we might have no bios to read because
+ 		 * the devices were gone.
+ 		 */
+-		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
++		if (atomic_read_unchecked(&rbio->error) <= rbio->bbio->max_errors) {
+ 			__raid_recover_end_io(rbio);
+ 			goto out;
+ 		} else {
+@@ -2302,7 +2302,7 @@ static void raid_write_parity_end_io(struct bio *bio)
+ 
+ 	err = 0;
+ 
+-	if (atomic_read(&rbio->error))
++	if (atomic_read_unchecked(&rbio->error))
+ 		err = -EIO;
+ 
+ 	rbio_orig_end_io(rbio, err);
+@@ -2366,7 +2366,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ 		SetPageUptodate(q_page);
+ 	}
+ 
+-	atomic_set(&rbio->error, 0);
++	atomic_set_unchecked(&rbio->error, 0);
+ 
+ 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ 		struct page *p;
+@@ -2485,7 +2485,7 @@ static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
+  */
+ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
+ {
+-	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
++	if (atomic_read_unchecked(&rbio->error) > rbio->bbio->max_errors)
+ 		goto cleanup;
+ 
+ 	if (rbio->faila >= 0 || rbio->failb >= 0) {
+@@ -2582,7 +2582,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
+ 
+ 	bio_list_init(&bio_list);
+ 
+-	atomic_set(&rbio->error, 0);
++	atomic_set_unchecked(&rbio->error, 0);
+ 	/*
+ 	 * build a list of bios to read all the missing parts of this
+ 	 * stripe
 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
 index 11d1eab..a001845 100644
 --- a/fs/btrfs/super.c
@@ -76968,6 +78612,19 @@ index 603b0cc..8e3f600 100644
  			.name = "features",
  			.attrs = attrs,
  		};
+diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
+index 9626252..e130203 100644
+--- a/fs/btrfs/tests/btrfs-tests.c
++++ b/fs/btrfs/tests/btrfs-tests.c
+@@ -116,7 +116,7 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
+ 	fs_info->running_transaction = NULL;
+ 	fs_info->qgroup_tree = RB_ROOT;
+ 	fs_info->qgroup_ulist = NULL;
+-	atomic64_set(&fs_info->tree_mod_seq, 0);
++	atomic64_set_unchecked(&fs_info->tree_mod_seq, 0);
+ 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
+ 	INIT_LIST_HEAD(&fs_info->dead_roots);
+ 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
 diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
 index 2299bfd..4098e72 100644
 --- a/fs/btrfs/tests/free-space-tests.c
@@ -76994,6 +78651,59 @@ index 2299bfd..4098e72 100644
  	__btrfs_remove_free_space_cache(cache->free_space_ctl);
  
  	return 0;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index a5b0644..522b11a 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -257,7 +257,7 @@ loop:
+ 	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
+ 		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
+ 			"creating a fresh transaction\n");
+-	atomic64_set(&fs_info->tree_mod_seq, 0);
++	atomic64_set_unchecked(&fs_info->tree_mod_seq, 0);
+ 
+ 	spin_lock_init(&cur_trans->delayed_refs.lock);
+ 
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 6f8af2d..c3ab0d8 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -172,7 +172,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
+ 		root->log_start_pid = current->pid;
+ 	}
+ 
+-	atomic_inc(&root->log_batch);
++	atomic_inc_unchecked(&root->log_batch);
+ 	atomic_inc(&root->log_writers);
+ 	if (ctx) {
+ 		int index = root->log_transid % 2;
+@@ -2752,7 +2752,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 		wait_log_commit(root, log_transid - 1);
+ 
+ 	while (1) {
+-		int batch = atomic_read(&root->log_batch);
++		int batch = atomic_read_unchecked(&root->log_batch);
+ 		/* when we're on an ssd, just kick the log commit out */
+ 		if (!btrfs_test_opt(root, SSD) &&
+ 		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
+@@ -2761,7 +2761,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 			mutex_lock(&root->log_mutex);
+ 		}
+ 		wait_for_writer(root);
+-		if (batch == atomic_read(&root->log_batch))
++		if (batch == atomic_read_unchecked(&root->log_batch))
+ 			break;
+ 	}
+ 
+@@ -2807,7 +2807,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 	btrfs_init_log_ctx(&root_log_ctx);
+ 
+ 	mutex_lock(&log_root_tree->log_mutex);
+-	atomic_inc(&log_root_tree->log_batch);
++	atomic_inc_unchecked(&log_root_tree->log_batch);
+ 	atomic_inc(&log_root_tree->log_writers);
+ 
+ 	index2 = log_root_tree->log_transid % 2;
 diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
 index 6916a78..4598936 100644
 --- a/fs/btrfs/tree-log.h
@@ -77007,6 +78717,141 @@ index 6916a78..4598936 100644
  }
  
  static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 6fc73586..23b7706 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -155,7 +155,7 @@ static struct btrfs_device *__alloc_device(void)
+ 
+ 	spin_lock_init(&dev->reada_lock);
+ 	atomic_set(&dev->reada_in_flight, 0);
+-	atomic_set(&dev->dev_stats_ccnt, 0);
++	atomic_set_unchecked(&dev->dev_stats_ccnt, 0);
+ 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
+ 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
+ 
+@@ -5071,7 +5071,7 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
+ 		sizeof(u64) * (total_stripes),
+ 		GFP_NOFS|__GFP_NOFAIL);
+ 
+-	atomic_set(&bbio->error, 0);
++	atomic_set_unchecked(&bbio->error, 0);
+ 	atomic_set(&bbio->refs, 1);
+ 
+ 	return bbio;
+@@ -5752,7 +5752,7 @@ static void btrfs_end_bio(struct bio *bio)
+ 	int is_orig_bio = 0;
+ 
+ 	if (bio->bi_error) {
+-		atomic_inc(&bbio->error);
++		atomic_inc_unchecked(&bbio->error);
+ 		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
+ 			unsigned int stripe_index =
+ 				btrfs_io_bio(bio)->stripe_index;
+@@ -5790,7 +5790,7 @@ static void btrfs_end_bio(struct bio *bio)
+ 		/* only send an error to the higher layers if it is
+ 		 * beyond the tolerance of the btrfs bio
+ 		 */
+-		if (atomic_read(&bbio->error) > bbio->max_errors) {
++		if (atomic_read_unchecked(&bbio->error) > bbio->max_errors) {
+ 			bio->bi_error = -EIO;
+ 		} else {
+ 			/*
+@@ -5901,7 +5901,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
+ 
+ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
+ {
+-	atomic_inc(&bbio->error);
++	atomic_inc_unchecked(&bbio->error);
+ 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
+ 		/* Shoud be the original bio. */
+ 		WARN_ON(bio != bbio->orig_bio);
+@@ -6655,10 +6655,10 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+ 		if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
+ 			continue;
+ 
+-		stats_cnt = atomic_read(&device->dev_stats_ccnt);
++		stats_cnt = atomic_read_unchecked(&device->dev_stats_ccnt);
+ 		ret = update_dev_stat_item(trans, dev_root, device);
+ 		if (!ret)
+-			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
++			atomic_sub_unchecked(stats_cnt, &device->dev_stats_ccnt);
+ 	}
+ 	mutex_unlock(&fs_devices->device_list_mutex);
+ 
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 595279a..9abb7aa 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -148,8 +148,8 @@ struct btrfs_device {
+ 	int dev_stats_valid;
+ 
+ 	/* Counter to record the change of device stats */
+-	atomic_t dev_stats_ccnt;
+-	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
++	atomic_unchecked_t dev_stats_ccnt;
++	atomic_unchecked_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
+ };
+ 
+ /*
+@@ -307,7 +307,7 @@ struct btrfs_bio {
+ 	struct bio *orig_bio;
+ 	unsigned long flags;
+ 	void *private;
+-	atomic_t error;
++	atomic_unchecked_t error;
+ 	int max_errors;
+ 	int num_stripes;
+ 	int mirror_num;
+@@ -496,21 +496,21 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
+ 
+ static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
+ {
+-	return atomic_read(&dev->dev_stats_ccnt);
++	return atomic_read_unchecked(&dev->dev_stats_ccnt);
+ }
+ 
+ static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
+ 				      int index)
+ {
+-	atomic_inc(dev->dev_stat_values + index);
++	atomic_inc_unchecked(dev->dev_stat_values + index);
+ 	smp_mb__before_atomic();
+-	atomic_inc(&dev->dev_stats_ccnt);
++	atomic_inc_unchecked(&dev->dev_stats_ccnt);
+ }
+ 
+ static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
+ 				      int index)
+ {
+-	return atomic_read(dev->dev_stat_values + index);
++	return atomic_read_unchecked(dev->dev_stat_values + index);
+ }
+ 
+ static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+@@ -518,18 +518,18 @@ static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
+ {
+ 	int ret;
+ 
+-	ret = atomic_xchg(dev->dev_stat_values + index, 0);
++	ret = atomic_xchg_unchecked(dev->dev_stat_values + index, 0);
+ 	smp_mb__before_atomic();
+-	atomic_inc(&dev->dev_stats_ccnt);
++	atomic_inc_unchecked(&dev->dev_stats_ccnt);
+ 	return ret;
+ }
+ 
+ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
+ 				      int index, unsigned long val)
+ {
+-	atomic_set(dev->dev_stat_values + index, val);
++	atomic_set_unchecked(dev->dev_stat_values + index, val);
+ 	smp_mb__before_atomic();
+-	atomic_inc(&dev->dev_stats_ccnt);
++	atomic_inc_unchecked(&dev->dev_stats_ccnt);
+ }
+ 
+ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
 diff --git a/fs/buffer.c b/fs/buffer.c
 index 82283ab..dc40c76 100644
 --- a/fs/buffer.c
@@ -82052,6 +83897,41 @@ index 5d01d26..41c352e 100644
  {
  	int error = -ENOTTY;
  
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 362e5f6..3c562e0 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -1102,7 +1102,7 @@ restart_loop:
+ 	 */
+ 	stats.ts_tid = commit_transaction->t_tid;
+ 	stats.run.rs_handle_count =
+-		atomic_read(&commit_transaction->t_handle_count);
++		atomic_read_unchecked(&commit_transaction->t_handle_count);
+ 	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
+ 			     commit_transaction->t_tid, &stats.run);
+ 	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6b8338e..51de3f2 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -91,7 +91,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
+ 	atomic_set(&transaction->t_updates, 0);
+ 	atomic_set(&transaction->t_outstanding_credits,
+ 		   atomic_read(&journal->j_reserved_credits));
+-	atomic_set(&transaction->t_handle_count, 0);
++	atomic_set_unchecked(&transaction->t_handle_count, 0);
+ 	INIT_LIST_HEAD(&transaction->t_inode_list);
+ 	INIT_LIST_HEAD(&transaction->t_private_list);
+ 
+@@ -374,7 +374,7 @@ repeat:
+ 	handle->h_requested_credits = blocks;
+ 	handle->h_start_jiffies = jiffies;
+ 	atomic_inc(&transaction->t_updates);
+-	atomic_inc(&transaction->t_handle_count);
++	atomic_inc_unchecked(&transaction->t_handle_count);
+ 	jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
+ 		  handle, blocks,
+ 		  atomic_read(&transaction->t_outstanding_credits),
 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
 index 4a6cf28..d3a29d3 100644
 --- a/fs/jffs2/erase.c
@@ -83319,6 +85199,101 @@ index d1a8535..1cfa4a9 100644
  	/* We failed. Cleanup and return. */
  	if (bh_primary)
  		brelse(bh_primary);
+diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
+index e88ccf8..716dfce 100644
+--- a/fs/ocfs2/dlm/dlmcommon.h
++++ b/fs/ocfs2/dlm/dlmcommon.h
+@@ -151,9 +151,9 @@ struct dlm_ctxt
+ 	struct list_head mle_hb_events;
+ 
+ 	/* these give a really vague idea of the system load */
+-	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
++	atomic_unchecked_t mle_tot_count[DLM_MLE_NUM_TYPES];
+ 	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
+-	atomic_t res_tot_count;
++	atomic_unchecked_t res_tot_count;
+ 	atomic_t res_cur_count;
+ 
+ 	struct dlm_debug_ctxt *dlm_debug_ctxt;
+diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
+index 8251360..9c7513a 100644
+--- a/fs/ocfs2/dlm/dlmdebug.c
++++ b/fs/ocfs2/dlm/dlmdebug.c
+@@ -755,10 +755,10 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
+ 	out += snprintf(buf + out, len - out,
+ 			"Lock Resources: %d (%d)\n",
+ 			atomic_read(&dlm->res_cur_count),
+-			atomic_read(&dlm->res_tot_count));
++			atomic_read_unchecked(&dlm->res_tot_count));
+ 
+ 	for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
+-		tot_mles += atomic_read(&dlm->mle_tot_count[i]);
++		tot_mles += atomic_read_unchecked(&dlm->mle_tot_count[i]);
+ 
+ 	for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
+ 		cur_mles += atomic_read(&dlm->mle_cur_count[i]);
+@@ -771,19 +771,19 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
+ 	out += snprintf(buf + out, len - out,
+ 			"  Blocking: %d (%d)\n",
+ 			atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
+-			atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
++			atomic_read_unchecked(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
+ 
+ 	/*  Mastery: xxx (xxx) */
+ 	out += snprintf(buf + out, len - out,
+ 			"  Mastery: %d (%d)\n",
+ 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
+-			atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
++			atomic_read_unchecked(&dlm->mle_tot_count[DLM_MLE_MASTER]));
+ 
+ 	/*  Migration: xxx (xxx) */
+ 	out += snprintf(buf + out, len - out,
+ 			"  Migration: %d (%d)\n",
+ 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
+-			atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
++			atomic_read_unchecked(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
+ 
+ 	/* Lists: Dirty=Empty  Purge=InUse  PendingASTs=Empty  ... */
+ 	out += snprintf(buf + out, len - out,
+diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
+index 6918f30..0e79c35 100644
+--- a/fs/ocfs2/dlm/dlmdomain.c
++++ b/fs/ocfs2/dlm/dlmdomain.c
+@@ -2046,10 +2046,10 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
+ 	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ 	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ 
+-	atomic_set(&dlm->res_tot_count, 0);
++	atomic_set_unchecked(&dlm->res_tot_count, 0);
+ 	atomic_set(&dlm->res_cur_count, 0);
+ 	for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
+-		atomic_set(&dlm->mle_tot_count[i], 0);
++		atomic_set_unchecked(&dlm->mle_tot_count[i], 0);
+ 		atomic_set(&dlm->mle_cur_count[i], 0);
+ 	}
+ 
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index ce38b4c..c3aa42a 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -303,7 +303,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
+ 		mle->mnamehash = dlm_lockid_hash(name, namelen);
+ 	}
+ 
+-	atomic_inc(&dlm->mle_tot_count[mle->type]);
++	atomic_inc_unchecked(&dlm->mle_tot_count[mle->type]);
+ 	atomic_inc(&dlm->mle_cur_count[mle->type]);
+ 
+ 	/* copy off the node_map and register hb callbacks on our copy */
+@@ -577,7 +577,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
+ 
+ 	kref_init(&res->refs);
+ 
+-	atomic_inc(&dlm->res_tot_count);
++	atomic_inc_unchecked(&dlm->res_tot_count);
+ 	atomic_inc(&dlm->res_cur_count);
+ 
+ 	/* just for consistency */
 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
 index 0a4457f..67ddb4f 100644
 --- a/fs/ocfs2/localalloc.c
@@ -98139,6 +100114,19 @@ index 0000000..158b330
 +
 +	return retval;
 +}
+diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
+index 720446c..f32baee 100644
+--- a/include/acpi/ghes.h
++++ b/include/acpi/ghes.h
+@@ -32,7 +32,7 @@ struct ghes_estatus_node {
+ 
+ struct ghes_estatus_cache {
+ 	u32 estatus_len;
+-	atomic_t count;
++	atomic_unchecked_t count;
+ 	struct acpi_hest_generic *generic;
+ 	unsigned long long time_in;
+ 	struct rcu_head rcu;
 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
 index 5bdab6b..9ae82fe 100644
 --- a/include/asm-generic/4level-fixup.h
@@ -99099,6 +101087,101 @@ index e635533..3e89a44 100644
  {
  	if (sizeof(l) == 4)
  		return fls(l);
+diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
+index c02e669..439bd4b 100644
+--- a/include/linux/blk-cgroup.h
++++ b/include/linux/blk-cgroup.h
+@@ -63,12 +63,12 @@ struct blkcg {
+  */
+ struct blkg_stat {
+ 	struct percpu_counter		cpu_cnt;
+-	atomic64_t			aux_cnt;
++	atomic64_unchecked_t		aux_cnt;
+ };
+ 
+ struct blkg_rwstat {
+ 	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
+-	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
++	atomic64_unchecked_t		aux_cnt[BLKG_RWSTAT_NR];
+ };
+ 
+ /*
+@@ -508,7 +508,7 @@ static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
+ 	if (ret)
+ 		return ret;
+ 
+-	atomic64_set(&stat->aux_cnt, 0);
++	atomic64_set_unchecked(&stat->aux_cnt, 0);
+ 	return 0;
+ }
+ 
+@@ -546,7 +546,7 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+ static inline void blkg_stat_reset(struct blkg_stat *stat)
+ {
+ 	percpu_counter_set(&stat->cpu_cnt, 0);
+-	atomic64_set(&stat->aux_cnt, 0);
++	atomic64_set_unchecked(&stat->aux_cnt, 0);
+ }
+ 
+ /**
+@@ -559,7 +559,7 @@ static inline void blkg_stat_reset(struct blkg_stat *stat)
+ static inline void blkg_stat_add_aux(struct blkg_stat *to,
+ 				     struct blkg_stat *from)
+ {
+-	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
++	atomic64_add_unchecked(blkg_stat_read(from) + atomic64_read_unchecked(&from->aux_cnt),
+ 		     &to->aux_cnt);
+ }
+ 
+@@ -574,7 +574,7 @@ static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
+ 				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
+ 			return ret;
+ 		}
+-		atomic64_set(&rwstat->aux_cnt[i], 0);
++		atomic64_set_unchecked(&rwstat->aux_cnt[i], 0);
+ 	}
+ 	return 0;
+ }
+@@ -628,7 +628,7 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+ 	int i;
+ 
+ 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
+-		atomic64_set(&result.aux_cnt[i],
++		atomic64_set_unchecked(&result.aux_cnt[i],
+ 			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
+ 	return result;
+ }
+@@ -645,8 +645,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+ {
+ 	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+ 
+-	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
+-		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
++	return atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
++		atomic64_read_unchecked(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
+ }
+ 
+ /**
+@@ -659,7 +659,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+ 
+ 	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
+ 		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
+-		atomic64_set(&rwstat->aux_cnt[i], 0);
++		atomic64_set_unchecked(&rwstat->aux_cnt[i], 0);
+ 	}
+ }
+ 
+@@ -677,8 +677,8 @@ static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+ 	int i;
+ 
+ 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
+-		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
+-			     atomic64_read(&from->aux_cnt[i]),
++		atomic64_add_unchecked(atomic64_read_unchecked(&v.aux_cnt[i]) +
++			     atomic64_read_unchecked(&from->aux_cnt[i]),
+ 			     &to->aux_cnt[i]);
+ }
+ 
 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
 index 19c2e94..6f12b73 100644
 --- a/include/linux/blkdev.h
@@ -101842,6 +103925,19 @@ index f644fdb..ae23d35 100644
  struct irq_data;
  
  /* Number of irqs reserved for a legacy isa controller */
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 1abeb82..0dcff9d 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -674,7 +674,7 @@ struct transaction_s
+ 	/*
+ 	 * How many handles used this transaction? [t_handle_lock]
+ 	 */
+-	atomic_t		t_handle_count;
++	atomic_unchecked_t	t_handle_count;
+ 
+ 	/*
+ 	 * This transaction is being forced and some process is
 diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
 index 5fdc553..766e169 100644
 --- a/include/linux/jiffies.h
@@ -103015,6 +105111,36 @@ index 0000000..33f4af8
 +};
 +
 +#endif
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index 639e9b8..b37c9be 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -158,19 +158,19 @@ struct netlink_dump_control {
+ 	void *data;
+ 	struct module *module;
+ 	u16 min_dump_alloc;
+-};
++} __do_const;
++typedef struct netlink_dump_control __no_const netlink_dump_control_no_const;
+ 
+ extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 				const struct nlmsghdr *nlh,
+-				struct netlink_dump_control *control);
++				struct netlink_dump_control *control,
++				void *data,
++				struct module *module);
+ static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 				     const struct nlmsghdr *nlh,
+ 				     struct netlink_dump_control *control)
+ {
+-	if (!control->module)
+-		control->module = THIS_MODULE;
+-
+-	return __netlink_dump_start(ssk, skb, nlh, control);
++	return __netlink_dump_start(ssk, skb, nlh, control, control->data, control->module ? : THIS_MODULE);
+ }
+ 
+ struct netlink_tap {
 diff --git a/include/linux/nls.h b/include/linux/nls.h
 index 520681b..2b7fabb 100644
 --- a/include/linux/nls.h
@@ -105875,6 +108001,19 @@ index f2ae33d..c457cf0 100644
  	struct list_head list;
  	/* Protects from simultaneous access to first_req list */
  	spinlock_t info_list_lock;
+diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
+index 76b1ffa..d9f7571 100644
+--- a/include/net/cfg802154.h
++++ b/include/net/cfg802154.h
+@@ -188,7 +188,7 @@ struct wpan_dev {
+ 	/* MAC BSN field */
+ 	atomic_t bsn;
+ 	/* MAC DSN field */
+-	atomic_t dsn;
++	atomic_unchecked_t dsn;
+ 
+ 	u8 min_be;
+ 	u8 max_be;
 diff --git a/include/net/flow.h b/include/net/flow.h
 index 9b85db8..e76e5c7 100644
 --- a/include/net/flow.h
@@ -106121,9 +108260,18 @@ index c4359e2..76dbc4a 100644
  struct llc_sap_state {
  	u8			   curr_state;
 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
-index bfc5694..da18971 100644
+index bfc5694..56ba79d 100644
 --- a/include/net/mac80211.h
 +++ b/include/net/mac80211.h
+@@ -1498,7 +1498,7 @@ enum ieee80211_key_flags {
+  */
+ struct ieee80211_key_conf {
+ 	void *drv_priv;
+-	atomic64_t tx_pn;
++	atomic64_unchecked_t tx_pn;
+ 	u32 cipher;
+ 	u8 icv_len;
+ 	u8 iv_len;
 @@ -5111,7 +5111,7 @@ struct ieee80211_tx_rate_control {
  	struct sk_buff *skb;
  	struct ieee80211_tx_rate reported_rate;
@@ -106143,9 +108291,18 @@ index bfc5694..da18971 100644
  static inline int rate_supported(struct ieee80211_sta *sta,
  				 enum ieee80211_band band,
 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
-index 8b68384..42d6ce1 100644
+index 8b68384..48fe40e 100644
 --- a/include/net/neighbour.h
 +++ b/include/net/neighbour.h
+@@ -142,7 +142,7 @@ struct neighbour {
+ 	unsigned int		arp_queue_len_bytes;
+ 	struct timer_list	timer;
+ 	unsigned long		used;
+-	atomic_t		probes;
++	atomic_unchecked_t	probes;
+ 	__u8			flags;
+ 	__u8			nud_state;
+ 	__u8			type;
 @@ -163,7 +163,7 @@ struct neigh_ops {
  	void			(*error_report)(struct neighbour *, struct sk_buff *);
  	int			(*output)(struct neighbour *, struct sk_buff *);
@@ -112286,7 +114443,7 @@ index 2e991f8..66aea20 100644
  	wait_queue_head_t expedited_wq;		/* Wait for check-ins. */
  
 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
-index b2bf396..21142e4 100644
+index b2bf396..beb28db 100644
 --- a/kernel/rcu/tree_plugin.h
 +++ b/kernel/rcu/tree_plugin.h
 @@ -1235,7 +1235,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
@@ -112307,6 +114464,17 @@ index b2bf396..21142e4 100644
  	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
  	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
  	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+@@ -2234,8 +2234,8 @@ static int rcu_nocb_kthread(void *arg)
+ 		}
+ 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+ 		smp_mb__before_atomic();  /* _add after CB invocation. */
+-		atomic_long_add(-c, &rdp->nocb_q_count);
+-		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
++		atomic_long_sub(c, &rdp->nocb_q_count);
++		atomic_long_sub(cl, &rdp->nocb_q_count_lazy);
+ 		rdp->n_nocbs_invoked += c;
+ 	}
+ 	return 0;
 @@ -2585,9 +2585,9 @@ static void rcu_sysidle_enter(int irq)
  	j = jiffies;
  	WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
@@ -121390,8 +123558,30 @@ index 700c96c..637df49 100644
  	frag_header.reserved = 0;
  	frag_header.no = 0;
  	frag_header.total_size = htons(skb->len);
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index 8d990b0..44a0966 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -665,7 +665,7 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+ 		if (!primary_if)
+ 			goto out;
+ 		orig_addr = primary_if->net_dev->dev_addr;
+-		orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++		orig_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+ 	} else {
+ 		orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
+ 						     vid);
+@@ -740,7 +740,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ 	 * value is used later to check if the node which sent (or re-routed
+ 	 * last time) the packet had an updated information or not
+ 	 */
+-	curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++	curr_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+ 	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+ 		orig_node = batadv_orig_hash_find(bat_priv,
+ 						  unicast_packet->dest);
 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
-index ac4d08d..4579abb 100644
+index ac4d08d..76780fe 100644
 --- a/net/batman-adv/soft-interface.c
 +++ b/net/batman-adv/soft-interface.c
 @@ -330,7 +330,7 @@ send:
@@ -121403,15 +123593,17 @@ index ac4d08d..4579abb 100644
  		bcast_packet->seqno = htonl(seqno);
  
  		batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
-@@ -798,7 +798,7 @@ static int batadv_softif_init_late(struct net_device *dev)
+@@ -798,8 +798,8 @@ static int batadv_softif_init_late(struct net_device *dev)
  	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
  
  	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
 -	atomic_set(&bat_priv->bcast_seqno, 1);
+-	atomic_set(&bat_priv->tt.vn, 0);
 +	atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
- 	atomic_set(&bat_priv->tt.vn, 0);
++	atomic_set_unchecked(&bat_priv->tt.vn, 0);
  	atomic_set(&bat_priv->tt.local_changes, 0);
  	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+ #ifdef CONFIG_BATMAN_ADV_BLA
 @@ -812,7 +812,7 @@ static int batadv_softif_init_late(struct net_device *dev)
  
  	/* randomize initial seqno to avoid collision */
@@ -121430,8 +123622,70 @@ index ac4d08d..4579abb 100644
  	.kind		= "batadv",
  	.priv_size	= sizeof(struct batadv_priv),
  	.setup		= batadv_softif_init_early,
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 900e94b..5bd0030 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -605,7 +605,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+ 	batadv_dbg(BATADV_DBG_TT, bat_priv,
+ 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+ 		   addr, BATADV_PRINT_VID(vid),
+-		   (u8)atomic_read(&bat_priv->tt.vn));
++		   (u8)atomic_read_unchecked(&bat_priv->tt.vn));
+ 
+ 	ether_addr_copy(tt_local->common.addr, addr);
+ 	/* The local entry has to be marked as NEW to avoid to send it in
+@@ -835,7 +835,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+ 	}
+ 
+ 	(*tt_data)->flags = BATADV_NO_FLAGS;
+-	(*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
++	(*tt_data)->ttvn = atomic_read_unchecked(&bat_priv->tt.vn);
+ 	(*tt_data)->num_vlan = htons(num_vlan);
+ 
+ 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+@@ -954,7 +954,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
+ 
+ 	seq_printf(seq,
+ 		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
+-		   net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
++		   net_dev->name, (u8)atomic_read_unchecked(&bat_priv->tt.vn));
+ 	seq_printf(seq, "       %-13s  %s %-8s %-9s (%-10s)\n", "Client", "VID",
+ 		   "Flags", "Last seen", "CRC");
+ 
+@@ -2725,7 +2725,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ 
+ 	spin_lock_bh(&bat_priv->tt.commit_lock);
+ 
+-	my_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++	my_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+ 	req_ttvn = tt_data->ttvn;
+ 
+ 	orig_node = batadv_orig_hash_find(bat_priv, req_src);
+@@ -2764,7 +2764,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ 		       bat_priv->tt.last_changeset_len);
+ 		spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+ 	} else {
+-		req_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
++		req_ttvn = (u8)atomic_read_unchecked(&bat_priv->tt.vn);
+ 
+ 		/* allocate the tvlv, put the tt_data and all the tt_vlan_data
+ 		 * in the initial part
+@@ -3286,10 +3286,10 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
+ 	batadv_tt_local_update_crc(bat_priv);
+ 
+ 	/* Increment the TTVN only once per OGM interval */
+-	atomic_inc(&bat_priv->tt.vn);
++	atomic_inc_unchecked(&bat_priv->tt.vn);
+ 	batadv_dbg(BATADV_DBG_TT, bat_priv,
+ 		   "Local changes committed, updating to ttvn %u\n",
+-		   (u8)atomic_read(&bat_priv->tt.vn));
++		   (u8)atomic_read_unchecked(&bat_priv->tt.vn));
+ 
+ 	/* reset the sending counter */
+ 	atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
-index d260efd..228dbbc 100644
+index d260efd..1548fb3 100644
 --- a/net/batman-adv/types.h
 +++ b/net/batman-adv/types.h
 @@ -81,7 +81,7 @@ enum batadv_dhcp_recipient {
@@ -121443,6 +123697,15 @@ index d260efd..228dbbc 100644
  };
  
  /**
+@@ -529,7 +529,7 @@ enum batadv_counters {
+  * @work: work queue callback item for translation table purging
+  */
+ struct batadv_priv_tt {
+-	atomic_t vn;
++	atomic_unchecked_t vn;
+ 	atomic_t ogm_append_cnt;
+ 	atomic_t local_changes;
+ 	struct list_head changes_list;
 @@ -784,7 +784,7 @@ struct batadv_priv {
  	atomic_t bonding;
  	atomic_t fragmentation;
@@ -122064,9 +124327,72 @@ index 1033725..340f65d 100644
  			fle->object = flo;
  		else
 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
-index c169bba..1f5205a 100644
+index c169bba..02b1405 100644
 --- a/net/core/neighbour.c
 +++ b/net/core/neighbour.c
+@@ -860,7 +860,7 @@ static void neigh_probe(struct neighbour *neigh)
+ 		skb = skb_copy(skb, GFP_ATOMIC);
+ 	write_unlock(&neigh->lock);
+ 	neigh->ops->solicit(neigh, skb);
+-	atomic_inc(&neigh->probes);
++	atomic_inc_unchecked(&neigh->probes);
+ 	kfree_skb(skb);
+ }
+ 
+@@ -916,7 +916,7 @@ static void neigh_timer_handler(unsigned long arg)
+ 			neigh_dbg(2, "neigh %p is probed\n", neigh);
+ 			neigh->nud_state = NUD_PROBE;
+ 			neigh->updated = jiffies;
+-			atomic_set(&neigh->probes, 0);
++			atomic_set_unchecked(&neigh->probes, 0);
+ 			notify = 1;
+ 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+ 		}
+@@ -926,7 +926,7 @@ static void neigh_timer_handler(unsigned long arg)
+ 	}
+ 
+ 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
+-	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
++	    atomic_read_unchecked(&neigh->probes) >= neigh_max_probes(neigh)) {
+ 		neigh->nud_state = NUD_FAILED;
+ 		notify = 1;
+ 		neigh_invalidate(neigh);
+@@ -970,7 +970,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+ 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
+ 			unsigned long next, now = jiffies;
+ 
+-			atomic_set(&neigh->probes,
++			atomic_set_unchecked(&neigh->probes,
+ 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ 			neigh->nud_state     = NUD_INCOMPLETE;
+ 			neigh->updated = now;
+@@ -1161,7 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 	if (new != old) {
+ 		neigh_del_timer(neigh);
+ 		if (new & NUD_PROBE)
+-			atomic_set(&neigh->probes, 0);
++			atomic_set_unchecked(&neigh->probes, 0);
+ 		if (new & NUD_IN_TIMER)
+ 			neigh_add_timer(neigh, (jiffies +
+ 						((new & NUD_REACHABLE) ?
+@@ -1249,7 +1249,7 @@ void __neigh_set_probe_once(struct neighbour *neigh)
+ 	if (!(neigh->nud_state & NUD_FAILED))
+ 		return;
+ 	neigh->nud_state = NUD_INCOMPLETE;
+-	atomic_set(&neigh->probes, neigh_max_probes(neigh));
++	atomic_set_unchecked(&neigh->probes, neigh_max_probes(neigh));
+ 	neigh_add_timer(neigh,
+ 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+ }
+@@ -2186,7 +2186,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
+ 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
+ 	read_unlock_bh(&neigh->lock);
+ 
+-	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
++	if (nla_put_u32(skb, NDA_PROBES, atomic_read_unchecked(&neigh->probes)) ||
+ 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+ 		goto nla_put_failure;
+ 
 @@ -2827,7 +2827,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
  			   void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@ -122225,7 +124551,7 @@ index 4da4d51..ef1aa60 100644
  		pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
  		return -ENODEV;
 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 0ec4840..5277144 100644
+index 0ec4840..58bf730 100644
 --- a/net/core/rtnetlink.c
 +++ b/net/core/rtnetlink.c
 @@ -61,7 +61,7 @@ struct rtnl_link {
@@ -122272,6 +124598,15 @@ index 0ec4840..5277144 100644
  	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
  		goto nla_put_failure;
  
+@@ -3336,7 +3339,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		__rtnl_unlock();
+ 		rtnl = net->rtnl;
+ 		{
+-			struct netlink_dump_control c = {
++			netlink_dump_control_no_const c = {
+ 				.dump		= dumpit,
+ 				.min_dump_alloc	= min_dump_alloc,
+ 			};
 diff --git a/net/core/scm.c b/net/core/scm.c
 index 8a1741b..20d20e7 100644
 --- a/net/core/scm.c
@@ -122790,6 +125125,19 @@ index 4b16cf3..443b1d4 100644
  #endif
  	return -EINVAL;
  }
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 0c9c348..bae6157 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -332,7 +332,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
+ 	u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
+ 	struct net_device *dev = neigh->dev;
+ 	__be32 target = *(__be32 *)neigh->primary_key;
+-	int probes = atomic_read(&neigh->probes);
++	int probes = atomic_read_unchecked(&neigh->probes);
+ 	struct in_device *in_dev;
+ 	struct dst_entry *dst = NULL;
+ 
 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
 index 2d9cb17..20ae904 100644
 --- a/net/ipv4/devinet.c
@@ -122937,6 +125285,28 @@ index 61b45a1..2970363 100644
  
  		newicsk->icsk_retransmits = 0;
  		newicsk->icsk_backoff	  = 0;
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index c3b1f3a..57b3716 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -1042,7 +1042,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 				return -EINVAL;
+ 		}
+ 		{
+-			struct netlink_dump_control c = {
++			static struct netlink_dump_control c = {
+ 				.dump = inet_diag_dump_compat,
+ 			};
+ 			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
+@@ -1072,7 +1072,7 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+ 				return -EINVAL;
+ 		}
+ 		{
+-			struct netlink_dump_control c = {
++			static struct netlink_dump_control c = {
+ 				.dump = inet_diag_dump,
+ 			};
+ 			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
 index 8912019..8950e24 100644
 --- a/net/ipv4/inet_hashtables.c
@@ -124055,6 +126425,28 @@ index 126ff90..e9ba962 100644
  err_alloc:
  	return -ENOMEM;
  }
+diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
+index 542074c..648df74 100644
+--- a/net/ipv4/xfrm4_state.c
++++ b/net/ipv4/xfrm4_state.c
+@@ -56,6 +56,7 @@ xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
+ 
+ int xfrm4_extract_header(struct sk_buff *skb)
+ {
++	unsigned char iph_tmp;
+ 	const struct iphdr *iph = ip_hdr(skb);
+ 
+ 	XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
+@@ -63,7 +64,8 @@ int xfrm4_extract_header(struct sk_buff *skb)
+ 	XFRM_MODE_SKB_CB(skb)->frag_off = iph->frag_off;
+ 	XFRM_MODE_SKB_CB(skb)->tos = iph->tos;
+ 	XFRM_MODE_SKB_CB(skb)->ttl = iph->ttl;
+-	XFRM_MODE_SKB_CB(skb)->optlen = iph->ihl * 4 - sizeof(*iph);
++	iph_tmp = iph->ihl * 4;
++	XFRM_MODE_SKB_CB(skb)->optlen = iph_tmp - sizeof(*iph);
+ 	memset(XFRM_MODE_SKB_CB(skb)->flow_lbl, 0,
+ 	       sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl));
+ 
 diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
 index 983bb99..ebc39e1 100644
 --- a/net/ipv6/Kconfig
@@ -124365,6 +126757,19 @@ index 4449ad1..e47579c 100644
  		msg.msg_controllen = len;
  		msg.msg_flags = flags;
  
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 9ad46cd..95c2448 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -662,7 +662,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
+ 	struct in6_addr mcaddr;
+ 	struct net_device *dev = neigh->dev;
+ 	struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
+-	int probes = atomic_read(&neigh->probes);
++	int probes = atomic_read_unchecked(&neigh->probes);
+ 
+ 	if (skb && ipv6_chk_addr_and_flags(dev_net(dev), &ipv6_hdr(skb)->saddr,
+ 					   dev, 1,
 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
 index 0771991..faa2784 100644
 --- a/net/ipv6/netfilter/ip6_tables.c
@@ -125175,9 +127580,18 @@ index 1a3c7e0..80f8b0c 100644
  		goto out;
  
 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
-index 7a77a14..3b4a346 100644
+index 7a77a14..1d0b677 100644
 --- a/net/mac80211/cfg.c
 +++ b/net/mac80211/cfg.c
+@@ -379,7 +379,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ 			drv_get_key_seq(sdata->local, key, &kseq);
+ 			memcpy(seq, kseq.ccmp.pn, 6);
+ 		} else {
+-			pn64 = atomic64_read(&key->conf.tx_pn);
++			pn64 = atomic64_read_unchecked(&key->conf.tx_pn);
+ 			seq[0] = pn64;
+ 			seq[1] = pn64 >> 8;
+ 			seq[2] = pn64 >> 16;
 @@ -580,7 +580,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
  			ret = ieee80211_vif_use_channel(sdata, chandef,
  					IEEE80211_CHANCTX_EXCLUSIVE);
@@ -125207,6 +127621,19 @@ index 7a77a14..3b4a346 100644
  		   sdata->vif.type == NL80211_IFTYPE_MONITOR) {
  		if (local->use_chanctx)
  			*chandef = local->monitor_chandef;
+diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
+index 702ca12..a98f135 100644
+--- a/net/mac80211/debugfs_key.c
++++ b/net/mac80211/debugfs_key.c
+@@ -100,7 +100,7 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
+ 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ 	case WLAN_CIPHER_SUITE_GCMP:
+ 	case WLAN_CIPHER_SUITE_GCMP_256:
+-		pn = atomic64_read(&key->conf.tx_pn);
++		pn = atomic64_read_unchecked(&key->conf.tx_pn);
+ 		len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
+ 				(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+ 				(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
 index 6e52659..74e7863 100644
 --- a/net/mac80211/ieee80211_i.h
@@ -125315,6 +127742,28 @@ index 6964fc6..4b98e06 100644
  
  	mutex_lock(&local->iflist_mtx);
  	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index 44388d6..a052880 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -914,7 +914,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ 	case WLAN_CIPHER_SUITE_GCMP_256:
+ 		BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+ 			     offsetof(typeof(*seq), gcmp));
+-		pn64 = atomic64_read(&key->conf.tx_pn);
++		pn64 = atomic64_read_unchecked(&key->conf.tx_pn);
+ 		seq->ccmp.pn[5] = pn64;
+ 		seq->ccmp.pn[4] = pn64 >> 8;
+ 		seq->ccmp.pn[3] = pn64 >> 16;
+@@ -1014,7 +1014,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ 		       ((u64)seq->ccmp.pn[2] << 24) |
+ 		       ((u64)seq->ccmp.pn[1] << 32) |
+ 		       ((u64)seq->ccmp.pn[0] << 40);
+-		atomic64_set(&key->conf.tx_pn, pn64);
++		atomic64_set_unchecked(&key->conf.tx_pn, pn64);
+ 		break;
+ 	default:
+ 		WARN_ON(1);
 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
 index ff79a13..fb6b111 100644
 --- a/net/mac80211/main.c
@@ -125376,6 +127825,19 @@ index 64f1936..01102b7 100644
  		if (!txq_data)
  			goto free;
  
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 7892eb8..161da5a 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2824,7 +2824,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
+ 		case WLAN_CIPHER_SUITE_CCMP_256:
+ 		case WLAN_CIPHER_SUITE_GCMP:
+ 		case WLAN_CIPHER_SUITE_GCMP_256:
+-			pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
++			pn = atomic64_inc_return_unchecked(&fast_tx->key->conf.tx_pn);
+ 			crypto_hdr[0] = pn;
+ 			crypto_hdr[1] = pn >> 8;
+ 			crypto_hdr[4] = pn >> 16;
 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
 index cd90ece..5848351 100644
 --- a/net/mac80211/util.c
@@ -125416,6 +127878,77 @@ index cd90ece..5848351 100644
  		drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
  
  	list_for_each_entry(sdata, &local->interfaces, list) {
+diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
+index feb547d..eaec5fa4 100644
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -444,7 +444,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
+ 	hdr = (struct ieee80211_hdr *) pos;
+ 	pos += hdrlen;
+ 
+-	pn64 = atomic64_inc_return(&key->conf.tx_pn);
++	pn64 = atomic64_inc_return_unchecked(&key->conf.tx_pn);
+ 
+ 	pn[5] = pn64;
+ 	pn[4] = pn64 >> 8;
+@@ -673,7 +673,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+ 	hdr = (struct ieee80211_hdr *)pos;
+ 	pos += hdrlen;
+ 
+-	pn64 = atomic64_inc_return(&key->conf.tx_pn);
++	pn64 = atomic64_inc_return_unchecked(&key->conf.tx_pn);
+ 
+ 	pn[5] = pn64;
+ 	pn[4] = pn64 >> 8;
+@@ -947,7 +947,7 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
+ 	mmie->key_id = cpu_to_le16(key->conf.keyidx);
+ 
+ 	/* PN = PN + 1 */
+-	pn64 = atomic64_inc_return(&key->conf.tx_pn);
++	pn64 = atomic64_inc_return_unchecked(&key->conf.tx_pn);
+ 
+ 	bip_ipn_set64(mmie->sequence_number, pn64);
+ 
+@@ -991,7 +991,7 @@ ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
+ 	mmie->key_id = cpu_to_le16(key->conf.keyidx);
+ 
+ 	/* PN = PN + 1 */
+-	pn64 = atomic64_inc_return(&key->conf.tx_pn);
++	pn64 = atomic64_inc_return_unchecked(&key->conf.tx_pn);
+ 
+ 	bip_ipn_set64(mmie->sequence_number, pn64);
+ 
+@@ -1136,7 +1136,7 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
+ 	mmie->key_id = cpu_to_le16(key->conf.keyidx);
+ 
+ 	/* PN = PN + 1 */
+-	pn64 = atomic64_inc_return(&key->conf.tx_pn);
++	pn64 = atomic64_inc_return_unchecked(&key->conf.tx_pn);
+ 
+ 	bip_ipn_set64(mmie->sequence_number, pn64);
+ 
+diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
+index ed26952..025b04e 100644
+--- a/net/mac802154/iface.c
++++ b/net/mac802154/iface.c
+@@ -387,7 +387,7 @@ static int mac802154_header_create(struct sk_buff *skb,
+ 	hdr.fc.type = cb->type;
+ 	hdr.fc.security_enabled = cb->secen;
+ 	hdr.fc.ack_request = cb->ackreq;
+-	hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
++	hdr.seq = atomic_inc_return_unchecked(&dev->ieee802154_ptr->dsn) & 0xFF;
+ 
+ 	if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
+ 		return -EINVAL;
+@@ -492,7 +492,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
+ 	get_random_bytes(&tmp, sizeof(tmp));
+ 	atomic_set(&wpan_dev->bsn, tmp);
+ 	get_random_bytes(&tmp, sizeof(tmp));
+-	atomic_set(&wpan_dev->dsn, tmp);
++	atomic_set_unchecked(&wpan_dev->dsn, tmp);
+ 
+ 	/* defaults per 802.15.4-2011 */
+ 	wpan_dev->min_be = 3;
 diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
 index bb185a2..417f388 100644
 --- a/net/mpls/af_mpls.c
@@ -125481,9 +128014,18 @@ index 70d026d..c400590 100644
  obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
  obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
 diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
-index 338b404..839dcb0 100644
+index 338b404..e8f3207 100644
 --- a/net/netfilter/ipset/ip_set_core.c
 +++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1423,7 +1423,7 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
+ 		return -IPSET_ERR_PROTOCOL;
+ 
+ 	{
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = ip_set_dump_start,
+ 			.done = ip_set_dump_done,
+ 		};
 @@ -1998,7 +1998,7 @@ done:
  	return ret;
  }
@@ -125851,7 +128393,7 @@ index bd9d315..fbd2fb7 100644
  			hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
  				unhelp(h, me);
 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
-index 94a6654..9c275f7 100644
+index 94a6654..fcb8d3f 100644
 --- a/net/netfilter/nf_conntrack_netlink.c
 +++ b/net/netfilter/nf_conntrack_netlink.c
 @@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
@@ -125863,6 +128405,96 @@ index 94a6654..9c275f7 100644
  		if (cb->args[0] >= net->ct.htable_size) {
  			spin_unlock(lockp);
  			goto out;
+@@ -1184,10 +1184,11 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
+ 	int err;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = ctnetlink_dump_table,
+ 			.done = ctnetlink_done,
+ 		};
++		void *data = NULL;
+ 
+ 		if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+ 			struct ctnetlink_filter *filter;
+@@ -1196,9 +1197,9 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
+ 			if (IS_ERR(filter))
+ 				return PTR_ERR(filter);
+ 
+-			c.data = filter;
++			data = filter;
+ 		}
+-		return netlink_dump_start(ctnl, skb, nlh, &c);
++		return __netlink_dump_start(ctnl, skb, nlh, &c, data, THIS_MODULE);
+ 	}
+ 
+ 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
+@@ -1336,7 +1337,7 @@ ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
+ 		       const struct nlattr * const cda[])
+ {
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = ctnetlink_dump_dying,
+ 			.done = ctnetlink_done_list,
+ 		};
+@@ -1358,7 +1359,7 @@ ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
+ 			     const struct nlattr * const cda[])
+ {
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = ctnetlink_dump_unconfirmed,
+ 			.done = ctnetlink_done_list,
+ 		};
+@@ -2040,7 +2041,7 @@ ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
+ 		      const struct nlattr * const cda[])
+ {
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = ctnetlink_ct_stat_cpu_dump,
+ 		};
+ 		return netlink_dump_start(ctnl, skb, nlh, &c);
+@@ -2695,7 +2696,7 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
+ 	struct nf_conntrack_tuple_hash *h;
+ 	struct nf_conn *ct;
+ 	struct nf_conntrack_zone zone;
+-	struct netlink_dump_control c = {
++	static struct netlink_dump_control c = {
+ 		.dump = ctnetlink_exp_ct_dump_table,
+ 		.done = ctnetlink_exp_done,
+ 	};
+@@ -2714,9 +2715,8 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
+ 		return -ENOENT;
+ 
+ 	ct = nf_ct_tuplehash_to_ctrack(h);
+-	c.data = ct;
+ 
+-	err = netlink_dump_start(ctnl, skb, nlh, &c);
++	err = __netlink_dump_start(ctnl, skb, nlh, &c, ct, THIS_MODULE);
+ 	nf_ct_put(ct);
+ 
+ 	return err;
+@@ -2740,7 +2740,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
+ 		if (cda[CTA_EXPECT_MASTER])
+ 			return ctnetlink_dump_exp_ct(ctnl, skb, nlh, cda);
+ 		else {
+-			struct netlink_dump_control c = {
++			static struct netlink_dump_control c = {
+ 				.dump = ctnetlink_exp_dump_table,
+ 				.done = ctnetlink_exp_done,
+ 			};
+@@ -3202,7 +3202,7 @@ ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
+ 		       const struct nlattr * const cda[])
+ {
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = ctnetlink_exp_stat_cpu_dump,
+ 		};
+ 		return netlink_dump_start(ctnl, skb, nlh, &c);
 diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
 index b65d586..beec902 100644
 --- a/net/netfilter/nf_conntrack_proto.c
@@ -125956,6 +128588,121 @@ index c68c1e5..8b5d670 100644
  	mutex_unlock(&nf_sockopt_mutex);
  }
  EXPORT_SYMBOL(nf_unregister_sockopt);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4a41eb9..324584b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -554,7 +554,7 @@ static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
+ 	int err;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = nf_tables_dump_tables,
+ 		};
+ 		return netlink_dump_start(nlsk, skb, nlh, &c);
+@@ -1113,7 +1113,7 @@ static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
+ 	int err;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = nf_tables_dump_chains,
+ 		};
+ 		return netlink_dump_start(nlsk, skb, nlh, &c);
+@@ -1947,7 +1947,7 @@ static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
+ 	int err;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = nf_tables_dump_rules,
+ 		};
+ 		return netlink_dump_start(nlsk, skb, nlh, &c);
+@@ -2636,7 +2636,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
+ 		return err;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = nf_tables_dump_sets,
+ 			.done = nf_tables_dump_sets_done,
+ 		};
+@@ -2647,9 +2647,8 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
+ 			return -ENOMEM;
+ 
+ 		*ctx_dump = ctx;
+-		c.data = ctx_dump;
+ 
+-		return netlink_dump_start(nlsk, skb, nlh, &c);
++		return __netlink_dump_start(nlsk, skb, nlh, &c, ctx_dump, THIS_MODULE);
+ 	}
+ 
+ 	/* Only accept unspec with dump */
+@@ -3228,7 +3227,7 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
+ 		return -ENOENT;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = nf_tables_dump_set,
+ 		};
+ 		return netlink_dump_start(nlsk, skb, nlh, &c);
+diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
+index fefbf5f..996b6ef 100644
+--- a/net/netfilter/nfnetlink_acct.c
++++ b/net/netfilter/nfnetlink_acct.c
+@@ -263,10 +263,11 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
+ 	char *acct_name;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = nfnl_acct_dump,
+ 			.done = nfnl_acct_done,
+ 		};
++		void *data = NULL;
+ 
+ 		if (tb[NFACCT_FILTER]) {
+ 			struct nfacct_filter *filter;
+@@ -275,9 +276,9 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
+ 			if (IS_ERR(filter))
+ 				return PTR_ERR(filter);
+ 
+-			c.data = filter;
++			data = filter;
+ 		}
+-		return netlink_dump_start(nfnl, skb, nlh, &c);
++		return __netlink_dump_start(nfnl, skb, nlh, &c, data, THIS_MODULE);
+ 	}
+ 
+ 	if (!tb[NFACCT_NAME])
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 54330fb..77d3fb1 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -510,7 +510,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ 	bool tuple_set = false;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = nfnl_cthelper_dump_table,
+ 		};
+ 		return netlink_dump_start(nfnl, skb, nlh, &c);
+diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
+index 476accd..4f3dc0a 100644
+--- a/net/netfilter/nfnetlink_cttimeout.c
++++ b/net/netfilter/nfnetlink_cttimeout.c
+@@ -250,7 +250,7 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
+ 	struct ctnl_timeout *cur;
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = ctnl_timeout_dump,
+ 		};
+ 		return netlink_dump_start(ctnl, skb, nlh, &c);
 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
 index 4670821..a6c3c47d 100644
 --- a/net/netfilter/nfnetlink_log.c
@@ -126121,7 +128868,7 @@ index 11de55e..f25e448 100644
  	return 0;
  }
 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index fafe33b..8896912 100644
+index fafe33b..699ee5f 100644
 --- a/net/netlink/af_netlink.c
 +++ b/net/netlink/af_netlink.c
 @@ -287,7 +287,7 @@ static void netlink_overrun(struct sock *sk)
@@ -126133,7 +128880,38 @@ index fafe33b..8896912 100644
  }
  
  static void netlink_rcv_wake(struct sock *sk)
-@@ -3183,7 +3183,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -2876,7 +2876,9 @@ errout_skb:
+ 
+ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 			 const struct nlmsghdr *nlh,
+-			 struct netlink_dump_control *control)
++			 struct netlink_dump_control *control,
++			 void *data,
++			 struct module *module)
+ {
+ 	struct netlink_callback *cb;
+ 	struct sock *sk;
+@@ -2908,7 +2910,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 		goto error_unlock;
+ 	}
+ 	/* add reference of module which cb->dump belongs to */
+-	if (!try_module_get(control->module)) {
++	if (!try_module_get(module)) {
+ 		ret = -EPROTONOSUPPORT;
+ 		goto error_unlock;
+ 	}
+@@ -2918,8 +2920,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	cb->dump = control->dump;
+ 	cb->done = control->done;
+ 	cb->nlh = nlh;
+-	cb->data = control->data;
+-	cb->module = control->module;
++	cb->data = data;
++	cb->module = module;
+ 	cb->min_dump_alloc = control->min_dump_alloc;
+ 	cb->skb = skb;
+ 
+@@ -3183,7 +3185,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
  			   sk_wmem_alloc_get(s),
  			   nlk->cb_running,
  			   atomic_read(&s->sk_refcnt),
@@ -126142,6 +128920,56 @@ index fafe33b..8896912 100644
  			   sock_i_ino(s)
  			);
  
+diff --git a/net/netlink/diag.c b/net/netlink/diag.c
+index 3ee63a3cf..d6df4d8 100644
+--- a/net/netlink/diag.c
++++ b/net/netlink/diag.c
+@@ -209,7 +209,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+ 		return -EINVAL;
+ 
+ 	if (h->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = netlink_diag_dump,
+ 		};
+ 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 2ed5f96..5c86d30 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -573,26 +573,24 @@ static int genl_family_rcv_msg(struct genl_family *family,
+ 			return -EOPNOTSUPP;
+ 
+ 		if (!family->parallel_ops) {
+-			struct netlink_dump_control c = {
+-				.module = family->module,
+-				/* we have const, but the netlink API doesn't */
+-				.data = (void *)ops,
++			static struct netlink_dump_control c = {
+ 				.dump = genl_lock_dumpit,
+ 				.done = genl_lock_done,
+ 			};
++			/* we have const, but the netlink API doesn't */
++			void *data = (void *)ops;
+ 
+ 			genl_unlock();
+-			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
++			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c, data, family->module);
+ 			genl_lock();
+ 
+ 		} else {
+-			struct netlink_dump_control c = {
+-				.module = family->module,
++			netlink_dump_control_no_const c = {
+ 				.dump = ops->dumpit,
+ 				.done = ops->done,
+ 			};
+ 
+-			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
++			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c, NULL, family->module);
+ 		}
+ 
+ 		return rc;
 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
 index dba635d..0d2884b 100644
 --- a/net/openvswitch/actions.c
@@ -126192,7 +129020,7 @@ index b393412..3b2f7eb 100644
  };
  
 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 4695a36..b0b92d0 100644
+index 4695a36..95471f0 100644
 --- a/net/packet/af_packet.c
 +++ b/net/packet/af_packet.c
 @@ -278,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
@@ -126204,6 +129032,40 @@ index 4695a36..b0b92d0 100644
  	kfree_skb(skb);
  	return NET_XMIT_DROP;
  }
+@@ -1392,9 +1392,9 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
+ 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
+ 			if (i != j)
+ 				po->rollover->sock = i;
+-			atomic_long_inc(&po->rollover->num);
++			atomic_long_inc_unchecked(&po->rollover->num);
+ 			if (room == ROOM_LOW)
+-				atomic_long_inc(&po->rollover->num_huge);
++				atomic_long_inc_unchecked(&po->rollover->num_huge);
+ 			return i;
+ 		}
+ 
+@@ -1402,7 +1402,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
+ 			i = 0;
+ 	} while (i != j);
+ 
+-	atomic_long_inc(&po->rollover->num_failed);
++	atomic_long_inc_unchecked(&po->rollover->num_failed);
+ 	return idx;
+ }
+ 
+@@ -1657,9 +1657,9 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
+ 		if (!po->rollover)
+ 			return -ENOMEM;
+-		atomic_long_set(&po->rollover->num, 0);
+-		atomic_long_set(&po->rollover->num_huge, 0);
+-		atomic_long_set(&po->rollover->num_failed, 0);
++		atomic_long_set_unchecked(&po->rollover->num, 0);
++		atomic_long_set_unchecked(&po->rollover->num_huge, 0);
++		atomic_long_set_unchecked(&po->rollover->num_failed, 0);
+ 	}
+ 
+ 	mutex_lock(&fanout_mutex);
 @@ -2071,7 +2071,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
  drop_n_acct:
  	spin_lock(&sk->sk_receive_queue.lock);
@@ -126222,6 +129084,19 @@ index 4695a36..b0b92d0 100644
  			return -EFAULT;
  		switch (val) {
  		case TPACKET_V1:
+@@ -3797,9 +3797,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 	case PACKET_ROLLOVER_STATS:
+ 		if (!po->rollover)
+ 			return -EINVAL;
+-		rstats.tp_all = atomic_long_read(&po->rollover->num);
+-		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+-		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
++		rstats.tp_all = atomic_long_read_unchecked(&po->rollover->num);
++		rstats.tp_huge = atomic_long_read_unchecked(&po->rollover->num_huge);
++		rstats.tp_failed = atomic_long_read_unchecked(&po->rollover->num_failed);
+ 		data = &rstats;
+ 		lv = sizeof(rstats);
+ 		break;
 @@ -3817,7 +3817,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
  		len = lv;
  	if (put_user(len, optlen))
@@ -126231,6 +129106,36 @@ index 4695a36..b0b92d0 100644
  		return -EFAULT;
  	return 0;
  }
+diff --git a/net/packet/diag.c b/net/packet/diag.c
+index 0ed68f0..54c1dbe 100644
+--- a/net/packet/diag.c
++++ b/net/packet/diag.c
+@@ -236,7 +236,7 @@ static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+ 		return -EINVAL;
+ 
+ 	if (h->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = packet_diag_dump,
+ 		};
+ 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 9ee4631..6b79352 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -93,9 +93,9 @@ struct packet_fanout {
+ struct packet_rollover {
+ 	int			sock;
+ 	struct rcu_head		rcu;
+-	atomic_long_t		num;
+-	atomic_long_t		num_huge;
+-	atomic_long_t		num_failed;
++	atomic_long_unchecked_t	num;
++	atomic_long_unchecked_t	num_huge;
++	atomic_long_unchecked_t	num_failed;
+ #define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
+ 	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
+ } ____cacheline_aligned_in_smp;
 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
 index 850a86c..8884a37 100644
 --- a/net/phonet/pep.c
@@ -127944,6 +130849,19 @@ index 7926de1..8355d2c 100644
  		seq_putc(seq, '\n');
  	}
  
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index c512f64..284072f 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -299,7 +299,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+ 		return -EINVAL;
+ 
+ 	if (h->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
++		static struct netlink_dump_control c = {
+ 			.dump = unix_diag_dump,
+ 		};
+ 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
 diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
 index b3d5150..ff3a837 100644
 --- a/net/unix/sysctl_net_unix.c
@@ -128298,6 +131216,19 @@ index 05a6e3d..6716ec9 100644
  
  	__xfrm_sysctl_init(net);
  
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 24e06a2..b94edf5 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -2468,7 +2468,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 			return -EINVAL;
+ 
+ 		{
+-			struct netlink_dump_control c = {
++			netlink_dump_control_no_const c = {
+ 				.dump = link->dump,
+ 				.done = link->done,
+ 			};
 diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
 index 1db6d73..0819042 100644
 --- a/scripts/Kbuild.include
@@ -130168,6 +133099,19 @@ index 913f377..6e392d5 100644
  	struct path_cond cond = {
  		d_backing_inode(old_dentry)->i_uid,
  		d_backing_inode(old_dentry)->i_mode
+diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
+index c28b0f2..3b9fee0 100644
+--- a/security/apparmor/include/policy.h
++++ b/security/apparmor/include/policy.h
+@@ -134,7 +134,7 @@ struct aa_namespace {
+ 	struct aa_ns_acct acct;
+ 	struct aa_profile *unconfined;
+ 	struct list_head sub_ns;
+-	atomic_t uniq_null;
++	atomic_unchecked_t uniq_null;
+ 	long uniq_id;
+ 
+ 	struct dentry *dents[AAFS_NS_SIZEOF];
 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
 index dec607c..2f291ad9 100644
 --- a/security/apparmor/lsm.c
@@ -130201,6 +133145,28 @@ index dec607c..2f291ad9 100644
  		struct path_cond cond = { d_backing_inode(old_dentry)->i_uid,
  					  d_backing_inode(old_dentry)->i_mode
  		};
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index 705c287..81257f1 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -298,7 +298,7 @@ static struct aa_namespace *alloc_namespace(const char *prefix,
+ 	/* ns and ns->unconfined share ns->unconfined refcount */
+ 	ns->unconfined->ns = ns;
+ 
+-	atomic_set(&ns->uniq_null, 0);
++	atomic_set_unchecked(&ns->uniq_null, 0);
+ 
+ 	return ns;
+ 
+@@ -689,7 +689,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat)
+ {
+ 	struct aa_profile *profile = NULL;
+ 	char *name;
+-	int uniq = atomic_inc_return(&parent->ns->uniq_null);
++	int uniq = atomic_inc_return_unchecked(&parent->ns->uniq_null);
+ 
+ 	/* freed below */
+ 	name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, GFP_KERNEL);
 diff --git a/security/commoncap.c b/security/commoncap.c
 index 1832cf7..b805e0f 100644
 --- a/security/commoncap.c
@@ -130862,9 +133828,18 @@ index 75888dd..c940854 100644
  	default:
  		result = -EINVAL;
 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
-index 13cfa81..a68addd 100644
+index 13cfa81..8e0296d 100644
 --- a/sound/core/seq/seq_clientmgr.c
 +++ b/sound/core/seq/seq_clientmgr.c
+@@ -416,7 +416,7 @@ static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
+ 	if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
+ 		return -ENXIO;
+ 
+-	if (atomic_read(&fifo->overflow) > 0) {
++	if (atomic_read_unchecked(&fifo->overflow) > 0) {
+ 		/* buffer overflow is detected */
+ 		snd_seq_fifo_clear(fifo);
+ 		/* return error code */
 @@ -446,7 +446,7 @@ static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
  			count -= sizeof(struct snd_seq_event);
  			buf += sizeof(struct snd_seq_event);
@@ -130912,6 +133887,50 @@ index 6517590..9905cee 100644
  	snd_leave_user(fs);
  	if (err < 0)
  		goto error;
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index 1d5acbe..5f55223 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -50,7 +50,7 @@ struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
+ 	spin_lock_init(&f->lock);
+ 	snd_use_lock_init(&f->use_lock);
+ 	init_waitqueue_head(&f->input_sleep);
+-	atomic_set(&f->overflow, 0);
++	atomic_set_unchecked(&f->overflow, 0);
+ 
+ 	f->head = NULL;
+ 	f->tail = NULL;
+@@ -96,7 +96,7 @@ void snd_seq_fifo_clear(struct snd_seq_fifo *f)
+ 	unsigned long flags;
+ 
+ 	/* clear overflow flag */
+-	atomic_set(&f->overflow, 0);
++	atomic_set_unchecked(&f->overflow, 0);
+ 
+ 	snd_use_lock_sync(&f->use_lock);
+ 	spin_lock_irqsave(&f->lock, flags);
+@@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
+ 	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
+ 	if (err < 0) {
+ 		if ((err == -ENOMEM) || (err == -EAGAIN))
+-			atomic_inc(&f->overflow);
++			atomic_inc_unchecked(&f->overflow);
+ 		snd_use_lock_free(&f->use_lock);
+ 		return err;
+ 	}
+diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
+index 062c446..a4b6f4c 100644
+--- a/sound/core/seq/seq_fifo.h
++++ b/sound/core/seq/seq_fifo.h
+@@ -35,7 +35,7 @@ struct snd_seq_fifo {
+ 	spinlock_t lock;
+ 	snd_use_lock_t use_lock;
+ 	wait_queue_head_t input_sleep;
+-	atomic_t overflow;
++	atomic_unchecked_t overflow;
+ 
+ };
+ 
 diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
 index 8010766..4bd361f 100644
 --- a/sound/core/seq/seq_memory.c
@@ -132940,10 +135959,10 @@ index 0000000..0b98f34
 +}
 diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
 new file mode 100644
-index 0000000..9d33451
+index 0000000..28c3242
 --- /dev/null
 +++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,813 @@
+@@ -0,0 +1,819 @@
 +#ifndef GCC_COMMON_H_INCLUDED
 +#define GCC_COMMON_H_INCLUDED
 +
@@ -133091,13 +136110,28 @@ index 0000000..9d33451
 +extern void debug_dominance_info(enum cdi_direction dir);
 +extern void debug_dominance_tree(enum cdi_direction dir, basic_block root);
 +
++#if BUILDING_GCC_VERSION == 4006
++extern void debug_gimple_stmt(gimple);
++extern void debug_gimple_seq(gimple_seq);
++extern void print_gimple_seq(FILE *, gimple_seq, int, int);
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++extern void print_gimple_expr(FILE *, gimple, int, int);
++extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
++#endif
++
 +#ifdef __cplusplus
 +static inline void debug_tree(const_tree t)
 +{
 +	debug_tree(CONST_CAST_TREE(t));
 +}
++
++static inline void debug_gimple_stmt(const_gimple s)
++{
++	debug_gimple_stmt(CONST_CAST_GIMPLE(s));
++}
 +#else
 +#define debug_tree(t) debug_tree(CONST_CAST_TREE(t))
++#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s))
 +#endif
 +
 +#define __unused __attribute__((__unused__))
@@ -133257,15 +136291,6 @@ index 0000000..9d33451
 +}
 +#endif
 +
-+#if BUILDING_GCC_VERSION == 4006
-+extern void debug_gimple_stmt(gimple);
-+extern void debug_gimple_seq(gimple_seq);
-+extern void print_gimple_seq(FILE *, gimple_seq, int, int);
-+extern void print_gimple_stmt(FILE *, gimple, int, int);
-+extern void print_gimple_expr(FILE *, gimple, int, int);
-+extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
-+#endif
-+
 +#if BUILDING_GCC_VERSION <= 4007
 +#define FOR_EACH_FUNCTION(node) for (node = cgraph_nodes; node; node = node->next)
 +#define FOR_EACH_VARIABLE(node) for (node = varpool_nodes; node; node = node->next)
@@ -133773,10 +136798,10 @@ index 0000000..7514850
 +fi
 diff --git a/tools/gcc/initify_plugin.c b/tools/gcc/initify_plugin.c
 new file mode 100644
-index 0000000..9431b53
+index 0000000..aedb6ea
 --- /dev/null
 +++ b/tools/gcc/initify_plugin.c
-@@ -0,0 +1,588 @@
+@@ -0,0 +1,591 @@
 +/*
 + * Copyright 2015-2016 by Emese Revfy <re.emese@gmail.com>
 + * Licensed under the GPL v2, or (at your option) v3
@@ -133797,7 +136822,7 @@ index 0000000..9431b53
 +int plugin_is_GPL_compatible;
 +
 +static struct plugin_info initify_plugin_info = {
-+	.version	= "20160113",
++	.version	= "20160130",
 +	.help		= "initify_plugin\n",
 +};
 +
@@ -134004,9 +137029,12 @@ index 0000000..9431b53
 +	for (i = 0; i < TREE_OPERAND_LENGTH(value); i++) {
 +		const_tree op = TREE_OPERAND(value, i);
 +
++		if (op == NULL_TREE)
++			continue;
 +		if (is_same_vardecl(op, vardecl))
 +			return true;
-+		return search_same_vardecl(op, vardecl);
++		if (search_same_vardecl(op, vardecl))
++			return true;
 +	}
 +	return false;
 +}
@@ -178151,19 +181179,6 @@ index 9098083..18f0454 100644
  #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
  
  #include <linux/types.h>
-diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
-index fe1b02c..a5eae4a 100644
---- a/tools/lib/api/Makefile
-+++ b/tools/lib/api/Makefile
-@@ -16,7 +16,7 @@ MAKEFLAGS += --no-print-directory
- LIBFILE = $(OUTPUT)libapi.a
- 
- CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
--CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
-+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
- CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
- 
- RM = rm -f
 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
 index 3a3a0f1..6cf679d 100644
 --- a/tools/perf/util/include/asm/alternative-asm.h



* [gentoo-commits] proj/hardened-patchset:master commit in: 4.3.5/
@ 2016-02-08  7:58 Anthony G. Basile
  0 siblings, 0 replies; 2+ messages in thread
From: Anthony G. Basile @ 2016-02-08  7:58 UTC (permalink / raw
  To: gentoo-commits

commit:     2ba70c31916532781f96e738155b5be997778910
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Mon Feb  8 08:06:43 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Mon Feb  8 08:06:43 2016 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=2ba70c31

grsecurity-3.1-4.3.5-201602070910

 4.3.5/0000_README                                  |   2 +-
 ...> 4420_grsecurity-3.1-4.3.5-201602070910.patch} | 429 ++++++++++-----------
 2 files changed, 201 insertions(+), 230 deletions(-)

diff --git a/4.3.5/0000_README b/4.3.5/0000_README
index 3ec071f..40c470e 100644
--- a/4.3.5/0000_README
+++ b/4.3.5/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-3.1-4.3.5-201602032209.patch
+Patch:	4420_grsecurity-3.1-4.3.5-201602070910.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.3.5/4420_grsecurity-3.1-4.3.5-201602032209.patch b/4.3.5/4420_grsecurity-3.1-4.3.5-201602070910.patch
similarity index 99%
rename from 4.3.5/4420_grsecurity-3.1-4.3.5-201602032209.patch
rename to 4.3.5/4420_grsecurity-3.1-4.3.5-201602070910.patch
index 562e55c..e639b69 100644
--- a/4.3.5/4420_grsecurity-3.1-4.3.5-201602032209.patch
+++ b/4.3.5/4420_grsecurity-3.1-4.3.5-201602070910.patch
@@ -1048,10 +1048,18 @@ index 78c0621..94cd626 100644
  	  Counts number of I and D TLB Misses and exports them via Debugfs
  	  The counters can be cleared via Debugfs as well
 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 639411f..82e6320 100644
+index 639411f..d4b3233 100644
 --- a/arch/arm/Kconfig
 +++ b/arch/arm/Kconfig
-@@ -1785,7 +1785,7 @@ config ALIGNMENT_TRAP
+@@ -1708,6 +1708,7 @@ config HIGHPTE
+ config CPU_SW_DOMAIN_PAN
+ 	bool "Enable use of CPU domains to implement privileged no-access"
+ 	depends on MMU && !ARM_LPAE
++	depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ 	default y
+ 	help
+ 	  Increase kernel security by ensuring that normal kernel accesses
+@@ -1785,7 +1786,7 @@ config ALIGNMENT_TRAP
  
  config UACCESS_WITH_MEMCPY
  	bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
@@ -1060,7 +1068,7 @@ index 639411f..82e6320 100644
  	default y if CPU_FEROCEON
  	help
  	  Implement faster copy_to_user and clear_user methods for CPU
-@@ -2022,6 +2022,7 @@ config KEXEC
+@@ -2022,6 +2023,7 @@ config KEXEC
  	depends on (!SMP || PM_SLEEP_SMP)
  	depends on !CPU_V7M
  	select KEXEC_CORE
@@ -1747,7 +1755,7 @@ index 0f84249..8e83c55 100644
  struct of_cpuidle_method {
  	const char *method;
 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
-index fc8ba16..8b84f53 100644
+index fc8ba16..0c20017 100644
 --- a/arch/arm/include/asm/domain.h
 +++ b/arch/arm/include/asm/domain.h
 @@ -42,7 +42,6 @@
@@ -1786,15 +1794,47 @@ index fc8ba16..8b84f53 100644
  
  #define domain_mask(dom)	((3) << (2 * (dom)))
  #define domain_val(dom,type)	((type) << (2 * (dom)))
-@@ -62,7 +79,7 @@
+@@ -62,13 +79,19 @@
  #define DACR_INIT \
  	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
  	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 -	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
 +	 domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
  	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
++#elif CONFIG_PAX_MEMORY_UDEREF
++	/* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */
++#define DACR_INIT \
++	(domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
++	 domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
  #else
  #define DACR_INIT \
+-	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
++	(domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
+ 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+-	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++	 domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+ 	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+ #endif
+ 
+@@ -113,6 +136,17 @@ static inline void set_domain(unsigned val)
+ 		set_domain(domain);				\
+ 	} while (0)
+ 
++#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#define modify_domain(dom,type)					\
++	do {							\
++		struct thread_info *thread = current_thread_info(); \
++		unsigned int domain = get_domain();		\
++		domain &= ~domain_mask(dom);			\
++		domain = domain | domain_val(dom, type);	\
++		thread->cpu_domain = domain;			\
++		set_domain(domain);				\
++	} while (0)
++
+ #else
+ static inline void modify_domain(unsigned dom, unsigned type)	{ }
+ #endif
 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
 index d2315ff..f60b47b 100644
 --- a/arch/arm/include/asm/elf.h
@@ -1830,61 +1870,17 @@ index de53547..52b9a28 100644
  		(unsigned long)(dest_buf) + (size));			\
  									\
 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
-index 6795368..b784325 100644
+index 6795368..6c4d749 100644
 --- a/arch/arm/include/asm/futex.h
 +++ b/arch/arm/include/asm/futex.h
-@@ -52,6 +52,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- 		return -EFAULT;
- 
-+	pax_open_userland();
-+
- 	smp_mb();
- 	/* Prefetching cannot fault */
- 	prefetchw(uaddr);
-@@ -71,6 +73,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- 	uaccess_restore(__ua_flags);
- 	smp_mb();
- 
-+	pax_close_userland();
-+
- 	*uval = val;
- 	return ret;
- }
-@@ -107,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+@@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
  		return -EFAULT;
  
  	preempt_disable();
-+	pax_open_userland();
 +
  	__ua_flags = uaccess_save_and_enable();
  	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
  	"1:	" TUSER(ldr) "	%1, [%4]\n"
-@@ -119,6 +125,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- 	: "cc", "memory");
- 	uaccess_restore(__ua_flags);
- 
-+	pax_close_userland();
-+
- 	*uval = val;
- 	preempt_enable();
- 
-@@ -146,6 +154,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- 	preempt_disable();
- #endif
- 	pagefault_disable();
-+	pax_open_userland();
- 
- 	switch (op) {
- 	case FUTEX_OP_SET:
-@@ -167,6 +176,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- 		ret = -ENOSYS;
- 	}
- 
-+	pax_close_userland();
- 	pagefault_enable();
- #ifndef CONFIG_SMP
- 	preempt_enable();
 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
 index 83eb2f7..ed77159 100644
 --- a/arch/arm/include/asm/kmap_types.h
@@ -2202,10 +2198,20 @@ index ef35665..d69146d 100644
  struct of_cpu_method {
  	const char *method;
 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 776757d..5a598df 100644
+index 776757d..a552c1d 100644
 --- a/arch/arm/include/asm/thread_info.h
 +++ b/arch/arm/include/asm/thread_info.h
-@@ -143,6 +143,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -73,6 +73,9 @@ struct thread_info {
+ 	.flags		= 0,						\
+ 	.preempt_count	= INIT_PREEMPT_COUNT,				\
+ 	.addr_limit	= KERNEL_DS,					\
++	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |	\
++			  domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
++			  domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT),	\
+ }
+ 
+ #define init_thread_info	(init_thread_union.thread_info)
+@@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
  #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
  #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
  #define TIF_SECCOMP		7	/* seccomp syscall filtering active */
@@ -2216,7 +2222,7 @@ index 776757d..5a598df 100644
  
  #define TIF_NOHZ		12	/* in adaptive nohz mode */
  #define TIF_USING_IWMMXT	17
-@@ -158,10 +162,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
  #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
  #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
  #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
@@ -2252,7 +2258,7 @@ index 5f833f7..76e6644 100644
  		}
  
 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index 8cc85a4..5f24fe2 100644
+index 8cc85a4..28c2880 100644
 --- a/arch/arm/include/asm/uaccess.h
 +++ b/arch/arm/include/asm/uaccess.h
 @@ -18,6 +18,7 @@
@@ -2263,16 +2269,35 @@ index 8cc85a4..5f24fe2 100644
  
  #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  #include <asm-generic/uaccess-unaligned.h>
-@@ -99,11 +100,38 @@ extern int __put_user_bad(void);
- static inline void set_fs(mm_segment_t fs)
- {
- 	current_thread_info()->addr_limit = fs;
--	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
-+	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
- }
- 
- #define segment_eq(a, b)	((a) == (b))
+@@ -50,6 +51,59 @@ struct exception_table_entry
+ extern int fixup_exception(struct pt_regs *regs);
  
+ /*
++ * These two are intentionally not defined anywhere - if the kernel
++ * code generates any references to them, that's a bug.
++ */
++extern int __get_user_bad(void);
++extern int __put_user_bad(void);
++
++/*
++ * Note that this is actually 0x1,0000,0000
++ */
++#define KERNEL_DS	0x00000000
++#define get_ds()	(KERNEL_DS)
++
++#ifdef CONFIG_MMU
++
++#define USER_DS		TASK_SIZE
++#define get_fs()	(current_thread_info()->addr_limit)
++
++static inline void set_fs(mm_segment_t fs)
++{
++	current_thread_info()->addr_limit = fs;
++	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
++}
++
++#define segment_eq(a, b)	((a) == (b))
++
 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
 +
@@ -2300,38 +2325,57 @@ index 8cc85a4..5f24fe2 100644
 +
 +}
 +
++/*
+  * These two functions allow hooking accesses to userspace to increase
+  * system integrity by ensuring that the kernel can not inadvertantly
+  * perform such accesses (eg, via list poison values) which could then
+@@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void)
+ 
+ 	return old_domain;
+ #else
++	pax_open_userland();
+ 	return 0;
+ #endif
+ }
+@@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags)
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ 	/* Restore the user access mask */
+ 	set_domain(flags);
++#else
++	pax_close_userland();
+ #endif
+ }
+ 
+-/*
+- * These two are intentionally not defined anywhere - if the kernel
+- * code generates any references to them, that's a bug.
+- */
+-extern int __get_user_bad(void);
+-extern int __put_user_bad(void);
+-
+-/*
+- * Note that this is actually 0x1,0000,0000
+- */
+-#define KERNEL_DS	0x00000000
+-#define get_ds()	(KERNEL_DS)
+-
+-#ifdef CONFIG_MMU
+-
+-#define USER_DS		TASK_SIZE
+-#define get_fs()	(current_thread_info()->addr_limit)
+-
+-static inline void set_fs(mm_segment_t fs)
+-{
+-	current_thread_info()->addr_limit = fs;
+-	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+-}
+-
+-#define segment_eq(a, b)	((a) == (b))
+-
  #define __addr_ok(addr) ({ \
  	unsigned long flag; \
  	__asm__("cmp %2, %0; movlo %0, #0" \
-@@ -229,8 +257,12 @@ extern int __get_user_64t_4(void *);
- 
- #define get_user(x, p)							\
- 	({								\
-+		int __e;						\
- 		might_fault();						\
--		__get_user_check(x, p);					\
-+		pax_open_userland();					\
-+		__e = __get_user_check((x), (p));			\
-+		pax_close_userland();					\
-+		__e;							\
- 	 })
- 
- extern int __put_user_1(void *, unsigned int);
-@@ -277,8 +309,12 @@ extern int __put_user_8(void *, unsigned long long);
- 
- #define put_user(x, p)							\
- 	({								\
-+		int __e;						\
- 		might_fault();						\
--		__put_user_check(x, p);					\
-+		pax_open_userland();					\
-+		__e = __put_user_check((x), (p));			\
-+		pax_close_userland();					\
-+		__e;							\
- 	 })
- 
- #else /* CONFIG_MMU */
-@@ -302,6 +338,7 @@ static inline void set_fs(mm_segment_t fs)
+@@ -302,6 +333,7 @@ static inline void set_fs(mm_segment_t fs)
  
  #endif /* CONFIG_MMU */
  
@@ -2339,43 +2383,7 @@ index 8cc85a4..5f24fe2 100644
  #define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)
  
  #define user_addr_max() \
-@@ -319,13 +356,17 @@ static inline void set_fs(mm_segment_t fs)
- #define __get_user(x, ptr)						\
- ({									\
- 	long __gu_err = 0;						\
-+	pax_open_userland();						\
- 	__get_user_err((x), (ptr), __gu_err);				\
-+	pax_close_userland();						\
- 	__gu_err;							\
- })
- 
- #define __get_user_error(x, ptr, err)					\
- ({									\
-+	pax_open_userland();						\
- 	__get_user_err((x), (ptr), err);				\
-+	pax_close_userland();						\
- 	(void) 0;							\
- })
- 
-@@ -392,13 +433,17 @@ do {									\
- #define __put_user(x, ptr)						\
- ({									\
- 	long __pu_err = 0;						\
-+	pax_open_userland();						\
- 	__put_user_err((x), (ptr), __pu_err);				\
-+	pax_close_userland();						\
- 	__pu_err;							\
- })
- 
- #define __put_user_error(x, ptr, err)					\
- ({									\
-+	pax_open_userland();						\
- 	__put_user_err((x), (ptr), err);				\
-+	pax_close_userland();						\
- 	(void) 0;							\
- })
- 
-@@ -490,35 +535,41 @@ do {									\
+@@ -490,35 +522,41 @@ do {									\
  
  
  #ifdef CONFIG_MMU
@@ -2425,7 +2433,7 @@ index 8cc85a4..5f24fe2 100644
  __clear_user_std(void __user *addr, unsigned long n);
  
  static inline unsigned long __must_check
-@@ -538,6 +589,9 @@ __clear_user(void __user *addr, unsigned long n)
+@@ -538,6 +576,9 @@ __clear_user(void __user *addr, unsigned long n)
  
  static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
  {
@@ -2435,7 +2443,7 @@ index 8cc85a4..5f24fe2 100644
  	if (access_ok(VERIFY_READ, from, n))
  		n = __copy_from_user(to, from, n);
  	else /* security hole - plug it */
-@@ -547,6 +601,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+@@ -547,6 +588,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
  
  static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
  {
@@ -2485,7 +2493,7 @@ index 318da33..373689f 100644
  /**
   * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
-index 3e1c26e..9ea61e6 100644
+index 3e1c26e..97a5f9e 100644
 --- a/arch/arm/kernel/entry-armv.S
 +++ b/arch/arm/kernel/entry-armv.S
 @@ -50,6 +50,87 @@
@@ -2608,7 +2616,7 @@ index 3e1c26e..9ea61e6 100644
  	mov	r6, #-1			@  ""  ""      ""       ""
 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 +	@ offset sp by 8 as done in pax_enter_kernel
-+	add	r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
++	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole + 4)
 +#else
  	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 +#endif
@@ -2956,7 +2964,7 @@ index 69bda1a..755113a 100644
  	if (waddr != addr) {
  		flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 7a7c4ce..bc91093 100644
+index 7a7c4ce..f0de3eb 100644
 --- a/arch/arm/kernel/process.c
 +++ b/arch/arm/kernel/process.c
 @@ -98,8 +98,8 @@ void __show_regs(struct pt_regs *regs)
@@ -2970,6 +2978,15 @@ index 7a7c4ce..bc91093 100644
  	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
  	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
  		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+@@ -226,7 +226,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ 
+ 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+ 
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ 	/*
+ 	 * Copy the initial value of the domain access control register
+ 	 * from the current thread: thread->addr_limit will have been
 @@ -309,12 +309,6 @@ unsigned long get_wchan(struct task_struct *p)
  	return 0;
  }
@@ -3839,71 +3856,6 @@ index df7537f..b931a5f 100644
  	  exploits.
  
  	  If all of the binaries and libraries which run on your platform
-diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
-index 00b7f7d..6fc28bc 100644
---- a/arch/arm/mm/alignment.c
-+++ b/arch/arm/mm/alignment.c
-@@ -216,10 +216,12 @@ union offset_union {
- #define __get16_unaligned_check(ins,val,addr)			\
- 	do {							\
- 		unsigned int err = 0, v, a = addr;		\
-+		pax_open_userland();				\
- 		__get8_unaligned_check(ins,v,a,err);		\
- 		val =  v << ((BE) ? 8 : 0);			\
- 		__get8_unaligned_check(ins,v,a,err);		\
- 		val |= v << ((BE) ? 0 : 8);			\
-+		pax_close_userland();				\
- 		if (err)					\
- 			goto fault;				\
- 	} while (0)
-@@ -233,6 +235,7 @@ union offset_union {
- #define __get32_unaligned_check(ins,val,addr)			\
- 	do {							\
- 		unsigned int err = 0, v, a = addr;		\
-+		pax_open_userland();				\
- 		__get8_unaligned_check(ins,v,a,err);		\
- 		val =  v << ((BE) ? 24 :  0);			\
- 		__get8_unaligned_check(ins,v,a,err);		\
-@@ -241,6 +244,7 @@ union offset_union {
- 		val |= v << ((BE) ?  8 : 16);			\
- 		__get8_unaligned_check(ins,v,a,err);		\
- 		val |= v << ((BE) ?  0 : 24);			\
-+		pax_close_userland();				\
- 		if (err)					\
- 			goto fault;				\
- 	} while (0)
-@@ -254,6 +258,7 @@ union offset_union {
- #define __put16_unaligned_check(ins,val,addr)			\
- 	do {							\
- 		unsigned int err = 0, v = val, a = addr;	\
-+		pax_open_userland();				\
- 		__asm__( FIRST_BYTE_16				\
- 	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
- 	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
-@@ -273,6 +278,7 @@ union offset_union {
- 		"	.popsection\n"				\
- 		: "=r" (err), "=&r" (v), "=&r" (a)		\
- 		: "0" (err), "1" (v), "2" (a));			\
-+		pax_close_userland();				\
- 		if (err)					\
- 			goto fault;				\
- 	} while (0)
-@@ -286,6 +292,7 @@ union offset_union {
- #define __put32_unaligned_check(ins,val,addr)			\
- 	do {							\
- 		unsigned int err = 0, v = val, a = addr;	\
-+		pax_open_userland();				\
- 		__asm__( FIRST_BYTE_32				\
- 	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
- 	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
-@@ -315,6 +322,7 @@ union offset_union {
- 		"	.popsection\n"				\
- 		: "=r" (err), "=&r" (v), "=&r" (a)		\
- 		: "0" (err), "1" (v), "2" (a));			\
-+		pax_close_userland();				\
- 		if (err)					\
- 			goto fault;				\
- 	} while (0)
 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
 index 493692d..42a4504 100644
 --- a/arch/arm/mm/cache-l2x0.c
@@ -113574,7 +113526,7 @@ index 04e91ff..2419384 100644
  }
  EXPORT_SYMBOL(__stack_chk_fail);
 diff --git a/kernel/pid.c b/kernel/pid.c
-index ca36879..1f6c7bd 100644
+index ca36879..1f368d5 100644
 --- a/kernel/pid.c
 +++ b/kernel/pid.c
 @@ -33,6 +33,7 @@
@@ -113613,15 +113565,14 @@ index ca36879..1f6c7bd 100644
  }
  
  struct task_struct *find_task_by_vpid(pid_t vnr)
-@@ -461,13 +470,21 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
+@@ -461,13 +470,20 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
  	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
  }
  
 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
 +{
-+	rcu_lockdep_assert(rcu_read_lock_held(),
-+			   "find_task_by_pid_ns() needs rcu_read_lock()"
-+			   " protection");
++	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
++			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
 +	return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
 +}
 +
@@ -113636,7 +113587,7 @@ index ca36879..1f6c7bd 100644
  	rcu_read_unlock();
  	return pid;
  }
-@@ -528,7 +545,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+@@ -528,7 +544,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
  	if (likely(pid_alive(task))) {
  		if (type != PIDTYPE_PID)
  			task = task->group_leader;
@@ -113837,10 +113788,10 @@ index 99513e1..0caa643 100644
  }
  
 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index 787320d..9873654 100644
+index 787320d..64d61aa 100644
 --- a/kernel/ptrace.c
 +++ b/kernel/ptrace.c
-@@ -207,18 +207,45 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+@@ -207,18 +207,46 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
  	return ret;
  }
  
@@ -113881,6 +113832,7 @@ index 787320d..9873654 100644
  	const struct cred *cred = current_cred(), *tcred;
 +	kuid_t caller_uid;
 +	kgid_t caller_gid;
++	int dumpable = 0;
 +
 +	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
 +		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
@@ -113889,7 +113841,13 @@ index 787320d..9873654 100644
  
  	/* May we inspect the given task?
  	 * This check is used both for attaching with ptrace
-@@ -233,15 +260,30 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+@@ -228,20 +256,35 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ 	 * because setting up the necessary parent/child relationship
+ 	 * or halting the specified task is impossible.
+ 	 */
+-	int dumpable = 0;
++
+ 	/* Don't let security modules deny introspection */
  	if (same_thread_group(task, current))
  		return 0;
  	rcu_read_lock();
@@ -113927,7 +113885,7 @@ index 787320d..9873654 100644
  		goto ok;
  	rcu_read_unlock();
  	return -EPERM;
-@@ -252,7 +294,7 @@ ok:
+@@ -252,7 +295,7 @@ ok:
  		dumpable = get_dumpable(task->mm);
  	rcu_read_lock();
  	if (dumpable != SUID_DUMP_USER &&
@@ -113936,7 +113894,7 @@ index 787320d..9873654 100644
  		rcu_read_unlock();
  		return -EPERM;
  	}
-@@ -306,7 +348,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+@@ -306,7 +349,7 @@ static int ptrace_attach(struct task_struct *task, long request,
  		goto out;
  
  	task_lock(task);
@@ -113945,7 +113903,7 @@ index 787320d..9873654 100644
  	task_unlock(task);
  	if (retval)
  		goto unlock_creds;
-@@ -321,7 +363,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+@@ -321,7 +364,7 @@ static int ptrace_attach(struct task_struct *task, long request,
  	if (seize)
  		flags |= PT_SEIZED;
  	rcu_read_lock();
@@ -113954,7 +113912,7 @@ index 787320d..9873654 100644
  		flags |= PT_PTRACE_CAP;
  	rcu_read_unlock();
  	task->ptrace = flags;
-@@ -514,7 +556,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
+@@ -514,7 +557,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
  				break;
  			return -EIO;
  		}
@@ -113963,7 +113921,7 @@ index 787320d..9873654 100644
  			return -EFAULT;
  		copied += retval;
  		src += retval;
-@@ -815,7 +857,7 @@ int ptrace_request(struct task_struct *child, long request,
+@@ -815,7 +858,7 @@ int ptrace_request(struct task_struct *child, long request,
  	bool seized = child->ptrace & PT_SEIZED;
  	int ret = -EIO;
  	siginfo_t siginfo, *si;
@@ -113972,7 +113930,7 @@ index 787320d..9873654 100644
  	unsigned long __user *datalp = datavp;
  	unsigned long flags;
  
-@@ -1061,14 +1103,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+@@ -1061,14 +1104,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
  		goto out;
  	}
  
@@ -113995,7 +113953,7 @@ index 787320d..9873654 100644
  		goto out_put_task_struct;
  	}
  
-@@ -1096,7 +1145,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+@@ -1096,7 +1146,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
  	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
  	if (copied != sizeof(tmp))
  		return -EIO;
@@ -114004,7 +113962,7 @@ index 787320d..9873654 100644
  }
  
  int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
-@@ -1189,7 +1238,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+@@ -1189,7 +1239,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
  }
  
  COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
@@ -114013,7 +113971,7 @@ index 787320d..9873654 100644
  {
  	struct task_struct *child;
  	long ret;
-@@ -1205,14 +1254,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
+@@ -1205,14 +1255,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
  		goto out;
  	}
  
@@ -126058,10 +126016,24 @@ index ade7737..70ed9be 100644
  		goto err_reg;
  
 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 064f1a0..e6e7d27 100644
+index 064f1a0..47f24ef 100644
 --- a/net/ipv4/tcp_input.c
 +++ b/net/ipv4/tcp_input.c
-@@ -786,7 +786,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
+@@ -265,11 +265,13 @@ static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+ 		tp->ecn_flags &= ~TCP_ECN_OK;
+ }
+ 
++#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
+ static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
+ {
+ 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
+ 		tp->ecn_flags &= ~TCP_ECN_OK;
+ }
++#endif
+ 
+ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+ {
+@@ -786,7 +788,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
  	 * without any lock. We want to make sure compiler wont store
  	 * intermediate values in this location.
  	 */
@@ -126070,7 +126042,7 @@ index 064f1a0..e6e7d27 100644
  						sk->sk_max_pacing_rate);
  }
  
-@@ -4650,7 +4650,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+@@ -4650,7 +4652,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
   * simplifies code)
   */
  static void
@@ -126079,7 +126051,7 @@ index 064f1a0..e6e7d27 100644
  	     struct sk_buff *head, struct sk_buff *tail,
  	     u32 start, u32 end)
  {
-@@ -5645,6 +5645,7 @@ discard:
+@@ -5645,6 +5647,7 @@ discard:
  	    tcp_paws_reject(&tp->rx_opt, 0))
  		goto discard_and_undo;
  
@@ -126087,7 +126059,7 @@ index 064f1a0..e6e7d27 100644
  	if (th->syn) {
  		/* We see SYN without ACK. It is attempt of
  		 * simultaneous connect with crossed SYNs.
-@@ -5696,6 +5697,7 @@ discard:
+@@ -5696,6 +5699,7 @@ discard:
  		goto discard;
  #endif
  	}
@@ -126095,7 +126067,7 @@ index 064f1a0..e6e7d27 100644
  	/* "fifth, if neither of the SYN or RST bits is set then
  	 * drop the segment and return."
  	 */
-@@ -5742,7 +5744,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+@@ -5742,7 +5746,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
  			goto discard;
  
  		if (th->syn) {
@@ -126104,7 +126076,7 @@ index 064f1a0..e6e7d27 100644
  				goto discard;
  			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
  				return 1;
-@@ -6072,7 +6074,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+@@ -6072,7 +6076,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
  
  		kmemcheck_annotate_bitfield(ireq, flags);
  		ireq->opt = NULL;
@@ -132090,10 +132062,10 @@ index 8e5aee6..445cf3d 100755
  # Find all available archs
  find_all_archs()
 diff --git a/security/Kconfig b/security/Kconfig
-index e452378..3e213c66 100644
+index e452378..16eb80f 100644
 --- a/security/Kconfig
 +++ b/security/Kconfig
-@@ -4,6 +4,981 @@
+@@ -4,6 +4,980 @@
  
  menu "Security options"
  
@@ -132928,7 +132900,6 @@ index e452378..3e213c66 100644
 +	default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && !(X86_64 && GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX) && (!X86 || GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
 +	depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
 +	select PAX_PER_CPU_PGD if X86_64
-+	select CPU_SW_DOMAIN_PAN if ARM
 +	help
 +	  By saying Y here the kernel will be prevented from dereferencing
 +	  userland pointers in contexts where the kernel expects only kernel
@@ -133075,7 +133046,7 @@ index e452378..3e213c66 100644
  source security/keys/Kconfig
  
  config SECURITY_DMESG_RESTRICT
-@@ -104,7 +1079,7 @@ config INTEL_TXT
+@@ -104,7 +1078,7 @@ config INTEL_TXT
  config LSM_MMAP_MIN_ADDR
  	int "Low address space for LSM to protect from user allocation"
  	depends on SECURITY && SECURITY_SELINUX

