public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/hardened-patchset:master commit in: 4.1.6/
@ 2015-08-24 11:26 Anthony G. Basile
  0 siblings, 0 replies; 2+ messages in thread
From: Anthony G. Basile @ 2015-08-24 11:26 UTC (permalink / raw)
  To: gentoo-commits

commit:     9994874d540fa5cffaf5a44f90cd65d770fae08d
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 24 11:29:58 2015 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Mon Aug 24 11:29:58 2015 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=9994874d

grsecurity-3.1-4.1.6-201508230818

 4.1.6/0000_README                                  |   2 +-
 ...> 4420_grsecurity-3.1-4.1.6-201508230818.patch} | 330 ++++++++++++++++++---
 2 files changed, 293 insertions(+), 39 deletions(-)

diff --git a/4.1.6/0000_README b/4.1.6/0000_README
index ddf2d35..fe455ba 100644
--- a/4.1.6/0000_README
+++ b/4.1.6/0000_README
@@ -6,7 +6,7 @@ Patch:	1005_linux-4.1.6.patch
 From:	http://www.kernel.org
 Desc:	Linux 4.1.6
 
-Patch:	4420_grsecurity-3.1-4.1.6-201508181953.patch
+Patch:	4420_grsecurity-3.1-4.1.6-201508230818.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.1.6/4420_grsecurity-3.1-4.1.6-201508181953.patch b/4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch
similarity index 99%
rename from 4.1.6/4420_grsecurity-3.1-4.1.6-201508181953.patch
rename to 4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch
index ddef976..61bc4c1 100644
--- a/4.1.6/4420_grsecurity-3.1-4.1.6-201508181953.patch
+++ b/4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch
@@ -999,6 +999,20 @@ index 45df48b..952017a 100644
  	help
  	  kexec is a system call that implements the ability to shutdown your
  	  current kernel, and to start another kernel.  It is like a reboot
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 985227c..8acc029 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -304,6 +304,9 @@ INSTALL_TARGETS	= zinstall uinstall install
+ 
+ PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+ 
++bootpImage uImage: zImage
++zImage: Image
++
+ $(BOOT_TARGETS): vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+ 
 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
 index e22c119..abe7041 100644
 --- a/arch/arm/include/asm/atomic.h
@@ -19223,10 +19237,39 @@ index 70bbe39..4ae2bd4 100644
  
  void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
-index 751bf4b..a1278b5 100644
+index 751bf4b..3cc39f1 100644
 --- a/arch/x86/include/asm/switch_to.h
 +++ b/arch/x86/include/asm/switch_to.h
-@@ -112,7 +112,7 @@ do {									\
+@@ -79,12 +79,12 @@ do {									\
+ #else /* CONFIG_X86_32 */
+ 
+ /* frame pointer must be last for get_wchan */
+-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
++#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+ 
+ #define __EXTRA_CLOBBER  \
+ 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+-	  "r12", "r13", "r14", "r15", "flags"
++	  "r12", "r13", "r14", "r15"
+ 
+ #ifdef CONFIG_CC_STACKPROTECTOR
+ #define __switch_canary							  \
+@@ -100,11 +100,7 @@ do {									\
+ #define __switch_canary_iparam
+ #endif	/* CC_STACKPROTECTOR */
+ 
+-/*
+- * There is no need to save or restore flags, because flags are always
+- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
+- * has no effect.
+- */
++/* Save restore flags to clear handle leaking NT */
+ #define switch_to(prev, next, last) \
+ 	asm volatile(SAVE_CONTEXT					  \
+ 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
+@@ -112,7 +108,7 @@ do {									\
  	     "call __switch_to\n\t"					  \
  	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
  	     __switch_canary						  \
@@ -19235,7 +19278,7 @@ index 751bf4b..a1278b5 100644
  	     "movq %%rax,%%rdi\n\t" 					  \
  	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
  	     "jnz   ret_from_fork\n\t"					  \
-@@ -123,7 +123,7 @@ do {									\
+@@ -123,7 +119,7 @@ do {									\
  	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
  	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
  	       [_tif_fork] "i" (_TIF_FORK),			  	  \
@@ -48751,6 +48794,19 @@ index dce5f7b..2433466 100644
  
  #include "ftmac100.h"
  
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+index c754b20..c9da1b5 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
+ 
+ static inline bool fm10k_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
 index a92b772..250fe69 100644
 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48764,6 +48820,32 @@ index a92b772..250fe69 100644
  	smp_mb(); /* Force the above update. */
  }
  
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index a0a9b1f..3fe93e7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6584,7 +6584,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ 
+ static inline bool igb_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 5be12a0..463ff47 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
+ 
+ static inline bool ixgbe_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ /**
 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
 index e5ba040..d47531c 100644
 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -48777,6 +48859,19 @@ index e5ba040..d47531c 100644
  	smp_mb();
  
  	/* need lock to prevent incorrect read while modifying cyclecounter */
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index e71cdde..1d7b00b 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+ 
+ static inline bool ixgbevf_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ /**
 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
 index 74d0389..086ac03 100644
 --- a/drivers/net/ethernet/marvell/mvneta.c
@@ -97837,7 +97932,7 @@ index 3d385c8..deacb6a 100644
  static inline int
  vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 0755b9f..2960e96 100644
+index 0755b9f..bf8eab1 100644
 --- a/include/linux/mm.h
 +++ b/include/linux/mm.h
 @@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
@@ -97871,7 +97966,42 @@ index 0755b9f..2960e96 100644
  
  struct mmu_gather;
  struct inode;
-@@ -1131,8 +1137,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -1002,6 +1008,34 @@ static inline int page_mapped(struct page *page)
+ }
+ 
+ /*
++ * Return true only if the page has been allocated with
++ * ALLOC_NO_WATERMARKS and the low watermark was not
++ * met implying that the system is under some pressure.
++ */
++static inline bool page_is_pfmemalloc(struct page *page)
++{
++	/*
++	 * Page index cannot be this large so this must be
++	 * a pfmemalloc page.
++	 */
++	return page->index == -1UL;
++}
++
++/*
++ * Only to be called by the page allocator on a freshly allocated
++ * page.
++ */
++static inline void set_page_pfmemalloc(struct page *page)
++{
++	page->index = -1UL;
++}
++
++static inline void clear_page_pfmemalloc(struct page *page)
++{
++	page->index = 0;
++}
++
++/*
+  * Different kinds of faults, as returned by handle_mm_fault().
+  * Used to decide whether a process gets delivered SIGBUS or
+  * just gets major/minor fault counters bumped up.
+@@ -1131,8 +1165,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
  	unsigned long *pfn);
  int follow_phys(struct vm_area_struct *vma, unsigned long address,
  		unsigned int flags, unsigned long *prot, resource_size_t *phys);
@@ -97882,7 +98012,7 @@ index 0755b9f..2960e96 100644
  
  static inline void unmap_shared_mapping_range(struct address_space *mapping,
  		loff_t const holebegin, loff_t const holelen)
-@@ -1172,9 +1178,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+@@ -1172,9 +1206,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
  }
  #endif
  
@@ -97895,7 +98025,7 @@ index 0755b9f..2960e96 100644
  
  long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  		      unsigned long start, unsigned long nr_pages,
-@@ -1219,34 +1225,6 @@ int clear_page_dirty_for_io(struct page *page);
+@@ -1219,34 +1253,6 @@ int clear_page_dirty_for_io(struct page *page);
  
  int get_cmdline(struct task_struct *task, char *buffer, int buflen);
  
@@ -97930,7 +98060,7 @@ index 0755b9f..2960e96 100644
  extern struct task_struct *task_of_stack(struct task_struct *task,
  				struct vm_area_struct *vma, bool in_group);
  
-@@ -1369,8 +1347,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+@@ -1369,8 +1375,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
  {
  	return 0;
  }
@@ -97946,7 +98076,7 @@ index 0755b9f..2960e96 100644
  #endif
  
  #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-@@ -1380,6 +1365,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+@@ -1380,6 +1393,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
  	return 0;
  }
  
@@ -97959,7 +98089,7 @@ index 0755b9f..2960e96 100644
  static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
  
  static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
-@@ -1392,6 +1383,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+@@ -1392,6 +1411,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
  
  #else
  int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
@@ -97967,7 +98097,7 @@ index 0755b9f..2960e96 100644
  
  static inline void mm_nr_pmds_init(struct mm_struct *mm)
  {
-@@ -1429,11 +1421,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+@@ -1429,11 +1449,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
  		NULL: pud_offset(pgd, address);
  }
  
@@ -97991,7 +98121,7 @@ index 0755b9f..2960e96 100644
  #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
  
  #if USE_SPLIT_PTE_PTLOCKS
-@@ -1810,12 +1814,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+@@ -1810,12 +1842,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
  	bool *need_rmap_locks);
  extern void exit_mmap(struct mm_struct *);
  
@@ -98015,7 +98145,7 @@ index 0755b9f..2960e96 100644
  	if (rlim < RLIM_INFINITY) {
  		if (((new - start) + (end_data - start_data)) > rlim)
  			return -ENOSPC;
-@@ -1840,7 +1855,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1840,7 +1883,7 @@ extern int install_special_mapping(struct mm_struct *mm,
  				   unsigned long addr, unsigned long len,
  				   unsigned long flags, struct page **pages);
  
@@ -98024,7 +98154,7 @@ index 0755b9f..2960e96 100644
  
  extern unsigned long mmap_region(struct file *file, unsigned long addr,
  	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-@@ -1848,6 +1863,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1848,6 +1891,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot, unsigned long flags,
  	unsigned long pgoff, unsigned long *populate);
  extern int do_munmap(struct mm_struct *, unsigned long, size_t);
@@ -98032,7 +98162,7 @@ index 0755b9f..2960e96 100644
  
  #ifdef CONFIG_MMU
  extern int __mm_populate(unsigned long addr, unsigned long len,
-@@ -1876,10 +1892,11 @@ struct vm_unmapped_area_info {
+@@ -1876,10 +1920,11 @@ struct vm_unmapped_area_info {
  	unsigned long high_limit;
  	unsigned long align_mask;
  	unsigned long align_offset;
@@ -98046,7 +98176,7 @@ index 0755b9f..2960e96 100644
  
  /*
   * Search for an unmapped address range.
-@@ -1891,7 +1908,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+@@ -1891,7 +1936,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
   * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
   */
  static inline unsigned long
@@ -98055,7 +98185,7 @@ index 0755b9f..2960e96 100644
  {
  	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
  		return unmapped_area_topdown(info);
-@@ -1953,6 +1970,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+@@ -1953,6 +1998,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
  extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
  					     struct vm_area_struct **pprev);
  
@@ -98066,7 +98196,7 @@ index 0755b9f..2960e96 100644
  /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
     NULL if none.  Assume start_addr < end_addr. */
  static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1982,10 +2003,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+@@ -1982,10 +2031,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
  }
  
  #ifdef CONFIG_MMU
@@ -98079,7 +98209,7 @@ index 0755b9f..2960e96 100644
  {
  	return __pgprot(0);
  }
-@@ -2047,6 +2068,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+@@ -2047,6 +2096,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
  static inline void vm_stat_account(struct mm_struct *mm,
  			unsigned long flags, struct file *file, long pages)
  {
@@ -98091,7 +98221,7 @@ index 0755b9f..2960e96 100644
  	mm->total_vm += pages;
  }
  #endif /* CONFIG_PROC_FS */
-@@ -2149,7 +2175,7 @@ extern int unpoison_memory(unsigned long pfn);
+@@ -2149,7 +2203,7 @@ extern int unpoison_memory(unsigned long pfn);
  extern int sysctl_memory_failure_early_kill;
  extern int sysctl_memory_failure_recovery;
  extern void shake_page(struct page *p, int access);
@@ -98100,7 +98230,7 @@ index 0755b9f..2960e96 100644
  extern int soft_offline_page(struct page *page, int flags);
  
  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-@@ -2200,5 +2226,11 @@ void __init setup_nr_node_ids(void);
+@@ -2200,5 +2254,11 @@ void __init setup_nr_node_ids(void);
  static inline void setup_nr_node_ids(void) {}
  #endif
  
@@ -98113,10 +98243,26 @@ index 0755b9f..2960e96 100644
  #endif /* __KERNEL__ */
  #endif /* _LINUX_MM_H */
 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 8d37e26..6a6f55b 100644
+index 8d37e26..29c54c9 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
-@@ -313,7 +313,9 @@ struct vm_area_struct {
+@@ -63,15 +63,6 @@ struct page {
+ 		union {
+ 			pgoff_t index;		/* Our offset within mapping. */
+ 			void *freelist;		/* sl[aou]b first free object */
+-			bool pfmemalloc;	/* If set by the page allocator,
+-						 * ALLOC_NO_WATERMARKS was set
+-						 * and the low watermark was not
+-						 * met implying that the system
+-						 * is under some pressure. The
+-						 * caller should try ensure
+-						 * this page is only used to
+-						 * free other pages.
+-						 */
+ 		};
+ 
+ 		union {
+@@ -313,7 +304,9 @@ struct vm_area_struct {
  #ifdef CONFIG_NUMA
  	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
  #endif
@@ -98127,7 +98273,7 @@ index 8d37e26..6a6f55b 100644
  
  struct core_thread {
  	struct task_struct *task;
-@@ -466,7 +468,25 @@ struct mm_struct {
+@@ -466,7 +459,25 @@ struct mm_struct {
  	/* address of the bounds directory */
  	void __user *bd_addr;
  #endif
@@ -99784,7 +99930,7 @@ index ab1e039..ad4229e 100644
  
  static inline void disallow_signal(int sig)
 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index f15154a..72cf02c 100644
+index f15154a..17b985a 100644
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
 @@ -776,7 +776,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
@@ -99796,7 +99942,32 @@ index f15154a..72cf02c 100644
  					gfp_t priority)
  {
  	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -1971,7 +1971,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+@@ -1590,20 +1590,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 
+ 	/*
+-	 * Propagate page->pfmemalloc to the skb if we can. The problem is
+-	 * that not all callers have unique ownership of the page. If
+-	 * pfmemalloc is set, we check the mapping as a mapping implies
+-	 * page->index is set (index and pfmemalloc share space).
+-	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
+-	 * do not lose pfmemalloc information as the pages would not be
+-	 * allocated using __GFP_MEMALLOC.
++	 * Propagate page pfmemalloc to the skb if we can. The problem is
++	 * that not all callers have unique ownership of the page but rely
++	 * on page_is_pfmemalloc doing the right thing(tm).
+ 	 */
+ 	frag->page.p		  = page;
+ 	frag->page_offset	  = off;
+ 	skb_frag_size_set(frag, size);
+ 
+ 	page = compound_head(page);
+-	if (page->pfmemalloc && !page->mapping)
++	if (page_is_pfmemalloc(page))
+ 		skb->pfmemalloc	= true;
+ }
+ 
+@@ -1971,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
  	return skb->inner_transport_header - skb->inner_network_header;
  }
  
@@ -99805,7 +99976,7 @@ index f15154a..72cf02c 100644
  {
  	return skb_network_header(skb) - skb->data;
  }
-@@ -2031,7 +2031,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -2031,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
   * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
   */
  #ifndef NET_SKB_PAD
@@ -99814,7 +99985,16 @@ index f15154a..72cf02c 100644
  #endif
  
  int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2673,9 +2673,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+@@ -2250,7 +2246,7 @@ static inline struct page *dev_alloc_page(void)
+ static inline void skb_propagate_pfmemalloc(struct page *page,
+ 					     struct sk_buff *skb)
+ {
+-	if (page && page->pfmemalloc)
++	if (page_is_pfmemalloc(page))
+ 		skb->pfmemalloc = true;
+ }
+ 
+@@ -2673,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
  				  int *err);
  unsigned int datagram_poll(struct file *file, struct socket *sock,
  			   struct poll_table_struct *wait);
@@ -99826,7 +100006,7 @@ index f15154a..72cf02c 100644
  					struct msghdr *msg, int size)
  {
  	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
-@@ -3197,6 +3197,9 @@ static inline void nf_reset(struct sk_buff *skb)
+@@ -3197,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
  	nf_bridge_put(skb->nf_bridge);
  	skb->nf_bridge = NULL;
  #endif
@@ -113722,7 +113902,7 @@ index eb59f7e..b23a2a8 100644
  					unsigned long bg_thresh,
  					unsigned long dirty,
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index ebffa0e..c61160a 100644
+index ebffa0e..a5ae7f7 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -61,6 +61,7 @@
@@ -113827,7 +114007,25 @@ index ebffa0e..c61160a 100644
  
  	if (order && (gfp_flags & __GFP_COMP))
  		prep_compound_page(page, order);
-@@ -1649,6 +1689,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
+@@ -983,12 +1023,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+ 	set_page_owner(page, order, gfp_flags);
+ 
+ 	/*
+-	 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
++	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
+ 	 * allocate the page. The expectation is that the caller is taking
+ 	 * steps that will free more memory. The caller should avoid the page
+ 	 * being used for !PFMEMALLOC purposes.
+ 	 */
+-	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
++	if (alloc_flags & ALLOC_NO_WATERMARKS)
++		set_page_pfmemalloc(page);
++	else
++		clear_page_pfmemalloc(page);
+ 
+ 	return 0;
+ }
+@@ -1649,6 +1692,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
  	zone->free_area[order].nr_free--;
  	rmv_page_order(page);
  
@@ -113836,7 +114034,7 @@ index ebffa0e..c61160a 100644
  	/* Set the pageblock if the isolated page is at least a pageblock */
  	if (order >= pageblock_order - 1) {
  		struct page *endpage = page + (1 << order) - 1;
-@@ -1660,7 +1702,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
+@@ -1660,7 +1705,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
  		}
  	}
  
@@ -113845,7 +114043,7 @@ index ebffa0e..c61160a 100644
  	return 1UL << order;
  }
  
-@@ -1749,7 +1791,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -1749,7 +1794,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
  	}
  
  	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
@@ -113854,7 +114052,7 @@ index ebffa0e..c61160a 100644
  	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
  		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  
-@@ -2068,7 +2110,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
+@@ -2068,7 +2113,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
  	do {
  		mod_zone_page_state(zone, NR_ALLOC_BATCH,
  			high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -113863,7 +114061,7 @@ index ebffa0e..c61160a 100644
  		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  	} while (zone++ != preferred_zone);
  }
-@@ -5781,7 +5823,7 @@ static void __setup_per_zone_wmarks(void)
+@@ -5781,7 +5826,7 @@ static void __setup_per_zone_wmarks(void)
  
  		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
  			high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -114101,7 +114299,7 @@ index 47d536e..8321b4e 100644
  		return -ENOMEM;
  
 diff --git a/mm/slab.c b/mm/slab.c
-index 7eb38dd..5dee2c4 100644
+index 7eb38dd..0451459 100644
 --- a/mm/slab.c
 +++ b/mm/slab.c
 @@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -114148,6 +114346,24 @@ index 7eb38dd..5dee2c4 100644
  	slab_state = PARTIAL_NODE;
  
  	slab_early_init = 0;
+@@ -1602,7 +1606,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ 	}
+ 
+ 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
+-	if (unlikely(page->pfmemalloc))
++	if (page_is_pfmemalloc(page))
+ 		pfmemalloc_active = true;
+ 
+ 	nr_pages = (1 << cachep->gfporder);
+@@ -1613,7 +1617,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ 		add_zone_page_state(page_zone(page),
+ 			NR_SLAB_UNRECLAIMABLE, nr_pages);
+ 	__SetPageSlab(page);
+-	if (page->pfmemalloc)
++	if (page_is_pfmemalloc(page))
+ 		SetPageSlabPfmemalloc(page);
+ 
+ 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 @@ -2073,7 +2077,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
  
  	cachep = find_mergeable(size, align, flags, name, ctor);
@@ -114993,7 +115209,7 @@ index 4765f65..f17284d 100644
  EXPORT_SYMBOL(kmem_cache_free);
  
 diff --git a/mm/slub.c b/mm/slub.c
-index 54c0876..9fb1661 100644
+index 54c0876..61847f8 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -198,7 +198,7 @@ struct track {
@@ -115014,6 +115230,15 @@ index 54c0876..9fb1661 100644
  	       s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
  #ifdef CONFIG_STACKTRACE
  	{
+@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+ 	inc_slabs_node(s, page_to_nid(page), page->objects);
+ 	page->slab_cache = s;
+ 	__SetPageSlab(page);
+-	if (page->pfmemalloc)
++	if (page_is_pfmemalloc(page))
+ 		SetPageSlabPfmemalloc(page);
+ 
+ 	start = page_address(page);
 @@ -2707,6 +2707,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
  
  	slab_free_hook(s, x);
@@ -115904,6 +116129,26 @@ index c92b52f..006c052 100644
  	.kind		= "vlan",
  	.maxtype	= IFLA_VLAN_MAX,
  	.policy		= vlan_policy,
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 81925b9..fcf6fe0 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
+ 	struct p9_client *clnt = fid->clnt;
+ 	struct p9_req_t *req;
+ 	int total = 0;
++	*err = 0;
+ 
+ 	p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+ 		   fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
+@@ -1616,6 +1617,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 	struct p9_client *clnt = fid->clnt;
+ 	struct p9_req_t *req;
+ 	int total = 0;
++	*err = 0;
+ 
+ 	p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
+ 				fid->fid, (unsigned long long) offset,
 diff --git a/net/9p/mod.c b/net/9p/mod.c
 index 6ab36ae..6f1841b 100644
 --- a/net/9p/mod.c
@@ -117281,9 +117526,18 @@ index 3b6899b..cf36238 100644
  	{
  		struct socket *sock;
 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 41ec022..3cc0a1c 100644
+index 41ec022..89b1df7 100644
 --- a/net/core/skbuff.c
 +++ b/net/core/skbuff.c
+@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+ 
+ 	if (skb && frag_size) {
+ 		skb->head_frag = 1;
+-		if (virt_to_head_page(data)->pfmemalloc)
++		if (page_is_pfmemalloc(virt_to_head_page(data)))
+ 			skb->pfmemalloc = 1;
+ 	}
+ 	return skb;
 @@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
  __wsum skb_checksum(const struct sk_buff *skb, int offset,
  		    int len, __wsum csum)



* [gentoo-commits] proj/hardened-patchset:master commit in: 4.1.6/
@ 2015-09-12 14:35 Anthony G. Basile
  0 siblings, 0 replies; 2+ messages in thread
From: Anthony G. Basile @ 2015-09-12 14:35 UTC (permalink / raw)
  To: gentoo-commits

commit:     ae30efd8a6e286aea67d1524f3dcd76244bf36b2
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 12 14:39:20 2015 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sat Sep 12 14:39:20 2015 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=ae30efd8

grsecurity-3.1-4.1.6-201509112213

 4.1.6/0000_README                                  |    6 +-
 4.1.6/1005_linux-4.1.6.patch                       | 4380 --------------------
 ...> 4420_grsecurity-3.1-4.1.6-201509112213.patch} |  139 +-
 3 files changed, 89 insertions(+), 4436 deletions(-)

diff --git a/4.1.6/0000_README b/4.1.6/0000_README
index fe455ba..1d2e649 100644
--- a/4.1.6/0000_README
+++ b/4.1.6/0000_README
@@ -2,11 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	1005_linux-4.1.6.patch
-From:	http://www.kernel.org
-Desc:	Linux 4.1.6
-
-Patch:	4420_grsecurity-3.1-4.1.6-201508230818.patch
+Patch:	4420_grsecurity-3.1-4.1.6-201509112213.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.1.6/1005_linux-4.1.6.patch b/4.1.6/1005_linux-4.1.6.patch
deleted file mode 100644
index 0cc52e5..0000000
--- a/4.1.6/1005_linux-4.1.6.patch
+++ /dev/null
@@ -1,4380 +0,0 @@
-diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt
-index 225990f..47570d2 100644
---- a/Documentation/devicetree/bindings/clock/keystone-pll.txt
-+++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt
-@@ -15,8 +15,8 @@ Required properties:
- - compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock"
- - clocks : parent clock phandle
- - reg - pll control0 and pll multipler registers
--- reg-names : control and multiplier. The multiplier is applicable only for
--		main pll clock
-+- reg-names : control, multiplier and post-divider. The multiplier and
-+		post-divider registers are applicable only for main pll clock
- - fixed-postdiv : fixed post divider value. If absent, use clkod register bits
- 		for postdiv
- 
-@@ -25,8 +25,8 @@ Example:
- 		#clock-cells = <0>;
- 		compatible = "ti,keystone,main-pll-clock";
- 		clocks = <&refclksys>;
--		reg = <0x02620350 4>, <0x02310110 4>;
--		reg-names = "control", "multiplier";
-+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
-+		reg-names = "control", "multiplier", "post-divider";
- 		fixed-postdiv = <2>;
- 	};
- 
-diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
-index c86f2f1..1fec113 100644
---- a/Documentation/input/alps.txt
-+++ b/Documentation/input/alps.txt
-@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
-  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
- 
- Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
--the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
--buttons get reported separately in the PSM, PSR and PSL bits.
-+the DualPoint Stick. The M, R and L bits signal the combined status of both
-+the pointingstick and touchpad buttons, except for Dell dualpoint devices
-+where the pointingstick buttons get reported separately in the PSM, PSR
-+and PSL bits.
- 
- Dualpoint device -- interleaved packet format
- ---------------------------------------------
-diff --git a/Makefile b/Makefile
-index 068dd69..838dabc 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 1
--SUBLEVEL = 5
-+SUBLEVEL = 6
- EXTRAVERSION =
- NAME = Series 4800
- 
-diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
-index b6478e9..e6540b5 100644
---- a/arch/arm/boot/dts/imx35.dtsi
-+++ b/arch/arm/boot/dts/imx35.dtsi
-@@ -286,8 +286,8 @@
- 			can1: can@53fe4000 {
- 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
- 				reg = <0x53fe4000 0x1000>;
--				clocks = <&clks 33>;
--				clock-names = "ipg";
-+				clocks = <&clks 33>, <&clks 33>;
-+				clock-names = "ipg", "per";
- 				interrupts = <43>;
- 				status = "disabled";
- 			};
-@@ -295,8 +295,8 @@
- 			can2: can@53fe8000 {
- 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
- 				reg = <0x53fe8000 0x1000>;
--				clocks = <&clks 34>;
--				clock-names = "ipg";
-+				clocks = <&clks 34>, <&clks 34>;
-+				clock-names = "ipg", "per";
- 				interrupts = <44>;
- 				status = "disabled";
- 			};
-diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
-index 4773d6a..d56d68f 100644
---- a/arch/arm/boot/dts/k2e-clocks.dtsi
-+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
-@@ -13,9 +13,8 @@ clocks {
- 		#clock-cells = <0>;
- 		compatible = "ti,keystone,main-pll-clock";
- 		clocks = <&refclksys>;
--		reg = <0x02620350 4>, <0x02310110 4>;
--		reg-names = "control", "multiplier";
--		fixed-postdiv = <2>;
-+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
-+		reg-names = "control", "multiplier", "post-divider";
- 	};
- 
- 	papllclk: papllclk@2620358 {
-diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
-index d5adee3..af9b719 100644
---- a/arch/arm/boot/dts/k2hk-clocks.dtsi
-+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
-@@ -22,9 +22,8 @@ clocks {
- 		#clock-cells = <0>;
- 		compatible = "ti,keystone,main-pll-clock";
- 		clocks = <&refclksys>;
--		reg = <0x02620350 4>, <0x02310110 4>;
--		reg-names = "control", "multiplier";
--		fixed-postdiv = <2>;
-+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
-+		reg-names = "control", "multiplier", "post-divider";
- 	};
- 
- 	papllclk: papllclk@2620358 {
-diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
-index eb1e3e2..ef8464b 100644
---- a/arch/arm/boot/dts/k2l-clocks.dtsi
-+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
-@@ -22,9 +22,8 @@ clocks {
- 		#clock-cells = <0>;
- 		compatible = "ti,keystone,main-pll-clock";
- 		clocks = <&refclksys>;
--		reg = <0x02620350 4>, <0x02310110 4>;
--		reg-names = "control", "multiplier";
--		fixed-postdiv = <2>;
-+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
-+		reg-names = "control", "multiplier", "post-divider";
- 	};
- 
- 	papllclk: papllclk@2620358 {
-diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index 752969f..5286e77 100644
---- a/arch/arm/mach-omap2/omap_hwmod.c
-+++ b/arch/arm/mach-omap2/omap_hwmod.c
-@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
-  * registers.  This address is needed early so the OCP registers that
-  * are part of the device's address space can be ioremapped properly.
-  *
-+ * If SYSC access is not needed, the registers will not be remapped
-+ * and non-availability of MPU access is not treated as an error.
-+ *
-  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
-  * -ENXIO on absent or invalid register target address space.
-  */
-@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
- 
- 	_save_mpu_port_index(oh);
- 
-+	/* if we don't need sysc access we don't need to ioremap */
-+	if (!oh->class->sysc)
-+		return 0;
-+
-+	/* we can't continue without MPU PORT if we need sysc access */
- 	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
- 		return -ENXIO;
- 
-@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
- 			 oh->name);
- 
- 		/* Extract the IO space from device tree blob */
--		if (!np)
-+		if (!np) {
-+			pr_err("omap_hwmod: %s: no dt node\n", oh->name);
- 			return -ENXIO;
-+		}
- 
- 		va_start = of_iomap(np, index + oh->mpu_rt_idx);
- 	} else {
-@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
- 				oh->name, np->name);
- 	}
- 
--	if (oh->class->sysc) {
--		r = _init_mpu_rt_base(oh, NULL, index, np);
--		if (r < 0) {
--			WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
--			     oh->name);
--			return 0;
--		}
-+	r = _init_mpu_rt_base(oh, NULL, index, np);
-+	if (r < 0) {
-+		WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-+		     oh->name);
-+		return 0;
- 	}
- 
- 	r = _init_clocks(oh, NULL);
-diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
-index d26fcd4..c0cff34 100644
---- a/arch/arm64/kernel/signal32.c
-+++ b/arch/arm64/kernel/signal32.c
-@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
- 		 * Other callers might not initialize the si_lsb field,
- 		 * so check explicitely for the right codes here.
- 		 */
--		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
-+		if (from->si_signo == SIGBUS &&
-+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
- 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
- #endif
- 		break;
-@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
- 
- int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
- {
--	memset(to, 0, sizeof *to);
--
- 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
- 	    copy_from_user(to->_sifields._pad,
- 			   from->_sifields._pad, SI_PAD_SIZE))
-diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
-index 7fc8397..fd2a36a 100644
---- a/arch/mips/ath79/setup.c
-+++ b/arch/mips/ath79/setup.c
-@@ -186,6 +186,7 @@ int get_c0_perfcount_int(void)
- {
- 	return ATH79_MISC_IRQ(5);
- }
-+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
- 
- unsigned int get_c0_compare_int(void)
- {
-diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
-deleted file mode 100644
-index 11d3b57..0000000
---- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
-+++ /dev/null
-@@ -1,10 +0,0 @@
--#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
--#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
--
--#include <asm/bmips.h>
--
--#define plat_post_dma_flush	bmips_post_dma_flush
--
--#include <asm/mach-generic/dma-coherence.h>
--
--#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
-diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index 819af9d..70f6e7f 100644
---- a/arch/mips/include/asm/pgtable.h
-+++ b/arch/mips/include/asm/pgtable.h
-@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
- 		 * Make sure the buddy is global too (if it's !none,
- 		 * it better already be global)
- 		 */
-+#ifdef CONFIG_SMP
-+		/*
-+		 * For SMP, multiple CPUs can race, so we need to do
-+		 * this atomically.
-+		 */
-+#ifdef CONFIG_64BIT
-+#define LL_INSN "lld"
-+#define SC_INSN "scd"
-+#else /* CONFIG_32BIT */
-+#define LL_INSN "ll"
-+#define SC_INSN "sc"
-+#endif
-+		unsigned long page_global = _PAGE_GLOBAL;
-+		unsigned long tmp;
-+
-+		__asm__ __volatile__ (
-+			"	.set	push\n"
-+			"	.set	noreorder\n"
-+			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
-+			"	bnez	%[tmp], 2f\n"
-+			"	 or	%[tmp], %[tmp], %[global]\n"
-+			"	" SC_INSN "	%[tmp], %[buddy]\n"
-+			"	beqz	%[tmp], 1b\n"
-+			"	 nop\n"
-+			"2:\n"
-+			"	.set pop"
-+			: [buddy] "+m" (buddy->pte),
-+			  [tmp] "=&r" (tmp)
-+			: [global] "r" (page_global));
-+#else /* !CONFIG_SMP */
- 		if (pte_none(*buddy))
- 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
-+#endif /* CONFIG_SMP */
- 	}
- #endif
- }
-diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
-index 28d6d93..a71da57 100644
---- a/arch/mips/include/asm/stackframe.h
-+++ b/arch/mips/include/asm/stackframe.h
-@@ -152,6 +152,31 @@
- 		.set	noreorder
- 		bltz	k0, 8f
- 		 move	k1, sp
-+#ifdef CONFIG_EVA
-+		/*
-+		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
-+		 * EntryHi. Toggling Config7.RPS is slower and less portable.
-+		 *
-+		 * The RPS isn't automatically flushed when exceptions are
-+		 * taken, which can result in kernel mode speculative accesses
-+		 * to user addresses if the RPS mispredicts. That's harmless
-+		 * when user and kernel share the same address space, but with
-+		 * EVA the same user segments may be unmapped to kernel mode,
-+		 * even containing sensitive MMIO regions or invalid memory.
-+		 *
-+		 * This can happen when the kernel sets the return address to
-+		 * ret_from_* and jr's to the exception handler, which looks
-+		 * more like a tail call than a function call. If nested calls
-+		 * don't evict the last user address in the RPS, it will
-+		 * mispredict the return and fetch from a user controlled
-+		 * address into the icache.
-+		 *
-+		 * More recent EVA-capable cores with MAAR to restrict
-+		 * speculative accesses aren't affected.
-+		 */
-+		MFC0	k0, CP0_ENTRYHI
-+		MTC0	k0, CP0_ENTRYHI
-+#endif
- 		.set	reorder
- 		/* Called from user mode, new stack. */
- 		get_saved_sp
-diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
-index 3e4491a..789d7bf 100644
---- a/arch/mips/kernel/mips-mt-fpaff.c
-+++ b/arch/mips/kernel/mips-mt-fpaff.c
-@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
- 				      unsigned long __user *user_mask_ptr)
- {
- 	unsigned int real_len;
--	cpumask_t mask;
-+	cpumask_t allowed, mask;
- 	int retval;
- 	struct task_struct *p;
- 
-@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
- 	if (retval)
- 		goto out_unlock;
- 
--	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
-+	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
-+	cpumask_and(&mask, &allowed, cpu_active_mask);
- 
- out_unlock:
- 	read_unlock(&tasklist_lock);
-diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
-index 74bab9d..c6bbf21 100644
---- a/arch/mips/kernel/relocate_kernel.S
-+++ b/arch/mips/kernel/relocate_kernel.S
-@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
- 
- process_entry:
- 	PTR_L		s2, (s0)
--	PTR_ADD		s0, s0, SZREG
-+	PTR_ADDIU	s0, s0, SZREG
- 
- 	/*
- 	 * In case of a kdump/crash kernel, the indirection page is not
-@@ -61,9 +61,9 @@ copy_word:
- 	/* copy page word by word */
- 	REG_L		s5, (s2)
- 	REG_S		s5, (s4)
--	PTR_ADD		s4, s4, SZREG
--	PTR_ADD		s2, s2, SZREG
--	LONG_SUB	s6, s6, 1
-+	PTR_ADDIU	s4, s4, SZREG
-+	PTR_ADDIU	s2, s2, SZREG
-+	LONG_ADDIU	s6, s6, -1
- 	beq		s6, zero, process_entry
- 	b		copy_word
- 	b		process_entry
-diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
-index 19a7705..5d7f263 100644
---- a/arch/mips/kernel/signal32.c
-+++ b/arch/mips/kernel/signal32.c
-@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
- 
- int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
- {
--	memset(to, 0, sizeof *to);
--
- 	if (copy_from_user(to, from, 3*sizeof(int)) ||
- 	    copy_from_user(to->_sifields._pad,
- 			   from->_sifields._pad, SI_PAD_SIZE32))
-diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index d2d1c19..5f5f44e 100644
---- a/arch/mips/kernel/traps.c
-+++ b/arch/mips/kernel/traps.c
-@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
- void show_stack(struct task_struct *task, unsigned long *sp)
- {
- 	struct pt_regs regs;
-+	mm_segment_t old_fs = get_fs();
- 	if (sp) {
- 		regs.regs[29] = (unsigned long)sp;
- 		regs.regs[31] = 0;
-@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
- 			prepare_frametrace(&regs);
- 		}
- 	}
-+	/*
-+	 * show_stack() deals exclusively with kernel mode, so be sure to access
-+	 * the stack in the kernel (not user) address space.
-+	 */
-+	set_fs(KERNEL_DS);
- 	show_stacktrace(task, &regs);
-+	set_fs(old_fs);
- }
- 
- static void show_code(unsigned int __user *pc)
-@@ -1518,6 +1525,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
- 	const int field = 2 * sizeof(unsigned long);
- 	int multi_match = regs->cp0_status & ST0_TS;
- 	enum ctx_state prev_state;
-+	mm_segment_t old_fs = get_fs();
- 
- 	prev_state = exception_enter();
- 	show_regs(regs);
-@@ -1539,8 +1547,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
- 		dump_tlb_all();
- 	}
- 
-+	if (!user_mode(regs))
-+		set_fs(KERNEL_DS);
-+
- 	show_code((unsigned int __user *) regs->cp0_epc);
- 
-+	set_fs(old_fs);
-+
- 	/*
- 	 * Some chips may have other causes of machine check (e.g. SB1
- 	 * graduation timer)
-diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
-index af84bef..eb3efd1 100644
---- a/arch/mips/kernel/unaligned.c
-+++ b/arch/mips/kernel/unaligned.c
-@@ -438,7 +438,7 @@ do {                                                        \
- 		: "memory");                                \
- } while(0)
- 
--#define     StoreDW(addr, value, res) \
-+#define     _StoreDW(addr, value, res) \
- do {                                                        \
- 		__asm__ __volatile__ (                      \
- 			".set\tpush\n\t"		    \
-diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
-index 6ab1057..d01ade6 100644
---- a/arch/mips/lantiq/irq.c
-+++ b/arch/mips/lantiq/irq.c
-@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
- {
- 	return ltq_perfcount_irq;
- }
-+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
- 
- unsigned int get_c0_compare_int(void)
- {
-diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
-index 185e682..a7f7d9f 100644
---- a/arch/mips/mti-malta/malta-time.c
-+++ b/arch/mips/mti-malta/malta-time.c
-@@ -148,6 +148,7 @@ int get_c0_perfcount_int(void)
- 
- 	return mips_cpu_perf_irq;
- }
-+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
- 
- unsigned int get_c0_compare_int(void)
- {
-@@ -165,14 +166,17 @@ unsigned int get_c0_compare_int(void)
- 
- static void __init init_rtc(void)
- {
--	/* stop the clock whilst setting it up */
--	CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
-+	unsigned char freq, ctrl;
- 
--	/* 32KHz time base */
--	CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
-+	/* Set 32KHz time base if not already set */
-+	freq = CMOS_READ(RTC_FREQ_SELECT);
-+	if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
-+		CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
- 
--	/* start the clock */
--	CMOS_WRITE(RTC_24H, RTC_CONTROL);
-+	/* Ensure SET bit is clear so RTC can run */
-+	ctrl = CMOS_READ(RTC_CONTROL);
-+	if (ctrl & RTC_SET)
-+		CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
- }
- 
- void __init plat_time_init(void)
-diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
-index e1d6989..a120b7a 100644
---- a/arch/mips/mti-sead3/sead3-time.c
-+++ b/arch/mips/mti-sead3/sead3-time.c
-@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
- 		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
- 	return -1;
- }
-+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
- 
- unsigned int get_c0_compare_int(void)
- {
-diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
-index 67889fc..ab73f6f 100644
---- a/arch/mips/pistachio/time.c
-+++ b/arch/mips/pistachio/time.c
-@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
- {
- 	return gic_get_c0_perfcount_int();
- }
-+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
- 
- void __init plat_time_init(void)
- {
-diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
-index 7cf91b9..199ace4 100644
---- a/arch/mips/ralink/irq.c
-+++ b/arch/mips/ralink/irq.c
-@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
- {
- 	return rt_perfcount_irq;
- }
-+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
- 
- unsigned int get_c0_compare_int(void)
- {
-diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index d3a831a..da50e0c 100644
---- a/arch/powerpc/kernel/signal_32.c
-+++ b/arch/powerpc/kernel/signal_32.c
-@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
- 
- int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
- {
--	memset(to, 0, sizeof *to);
--
- 	if (copy_from_user(to, from, 3*sizeof(int)) ||
- 	    copy_from_user(to->_sifields._pad,
- 			   from->_sifields._pad, SI_PAD_SIZE32))
-diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
-index 1f0aa20..6424249 100644
---- a/arch/sparc/include/asm/visasm.h
-+++ b/arch/sparc/include/asm/visasm.h
-@@ -28,16 +28,10 @@
-  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
- 
- #define VISEntryHalf					\
--	rd		%fprs, %o5;			\
--	andcc		%o5, FPRS_FEF, %g0;		\
--	be,pt		%icc, 297f;			\
--	 sethi		%hi(298f), %g7;			\
--	sethi		%hi(VISenterhalf), %g1;		\
--	jmpl		%g1 + %lo(VISenterhalf), %g0;	\
--	 or		%g7, %lo(298f), %g7;		\
--	clr		%o5;				\
--297:	wr		%o5, FPRS_FEF, %fprs;		\
--298:
-+	VISEntry
-+
-+#define VISExitHalf					\
-+	VISExit
- 
- #define VISEntryHalfFast(fail_label)			\
- 	rd		%fprs, %o5;			\
-@@ -47,7 +41,7 @@
- 	ba,a,pt		%xcc, fail_label;		\
- 297:	wr		%o5, FPRS_FEF, %fprs;
- 
--#define VISExitHalf					\
-+#define VISExitHalfFast					\
- 	wr		%o5, 0, %fprs;
- 
- #ifndef __ASSEMBLY__
-diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
-index 140527a..83aeeb1 100644
---- a/arch/sparc/lib/NG4memcpy.S
-+++ b/arch/sparc/lib/NG4memcpy.S
-@@ -240,8 +240,11 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
- 	add		%o0, 0x40, %o0
- 	bne,pt		%icc, 1b
- 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
-+#ifdef NON_USER_COPY
-+	VISExitHalfFast
-+#else
- 	VISExitHalf
--
-+#endif
- 	brz,pn		%o2, .Lexit
- 	 cmp		%o2, 19
- 	ble,pn		%icc, .Lsmall_unaligned
-diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
-index b320ae9..a063d84 100644
---- a/arch/sparc/lib/VISsave.S
-+++ b/arch/sparc/lib/VISsave.S
-@@ -44,9 +44,8 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
- 
- 	 stx		%g3, [%g6 + TI_GSR]
- 2:	add		%g6, %g1, %g3
--	cmp		%o5, FPRS_DU
--	be,pn		%icc, 6f
--	 sll		%g1, 3, %g1
-+	mov		FPRS_DU | FPRS_DL | FPRS_FEF, %o5
-+	sll		%g1, 3, %g1
- 	stb		%o5, [%g3 + TI_FPSAVED]
- 	rd		%gsr, %g2
- 	add		%g6, %g1, %g3
-@@ -80,65 +79,3 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
- 	.align		32
- 80:	jmpl		%g7 + %g0, %g0
- 	 nop
--
--6:	ldub		[%g3 + TI_FPSAVED], %o5
--	or		%o5, FPRS_DU, %o5
--	add		%g6, TI_FPREGS+0x80, %g2
--	stb		%o5, [%g3 + TI_FPSAVED]
--
--	sll		%g1, 5, %g1
--	add		%g6, TI_FPREGS+0xc0, %g3
--	wr		%g0, FPRS_FEF, %fprs
--	membar		#Sync
--	stda		%f32, [%g2 + %g1] ASI_BLK_P
--	stda		%f48, [%g3 + %g1] ASI_BLK_P
--	membar		#Sync
--	ba,pt		%xcc, 80f
--	 nop
--
--	.align		32
--80:	jmpl		%g7 + %g0, %g0
--	 nop
--
--	.align		32
--VISenterhalf:
--	ldub		[%g6 + TI_FPDEPTH], %g1
--	brnz,a,pn	%g1, 1f
--	 cmp		%g1, 1
--	stb		%g0, [%g6 + TI_FPSAVED]
--	stx		%fsr, [%g6 + TI_XFSR]
--	clr		%o5
--	jmpl		%g7 + %g0, %g0
--	 wr		%g0, FPRS_FEF, %fprs
--
--1:	bne,pn		%icc, 2f
--	 srl		%g1, 1, %g1
--	ba,pt		%xcc, vis1
--	 sub		%g7, 8, %g7
--2:	addcc		%g6, %g1, %g3
--	sll		%g1, 3, %g1
--	andn		%o5, FPRS_DU, %g2
--	stb		%g2, [%g3 + TI_FPSAVED]
--
--	rd		%gsr, %g2
--	add		%g6, %g1, %g3
--	stx		%g2, [%g3 + TI_GSR]
--	add		%g6, %g1, %g2
--	stx		%fsr, [%g2 + TI_XFSR]
--	sll		%g1, 5, %g1
--3:	andcc		%o5, FPRS_DL, %g0
--	be,pn		%icc, 4f
--	 add		%g6, TI_FPREGS, %g2
--
--	add		%g6, TI_FPREGS+0x40, %g3
--	membar		#Sync
--	stda		%f0, [%g2 + %g1] ASI_BLK_P
--	stda		%f16, [%g3 + %g1] ASI_BLK_P
--	membar		#Sync
--	ba,pt		%xcc, 4f
--	 nop
--
--	.align		32
--4:	and		%o5, FPRS_DU, %o5
--	jmpl		%g7 + %g0, %g0
--	 wr		%o5, FPRS_FEF, %fprs
-diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index 1d649a9..8069ce1 100644
---- a/arch/sparc/lib/ksyms.c
-+++ b/arch/sparc/lib/ksyms.c
-@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
- void VISenter(void);
- EXPORT_SYMBOL(VISenter);
- 
--/* CRYPTO code needs this */
--void VISenterhalf(void);
--EXPORT_SYMBOL(VISenterhalf);
--
- extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
- extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
- 		unsigned long *);
-diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
-index e8c2c04..c667e10 100644
---- a/arch/tile/kernel/compat_signal.c
-+++ b/arch/tile/kernel/compat_signal.c
-@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
- 	if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
- 		return -EFAULT;
- 
--	memset(to, 0, sizeof(*to));
--
- 	err = __get_user(to->si_signo, &from->si_signo);
- 	err |= __get_user(to->si_errno, &from->si_errno);
- 	err |= __get_user(to->si_code, &from->si_code);
-diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 02c2eff..4bd6c19 100644
---- a/arch/x86/kernel/entry_64.S
-+++ b/arch/x86/kernel/entry_64.S
-@@ -793,8 +793,6 @@ retint_kernel:
- restore_c_regs_and_iret:
- 	RESTORE_C_REGS
- 	REMOVE_PT_GPREGS_FROM_STACK 8
--
--irq_return:
- 	INTERRUPT_RETURN
- 
- ENTRY(native_iret)
-@@ -1413,11 +1411,12 @@ ENTRY(nmi)
- 	 *  If the variable is not set and the stack is not the NMI
- 	 *  stack then:
- 	 *    o Set the special variable on the stack
--	 *    o Copy the interrupt frame into a "saved" location on the stack
--	 *    o Copy the interrupt frame into a "copy" location on the stack
-+	 *    o Copy the interrupt frame into an "outermost" location on the
-+	 *      stack
-+	 *    o Copy the interrupt frame into an "iret" location on the stack
- 	 *    o Continue processing the NMI
- 	 *  If the variable is set or the previous stack is the NMI stack:
--	 *    o Modify the "copy" location to jump to the repeate_nmi
-+	 *    o Modify the "iret" location to jump to the repeat_nmi
- 	 *    o return back to the first NMI
- 	 *
- 	 * Now on exit of the first NMI, we first clear the stack variable
-@@ -1426,32 +1425,151 @@ ENTRY(nmi)
- 	 * a nested NMI that updated the copy interrupt stack frame, a
- 	 * jump will be made to the repeat_nmi code that will handle the second
- 	 * NMI.
-+	 *
-+	 * However, espfix prevents us from directly returning to userspace
-+	 * with a single IRET instruction.  Similarly, IRET to user mode
-+	 * can fault.  We therefore handle NMIs from user space like
-+	 * other IST entries.
- 	 */
- 
- 	/* Use %rdx as our temp variable throughout */
- 	pushq_cfi %rdx
- 	CFI_REL_OFFSET rdx, 0
- 
-+	testb	$3, CS-RIP+8(%rsp)
-+	jz	.Lnmi_from_kernel
-+
- 	/*
--	 * If %cs was not the kernel segment, then the NMI triggered in user
--	 * space, which means it is definitely not nested.
-+	 * NMI from user mode.  We need to run on the thread stack, but we
-+	 * can't go through the normal entry paths: NMIs are masked, and
-+	 * we don't want to enable interrupts, because then we'll end
-+	 * up in an awkward situation in which IRQs are on but NMIs
-+	 * are off.
- 	 */
--	cmpl $__KERNEL_CS, 16(%rsp)
--	jne first_nmi
-+
-+	SWAPGS
-+	cld
-+	movq	%rsp, %rdx
-+	movq	PER_CPU_VAR(kernel_stack), %rsp
-+	pushq	5*8(%rdx)	/* pt_regs->ss */
-+	pushq	4*8(%rdx)	/* pt_regs->rsp */
-+	pushq	3*8(%rdx)	/* pt_regs->flags */
-+	pushq	2*8(%rdx)	/* pt_regs->cs */
-+	pushq	1*8(%rdx)	/* pt_regs->rip */
-+	pushq   $-1		/* pt_regs->orig_ax */
-+	pushq   %rdi		/* pt_regs->di */
-+	pushq   %rsi		/* pt_regs->si */
-+	pushq   (%rdx)		/* pt_regs->dx */
-+	pushq   %rcx		/* pt_regs->cx */
-+	pushq   %rax		/* pt_regs->ax */
-+	pushq   %r8		/* pt_regs->r8 */
-+	pushq   %r9		/* pt_regs->r9 */
-+	pushq   %r10		/* pt_regs->r10 */
-+	pushq   %r11		/* pt_regs->r11 */
-+	pushq	%rbx		/* pt_regs->rbx */
-+	pushq	%rbp		/* pt_regs->rbp */
-+	pushq	%r12		/* pt_regs->r12 */
-+	pushq	%r13		/* pt_regs->r13 */
-+	pushq	%r14		/* pt_regs->r14 */
-+	pushq	%r15		/* pt_regs->r15 */
- 
- 	/*
--	 * Check the special variable on the stack to see if NMIs are
--	 * executing.
-+	 * At this point we no longer need to worry about stack damage
-+	 * due to nesting -- we're on the normal thread stack and we're
-+	 * done with the NMI stack.
-+	 */
-+	movq	%rsp, %rdi
-+	movq	$-1, %rsi
-+	call	do_nmi
-+
-+	/*
-+	 * Return back to user mode.  We must *not* do the normal exit
-+	 * work, because we don't want to enable interrupts.  Fortunately,
-+	 * do_nmi doesn't modify pt_regs.
-+	 */
-+	SWAPGS
-+	jmp	restore_c_regs_and_iret
-+
-+.Lnmi_from_kernel:
-+	/*
-+	 * Here's what our stack frame will look like:
-+	 * +---------------------------------------------------------+
-+	 * | original SS                                             |
-+	 * | original Return RSP                                     |
-+	 * | original RFLAGS                                         |
-+	 * | original CS                                             |
-+	 * | original RIP                                            |
-+	 * +---------------------------------------------------------+
-+	 * | temp storage for rdx                                    |
-+	 * +---------------------------------------------------------+
-+	 * | "NMI executing" variable                                |
-+	 * +---------------------------------------------------------+
-+	 * | iret SS          } Copied from "outermost" frame        |
-+	 * | iret Return RSP  } on each loop iteration; overwritten  |
-+	 * | iret RFLAGS      } by a nested NMI to force another     |
-+	 * | iret CS          } iteration if needed.                 |
-+	 * | iret RIP         }                                      |
-+	 * +---------------------------------------------------------+
-+	 * | outermost SS          } initialized in first_nmi;       |
-+	 * | outermost Return RSP  } will not be changed before      |
-+	 * | outermost RFLAGS      } NMI processing is done.         |
-+	 * | outermost CS          } Copied to "iret" frame on each  |
-+	 * | outermost RIP         } iteration.                      |
-+	 * +---------------------------------------------------------+
-+	 * | pt_regs                                                 |
-+	 * +---------------------------------------------------------+
-+	 *
-+	 * The "original" frame is used by hardware.  Before re-enabling
-+	 * NMIs, we need to be done with it, and we need to leave enough
-+	 * space for the asm code here.
-+	 *
-+	 * We return by executing IRET while RSP points to the "iret" frame.
-+	 * That will either return for real or it will loop back into NMI
-+	 * processing.
-+	 *
-+	 * The "outermost" frame is copied to the "iret" frame on each
-+	 * iteration of the loop, so each iteration starts with the "iret"
-+	 * frame pointing to the final return target.
-+	 */
-+
-+	/*
-+	 * Determine whether we're a nested NMI.
-+	 *
-+	 * If we interrupted kernel code between repeat_nmi and
-+	 * end_repeat_nmi, then we are a nested NMI.  We must not
-+	 * modify the "iret" frame because it's being written by
-+	 * the outer NMI.  That's okay; the outer NMI handler is
-+	 * about to call do_nmi anyway, so we can just
-+	 * resume the outer NMI.
-+	 */
-+
-+	movq	$repeat_nmi, %rdx
-+	cmpq	8(%rsp), %rdx
-+	ja	1f
-+	movq	$end_repeat_nmi, %rdx
-+	cmpq	8(%rsp), %rdx
-+	ja	nested_nmi_out
-+1:
-+
-+	/*
-+	 * Now check "NMI executing".  If it's set, then we're nested.
-+	 * This will not detect if we interrupted an outer NMI just
-+	 * before IRET.
- 	 */
- 	cmpl $1, -8(%rsp)
- 	je nested_nmi
- 
- 	/*
--	 * Now test if the previous stack was an NMI stack.
--	 * We need the double check. We check the NMI stack to satisfy the
--	 * race when the first NMI clears the variable before returning.
--	 * We check the variable because the first NMI could be in a
--	 * breakpoint routine using a breakpoint stack.
-+	 * Now test if the previous stack was an NMI stack.  This covers
-+	 * the case where we interrupt an outer NMI after it clears
-+	 * "NMI executing" but before IRET.  We need to be careful, though:
-+	 * there is one case in which RSP could point to the NMI stack
-+	 * despite there being no NMI active: naughty userspace controls
-+	 * RSP at the very beginning of the SYSCALL targets.  We can
-+	 * pull a fast one on naughty userspace, though: we program
-+	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
-+	 * if it controls the kernel's RSP.  We set DF before we clear
-+	 * "NMI executing".
- 	 */
- 	lea	6*8(%rsp), %rdx
- 	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
-@@ -1462,25 +1580,21 @@ ENTRY(nmi)
- 	cmpq	%rdx, 4*8(%rsp)
- 	/* If it is below the NMI stack, it is a normal NMI */
- 	jb	first_nmi
--	/* Ah, it is within the NMI stack, treat it as nested */
-+
-+	/* Ah, it is within the NMI stack. */
-+
-+	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
-+	jz	first_nmi	/* RSP was user controlled. */
-+
-+	/* This is a nested NMI. */
- 
- 	CFI_REMEMBER_STATE
- 
- nested_nmi:
- 	/*
--	 * Do nothing if we interrupted the fixup in repeat_nmi.
--	 * It's about to repeat the NMI handler, so we are fine
--	 * with ignoring this one.
-+	 * Modify the "iret" frame to point to repeat_nmi, forcing another
-+	 * iteration of NMI handling.
- 	 */
--	movq $repeat_nmi, %rdx
--	cmpq 8(%rsp), %rdx
--	ja 1f
--	movq $end_repeat_nmi, %rdx
--	cmpq 8(%rsp), %rdx
--	ja nested_nmi_out
--
--1:
--	/* Set up the interrupted NMIs stack to jump to repeat_nmi */
- 	leaq -1*8(%rsp), %rdx
- 	movq %rdx, %rsp
- 	CFI_ADJUST_CFA_OFFSET 1*8
-@@ -1499,60 +1613,23 @@ nested_nmi_out:
- 	popq_cfi %rdx
- 	CFI_RESTORE rdx
- 
--	/* No need to check faults here */
-+	/* We are returning to kernel mode, so this cannot result in a fault. */
- 	INTERRUPT_RETURN
- 
- 	CFI_RESTORE_STATE
- first_nmi:
--	/*
--	 * Because nested NMIs will use the pushed location that we
--	 * stored in rdx, we must keep that space available.
--	 * Here's what our stack frame will look like:
--	 * +-------------------------+
--	 * | original SS             |
--	 * | original Return RSP     |
--	 * | original RFLAGS         |
--	 * | original CS             |
--	 * | original RIP            |
--	 * +-------------------------+
--	 * | temp storage for rdx    |
--	 * +-------------------------+
--	 * | NMI executing variable  |
--	 * +-------------------------+
--	 * | copied SS               |
--	 * | copied Return RSP       |
--	 * | copied RFLAGS           |
--	 * | copied CS               |
--	 * | copied RIP              |
--	 * +-------------------------+
--	 * | Saved SS                |
--	 * | Saved Return RSP        |
--	 * | Saved RFLAGS            |
--	 * | Saved CS                |
--	 * | Saved RIP               |
--	 * +-------------------------+
--	 * | pt_regs                 |
--	 * +-------------------------+
--	 *
--	 * The saved stack frame is used to fix up the copied stack frame
--	 * that a nested NMI may change to make the interrupted NMI iret jump
--	 * to the repeat_nmi. The original stack frame and the temp storage
--	 * is also used by nested NMIs and can not be trusted on exit.
--	 */
--	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
-+	/* Restore rdx. */
- 	movq (%rsp), %rdx
- 	CFI_RESTORE rdx
- 
--	/* Set the NMI executing variable on the stack. */
-+	/* Set "NMI executing" on the stack. */
- 	pushq_cfi $1
- 
--	/*
--	 * Leave room for the "copied" frame
--	 */
-+	/* Leave room for the "iret" frame */
- 	subq $(5*8), %rsp
- 	CFI_ADJUST_CFA_OFFSET 5*8
- 
--	/* Copy the stack frame to the Saved frame */
-+	/* Copy the "original" frame to the "outermost" frame */
- 	.rept 5
- 	pushq_cfi 11*8(%rsp)
- 	.endr
-@@ -1560,6 +1637,7 @@ first_nmi:
- 
- 	/* Everything up to here is safe from nested NMIs */
- 
-+repeat_nmi:
- 	/*
- 	 * If there was a nested NMI, the first NMI's iret will return
- 	 * here. But NMIs are still enabled and we can take another
-@@ -1568,16 +1646,21 @@ first_nmi:
- 	 * it will just return, as we are about to repeat an NMI anyway.
- 	 * This makes it safe to copy to the stack frame that a nested
- 	 * NMI will update.
--	 */
--repeat_nmi:
--	/*
--	 * Update the stack variable to say we are still in NMI (the update
--	 * is benign for the non-repeat case, where 1 was pushed just above
--	 * to this very stack slot).
-+	 *
-+	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
-+	 * we're repeating an NMI, gsbase has the same value that it had on
-+	 * the first iteration.  paranoid_entry will load the kernel
-+	 * gsbase if needed before we call do_nmi.
-+	 *
-+	 * Set "NMI executing" in case we came back here via IRET.
- 	 */
- 	movq $1, 10*8(%rsp)
- 
--	/* Make another copy, this one may be modified by nested NMIs */
-+	/*
-+	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
-+	 * here must not modify the "iret" frame while we're writing to
-+	 * it or it will end up containing garbage.
-+	 */
- 	addq $(10*8), %rsp
- 	CFI_ADJUST_CFA_OFFSET -10*8
- 	.rept 5
-@@ -1588,9 +1671,9 @@ repeat_nmi:
- end_repeat_nmi:
- 
- 	/*
--	 * Everything below this point can be preempted by a nested
--	 * NMI if the first NMI took an exception and reset our iret stack
--	 * so that we repeat another NMI.
-+	 * Everything below this point can be preempted by a nested NMI.
-+	 * If this happens, then the inner NMI will change the "iret"
-+	 * frame to point back to repeat_nmi.
- 	 */
- 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
- 	ALLOC_PT_GPREGS_ON_STACK
-@@ -1605,29 +1688,11 @@ end_repeat_nmi:
- 	call paranoid_entry
- 	DEFAULT_FRAME 0
- 
--	/*
--	 * Save off the CR2 register. If we take a page fault in the NMI then
--	 * it could corrupt the CR2 value. If the NMI preempts a page fault
--	 * handler before it was able to read the CR2 register, and then the
--	 * NMI itself takes a page fault, the page fault that was preempted
--	 * will read the information from the NMI page fault and not the
--	 * origin fault. Save it off and restore it if it changes.
--	 * Use the r12 callee-saved register.
--	 */
--	movq %cr2, %r12
--
- 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
- 	movq %rsp,%rdi
- 	movq $-1,%rsi
- 	call do_nmi
- 
--	/* Did the NMI take a page fault? Restore cr2 if it did */
--	movq %cr2, %rcx
--	cmpq %rcx, %r12
--	je 1f
--	movq %r12, %cr2
--1:
--	
- 	testl %ebx,%ebx				/* swapgs needed? */
- 	jnz nmi_restore
- nmi_swapgs:
-@@ -1635,12 +1700,27 @@ nmi_swapgs:
- nmi_restore:
- 	RESTORE_EXTRA_REGS
- 	RESTORE_C_REGS
--	/* Pop the extra iret frame at once */
-+
-+	/* Point RSP at the "iret" frame. */
- 	REMOVE_PT_GPREGS_FROM_STACK 6*8
- 
--	/* Clear the NMI executing stack variable */
--	movq $0, 5*8(%rsp)
--	jmp irq_return
-+	/*
-+	 * Clear "NMI executing".  Set DF first so that we can easily
-+	 * distinguish the remaining code between here and IRET from
-+	 * the SYSCALL entry and exit paths.  On a native kernel, we
-+	 * could just inspect RIP, but, on paravirt kernels,
-+	 * INTERRUPT_RETURN can translate into a jump into a
-+	 * hypercall page.
-+	 */
-+	std
-+	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
-+
-+	/*
-+	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
-+	 * stack in a single instruction.  We are returning to kernel
-+	 * mode, so this cannot result in a fault.
-+	 */
-+	INTERRUPT_RETURN
- 	CFI_ENDPROC
- END(nmi)
- 
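The nested-NMI detection above ends with a byte-wide test of the saved RFLAGS:
testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) looks at byte 1 of the flags image
pushed by the CPU.  The userspace C sketch below is not part of the patch; it
only demonstrates, with a made-up rflags value, that testing
(X86_EFLAGS_DF >> 8) against that byte is equivalent to testing bit 10 of the
full register.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define X86_EFLAGS_DF 0x0400UL		/* direction flag, bit 10 */

int main(void)
{
	uint64_t rflags = 0x0000000000000446UL;	/* DF set, plus a few other bits */
	unsigned char bytes[8];

	memcpy(bytes, &rflags, sizeof(bytes));	/* little-endian, like the stack image */

	/* The asm tests (X86_EFLAGS_DF >> 8) against byte 1 of the saved RFLAGS. */
	int df_via_byte = (bytes[1] & (X86_EFLAGS_DF >> 8)) != 0;
	int df_via_flags = (rflags & X86_EFLAGS_DF) != 0;

	assert(df_via_byte == df_via_flags);
	printf("DF set: %d\n", df_via_byte);
	return 0;
}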
-diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
-index c3e985d..d05bd2e 100644
---- a/arch/x86/kernel/nmi.c
-+++ b/arch/x86/kernel/nmi.c
-@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
- NOKPROBE_SYMBOL(default_do_nmi);
- 
- /*
-- * NMIs can hit breakpoints which will cause it to lose its
-- * NMI context with the CPU when the breakpoint does an iret.
-- */
--#ifdef CONFIG_X86_32
--/*
-- * For i386, NMIs use the same stack as the kernel, and we can
-- * add a workaround to the iret problem in C (preventing nested
-- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
-- * can be in:
-+ * NMIs can page fault or hit breakpoints, which will cause them to lose
-+ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
-+ *
-+ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
-+ * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
-+ * if the outer NMI came from kernel mode, but we can still nest if the
-+ * outer NMI came from user mode.
-+ *
-+ * To handle these nested NMIs, we have three states:
-  *
-  *  1) not running
-  *  2) executing
-@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
-  * (Note, the latch is binary, thus multiple NMIs triggering,
-  *  when one is running, are ignored. Only one NMI is restarted.)
-  *
-- * If an NMI hits a breakpoint that executes an iret, another
-- * NMI can preempt it. We do not want to allow this new NMI
-- * to run, but we want to execute it when the first one finishes.
-- * We set the state to "latched", and the exit of the first NMI will
-- * perform a dec_return, if the result is zero (NOT_RUNNING), then
-- * it will simply exit the NMI handler. If not, the dec_return
-- * would have set the state to NMI_EXECUTING (what we want it to
-- * be when we are running). In this case, we simply jump back
-- * to rerun the NMI handler again, and restart the 'latched' NMI.
-+ * If an NMI executes an iret, another NMI can preempt it. We do not
-+ * want to allow this new NMI to run, but we want to execute it when the
-+ * first one finishes.  We set the state to "latched", and the exit of
-+ * the first NMI will perform a dec_return, if the result is zero
-+ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
-+ * dec_return would have set the state to NMI_EXECUTING (what we want it
-+ * to be when we are running). In this case, we simply jump back to
-+ * rerun the NMI handler again, and restart the 'latched' NMI.
-  *
-  * No trap (breakpoint or page fault) should be hit before nmi_restart,
-  * thus there is no race between the first check of state for NOT_RUNNING
-@@ -461,49 +460,36 @@ enum nmi_states {
- static DEFINE_PER_CPU(enum nmi_states, nmi_state);
- static DEFINE_PER_CPU(unsigned long, nmi_cr2);
- 
--#define nmi_nesting_preprocess(regs)					\
--	do {								\
--		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {	\
--			this_cpu_write(nmi_state, NMI_LATCHED);		\
--			return;						\
--		}							\
--		this_cpu_write(nmi_state, NMI_EXECUTING);		\
--		this_cpu_write(nmi_cr2, read_cr2());			\
--	} while (0);							\
--	nmi_restart:
--
--#define nmi_nesting_postprocess()					\
--	do {								\
--		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))	\
--			write_cr2(this_cpu_read(nmi_cr2));		\
--		if (this_cpu_dec_return(nmi_state))			\
--			goto nmi_restart;				\
--	} while (0)
--#else /* x86_64 */
-+#ifdef CONFIG_X86_64
- /*
-- * In x86_64 things are a bit more difficult. This has the same problem
-- * where an NMI hitting a breakpoint that calls iret will remove the
-- * NMI context, allowing a nested NMI to enter. What makes this more
-- * difficult is that both NMIs and breakpoints have their own stack.
-- * When a new NMI or breakpoint is executed, the stack is set to a fixed
-- * point. If an NMI is nested, it will have its stack set at that same
-- * fixed address that the first NMI had, and will start corrupting the
-- * stack. This is handled in entry_64.S, but the same problem exists with
-- * the breakpoint stack.
-+ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
-+ * some care, the inner breakpoint will clobber the outer breakpoint's
-+ * stack.
-  *
-- * If a breakpoint is being processed, and the debug stack is being used,
-- * if an NMI comes in and also hits a breakpoint, the stack pointer
-- * will be set to the same fixed address as the breakpoint that was
-- * interrupted, causing that stack to be corrupted. To handle this case,
-- * check if the stack that was interrupted is the debug stack, and if
-- * so, change the IDT so that new breakpoints will use the current stack
-- * and not switch to the fixed address. On return of the NMI, switch back
-- * to the original IDT.
-+ * If a breakpoint is being processed, and the debug stack is being
-+ * used, if an NMI comes in and also hits a breakpoint, the stack
-+ * pointer will be set to the same fixed address as the breakpoint that
-+ * was interrupted, causing that stack to be corrupted. To handle this
-+ * case, check if the stack that was interrupted is the debug stack, and
-+ * if so, change the IDT so that new breakpoints will use the current
-+ * stack and not switch to the fixed address. On return of the NMI,
-+ * switch back to the original IDT.
-  */
- static DEFINE_PER_CPU(int, update_debug_stack);
-+#endif
- 
--static inline void nmi_nesting_preprocess(struct pt_regs *regs)
-+dotraplinkage notrace void
-+do_nmi(struct pt_regs *regs, long error_code)
- {
-+	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
-+		this_cpu_write(nmi_state, NMI_LATCHED);
-+		return;
-+	}
-+	this_cpu_write(nmi_state, NMI_EXECUTING);
-+	this_cpu_write(nmi_cr2, read_cr2());
-+nmi_restart:
-+
-+#ifdef CONFIG_X86_64
- 	/*
- 	 * If we interrupted a breakpoint, it is possible that
- 	 * the nmi handler will have breakpoints too. We need to
-@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
- 		debug_stack_set_zero();
- 		this_cpu_write(update_debug_stack, 1);
- 	}
--}
--
--static inline void nmi_nesting_postprocess(void)
--{
--	if (unlikely(this_cpu_read(update_debug_stack))) {
--		debug_stack_reset();
--		this_cpu_write(update_debug_stack, 0);
--	}
--}
- #endif
- 
--dotraplinkage notrace void
--do_nmi(struct pt_regs *regs, long error_code)
--{
--	nmi_nesting_preprocess(regs);
--
- 	nmi_enter();
- 
- 	inc_irq_stat(__nmi_count);
-@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
- 
- 	nmi_exit();
- 
--	/* On i386, may loop back to preprocess */
--	nmi_nesting_postprocess();
-+#ifdef CONFIG_X86_64
-+	if (unlikely(this_cpu_read(update_debug_stack))) {
-+		debug_stack_reset();
-+		this_cpu_write(update_debug_stack, 0);
-+	}
-+#endif
-+
-+	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
-+		write_cr2(this_cpu_read(nmi_cr2));
-+	if (this_cpu_dec_return(nmi_state))
-+		goto nmi_restart;
- }
- NOKPROBE_SYMBOL(do_nmi);
- 
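The reworked do_nmi() above folds the old i386-only nesting logic into one
state machine.  A toy, single-threaded C model of that machine is sketched
below; it is not kernel code, the per-CPU state is a plain global, nmi_body()
stands in for the real handler, and one recursive call simulates a nested NMI
that can only latch.

#include <stdio.h>

enum { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static int nmi_state;
static int pending_nested = 1;

static void do_nmi_model(void);

static void nmi_body(void)
{
	printf("handling NMI (state=%d)\n", nmi_state);
	if (pending_nested) {
		pending_nested = 0;
		do_nmi_model();		/* the nested NMI only sets the latch */
	}
}

static void do_nmi_model(void)
{
	if (nmi_state != NMI_NOT_RUNNING) {
		nmi_state = NMI_LATCHED;
		return;
	}
	nmi_state = NMI_EXECUTING;
nmi_restart:
	nmi_body();
	/* dec_return: LATCHED (2) drops to EXECUTING (1) and we rerun,
	 * EXECUTING (1) drops to NOT_RUNNING (0) and we are done. */
	if (--nmi_state)
		goto nmi_restart;
}

int main(void)
{
	do_nmi_model();			/* prints "handling NMI" twice */
	return 0;
}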
-diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
-index 9d28383..c4ea87e 100644
---- a/arch/x86/kvm/lapic.h
-+++ b/arch/x86/kvm/lapic.h
-@@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
- 
- static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
- {
--	return vcpu->arch.apic->pending_events;
-+	return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
- }
- 
- bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 46957ea..a671e83 100644
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- 	pte_t pte;
- 	unsigned long pfn;
- 	struct page *page;
-+	unsigned char dummy;
- 
- 	ptep = lookup_address((unsigned long)v, &level);
- 	BUG_ON(ptep == NULL);
-@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- 
- 	pte = pfn_pte(pfn, prot);
- 
-+	/*
-+	 * Careful: update_va_mapping() will fail if the virtual address
-+	 * we're poking isn't populated in the page tables.  We don't
-+	 * need to worry about the direct map (that's always in the page
-+	 * tables), but we need to be careful about vmap space.  In
-+	 * particular, the top level page table can lazily propagate
-+	 * entries between processes, so if we've switched mms since we
-+	 * vmapped the target in the first place, we might not have the
-+	 * top-level page table entry populated.
-+	 *
-+	 * We disable preemption because we want the same mm active when
-+	 * we probe the target and when we issue the hypercall.  We'll
-+	 * have the same nominal mm, but if we're a kernel thread, lazy
-+	 * mm dropping could change our pgd.
-+	 *
-+	 * Out of an abundance of caution, this uses __get_user() to fault
-+	 * in the target address just in case there's some obscure case
-+	 * in which the target address isn't readable.
-+	 */
-+
-+	preempt_disable();
-+
-+	pagefault_disable();	/* Avoid warnings due to being atomic. */
-+	__get_user(dummy, (unsigned char __user __force *)v);
-+	pagefault_enable();
-+
- 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
- 		BUG();
- 
-@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- 				BUG();
- 	} else
- 		kmap_flush_unused();
-+
-+	preempt_enable();
- }
- 
- static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
-@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
- 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
- 	int i;
- 
-+	/*
-+	 * We need to mark all the aliases of the LDT pages RO.  We
-+	 * don't need to call vm_flush_aliases(), though, since that's
-+	 * only responsible for flushing aliases out of the TLBs, not the
-+	 * page tables, and Xen will flush the TLB for us if needed.
-+	 *
-+	 * To avoid confusing future readers: none of this is necessary
-+	 * to load the LDT.  The hypervisor only checks this when the
-+	 * LDT is faulted in due to subsequent descriptor access.
-+	 */
-+
- 	for(i = 0; i < entries; i += entries_per_page)
- 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
- }
-diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
-index 53f2535..010ce0b 100644
---- a/drivers/block/rbd.c
-+++ b/drivers/block/rbd.c
-@@ -522,6 +522,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
- #  define rbd_assert(expr)	((void) 0)
- #endif /* !RBD_DEBUG */
- 
-+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
- static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
- static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
- static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
-@@ -1797,6 +1798,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
- 	obj_request_done_set(obj_request);
- }
- 
-+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
-+{
-+	dout("%s: obj %p\n", __func__, obj_request);
-+
-+	if (obj_request_img_data_test(obj_request))
-+		rbd_osd_copyup_callback(obj_request);
-+	else
-+		obj_request_done_set(obj_request);
-+}
-+
- static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
- 				struct ceph_msg *msg)
- {
-@@ -1845,6 +1856,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
- 		rbd_osd_discard_callback(obj_request);
- 		break;
- 	case CEPH_OSD_OP_CALL:
-+		rbd_osd_call_callback(obj_request);
-+		break;
- 	case CEPH_OSD_OP_NOTIFY_ACK:
- 	case CEPH_OSD_OP_WATCH:
- 		rbd_osd_trivial_callback(obj_request);
-@@ -2509,13 +2522,15 @@ out_unwind:
- }
- 
- static void
--rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
-+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
- {
- 	struct rbd_img_request *img_request;
- 	struct rbd_device *rbd_dev;
- 	struct page **pages;
- 	u32 page_count;
- 
-+	dout("%s: obj %p\n", __func__, obj_request);
-+
- 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
- 		obj_request->type == OBJ_REQUEST_NODATA);
- 	rbd_assert(obj_request_img_data_test(obj_request));
-@@ -2542,9 +2557,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
- 	if (!obj_request->result)
- 		obj_request->xferred = obj_request->length;
- 
--	/* Finish up with the normal image object callback */
--
--	rbd_img_obj_callback(obj_request);
-+	obj_request_done_set(obj_request);
- }
- 
- static void
-@@ -2629,7 +2642,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
- 
- 	/* All set, send it off. */
- 
--	orig_request->callback = rbd_img_obj_copyup_callback;
- 	osdc = &rbd_dev->rbd_client->client->osdc;
- 	img_result = rbd_obj_request_submit(osdc, orig_request);
- 	if (!img_result)
-diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
-index da8faf7..5643b65 100644
---- a/drivers/char/hw_random/core.c
-+++ b/drivers/char/hw_random/core.c
-@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
- static void start_khwrngd(void)
- {
- 	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
--	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
-+	if (IS_ERR(hwrng_fill)) {
- 		pr_err("hwrng_fill thread creation failed");
- 		hwrng_fill = NULL;
- 	}
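The hw_random fix above replaces an equality test against one specific error
pointer with IS_ERR(), since kthread_run() can fail with codes other than
-ENOMEM.  The userspace C sketch below re-creates the ERR_PTR()/IS_ERR()
helpers only to illustrate the difference; it is not the kernel
implementation.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *task = ERR_PTR(-EINTR);	/* a failure that is not -ENOMEM */

	printf("== ERR_PTR(-ENOMEM): %d\n", task == ERR_PTR(-ENOMEM));	/* 0: missed */
	printf("IS_ERR():            %d\n", IS_ERR(task));		/* 1: caught */
	return 0;
}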
-diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
-index a43048b..3c1a123 100644
---- a/drivers/char/i8k.c
-+++ b/drivers/char/i8k.c
-@@ -900,6 +900,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
- 
- MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
- 
-+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
-+	{
-+		/*
-+		 * CPU fan speed going up and down on Dell Studio XPS 8100
-+		 * for unknown reasons.
-+		 */
-+		.ident = "Dell Studio XPS 8100",
-+		.matches = {
-+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
-+		},
-+	},
-+	{ }
-+};
-+
- /*
-  * Probe for the presence of a supported laptop.
-  */
-@@ -911,7 +926,8 @@ static int __init i8k_probe(void)
- 	/*
- 	 * Get DMI information
- 	 */
--	if (!dmi_check_system(i8k_dmi_table)) {
-+	if (!dmi_check_system(i8k_dmi_table) ||
-+	    dmi_check_system(i8k_blacklist_dmi_table)) {
- 		if (!ignore_dmi && !force)
- 			return -ENODEV;
- 
-diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
-index 0dd8a4b..4a375ea 100644
---- a/drivers/clk/keystone/pll.c
-+++ b/drivers/clk/keystone/pll.c
-@@ -37,7 +37,8 @@
-  *	Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL
-  *	or PA PLL available on keystone2. These PLLs are controlled by
-  *	this register. Main PLL is controlled by a PLL controller.
-- * @pllm: PLL register map address
-+ * @pllm: PLL register map address for multiplier bits
-+ * @pllod: PLL register map address for post divider bits
-  * @pll_ctl0: PLL controller map address
-  * @pllm_lower_mask: multiplier lower mask
-  * @pllm_upper_mask: multiplier upper mask
-@@ -53,6 +54,7 @@ struct clk_pll_data {
- 	u32 phy_pllm;
- 	u32 phy_pll_ctl0;
- 	void __iomem *pllm;
-+	void __iomem *pllod;
- 	void __iomem *pll_ctl0;
- 	u32 pllm_lower_mask;
- 	u32 pllm_upper_mask;
-@@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
- 		/* read post divider from od bits*/
- 		postdiv = ((val & pll_data->clkod_mask) >>
- 				 pll_data->clkod_shift) + 1;
--	else
-+	else if (pll_data->pllod) {
-+		postdiv = readl(pll_data->pllod);
-+		postdiv = ((postdiv & pll_data->clkod_mask) >>
-+				pll_data->clkod_shift) + 1;
-+	} else
- 		postdiv = pll_data->postdiv;
- 
- 	rate /= (prediv + 1);
-@@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
- 		/* assume the PLL has output divider register bits */
- 		pll_data->clkod_mask = CLKOD_MASK;
- 		pll_data->clkod_shift = CLKOD_SHIFT;
-+
-+		/*
-+		 * Check if there is a post-divider register. If not,
-+		 * assume the od bits are part of the control register.
-+		 */
-+		i = of_property_match_string(node, "reg-names",
-+					     "post-divider");
-+		pll_data->pllod = of_iomap(node, i);
- 	}
- 
- 	i = of_property_match_string(node, "reg-names", "control");
- 	pll_data->pll_ctl0 = of_iomap(node, i);
- 	if (!pll_data->pll_ctl0) {
- 		pr_err("%s: ioremap failed\n", __func__);
-+		iounmap(pll_data->pllod);
- 		goto out;
- 	}
- 
-@@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
- 		pll_data->pllm = of_iomap(node, i);
- 		if (!pll_data->pllm) {
- 			iounmap(pll_data->pll_ctl0);
-+			iounmap(pll_data->pllod);
- 			goto out;
- 		}
- 	}
-diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
-index 48f4535..ede9e9e3 100644
---- a/drivers/crypto/ixp4xx_crypto.c
-+++ b/drivers/crypto/ixp4xx_crypto.c
-@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
- 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
- 		/* This was never tested by Intel
- 		 * for more than one dst buffer, I think. */
--		BUG_ON(req->dst->length < nbytes);
- 		req_ctx->dst = NULL;
- 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
- 					flags, DMA_FROM_DEVICE))
-diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
-index 67f8081..e4311ce 100644
---- a/drivers/crypto/nx/nx-aes-ccm.c
-+++ b/drivers/crypto/nx/nx-aes-ccm.c
-@@ -494,8 +494,9 @@ out:
- static int ccm4309_aes_nx_encrypt(struct aead_request *req)
- {
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- 	struct blkcipher_desc desc;
--	u8 *iv = nx_ctx->priv.ccm.iv;
-+	u8 *iv = rctx->iv;
- 
- 	iv[0] = 3;
- 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
-@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
- static int ccm4309_aes_nx_decrypt(struct aead_request *req)
- {
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- 	struct blkcipher_desc desc;
--	u8 *iv = nx_ctx->priv.ccm.iv;
-+	u8 *iv = rctx->iv;
- 
- 	iv[0] = 3;
- 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
-diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
-index 2617cd4..dd7e9f3 100644
---- a/drivers/crypto/nx/nx-aes-ctr.c
-+++ b/drivers/crypto/nx/nx-aes-ctr.c
-@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
- 	if (key_len < CTR_RFC3686_NONCE_SIZE)
- 		return -EINVAL;
- 
--	memcpy(nx_ctx->priv.ctr.iv,
-+	memcpy(nx_ctx->priv.ctr.nonce,
- 	       in_key + key_len - CTR_RFC3686_NONCE_SIZE,
- 	       CTR_RFC3686_NONCE_SIZE);
- 
-@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
- 				unsigned int           nbytes)
- {
- 	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
--	u8 *iv = nx_ctx->priv.ctr.iv;
-+	u8 iv[16];
- 
-+	memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
- 	memcpy(iv + CTR_RFC3686_NONCE_SIZE,
- 	       desc->info, CTR_RFC3686_IV_SIZE);
- 	iv[12] = iv[13] = iv[14] = 0;
- 	iv[15] = 1;
- 
--	desc->info = nx_ctx->priv.ctr.iv;
-+	desc->info = iv;
- 
- 	return ctr_aes_nx_crypt(desc, dst, src, nbytes);
- }
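The ctr3686 change above builds the counter block on the caller's stack
instead of in the shared tfm context.  For reference, the resulting 16-byte
layout is: 4-byte nonce (stored at key-set time), 8-byte per-request IV, and a
32-bit big-endian counter starting at 1, as RFC 3686 requires.  The sketch
below is a standalone illustration with made-up data, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE    8

int main(void)
{
	uint8_t nonce[CTR_RFC3686_NONCE_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t req_iv[CTR_RFC3686_IV_SIZE]   = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t iv[16];

	memcpy(iv, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req_iv, CTR_RFC3686_IV_SIZE);
	iv[12] = iv[13] = iv[14] = 0;	/* big-endian counter ... */
	iv[15] = 1;			/* ... starts at 1 */

	for (int i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}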
-diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
-index 88c5624..c6ebeb6 100644
---- a/drivers/crypto/nx/nx-aes-gcm.c
-+++ b/drivers/crypto/nx/nx-aes-gcm.c
-@@ -330,6 +330,7 @@ out:
- static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
- {
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- 	struct blkcipher_desc desc;
- 	unsigned int nbytes = req->cryptlen;
-@@ -339,7 +340,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
- 
- 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- 
--	desc.info = nx_ctx->priv.gcm.iv;
-+	desc.info = rctx->iv;
- 	/* initialize the counter */
- 	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
- 
-@@ -434,8 +435,8 @@ out:
- 
- static int gcm_aes_nx_encrypt(struct aead_request *req)
- {
--	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
--	char *iv = nx_ctx->priv.gcm.iv;
-+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-+	char *iv = rctx->iv;
- 
- 	memcpy(iv, req->iv, 12);
- 
-@@ -444,8 +445,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
- 
- static int gcm_aes_nx_decrypt(struct aead_request *req)
- {
--	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
--	char *iv = nx_ctx->priv.gcm.iv;
-+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-+	char *iv = rctx->iv;
- 
- 	memcpy(iv, req->iv, 12);
- 
-@@ -455,7 +456,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
- static int gcm4106_aes_nx_encrypt(struct aead_request *req)
- {
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
--	char *iv = nx_ctx->priv.gcm.iv;
-+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-+	char *iv = rctx->iv;
- 	char *nonce = nx_ctx->priv.gcm.nonce;
- 
- 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
-@@ -467,7 +469,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
- static int gcm4106_aes_nx_decrypt(struct aead_request *req)
- {
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
--	char *iv = nx_ctx->priv.gcm.iv;
-+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-+	char *iv = rctx->iv;
- 	char *nonce = nx_ctx->priv.gcm.nonce;
- 
- 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
-diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
-index 8c2faff..c2f7d4b 100644
---- a/drivers/crypto/nx/nx-aes-xcbc.c
-+++ b/drivers/crypto/nx/nx-aes-xcbc.c
-@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
- 			   unsigned int         key_len)
- {
- 	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
-+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- 
- 	switch (key_len) {
- 	case AES_KEYSIZE_128:
-@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
- 		return -EINVAL;
- 	}
- 
--	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
-+	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
- 
- 	return 0;
- }
-@@ -148,32 +149,29 @@ out:
- 	return rc;
- }
- 
--static int nx_xcbc_init(struct shash_desc *desc)
-+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
- {
--	struct xcbc_state *sctx = shash_desc_ctx(desc);
--	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
- 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
--	struct nx_sg *out_sg;
--	int len;
-+	int err;
- 
--	nx_ctx_init(nx_ctx, HCOP_FC_AES);
-+	err = nx_crypto_ctx_aes_xcbc_init(tfm);
-+	if (err)
-+		return err;
- 
--	memset(sctx, 0, sizeof *sctx);
-+	nx_ctx_init(nx_ctx, HCOP_FC_AES);
- 
- 	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
- 	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
- 
--	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
--	memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
--
--	len = AES_BLOCK_SIZE;
--	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
--				  &len, nx_ctx->ap->sglen);
-+	return 0;
-+}
- 
--	if (len != AES_BLOCK_SIZE)
--		return -EINVAL;
-+static int nx_xcbc_init(struct shash_desc *desc)
-+{
-+	struct xcbc_state *sctx = shash_desc_ctx(desc);
- 
--	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
-+	memset(sctx, 0, sizeof *sctx);
- 
- 	return 0;
- }
-@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- 	struct nx_sg *in_sg;
-+	struct nx_sg *out_sg;
- 	u32 to_process = 0, leftover, total;
- 	unsigned int max_sg_len;
- 	unsigned long irq_flags;
-@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
- 	max_sg_len = min_t(u64, max_sg_len,
- 				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
- 
-+	data_len = AES_BLOCK_SIZE;
-+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-+				  &len, nx_ctx->ap->sglen);
-+
-+	if (data_len != AES_BLOCK_SIZE) {
-+		rc = -EINVAL;
-+		goto out;
-+	}
-+
-+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
-+
- 	do {
- 		to_process = total - to_process;
- 		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
-@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
- 						(u8 *) sctx->buffer,
- 						&data_len,
- 						max_sg_len);
--			if (data_len != sctx->count)
--				return -EINVAL;
-+			if (data_len != sctx->count) {
-+				rc = -EINVAL;
-+				goto out;
-+			}
- 		}
- 
- 		data_len = to_process - sctx->count;
-@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
- 					&data_len,
- 					max_sg_len);
- 
--		if (data_len != to_process - sctx->count)
--			return -EINVAL;
-+		if (data_len != to_process - sctx->count) {
-+			rc = -EINVAL;
-+			goto out;
-+		}
- 
- 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- 					sizeof(struct nx_sg);
-@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
- 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- 				 &len, nx_ctx->ap->sglen);
- 
--	if (len != sctx->count)
--		return -EINVAL;
-+	if (len != sctx->count) {
-+		rc = -EINVAL;
-+		goto out;
-+	}
- 
- 	len = AES_BLOCK_SIZE;
- 	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
- 				  nx_ctx->ap->sglen);
- 
--	if (len != AES_BLOCK_SIZE)
--		return -EINVAL;
-+	if (len != AES_BLOCK_SIZE) {
-+		rc = -EINVAL;
-+		goto out;
-+	}
- 
- 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
-@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
- 		.cra_blocksize   = AES_BLOCK_SIZE,
- 		.cra_module      = THIS_MODULE,
- 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
--		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
-+		.cra_init        = nx_crypto_ctx_aes_xcbc_init2,
- 		.cra_exit        = nx_crypto_ctx_exit,
- 	}
- };
-diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
-index 23621da..08f8d5c 100644
---- a/drivers/crypto/nx/nx-sha256.c
-+++ b/drivers/crypto/nx/nx-sha256.c
-@@ -29,30 +29,28 @@
- #include "nx.h"
- 
- 
--static int nx_sha256_init(struct shash_desc *desc)
-+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
- {
--	struct sha256_state *sctx = shash_desc_ctx(desc);
--	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
--	int len;
--	int rc;
-+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
-+	int err;
- 
--	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
-+	err = nx_crypto_ctx_sha_init(tfm);
-+	if (err)
-+		return err;
- 
--	memset(sctx, 0, sizeof *sctx);
-+	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
- 
- 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
- 
- 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
- 
--	len = SHA256_DIGEST_SIZE;
--	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
--				  &nx_ctx->op.outlen,
--				  &len,
--				  (u8 *) sctx->state,
--				  NX_DS_SHA256);
-+	return 0;
-+}
- 
--	if (rc)
--		goto out;
-+static int nx_sha256_init(struct shash_desc *desc) {
-+	struct sha256_state *sctx = shash_desc_ctx(desc);
-+
-+	memset(sctx, 0, sizeof *sctx);
- 
- 	sctx->state[0] = __cpu_to_be32(SHA256_H0);
- 	sctx->state[1] = __cpu_to_be32(SHA256_H1);
-@@ -64,7 +62,6 @@ static int nx_sha256_init(struct shash_desc *desc)
- 	sctx->state[7] = __cpu_to_be32(SHA256_H7);
- 	sctx->count = 0;
- 
--out:
- 	return 0;
- }
- 
-@@ -74,10 +71,13 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
- 	struct sha256_state *sctx = shash_desc_ctx(desc);
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-+	struct nx_sg *in_sg;
-+	struct nx_sg *out_sg;
- 	u64 to_process = 0, leftover, total;
- 	unsigned long irq_flags;
- 	int rc = 0;
- 	int data_len;
-+	u32 max_sg_len;
- 	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
- 
- 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
-@@ -97,6 +97,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
- 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- 
-+	in_sg = nx_ctx->in_sg;
-+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-+	max_sg_len = min_t(u64, max_sg_len,
-+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-+
-+	data_len = SHA256_DIGEST_SIZE;
-+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-+				  &data_len, max_sg_len);
-+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
-+
-+	if (data_len != SHA256_DIGEST_SIZE) {
-+		rc = -EINVAL;
-+		goto out;
-+	}
-+
- 	do {
- 		/*
- 		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-@@ -108,25 +124,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
- 
- 		if (buf_len) {
- 			data_len = buf_len;
--			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
--						  &nx_ctx->op.inlen,
--						  &data_len,
--						  (u8 *) sctx->buf,
--						  NX_DS_SHA256);
-+			in_sg = nx_build_sg_list(nx_ctx->in_sg,
-+						 (u8 *) sctx->buf,
-+						 &data_len,
-+						 max_sg_len);
- 
--			if (rc || data_len != buf_len)
-+			if (data_len != buf_len) {
-+				rc = -EINVAL;
- 				goto out;
-+			}
- 		}
- 
- 		data_len = to_process - buf_len;
--		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
--					  &nx_ctx->op.inlen,
--					  &data_len,
--					  (u8 *) data,
--					  NX_DS_SHA256);
-+		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
-+					 &data_len, max_sg_len);
- 
--		if (rc)
--			goto out;
-+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- 
- 		to_process = (data_len + buf_len);
- 		leftover = total - to_process;
-@@ -173,12 +186,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
- 	struct sha256_state *sctx = shash_desc_ctx(desc);
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-+	struct nx_sg *in_sg, *out_sg;
- 	unsigned long irq_flags;
--	int rc;
-+	u32 max_sg_len;
-+	int rc = 0;
- 	int len;
- 
- 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- 
-+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-+	max_sg_len = min_t(u64, max_sg_len,
-+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-+
- 	/* final is represented by continuing the operation and indicating that
- 	 * this is not an intermediate operation */
- 	if (sctx->count >= SHA256_BLOCK_SIZE) {
-@@ -195,25 +215,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
- 	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- 
- 	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
--	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
--				  &nx_ctx->op.inlen,
--				  &len,
--				  (u8 *) sctx->buf,
--				  NX_DS_SHA256);
-+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
-+				 &len, max_sg_len);
- 
--	if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
-+	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
-+		rc = -EINVAL;
- 		goto out;
-+	}
- 
- 	len = SHA256_DIGEST_SIZE;
--	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
--				  &nx_ctx->op.outlen,
--				  &len,
--				  out,
--				  NX_DS_SHA256);
-+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
- 
--	if (rc || len != SHA256_DIGEST_SIZE)
-+	if (len != SHA256_DIGEST_SIZE) {
-+		rc = -EINVAL;
- 		goto out;
-+	}
- 
-+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
-+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- 	if (!nx_ctx->op.outlen) {
- 		rc = -EINVAL;
- 		goto out;
-@@ -268,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
- 		.cra_blocksize   = SHA256_BLOCK_SIZE,
- 		.cra_module      = THIS_MODULE,
- 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
--		.cra_init        = nx_crypto_ctx_sha_init,
-+		.cra_init        = nx_crypto_ctx_sha256_init,
- 		.cra_exit        = nx_crypto_ctx_exit,
- 	}
- };
-diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
-index b3adf10..aff0fe5 100644
---- a/drivers/crypto/nx/nx-sha512.c
-+++ b/drivers/crypto/nx/nx-sha512.c
-@@ -28,30 +28,29 @@
- #include "nx.h"
- 
- 
--static int nx_sha512_init(struct shash_desc *desc)
-+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
- {
--	struct sha512_state *sctx = shash_desc_ctx(desc);
--	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
--	int len;
--	int rc;
-+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
-+	int err;
- 
--	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
-+	err = nx_crypto_ctx_sha_init(tfm);
-+	if (err)
-+		return err;
- 
--	memset(sctx, 0, sizeof *sctx);
-+	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
- 
- 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
- 
- 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
- 
--	len = SHA512_DIGEST_SIZE;
--	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
--				  &nx_ctx->op.outlen,
--				  &len,
--				  (u8 *)sctx->state,
--				  NX_DS_SHA512);
-+	return 0;
-+}
- 
--	if (rc || len != SHA512_DIGEST_SIZE)
--		goto out;
-+static int nx_sha512_init(struct shash_desc *desc)
-+{
-+	struct sha512_state *sctx = shash_desc_ctx(desc);
-+
-+	memset(sctx, 0, sizeof *sctx);
- 
- 	sctx->state[0] = __cpu_to_be64(SHA512_H0);
- 	sctx->state[1] = __cpu_to_be64(SHA512_H1);
-@@ -63,7 +62,6 @@ static int nx_sha512_init(struct shash_desc *desc)
- 	sctx->state[7] = __cpu_to_be64(SHA512_H7);
- 	sctx->count[0] = 0;
- 
--out:
- 	return 0;
- }
- 
-@@ -73,10 +71,13 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
- 	struct sha512_state *sctx = shash_desc_ctx(desc);
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-+	struct nx_sg *in_sg;
-+	struct nx_sg *out_sg;
- 	u64 to_process, leftover = 0, total;
- 	unsigned long irq_flags;
- 	int rc = 0;
- 	int data_len;
-+	u32 max_sg_len;
- 	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
- 
- 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
-@@ -96,6 +97,22 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
- 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- 
-+	in_sg = nx_ctx->in_sg;
-+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-+	max_sg_len = min_t(u64, max_sg_len,
-+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-+
-+	data_len = SHA512_DIGEST_SIZE;
-+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-+				  &data_len, max_sg_len);
-+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
-+
-+	if (data_len != SHA512_DIGEST_SIZE) {
-+		rc = -EINVAL;
-+		goto out;
-+	}
-+
- 	do {
- 		/*
- 		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-@@ -108,25 +125,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
- 
- 		if (buf_len) {
- 			data_len = buf_len;
--			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
--						  &nx_ctx->op.inlen,
--						  &data_len,
--						  (u8 *) sctx->buf,
--						  NX_DS_SHA512);
-+			in_sg = nx_build_sg_list(nx_ctx->in_sg,
-+						 (u8 *) sctx->buf,
-+						 &data_len, max_sg_len);
- 
--			if (rc || data_len != buf_len)
-+			if (data_len != buf_len) {
-+				rc = -EINVAL;
- 				goto out;
-+			}
- 		}
- 
- 		data_len = to_process - buf_len;
--		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
--					  &nx_ctx->op.inlen,
--					  &data_len,
--					  (u8 *) data,
--					  NX_DS_SHA512);
-+		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
-+					 &data_len, max_sg_len);
-+
-+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- 
--		if (rc || data_len != (to_process - buf_len))
-+		if (data_len != (to_process - buf_len)) {
-+			rc = -EINVAL;
- 			goto out;
-+		}
- 
- 		to_process = (data_len + buf_len);
- 		leftover = total - to_process;
-@@ -172,13 +190,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
- 	struct sha512_state *sctx = shash_desc_ctx(desc);
- 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-+	struct nx_sg *in_sg, *out_sg;
-+	u32 max_sg_len;
- 	u64 count0;
- 	unsigned long irq_flags;
--	int rc;
-+	int rc = 0;
- 	int len;
- 
- 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- 
-+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-+	max_sg_len = min_t(u64, max_sg_len,
-+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-+
- 	/* final is represented by continuing the operation and indicating that
- 	 * this is not an intermediate operation */
- 	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
-@@ -200,24 +225,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
- 	csbcpb->cpb.sha512.message_bit_length_lo = count0;
- 
- 	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
--	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
--				  &nx_ctx->op.inlen,
--				  &len,
--				  (u8 *)sctx->buf,
--				  NX_DS_SHA512);
-+	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
-+				 max_sg_len);
- 
--	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
-+	if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
-+		rc = -EINVAL;
- 		goto out;
-+	}
- 
- 	len = SHA512_DIGEST_SIZE;
--	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
--				  &nx_ctx->op.outlen,
--				  &len,
--				  out,
--				  NX_DS_SHA512);
-+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
-+				 max_sg_len);
- 
--	if (rc)
--		goto out;
-+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
-+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- 
- 	if (!nx_ctx->op.outlen) {
- 		rc = -EINVAL;
-@@ -273,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
- 		.cra_blocksize   = SHA512_BLOCK_SIZE,
- 		.cra_module      = THIS_MODULE,
- 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
--		.cra_init        = nx_crypto_ctx_sha_init,
-+		.cra_init        = nx_crypto_ctx_sha512_init,
- 		.cra_exit        = nx_crypto_ctx_exit,
- 	}
- };
-diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
-index 1da6dc5..737d33d 100644
---- a/drivers/crypto/nx/nx.c
-+++ b/drivers/crypto/nx/nx.c
-@@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
-  * @delta:  is the amount we need to crop in order to bound the list.
-  *
-  */
--static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
-+static long int trim_sg_list(struct nx_sg *sg,
-+			     struct nx_sg *end,
-+			     unsigned int delta,
-+			     unsigned int *nbytes)
- {
-+	long int oplen;
-+	long int data_back;
-+	unsigned int is_delta = delta;
-+
- 	while (delta && end > sg) {
- 		struct nx_sg *last = end - 1;
- 
-@@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d
- 			delta -= last->len;
- 		}
- 	}
--	return (sg - end) * sizeof(struct nx_sg);
--}
--
--/**
-- * nx_sha_build_sg_list - walk and build sg list to sha modes
-- *			  using right bounds and limits.
-- * @nx_ctx: NX crypto context for the lists we're building
-- * @nx_sg: current sg list in or out list
-- * @op_len: current op_len to be used in order to build a sg list
-- * @nbytes:  number or bytes to be processed
-- * @offset: buf offset
-- * @mode: SHA256 or SHA512
-- */
--int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
--			  struct nx_sg 	      *nx_in_outsg,
--			  s64		      *op_len,
--			  unsigned int        *nbytes,
--			  u8 		      *offset,
--			  u32		      mode)
--{
--	unsigned int delta = 0;
--	unsigned int total = *nbytes;
--	struct nx_sg *nx_insg = nx_in_outsg;
--	unsigned int max_sg_len;
- 
--	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
--			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
--	max_sg_len = min_t(u64, max_sg_len,
--			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
--
--	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
--	nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
--
--	switch (mode) {
--	case NX_DS_SHA256:
--		if (*nbytes < total)
--			delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
--		break;
--	case NX_DS_SHA512:
--		if (*nbytes < total)
--			delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
--		break;
--	default:
--		return -EINVAL;
-+	/* There are cases where we need to crop the list in order to make it
-+	 * a block-size multiple, but we also need to align the data.  To do
-+	 * that, we need to calculate how much we need to put back to be
-+	 * processed.
-+	 */
-+	oplen = (sg - end) * sizeof(struct nx_sg);
-+	if (is_delta) {
-+		data_back = (abs(oplen) / AES_BLOCK_SIZE) *  sg->len;
-+		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
-+		*nbytes -= data_back;
- 	}
--	*op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
- 
--	return 0;
-+	return oplen;
- }
- 
- /**
-@@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
- 	/* these lengths should be negative, which will indicate to phyp that
- 	 * the input and output parameters are scatterlists, not linear
- 	 * buffers */
--	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
--	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
-+	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
-+	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
- 
- 	return 0;
- }
-@@ -662,12 +635,14 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
- /* entry points from the crypto tfm initializers */
- int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
- {
-+	tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
- 	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
- 				  NX_MODE_AES_CCM);
- }
- 
- int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
- {
-+	tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
- 	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
- 				  NX_MODE_AES_GCM);
- }
-diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
-index 6c9ecaa..c3ed837 100644
---- a/drivers/crypto/nx/nx.h
-+++ b/drivers/crypto/nx/nx.h
-@@ -2,6 +2,8 @@
- #ifndef __NX_H__
- #define __NX_H__
- 
-+#include <crypto/ctr.h>
-+
- #define NX_NAME		"nx-crypto"
- #define NX_STRING	"IBM Power7+ Nest Accelerator Crypto Driver"
- #define NX_VERSION	"1.0"
-@@ -91,8 +93,11 @@ struct nx_crypto_driver {
- 
- #define NX_GCM4106_NONCE_LEN		(4)
- #define NX_GCM_CTR_OFFSET		(12)
--struct nx_gcm_priv {
-+struct nx_gcm_rctx {
- 	u8 iv[16];
-+};
-+
-+struct nx_gcm_priv {
- 	u8 iauth_tag[16];
- 	u8 nonce[NX_GCM4106_NONCE_LEN];
- };
-@@ -100,8 +105,11 @@ struct nx_gcm_priv {
- #define NX_CCM_AES_KEY_LEN		(16)
- #define NX_CCM4309_AES_KEY_LEN		(19)
- #define NX_CCM4309_NONCE_LEN		(3)
--struct nx_ccm_priv {
-+struct nx_ccm_rctx {
- 	u8 iv[16];
-+};
-+
-+struct nx_ccm_priv {
- 	u8 b0[16];
- 	u8 iauth_tag[16];
- 	u8 oauth_tag[16];
-@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
- };
- 
- struct nx_ctr_priv {
--	u8 iv[16];
-+	u8 nonce[CTR_RFC3686_NONCE_SIZE];
- };
- 
- struct nx_crypto_ctx {
-@@ -153,8 +161,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
- void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
- int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
- 		  u32 may_sleep);
--int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
--			 s64 *, unsigned int *, u8 *, u32);
- struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
- int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
- 		      struct scatterlist *, struct scatterlist *, unsigned int *,
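The nx.h changes above split per-request state (the IV) out of the shared
per-tfm private structs, so concurrent requests on one transform no longer
write into the same buffer.  The toy C program below illustrates that split
with stand-in structs; it is not the nx driver's API.

#include <stdio.h>
#include <string.h>

struct tfm_ctx {			/* shared by every request on this tfm */
	unsigned char nonce[4];
};

struct req_ctx {			/* private to a single request */
	unsigned char iv[16];
};

static void start_request(const struct tfm_ctx *tfm, struct req_ctx *req,
			  const unsigned char *per_req_iv)
{
	memcpy(req->iv, tfm->nonce, 4);		/* read-only shared data is safe */
	memcpy(req->iv + 4, per_req_iv, 8);	/* per-request data stays private */
}

int main(void)
{
	struct tfm_ctx tfm = { .nonce = { 1, 2, 3, 4 } };
	struct req_ctx a, b;
	unsigned char iv_a[8] = { 0xaa }, iv_b[8] = { 0xbb };

	start_request(&tfm, &a, iv_a);
	start_request(&tfm, &b, iv_b);	/* no longer clobbers request a's IV */
	printf("a: %02x..., b: %02x...\n", a.iv[4], b.iv[4]);
	return 0;
}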
-diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
-index 1dc5b0a..34139a8 100644
---- a/drivers/crypto/qat/qat_common/qat_algs.c
-+++ b/drivers/crypto/qat/qat_common/qat_algs.c
-@@ -73,7 +73,8 @@
- 				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
- 				       ICP_QAT_HW_CIPHER_DECRYPT)
- 
--static atomic_t active_dev;
-+static DEFINE_MUTEX(algs_lock);
-+static unsigned int active_devs;
- 
- struct qat_alg_buf {
- 	uint32_t len;
-@@ -1271,7 +1272,10 @@ static struct crypto_alg qat_algs[] = { {
- 
- int qat_algs_register(void)
- {
--	if (atomic_add_return(1, &active_dev) == 1) {
-+	int ret = 0;
-+
-+	mutex_lock(&algs_lock);
-+	if (++active_devs == 1) {
- 		int i;
- 
- 		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
-@@ -1280,21 +1284,25 @@ int qat_algs_register(void)
- 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
- 				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- 
--		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
-+		ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
- 	}
--	return 0;
-+	mutex_unlock(&algs_lock);
-+	return ret;
- }
- 
- int qat_algs_unregister(void)
- {
--	if (atomic_sub_return(1, &active_dev) == 0)
--		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
--	return 0;
-+	int ret = 0;
-+
-+	mutex_lock(&algs_lock);
-+	if (--active_devs == 0)
-+		ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-+	mutex_unlock(&algs_lock);
-+	return ret;
- }
- 
- int qat_algs_init(void)
- {
--	atomic_set(&active_dev, 0);
- 	crypto_get_default_rng();
- 	return 0;
- }
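The qat change above swaps an atomic counter for a mutex-protected one so that
the first registration and the last unregistration can propagate errors and
cannot interleave.  Below is a small userspace sketch of that "first user
registers, last user unregisters" pattern using pthreads; the backend
functions are placeholders, not QAT code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_devs;

static int register_backend(void)   { puts("register");   return 0; }
static int unregister_backend(void) { puts("unregister"); return 0; }

int algs_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = register_backend();	/* only the first device registers */
	pthread_mutex_unlock(&algs_lock);
	return ret;
}

int algs_unregister(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (--active_devs == 0)
		ret = unregister_backend();	/* only the last device unregisters */
	pthread_mutex_unlock(&algs_lock);
	return ret;
}

int main(void)
{
	algs_register();	/* first device */
	algs_register();	/* second device: count only */
	algs_unregister();
	algs_unregister();	/* last device */
	return 0;
}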
-diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
-index 7992164e..c89a7ab 100644
---- a/drivers/dma/at_xdmac.c
-+++ b/drivers/dma/at_xdmac.c
-@@ -648,16 +648,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- 			desc->lld.mbr_sa = mem;
- 			desc->lld.mbr_da = atchan->sconfig.dst_addr;
- 		}
--		desc->lld.mbr_cfg = atchan->cfg;
--		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-+		dwidth = at_xdmac_get_dwidth(atchan->cfg);
- 		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
--			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
-+			       ? dwidth
- 			       : AT_XDMAC_CC_DWIDTH_BYTE;
- 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2			/* next descriptor view */
- 			| AT_XDMAC_MBR_UBC_NDEN					/* next descriptor dst parameter update */
- 			| AT_XDMAC_MBR_UBC_NSEN					/* next descriptor src parameter update */
- 			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)		/* descriptor fetch */
- 			| (len >> fixed_dwidth);				/* microblock length */
-+		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
-+				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
- 		dev_dbg(chan2dev(chan),
- 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
- 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
-diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
-index 340f9e6..3dabc52 100644
---- a/drivers/dma/pl330.c
-+++ b/drivers/dma/pl330.c
-@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
- 			desc->txd.callback = last->txd.callback;
- 			desc->txd.callback_param = last->txd.callback_param;
- 		}
--		last->last = false;
-+		desc->last = false;
- 
- 		dma_cookie_assign(&desc->txd);
- 
-@@ -2621,6 +2621,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
- 		desc->rqcfg.brst_len = 1;
- 
- 	desc->rqcfg.brst_len = get_burst_len(desc, len);
-+	desc->bytes_requested = len;
- 
- 	desc->txd.flags = flags;
- 
-diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
-index 778bbb6..b0487c9f 100644
---- a/drivers/gpu/drm/drm_dp_mst_topology.c
-+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
-@@ -1294,7 +1294,6 @@ retry:
- 				goto retry;
- 			}
- 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
--			WARN(1, "fail\n");
- 
- 			return -EIO;
- 		}
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 8ae6f7f..683a9b0 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -3190,15 +3190,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
- #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
- 
- #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
--		u32 upper = I915_READ(upper_reg);			\
--		u32 lower = I915_READ(lower_reg);			\
--		u32 tmp = I915_READ(upper_reg);				\
--		if (upper != tmp) {					\
--			upper = tmp;					\
--			lower = I915_READ(lower_reg);			\
--			WARN_ON(I915_READ(upper_reg) != upper);		\
--		}							\
--		(u64)upper << 32 | lower; })
-+	u32 upper, lower, tmp;						\
-+	tmp = I915_READ(upper_reg);					\
-+	do {								\
-+		upper = tmp;						\
-+		lower = I915_READ(lower_reg);				\
-+		tmp = I915_READ(upper_reg);				\
-+	} while (upper != tmp);						\
-+	(u64)upper << 32 | lower; })
- 
- #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
- #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
-diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
-index 6377b22..7ee23d1 100644
---- a/drivers/gpu/drm/i915/i915_gem_tiling.c
-+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
-@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
- 	}
- 
- 	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
--	args->phys_swizzle_mode = args->swizzle_mode;
-+	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
-+		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
-+	else
-+		args->phys_swizzle_mode = args->swizzle_mode;
- 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
- 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
- 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
-diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
-index 68fd9fc..44480c1 100644
---- a/drivers/gpu/drm/radeon/dce6_afmt.c
-+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
-@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
- 	struct radeon_device *rdev = encoder->dev->dev_private;
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
--	u32 offset;
- 
--	if (!dig || !dig->afmt || !dig->afmt->pin)
-+	if (!dig || !dig->afmt || !dig->pin)
- 		return;
- 
--	offset = dig->afmt->offset;
--
--	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
--	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
-+	WREG32(AFMT_AUDIO_SRC_CONTROL +  dig->afmt->offset,
-+	       AFMT_AUDIO_SRC_SELECT(dig->pin->id));
- }
- 
- void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
--		struct drm_connector *connector, struct drm_display_mode *mode)
-+				    struct drm_connector *connector,
-+				    struct drm_display_mode *mode)
- {
- 	struct radeon_device *rdev = encoder->dev->dev_private;
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
--	u32 tmp = 0, offset;
-+	u32 tmp = 0;
- 
--	if (!dig || !dig->afmt || !dig->afmt->pin)
-+	if (!dig || !dig->afmt || !dig->pin)
- 		return;
- 
--	offset = dig->afmt->pin->offset;
--
- 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- 		if (connector->latency_present[1])
- 			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
-@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
- 		else
- 			tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
- 	}
--	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
-+	WREG32_ENDPOINT(dig->pin->offset,
-+			AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
- }
- 
- void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
--	u8 *sadb, int sad_count)
-+					     u8 *sadb, int sad_count)
- {
- 	struct radeon_device *rdev = encoder->dev->dev_private;
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
--	u32 offset, tmp;
-+	u32 tmp;
- 
--	if (!dig || !dig->afmt || !dig->afmt->pin)
-+	if (!dig || !dig->afmt || !dig->pin)
- 		return;
- 
--	offset = dig->afmt->pin->offset;
--
- 	/* program the speaker allocation */
--	tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
-+	tmp = RREG32_ENDPOINT(dig->pin->offset,
-+			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- 	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
- 	/* set HDMI mode */
- 	tmp |= HDMI_CONNECTION;
-@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
- 		tmp |= SPEAKER_ALLOCATION(sadb[0]);
- 	else
- 		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
--	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-+	WREG32_ENDPOINT(dig->pin->offset,
-+			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
- }
- 
- void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
--	u8 *sadb, int sad_count)
-+					   u8 *sadb, int sad_count)
- {
- 	struct radeon_device *rdev = encoder->dev->dev_private;
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
--	u32 offset, tmp;
-+	u32 tmp;
- 
--	if (!dig || !dig->afmt || !dig->afmt->pin)
-+	if (!dig || !dig->afmt || !dig->pin)
- 		return;
- 
--	offset = dig->afmt->pin->offset;
--
- 	/* program the speaker allocation */
--	tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
-+	tmp = RREG32_ENDPOINT(dig->pin->offset,
-+			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- 	tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
- 	/* set DP mode */
- 	tmp |= DP_CONNECTION;
-@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
- 		tmp |= SPEAKER_ALLOCATION(sadb[0]);
- 	else
- 		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
--	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-+	WREG32_ENDPOINT(dig->pin->offset,
-+			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
- }
- 
- void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
--	struct cea_sad *sads, int sad_count)
-+			      struct cea_sad *sads, int sad_count)
- {
--	u32 offset;
- 	int i;
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
- 		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- 	};
- 
--	if (!dig || !dig->afmt || !dig->afmt->pin)
-+	if (!dig || !dig->afmt || !dig->pin)
- 		return;
- 
--	offset = dig->afmt->pin->offset;
--
- 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- 		u32 value = 0;
- 		u8 stereo_freqs = 0;
-@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
- 
- 		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
- 
--		WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
-+		WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
- 	}
- }
- 
-@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
- }
- 
- void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
--	struct radeon_crtc *crtc, unsigned int clock)
-+			     struct radeon_crtc *crtc, unsigned int clock)
- {
- 	/* Two dtos; generally use dto0 for HDMI */
- 	u32 value = 0;
-@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
- }
- 
- void dce6_dp_audio_set_dto(struct radeon_device *rdev,
--	struct radeon_crtc *crtc, unsigned int clock)
-+			   struct radeon_crtc *crtc, unsigned int clock)
- {
- 	/* Two dtos; generally use dto1 for DP */
- 	u32 value = 0;
-diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
-index fa719c5..59b3d32 100644
---- a/drivers/gpu/drm/radeon/radeon_audio.c
-+++ b/drivers/gpu/drm/radeon/radeon_audio.c
-@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
- static void radeon_audio_enable(struct radeon_device *rdev,
- 				struct r600_audio_pin *pin, u8 enable_mask)
- {
-+	struct drm_encoder *encoder;
-+	struct radeon_encoder *radeon_encoder;
-+	struct radeon_encoder_atom_dig *dig;
-+	int pin_count = 0;
-+
-+	if (!pin)
-+		return;
-+
-+	if (rdev->mode_info.mode_config_initialized) {
-+		list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
-+			if (radeon_encoder_is_digital(encoder)) {
-+				radeon_encoder = to_radeon_encoder(encoder);
-+				dig = radeon_encoder->enc_priv;
-+				if (dig->pin == pin)
-+					pin_count++;
-+			}
-+		}
-+
-+		if ((pin_count > 1) && (enable_mask == 0))
-+			return;
-+	}
-+
- 	if (rdev->audio.funcs->enable)
- 		rdev->audio.funcs->enable(rdev, pin, enable_mask);
- }
-@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
- 
- static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
- {
--	struct radeon_encoder *radeon_encoder;
--	struct drm_connector *connector;
--	struct radeon_connector *radeon_connector = NULL;
-+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct cea_sad *sads;
- 	int sad_count;
- 
--	list_for_each_entry(connector,
--		&encoder->dev->mode_config.connector_list, head) {
--		if (connector->encoder == encoder) {
--			radeon_connector = to_radeon_connector(connector);
--			break;
--		}
--	}
--
--	if (!radeon_connector) {
--		DRM_ERROR("Couldn't find encoder's connector\n");
-+	if (!connector)
- 		return;
--	}
- 
- 	sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- 	if (sad_count <= 0) {
-@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
- 	}
- 	BUG_ON(!sads);
- 
--	radeon_encoder = to_radeon_encoder(encoder);
--
- 	if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
- 		radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
- 
-@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
- 
- static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
- {
-+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
--	struct drm_connector *connector;
--	struct radeon_connector *radeon_connector = NULL;
- 	u8 *sadb = NULL;
- 	int sad_count;
- 
--	list_for_each_entry(connector,
--			    &encoder->dev->mode_config.connector_list, head) {
--		if (connector->encoder == encoder) {
--			radeon_connector = to_radeon_connector(connector);
--			break;
--		}
--	}
--
--	if (!radeon_connector) {
--		DRM_ERROR("Couldn't find encoder's connector\n");
-+	if (!connector)
- 		return;
--	}
- 
--	sad_count = drm_edid_to_speaker_allocation(
--		radeon_connector_edid(connector), &sadb);
-+	sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
-+						   &sadb);
- 	if (sad_count < 0) {
- 		DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
- 			  sad_count);
-@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
- }
- 
- static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
--	struct drm_display_mode *mode)
-+					      struct drm_display_mode *mode)
- {
--	struct radeon_encoder *radeon_encoder;
--	struct drm_connector *connector;
--	struct radeon_connector *radeon_connector = 0;
--
--	list_for_each_entry(connector,
--		&encoder->dev->mode_config.connector_list, head) {
--		if (connector->encoder == encoder) {
--			radeon_connector = to_radeon_connector(connector);
--			break;
--		}
--	}
-+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 
--	if (!radeon_connector) {
--		DRM_ERROR("Couldn't find encoder's connector\n");
-+	if (!connector)
- 		return;
--	}
--
--	radeon_encoder = to_radeon_encoder(encoder);
- 
- 	if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
- 		radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
-@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
- }
- 
- void radeon_audio_detect(struct drm_connector *connector,
-+			 struct drm_encoder *encoder,
- 			 enum drm_connector_status status)
- {
--	struct radeon_device *rdev;
--	struct radeon_encoder *radeon_encoder;
-+	struct drm_device *dev = connector->dev;
-+	struct radeon_device *rdev = dev->dev_private;
-+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct radeon_encoder_atom_dig *dig;
- 
--	if (!connector || !connector->encoder)
-+	if (!radeon_audio_chipset_supported(rdev))
- 		return;
- 
--	rdev = connector->encoder->dev->dev_private;
--
--	if (!radeon_audio_chipset_supported(rdev))
-+	if (!radeon_encoder_is_digital(encoder))
- 		return;
- 
--	radeon_encoder = to_radeon_encoder(connector->encoder);
- 	dig = radeon_encoder->enc_priv;
- 
- 	if (status == connector_status_connected) {
--		if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
--			radeon_encoder->audio = NULL;
--			return;
--		}
--
- 		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- 
-@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
- 			radeon_encoder->audio = rdev->audio.hdmi_funcs;
- 		}
- 
--		dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
--		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
-+		if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-+			if (!dig->pin)
-+				dig->pin = radeon_audio_get_pin(encoder);
-+			radeon_audio_enable(rdev, dig->pin, 0xf);
-+		} else {
-+			radeon_audio_enable(rdev, dig->pin, 0);
-+			dig->pin = NULL;
-+		}
- 	} else {
--		radeon_audio_enable(rdev, dig->afmt->pin, 0);
--		dig->afmt->pin = NULL;
-+		radeon_audio_enable(rdev, dig->pin, 0);
-+		dig->pin = NULL;
- 	}
- }
- 
-@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
- }
- 
- static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
--	struct drm_display_mode *mode)
-+				       struct drm_display_mode *mode)
- {
- 	struct radeon_device *rdev = encoder->dev->dev_private;
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
--	struct drm_connector *connector;
--	struct radeon_connector *radeon_connector = NULL;
-+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- 	struct hdmi_avi_infoframe frame;
- 	int err;
- 
--	list_for_each_entry(connector,
--		&encoder->dev->mode_config.connector_list, head) {
--		if (connector->encoder == encoder) {
--			radeon_connector = to_radeon_connector(connector);
--			break;
--		}
--	}
--
--	if (!radeon_connector) {
--		DRM_ERROR("Couldn't find encoder's connector\n");
--		return -ENOENT;
--	}
-+	if (!connector)
-+		return -EINVAL;
- 
- 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
- 	if (err < 0) {
-@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
- 		return err;
- 	}
- 
--	if (dig && dig->afmt &&
--		radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
-+	if (dig && dig->afmt && radeon_encoder->audio &&
-+	    radeon_encoder->audio->set_avi_packet)
- 		radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
- 			buffer, sizeof(buffer));
- 
-@@ -745,7 +719,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
- }
- 
- static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
--	struct drm_display_mode *mode)
-+				     struct drm_display_mode *mode)
- {
- 	struct drm_device *dev = encoder->dev;
- 	struct radeon_device *rdev = dev->dev_private;
-@@ -756,6 +730,9 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
- 	struct radeon_connector_atom_dig *dig_connector =
- 		radeon_connector->con_priv;
- 
-+	if (!connector)
-+		return;
-+
- 	if (!dig || !dig->afmt)
- 		return;
- 
-@@ -774,7 +751,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
- }
- 
- void radeon_audio_mode_set(struct drm_encoder *encoder,
--	struct drm_display_mode *mode)
-+			   struct drm_display_mode *mode)
- {
- 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- 
-diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
-index 8438304..059cc30 100644
---- a/drivers/gpu/drm/radeon/radeon_audio.h
-+++ b/drivers/gpu/drm/radeon/radeon_audio.h
-@@ -68,7 +68,8 @@ struct radeon_audio_funcs
- 
- int radeon_audio_init(struct radeon_device *rdev);
- void radeon_audio_detect(struct drm_connector *connector,
--	enum drm_connector_status status);
-+			 struct drm_encoder *encoder,
-+			 enum drm_connector_status status);
- u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
- 	u32 offset, u32 reg);
- void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
-diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
-index 3e5f6b7..c097d3a 100644
---- a/drivers/gpu/drm/radeon/radeon_combios.c
-+++ b/drivers/gpu/drm/radeon/radeon_combios.c
-@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
- 
- 			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
- 			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
-+				u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
-+
-+				if (hss > lvds->native_mode.hdisplay)
-+					hss = (10 - 1) * 8;
-+
- 				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
- 					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
- 				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
--					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
-+					hss;
- 				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
- 					(RBIOS8(tmp + 23) * 8);
- 
-diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index cebb65e..94b21ae 100644
---- a/drivers/gpu/drm/radeon/radeon_connectors.c
-+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
-@@ -1379,8 +1379,16 @@ out:
- 	/* updated in get modes as well since we need to know if it's analog or digital */
- 	radeon_connector_update_scratch_regs(connector, ret);
- 
--	if (radeon_audio != 0)
--		radeon_audio_detect(connector, ret);
-+	if ((radeon_audio != 0) && radeon_connector->use_digital) {
-+		const struct drm_connector_helper_funcs *connector_funcs =
-+			connector->helper_private;
-+
-+		encoder = connector_funcs->best_encoder(connector);
-+		if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
-+			radeon_connector_get_edid(connector);
-+			radeon_audio_detect(connector, encoder, ret);
-+		}
-+	}
- 
- exit:
- 	pm_runtime_mark_last_busy(connector->dev->dev);
-@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
- 
- 	radeon_connector_update_scratch_regs(connector, ret);
- 
--	if (radeon_audio != 0)
--		radeon_audio_detect(connector, ret);
-+	if ((radeon_audio != 0) && encoder) {
-+		radeon_connector_get_edid(connector);
-+		radeon_audio_detect(connector, encoder, ret);
-+	}
- 
- out:
- 	pm_runtime_mark_last_busy(connector->dev->dev);
-diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
-index f01c797..9af2d83 100644
---- a/drivers/gpu/drm/radeon/radeon_mode.h
-+++ b/drivers/gpu/drm/radeon/radeon_mode.h
-@@ -237,7 +237,6 @@ struct radeon_afmt {
- 	int offset;
- 	bool last_buffer_filled_status;
- 	int id;
--	struct r600_audio_pin *pin;
- };
- 
- struct radeon_mode_info {
-@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
- 	uint8_t backlight_level;
- 	int panel_mode;
- 	struct radeon_afmt *afmt;
-+	struct r600_audio_pin *pin;
- 	int active_mst_links;
- };
- 
-diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
-index 6153df73..08ff89d 100644
---- a/drivers/hwmon/nct7904.c
-+++ b/drivers/hwmon/nct7904.c
-@@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
- 	{"nct7904", 0},
- 	{}
- };
-+MODULE_DEVICE_TABLE(i2c, nct7904_id);
- 
- static struct i2c_driver nct7904_driver = {
- 	.class = I2C_CLASS_HWMON,
-diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
-index a353b7d..bc7eed6 100644
---- a/drivers/input/mouse/alps.c
-+++ b/drivers/input/mouse/alps.c
-@@ -20,6 +20,7 @@
- #include <linux/input/mt.h>
- #include <linux/serio.h>
- #include <linux/libps2.h>
-+#include <linux/dmi.h>
- 
- #include "psmouse.h"
- #include "alps.h"
-@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
- #define ALPS_FOUR_BUTTONS	0x40	/* 4 direction button present */
- #define ALPS_PS2_INTERLEAVED	0x80	/* 3-byte PS/2 packet interleaved with
- 					   6-byte ALPS packet */
-+#define ALPS_DELL		0x100	/* device is a Dell laptop */
- #define ALPS_BUTTONPAD		0x200	/* device is a clickpad */
- 
- static const struct alps_model_info alps_model_data[] = {
-@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
- 		return;
- 	}
- 
--	/* Non interleaved V2 dualpoint has separate stick button bits */
-+	/* Dell non interleaved V2 dualpoint has separate stick button bits */
- 	if (priv->proto_version == ALPS_PROTO_V2 &&
--	    priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
-+	    priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
- 		left |= packet[0] & 1;
- 		right |= packet[0] & 2;
- 		middle |= packet[0] & 4;
-@@ -2542,6 +2544,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
- 	priv->byte0 = protocol->byte0;
- 	priv->mask0 = protocol->mask0;
- 	priv->flags = protocol->flags;
-+	if (dmi_name_in_vendors("Dell"))
-+		priv->flags |= ALPS_DELL;
- 
- 	priv->x_max = 2000;
- 	priv->y_max = 1400;
-diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index e8d8456..697f34f 100644
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -1719,7 +1719,8 @@ static int dm_merge_bvec(struct request_queue *q,
- 	struct mapped_device *md = q->queuedata;
- 	struct dm_table *map = dm_get_live_table_fast(md);
- 	struct dm_target *ti;
--	sector_t max_sectors, max_size = 0;
-+	sector_t max_sectors;
-+	int max_size = 0;
- 
- 	if (unlikely(!map))
- 		goto out;
-@@ -1732,18 +1733,10 @@ static int dm_merge_bvec(struct request_queue *q,
- 	 * Find maximum amount of I/O that won't need splitting
- 	 */
- 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
--			  (sector_t) queue_max_sectors(q));
-+			  (sector_t) BIO_MAX_SECTORS);
- 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
--
--	/*
--	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
--	 * to the targets' merge function since it holds sectors not bytes).
--	 * Just doing this as an interim fix for stable@ because the more
--	 * comprehensive cleanup of switching to sector_t will impact every
--	 * DM target that implements a ->merge hook.
--	 */
--	if (max_size > INT_MAX)
--		max_size = INT_MAX;
-+	if (max_size < 0)
-+		max_size = 0;
- 
- 	/*
- 	 * merge_bvec_fn() returns number of bytes
-@@ -1751,13 +1744,13 @@ static int dm_merge_bvec(struct request_queue *q,
- 	 * max is precomputed maximal io size
- 	 */
- 	if (max_size && ti->type->merge)
--		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
-+		max_size = ti->type->merge(ti, bvm, biovec, max_size);
- 	/*
- 	 * If the target doesn't support merge method and some of the devices
--	 * provided their merge_bvec method (we know this by looking for the
--	 * max_hw_sectors that dm_set_device_limits may set), then we can't
--	 * allow bios with multiple vector entries.  So always set max_size
--	 * to 0, and the code below allows just one page.
-+	 * provided their merge_bvec method (we know this by looking at
-+	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
-+	 * entries.  So always set max_size to 0, and the code below allows
-+	 * just one page.
- 	 */
- 	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
- 		max_size = 0;
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index b920028..e462151 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -5740,7 +5740,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
- 	char *ptr;
- 	int err;
- 
--	file = kmalloc(sizeof(*file), GFP_NOIO);
-+	file = kzalloc(sizeof(*file), GFP_NOIO);
- 	if (!file)
- 		return -ENOMEM;
- 
-diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index cd7b0c1..5ce3cd5 100644
---- a/drivers/md/raid1.c
-+++ b/drivers/md/raid1.c
-@@ -1475,6 +1475,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
- {
- 	char b[BDEVNAME_SIZE];
- 	struct r1conf *conf = mddev->private;
-+	unsigned long flags;
- 
- 	/*
- 	 * If it is not operational, then we have already marked it as dead
-@@ -1494,14 +1495,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
- 		return;
- 	}
- 	set_bit(Blocked, &rdev->flags);
-+	spin_lock_irqsave(&conf->device_lock, flags);
- 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
--		unsigned long flags;
--		spin_lock_irqsave(&conf->device_lock, flags);
- 		mddev->degraded++;
- 		set_bit(Faulty, &rdev->flags);
--		spin_unlock_irqrestore(&conf->device_lock, flags);
- 	} else
- 		set_bit(Faulty, &rdev->flags);
-+	spin_unlock_irqrestore(&conf->device_lock, flags);
- 	/*
- 	 * if recovery is running, make sure it aborts.
- 	 */
-@@ -1567,7 +1567,10 @@ static int raid1_spare_active(struct mddev *mddev)
- 	 * Find all failed disks within the RAID1 configuration
- 	 * and mark them readable.
- 	 * Called under mddev lock, so rcu protection not needed.
-+	 * device_lock used to avoid races with raid1_end_read_request
-+	 * which expects 'In_sync' flags and ->degraded to be consistent.
- 	 */
-+	spin_lock_irqsave(&conf->device_lock, flags);
- 	for (i = 0; i < conf->raid_disks; i++) {
- 		struct md_rdev *rdev = conf->mirrors[i].rdev;
- 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
-@@ -1598,7 +1601,6 @@ static int raid1_spare_active(struct mddev *mddev)
- 			sysfs_notify_dirent_safe(rdev->sysfs_state);
- 		}
- 	}
--	spin_lock_irqsave(&conf->device_lock, flags);
- 	mddev->degraded -= count;
- 	spin_unlock_irqrestore(&conf->device_lock, flags);
- 
-diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
-index 7681237..ead5432 100644
---- a/drivers/net/wireless/ath/ath10k/pci.c
-+++ b/drivers/net/wireless/ath/ath10k/pci.c
-@@ -1524,12 +1524,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
- 		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
- 		case QCA6174_HW_1_0_CHIP_ID_REV:
- 		case QCA6174_HW_1_1_CHIP_ID_REV:
-+		case QCA6174_HW_2_1_CHIP_ID_REV:
-+		case QCA6174_HW_2_2_CHIP_ID_REV:
- 			return 3;
- 		case QCA6174_HW_1_3_CHIP_ID_REV:
- 			return 2;
--		case QCA6174_HW_2_1_CHIP_ID_REV:
--		case QCA6174_HW_2_2_CHIP_ID_REV:
--			return 6;
- 		case QCA6174_HW_3_0_CHIP_ID_REV:
- 		case QCA6174_HW_3_1_CHIP_ID_REV:
- 		case QCA6174_HW_3_2_CHIP_ID_REV:
-diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
-index 8882afb..6285f46 100644
---- a/drivers/phy/phy-twl4030-usb.c
-+++ b/drivers/phy/phy-twl4030-usb.c
-@@ -144,6 +144,16 @@
- #define PMBR1				0x0D
- #define GPIO_USB_4PIN_ULPI_2430C	(3 << 0)
- 
-+/*
-+ * If VBUS is valid or ID is ground, then we know a
-+ * cable is present and we need to be runtime-enabled
-+ */
-+static inline bool cable_present(enum omap_musb_vbus_id_status stat)
-+{
-+	return stat == OMAP_MUSB_VBUS_VALID ||
-+		stat == OMAP_MUSB_ID_GROUND;
-+}
-+
- struct twl4030_usb {
- 	struct usb_phy		phy;
- 	struct device		*dev;
-@@ -536,8 +546,10 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
- 
- 	mutex_lock(&twl->lock);
- 	if (status >= 0 && status != twl->linkstat) {
-+		status_changed =
-+			cable_present(twl->linkstat) !=
-+			cable_present(status);
- 		twl->linkstat = status;
--		status_changed = true;
- 	}
- 	mutex_unlock(&twl->lock);
- 
-@@ -553,15 +565,11 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
- 		 * USB_LINK_VBUS state.  musb_hdrc won't care until it
- 		 * starts to handle softconnect right.
- 		 */
--		if ((status == OMAP_MUSB_VBUS_VALID) ||
--		    (status == OMAP_MUSB_ID_GROUND)) {
--			if (pm_runtime_suspended(twl->dev))
--				pm_runtime_get_sync(twl->dev);
-+		if (cable_present(status)) {
-+			pm_runtime_get_sync(twl->dev);
- 		} else {
--			if (pm_runtime_active(twl->dev)) {
--				pm_runtime_mark_last_busy(twl->dev);
--				pm_runtime_put_autosuspend(twl->dev);
--			}
-+			pm_runtime_mark_last_busy(twl->dev);
-+			pm_runtime_put_autosuspend(twl->dev);
- 		}
- 		omap_musb_mailbox(status);
- 	}
-@@ -766,6 +774,9 @@ static int twl4030_usb_remove(struct platform_device *pdev)
- 
- 	/* disable complete OTG block */
- 	twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
-+
-+	if (cable_present(twl->linkstat))
-+		pm_runtime_put_noidle(twl->dev);
- 	pm_runtime_mark_last_busy(twl->dev);
- 	pm_runtime_put(twl->dev);
- 
-diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
-index 8827448..a9aa389 100644
---- a/drivers/scsi/ipr.c
-+++ b/drivers/scsi/ipr.c
-@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
- {
- 	struct ipr_trace_entry *trace_entry;
- 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-+	unsigned int trace_index;
- 
--	trace_entry = &ioa_cfg->trace[atomic_add_return
--			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
-+	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
-+	trace_entry = &ioa_cfg->trace[trace_index];
- 	trace_entry->time = jiffies;
- 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
- 	trace_entry->type = type;
-@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
- 
- static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
- {
-+	unsigned int hrrq;
-+
- 	if (ioa_cfg->hrrq_num == 1)
--		return 0;
--	else
--		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
-+		hrrq = 0;
-+	else {
-+		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
-+		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
-+	}
-+	return hrrq;
- }
- 
- /**
-@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
- 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
- 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
--	unsigned long hrrq_flags;
-+	unsigned long lock_flags;
- 
- 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
- 
- 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
- 		scsi_dma_unmap(scsi_cmd);
- 
--		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
-+		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- 		scsi_cmd->scsi_done(scsi_cmd);
--		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
-+		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
- 	} else {
--		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
-+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-+		spin_lock(&ipr_cmd->hrrq->_lock);
- 		ipr_erp_start(ioa_cfg, ipr_cmd);
--		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
-+		spin_unlock(&ipr_cmd->hrrq->_lock);
-+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- 	}
- }
- 
-diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
-index 73790a1..6b97ee4 100644
---- a/drivers/scsi/ipr.h
-+++ b/drivers/scsi/ipr.h
-@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
- 
- #define IPR_NUM_TRACE_INDEX_BITS	8
- #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
-+#define IPR_TRACE_INDEX_MASK		(IPR_NUM_TRACE_ENTRIES - 1)
- #define IPR_TRACE_SIZE	(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
- 	char trace_start[8];
- #define IPR_TRACE_START_LABEL			"trace"
-diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
-index 9c934e6..c61add4 100644
---- a/drivers/staging/lustre/lustre/obdclass/debug.c
-+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
-@@ -40,7 +40,7 @@
- 
- #define DEBUG_SUBSYSTEM D_OTHER
- 
--#include <linux/unaligned/access_ok.h>
-+#include <asm/unaligned.h>
- 
- #include "../include/obd_support.h"
- #include "../include/lustre_debug.h"
-diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
-index 15baacb..376e4a0 100644
---- a/drivers/staging/vt6655/device_main.c
-+++ b/drivers/staging/vt6655/device_main.c
-@@ -1486,8 +1486,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
- 		}
- 	}
- 
--	if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
--		if (conf->assoc) {
-+	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
-+	    priv->op_mode != NL80211_IFTYPE_AP) {
-+		if (conf->assoc && conf->beacon_rate) {
- 			CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
- 				       conf->sync_tsf);
- 
-diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
-index 1d30b09..67098a8 100644
---- a/drivers/thermal/samsung/exynos_tmu.c
-+++ b/drivers/thermal/samsung/exynos_tmu.c
-@@ -1209,6 +1209,8 @@ err_clk_sec:
- 	if (!IS_ERR(data->clk_sec))
- 		clk_unprepare(data->clk_sec);
- err_sensor:
-+	if (!IS_ERR_OR_NULL(data->regulator))
-+		regulator_disable(data->regulator);
- 	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
- 
- 	return ret;
-diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
-index 74fea4f..3ad48e1 100644
---- a/drivers/usb/chipidea/core.c
-+++ b/drivers/usb/chipidea/core.c
-@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
- 	},
- };
- 
--module_platform_driver(ci_hdrc_driver);
-+static int __init ci_hdrc_platform_register(void)
-+{
-+	ci_hdrc_host_driver_init();
-+	return platform_driver_register(&ci_hdrc_driver);
-+}
-+module_init(ci_hdrc_platform_register);
-+
-+static void __exit ci_hdrc_platform_unregister(void)
-+{
-+	platform_driver_unregister(&ci_hdrc_driver);
-+}
-+module_exit(ci_hdrc_platform_unregister);
- 
- MODULE_ALIAS("platform:ci_hdrc");
- MODULE_LICENSE("GPL v2");
-diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
-index 21fe1a3..2f8af40 100644
---- a/drivers/usb/chipidea/host.c
-+++ b/drivers/usb/chipidea/host.c
-@@ -237,9 +237,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
- 	rdrv->name	= "host";
- 	ci->roles[CI_ROLE_HOST] = rdrv;
- 
-+	return 0;
-+}
-+
-+void ci_hdrc_host_driver_init(void)
-+{
- 	ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
- 	orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
- 	ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
--
--	return 0;
- }
-diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
-index 5707bf3..0f12f13 100644
---- a/drivers/usb/chipidea/host.h
-+++ b/drivers/usb/chipidea/host.h
-@@ -5,6 +5,7 @@
- 
- int ci_hdrc_host_init(struct ci_hdrc *ci);
- void ci_hdrc_host_destroy(struct ci_hdrc *ci);
-+void ci_hdrc_host_driver_init(void);
- 
- #else
- 
-@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
- 
- }
- 
-+static void ci_hdrc_host_driver_init(void)
-+{
-+
-+}
-+
- #endif
- 
- #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
-diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
-index 6d3eb8b..5318615 100644
---- a/drivers/usb/gadget/function/f_uac2.c
-+++ b/drivers/usb/gadget/function/f_uac2.c
-@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
- 			factor = 1000;
- 		} else {
- 			ep_desc = &hs_epin_desc;
--			factor = 125;
-+			factor = 8000;
- 		}
- 
- 		/* pre-compute some values for iso_complete() */
- 		uac2->p_framesize = opts->p_ssize *
- 				    num_channels(opts->p_chmask);
- 		rate = opts->p_srate * uac2->p_framesize;
--		uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
-+		uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
- 		uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
- 					prm->max_psize);
- 
-diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
-index d69c355..7d69931 100644
---- a/drivers/usb/gadget/udc/udc-core.c
-+++ b/drivers/usb/gadget/udc/udc-core.c
-@@ -321,6 +321,7 @@ err4:
- 
- err3:
- 	put_device(&udc->dev);
-+	device_del(&gadget->dev);
- 
- err2:
- 	put_device(&gadget->dev);
-diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
-index 3e442f7..9a8c936 100644
---- a/drivers/usb/host/xhci-mem.c
-+++ b/drivers/usb/host/xhci-mem.c
-@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
- 	int size;
- 	int i, j, num_ports;
- 
--	del_timer_sync(&xhci->cmd_timer);
-+	if (timer_pending(&xhci->cmd_timer))
-+		del_timer_sync(&xhci->cmd_timer);
- 
- 	/* Free the Event Ring Segment Table and the actual Event Ring */
- 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
-diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
-index d095677..b3a0a22 100644
---- a/drivers/usb/host/xhci-ring.c
-+++ b/drivers/usb/host/xhci-ring.c
-@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
- 		return 0;
- 	/* offset in TRBs */
- 	segment_offset = trb - seg->trbs;
--	if (segment_offset > TRBS_PER_SEGMENT)
-+	if (segment_offset >= TRBS_PER_SEGMENT)
- 		return 0;
- 	return seg->dma + (segment_offset * sizeof(*trb));
- }
-diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
-index 19b85ee..876423b 100644
---- a/drivers/usb/serial/option.c
-+++ b/drivers/usb/serial/option.c
-@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
- 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- 	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
- 	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
-+	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
-+	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
- 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
- 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
- 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
-diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
-index 9c63897..d156545 100644
---- a/drivers/usb/serial/qcserial.c
-+++ b/drivers/usb/serial/qcserial.c
-@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
- 	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
- 	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
- 	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
--	{DEVICE_SWI(0x1199, 0x9041)},	/* Sierra Wireless MC7305/MC7355 */
- 	{DEVICE_SWI(0x1199, 0x9051)},	/* Netgear AirCard 340U */
- 	{DEVICE_SWI(0x1199, 0x9053)},	/* Sierra Wireless Modem */
- 	{DEVICE_SWI(0x1199, 0x9054)},	/* Sierra Wireless Modem */
-@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
- 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
- 	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
- 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
-+	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
- 
- 	/* Huawei devices */
- 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
-diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
-index 46179a0..07d1ecd 100644
---- a/drivers/usb/serial/sierra.c
-+++ b/drivers/usb/serial/sierra.c
-@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
- 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
- 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
- 	},
-+	{ USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
- 	/* AT&T Direct IP LTE modems */
- 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
- 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
-diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
-index 8927485..4bd23bb 100644
---- a/drivers/xen/gntdev.c
-+++ b/drivers/xen/gntdev.c
-@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
- 
- 	pr_debug("priv %p\n", priv);
- 
-+	mutex_lock(&priv->lock);
- 	while (!list_empty(&priv->maps)) {
- 		map = list_entry(priv->maps.next, struct grant_map, next);
- 		list_del(&map->next);
- 		gntdev_put_map(NULL /* already removed */, map);
- 	}
- 	WARN_ON(!list_empty(&priv->freeable_maps));
-+	mutex_unlock(&priv->lock);
- 
- 	if (use_ptemod)
- 		mmu_notifier_unregister(&priv->mn, priv->mm);
-diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index 039f9c8a..6e13504 100644
---- a/fs/nfsd/nfs4state.c
-+++ b/fs/nfsd/nfs4state.c
-@@ -4397,9 +4397,9 @@ laundromat_main(struct work_struct *laundry)
- 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
- }
- 
--static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
-+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
- {
--	if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
-+	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
- 		return nfserr_bad_stateid;
- 	return nfs_ok;
- }
-@@ -4574,20 +4574,48 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
- 	return nfs_ok;
- }
- 
-+static struct file *
-+nfs4_find_file(struct nfs4_stid *s, int flags)
-+{
-+	switch (s->sc_type) {
-+	case NFS4_DELEG_STID:
-+		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
-+			return NULL;
-+		return get_file(s->sc_file->fi_deleg_file);
-+	case NFS4_OPEN_STID:
-+	case NFS4_LOCK_STID:
-+		if (flags & RD_STATE)
-+			return find_readable_file(s->sc_file);
-+		else
-+			return find_writeable_file(s->sc_file);
-+		break;
-+	}
-+
-+	return NULL;
-+}
-+
-+static __be32
-+nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
-+{
-+	__be32 status;
-+
-+	status = nfsd4_check_openowner_confirmed(ols);
-+	if (status)
-+		return status;
-+	return nfs4_check_openmode(ols, flags);
-+}
-+
- /*
--* Checks for stateid operations
--*/
-+ * Checks for stateid operations
-+ */
- __be32
- nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
- 			   stateid_t *stateid, int flags, struct file **filpp)
- {
--	struct nfs4_stid *s;
--	struct nfs4_ol_stateid *stp = NULL;
--	struct nfs4_delegation *dp = NULL;
--	struct svc_fh *current_fh = &cstate->current_fh;
--	struct inode *ino = d_inode(current_fh->fh_dentry);
-+	struct svc_fh *fhp = &cstate->current_fh;
-+	struct inode *ino = d_inode(fhp->fh_dentry);
- 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
--	struct file *file = NULL;
-+	struct nfs4_stid *s;
- 	__be32 status;
- 
- 	if (filpp)
-@@ -4597,60 +4625,39 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
- 		return nfserr_grace;
- 
- 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
--		return check_special_stateids(net, current_fh, stateid, flags);
-+		return check_special_stateids(net, fhp, stateid, flags);
- 
- 	status = nfsd4_lookup_stateid(cstate, stateid,
- 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
- 				&s, nn);
- 	if (status)
- 		return status;
--	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
-+	status = check_stateid_generation(stateid, &s->sc_stateid,
-+			nfsd4_has_session(cstate));
- 	if (status)
- 		goto out;
-+
- 	switch (s->sc_type) {
- 	case NFS4_DELEG_STID:
--		dp = delegstateid(s);
--		status = nfs4_check_delegmode(dp, flags);
--		if (status)
--			goto out;
--		if (filpp) {
--			file = dp->dl_stid.sc_file->fi_deleg_file;
--			if (!file) {
--				WARN_ON_ONCE(1);
--				status = nfserr_serverfault;
--				goto out;
--			}
--			get_file(file);
--		}
-+		status = nfs4_check_delegmode(delegstateid(s), flags);
- 		break;
- 	case NFS4_OPEN_STID:
- 	case NFS4_LOCK_STID:
--		stp = openlockstateid(s);
--		status = nfs4_check_fh(current_fh, stp);
--		if (status)
--			goto out;
--		status = nfsd4_check_openowner_confirmed(stp);
--		if (status)
--			goto out;
--		status = nfs4_check_openmode(stp, flags);
--		if (status)
--			goto out;
--		if (filpp) {
--			struct nfs4_file *fp = stp->st_stid.sc_file;
--
--			if (flags & RD_STATE)
--				file = find_readable_file(fp);
--			else
--				file = find_writeable_file(fp);
--		}
-+		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
- 		break;
- 	default:
- 		status = nfserr_bad_stateid;
-+		break;
-+	}
-+	if (status)
- 		goto out;
-+	status = nfs4_check_fh(fhp, s);
-+
-+	if (!status && filpp) {
-+		*filpp = nfs4_find_file(s, flags);
-+		if (!*filpp)
-+			status = nfserr_serverfault;
- 	}
--	status = nfs_ok;
--	if (file)
--		*filpp = file;
- out:
- 	nfs4_put_stid(s);
- 	return status;
-@@ -4754,7 +4761,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
- 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
- 	if (status)
- 		return status;
--	return nfs4_check_fh(current_fh, stp);
-+	return nfs4_check_fh(current_fh, &stp->st_stid);
- }
- 
- /* 
-diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 158badf..d4d8445 100644
---- a/fs/nfsd/nfs4xdr.c
-+++ b/fs/nfsd/nfs4xdr.c
-@@ -2142,6 +2142,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
- 			      FATTR4_WORD0_RDATTR_ERROR)
- #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
-+#define WORD2_ABSENT_FS_ATTRS 0
- 
- #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- static inline __be32
-@@ -2170,7 +2171,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- { return 0; }
- #endif
- 
--static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
-+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
- {
- 	/* As per referral draft:  */
- 	if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
-@@ -2183,6 +2184,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
- 	}
- 	*bmval0 &= WORD0_ABSENT_FS_ATTRS;
- 	*bmval1 &= WORD1_ABSENT_FS_ATTRS;
-+	*bmval2 &= WORD2_ABSENT_FS_ATTRS;
- 	return 0;
- }
- 
-@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
- 	BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
- 
- 	if (exp->ex_fslocs.migrated) {
--		BUG_ON(bmval[2]);
--		status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
-+		status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
- 		if (status)
- 			goto out;
- 	}
-@@ -2290,8 +2291,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
- 	}
- 
- #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
--	if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
--			bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
-+	if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
-+	     bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
- 		err = security_inode_getsecctx(d_inode(dentry),
- 						&context, &contextlen);
- 		contextsupport = (err == 0);
-diff --git a/fs/notify/mark.c b/fs/notify/mark.c
-index 92e48c7..39ddcaf 100644
---- a/fs/notify/mark.c
-+++ b/fs/notify/mark.c
-@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
- 					 unsigned int flags)
- {
- 	struct fsnotify_mark *lmark, *mark;
-+	LIST_HEAD(to_free);
- 
-+	/*
-+	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
-+	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
-+	 * to_free list so we have to use mark_mutex even when accessing that
-+	 * list. And freeing mark requires us to drop mark_mutex. So we can
-+	 * reliably free only the first mark in the list. That's why we first
-+	 * move marks to free to to_free list in one go and then free marks in
-+	 * to_free list one by one.
-+	 */
- 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
- 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
--		if (mark->flags & flags) {
--			fsnotify_get_mark(mark);
--			fsnotify_destroy_mark_locked(mark, group);
--			fsnotify_put_mark(mark);
--		}
-+		if (mark->flags & flags)
-+			list_move(&mark->g_list, &to_free);
- 	}
- 	mutex_unlock(&group->mark_mutex);
-+
-+	while (1) {
-+		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
-+		if (list_empty(&to_free)) {
-+			mutex_unlock(&group->mark_mutex);
-+			break;
-+		}
-+		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
-+		fsnotify_get_mark(mark);
-+		fsnotify_destroy_mark_locked(mark, group);
-+		mutex_unlock(&group->mark_mutex);
-+		fsnotify_put_mark(mark);
-+	}
- }
- 
- /*
-diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
-index f906a25..9ea7012 100644
---- a/fs/ocfs2/aops.c
-+++ b/fs/ocfs2/aops.c
-@@ -686,7 +686,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
- 
- 	if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
- 		u64 s = i_size_read(inode);
--		sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
-+		sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
- 			(do_div(s, osb->s_clustersize) >> 9);
- 
- 		ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
-@@ -911,7 +911,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
- 		BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
- 
- 		ret = blkdev_issue_zeroout(osb->sb->s_bdev,
--				p_cpos << (osb->s_clustersize_bits - 9),
-+				(u64)p_cpos << (osb->s_clustersize_bits - 9),
- 				zero_len_head >> 9, GFP_NOFS, false);
- 		if (ret < 0)
- 			mlog_errno(ret);
-diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
-index 8b23aa2..23157e4 100644
---- a/fs/ocfs2/dlmglue.c
-+++ b/fs/ocfs2/dlmglue.c
-@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
- 	osb->dc_work_sequence = osb->dc_wake_sequence;
- 
- 	processed = osb->blocked_lock_count;
--	while (processed) {
--		BUG_ON(list_empty(&osb->blocked_lock_list));
--
-+	/*
-+	 * blocked lock processing in this loop might call iput which can
-+	 * remove items off osb->blocked_lock_list. Downconvert up to
-+	 * 'processed' number of locks, but stop short if we had some
-+	 * removed in ocfs2_mark_lockres_freeing when downconverting.
-+	 */
-+	while (processed && !list_empty(&osb->blocked_lock_list)) {
- 		lockres = list_entry(osb->blocked_lock_list.next,
- 				     struct ocfs2_lock_res, l_blocked_list);
- 		list_del_init(&lockres->l_blocked_list);
-diff --git a/fs/signalfd.c b/fs/signalfd.c
-index 7e412ad..270221f 100644
---- a/fs/signalfd.c
-+++ b/fs/signalfd.c
-@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
- 		 * Other callers might not initialize the si_lsb field,
- 		 * so check explicitly for the right codes here.
- 		 */
--		if (kinfo->si_code == BUS_MCEERR_AR ||
--		    kinfo->si_code == BUS_MCEERR_AO)
-+		if (kinfo->si_signo == SIGBUS &&
-+		    (kinfo->si_code == BUS_MCEERR_AR ||
-+		     kinfo->si_code == BUS_MCEERR_AO))
- 			err |= __put_user((short) kinfo->si_addr_lsb,
- 					  &uinfo->ssi_addr_lsb);
- #endif
-diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
-index 3d4ea7e..12b75f3 100644
---- a/include/linux/mtd/nand.h
-+++ b/include/linux/mtd/nand.h
-@@ -176,17 +176,17 @@ typedef enum {
- /* Chip may not exist, so silence any errors in scan */
- #define NAND_SCAN_SILENT_NODEV	0x00040000
- /*
-- * This option could be defined by controller drivers to protect against
-- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
-- */
--#define NAND_USE_BOUNCE_BUFFER	0x00080000
--/*
-  * Autodetect nand buswidth with readid/onfi.
-  * This suppose the driver will configure the hardware in 8 bits mode
-  * when calling nand_scan_ident, and update its configuration
-  * before calling nand_scan_tail.
-  */
- #define NAND_BUSWIDTH_AUTO      0x00080000
-+/*
-+ * This option could be defined by controller drivers to protect against
-+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
-+ */
-+#define NAND_USE_BOUNCE_BUFFER	0x00100000
- 
- /* Options set by nand scan */
- /* Nand scan has allocated controller struct */
-diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
-index efe3443..413417f 100644
---- a/include/uapi/linux/pci_regs.h
-+++ b/include/uapi/linux/pci_regs.h
-@@ -319,6 +319,7 @@
- #define PCI_MSIX_PBA		8	/* Pending Bit Array offset */
- #define  PCI_MSIX_PBA_BIR	0x00000007 /* BAR index */
- #define  PCI_MSIX_PBA_OFFSET	0xfffffff8 /* Offset into specified BAR */
-+#define PCI_MSIX_FLAGS_BIRMASK	PCI_MSIX_PBA_BIR /* deprecated */
- #define PCI_CAP_MSIX_SIZEOF	12	/* size of MSIX registers */
- 
- /* MSI-X Table entry format */
-diff --git a/ipc/mqueue.c b/ipc/mqueue.c
-index 3aaea7f..c3fc5c2 100644
---- a/ipc/mqueue.c
-+++ b/ipc/mqueue.c
-@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
- 		if (!leaf)
- 			return -ENOMEM;
- 		INIT_LIST_HEAD(&leaf->msg_list);
--		info->qsize += sizeof(*leaf);
- 	}
- 	leaf->priority = msg->m_type;
- 	rb_link_node(&leaf->rb_node, parent, p);
-@@ -188,7 +187,6 @@ try_again:
- 			     "lazy leaf delete!\n");
- 		rb_erase(&leaf->rb_node, &info->msg_tree);
- 		if (info->node_cache) {
--			info->qsize -= sizeof(*leaf);
- 			kfree(leaf);
- 		} else {
- 			info->node_cache = leaf;
-@@ -201,7 +199,6 @@ try_again:
- 		if (list_empty(&leaf->msg_list)) {
- 			rb_erase(&leaf->rb_node, &info->msg_tree);
- 			if (info->node_cache) {
--				info->qsize -= sizeof(*leaf);
- 				kfree(leaf);
- 			} else {
- 				info->node_cache = leaf;
-@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
- 		/* Save our speculative allocation into the cache */
- 		INIT_LIST_HEAD(&new_leaf->msg_list);
- 		info->node_cache = new_leaf;
--		info->qsize += sizeof(*new_leaf);
- 		new_leaf = NULL;
- 	} else {
- 		kfree(new_leaf);
-@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
- 		/* Save our speculative allocation into the cache */
- 		INIT_LIST_HEAD(&new_leaf->msg_list);
- 		info->node_cache = new_leaf;
--		info->qsize += sizeof(*new_leaf);
- 	} else {
- 		kfree(new_leaf);
- 	}
-diff --git a/kernel/signal.c b/kernel/signal.c
-index d51c5dd..0206be7 100644
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -2753,12 +2753,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
- 		 * Other callers might not initialize the si_lsb field,
- 		 * so check explicitly for the right codes here.
- 		 */
--		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
-+		if (from->si_signo == SIGBUS &&
-+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
- 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
- #endif
- #ifdef SEGV_BNDERR
--		err |= __put_user(from->si_lower, &to->si_lower);
--		err |= __put_user(from->si_upper, &to->si_upper);
-+		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
-+			err |= __put_user(from->si_lower, &to->si_lower);
-+			err |= __put_user(from->si_upper, &to->si_upper);
-+		}
- #endif
- 		break;
- 	case __SI_CHLD:
-@@ -3022,7 +3025,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
- 			int, sig,
- 			struct compat_siginfo __user *, uinfo)
- {
--	siginfo_t info;
-+	siginfo_t info = {};
- 	int ret = copy_siginfo_from_user32(&info, uinfo);
- 	if (unlikely(ret))
- 		return ret;
-@@ -3066,7 +3069,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
- 			int, sig,
- 			struct compat_siginfo __user *, uinfo)
- {
--	siginfo_t info;
-+	siginfo_t info = {};
- 
- 	if (copy_siginfo_from_user32(&info, uinfo))
- 		return -EFAULT;
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 5e8eadd..0d024fc 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -937,21 +937,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
- 		 *
- 		 * 2) Global reclaim encounters a page, memcg encounters a
- 		 *    page that is not marked for immediate reclaim or
--		 *    the caller does not have __GFP_IO. In this case mark
-+		 *    the caller does not have __GFP_FS (or __GFP_IO if it's
-+		 *    simply going to swap, not to fs). In this case mark
- 		 *    the page for immediate reclaim and continue scanning.
- 		 *
--		 *    __GFP_IO is checked  because a loop driver thread might
-+		 *    Require may_enter_fs because we would wait on fs, which
-+		 *    may not have submitted IO yet. And the loop driver might
- 		 *    enter reclaim, and deadlock if it waits on a page for
- 		 *    which it is needed to do the write (loop masks off
- 		 *    __GFP_IO|__GFP_FS for this reason); but more thought
- 		 *    would probably show more reasons.
- 		 *
--		 *    Don't require __GFP_FS, since we're not going into the
--		 *    FS, just waiting on its writeback completion. Worryingly,
--		 *    ext4 gfs2 and xfs allocate pages with
--		 *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
--		 *    may_enter_fs here is liable to OOM on them.
--		 *
- 		 * 3) memcg encounters a page that is not already marked
- 		 *    PageReclaim. memcg does not have any dirty pages
- 		 *    throttling so we could easily OOM just because too many
-@@ -968,7 +964,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
- 
- 			/* Case 2 above */
- 			} else if (global_reclaim(sc) ||
--			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
-+			    !PageReclaim(page) || !may_enter_fs) {
- 				/*
- 				 * This is slightly racy - end_page_writeback()
- 				 * might have just cleared PageReclaim, then
-diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
-index 1ab3dc9..7b815bc 100644
---- a/net/bluetooth/smp.c
-+++ b/net/bluetooth/smp.c
-@@ -2295,6 +2295,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
- 		return 1;
- 
- 	chan = conn->smp;
-+	if (!chan) {
-+		BT_ERR("SMP security requested but not available");
-+		return 1;
-+	}
- 
- 	if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
- 		return 1;
-diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
-index e061355..bf20593 100644
---- a/sound/firewire/amdtp.c
-+++ b/sound/firewire/amdtp.c
-@@ -730,8 +730,9 @@ static void handle_in_packet(struct amdtp_stream *s,
- 	    s->data_block_counter != UINT_MAX)
- 		data_block_counter = s->data_block_counter;
- 
--	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
--	    (s->data_block_counter == UINT_MAX)) {
-+	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
-+	     data_block_counter == s->tx_first_dbc) ||
-+	    s->data_block_counter == UINT_MAX) {
- 		lost = false;
- 	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
- 		lost = data_block_counter != s->data_block_counter;
-diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
-index 8a03a91..25c9055 100644
---- a/sound/firewire/amdtp.h
-+++ b/sound/firewire/amdtp.h
-@@ -153,6 +153,8 @@ struct amdtp_stream {
- 
- 	/* quirk: fixed interval of dbc between previos/current packets. */
- 	unsigned int tx_dbc_interval;
-+	/* quirk: indicate the value of dbc field in a first packet. */
-+	unsigned int tx_first_dbc;
- 
- 	bool callbacked;
- 	wait_queue_head_t callback_wait;
-diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
-index 2682e7e..c94a432 100644
---- a/sound/firewire/fireworks/fireworks.c
-+++ b/sound/firewire/fireworks/fireworks.c
-@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
- 	err = get_hardware_info(efw);
- 	if (err < 0)
- 		goto error;
-+	/* AudioFire8 (since 2009) and AudioFirePre8 */
- 	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
- 		efw->is_af9 = true;
-+	/* These models uses the same firmware. */
-+	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
-+	    entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
-+	    entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
-+	    entry->model_id == MODEL_GIBSON_RIP ||
-+	    entry->model_id == MODEL_GIBSON_GOLDTOP)
-+		efw->is_fireworks3 = true;
- 
- 	snd_efw_proc_init(efw);
- 
-diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
-index 4f0201a..084d414 100644
---- a/sound/firewire/fireworks/fireworks.h
-+++ b/sound/firewire/fireworks/fireworks.h
-@@ -71,6 +71,7 @@ struct snd_efw {
- 
- 	/* for quirks */
- 	bool is_af9;
-+	bool is_fireworks3;
- 	u32 firmware_version;
- 
- 	unsigned int midi_in_ports;
-diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
-index c55db1b..7e353f1 100644
---- a/sound/firewire/fireworks/fireworks_stream.c
-+++ b/sound/firewire/fireworks/fireworks_stream.c
-@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
- 	efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
- 	/* Fireworks reset dbc at bus reset. */
- 	efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
-+	/*
-+	 * But Recent firmwares starts packets with non-zero dbc.
-+	 * Driver version 5.7.6 installs firmware version 5.7.3.
-+	 */
-+	if (efw->is_fireworks3 &&
-+	    (efw->firmware_version == 0x5070000 ||
-+	     efw->firmware_version == 0x5070300 ||
-+	     efw->firmware_version == 0x5080000))
-+		efw->tx_stream.tx_first_dbc = 0x02;
- 	/* AudioFire9 always reports wrong dbs. */
- 	if (efw->is_af9)
- 		efw->tx_stream.flags |= CIP_WRONG_DBS;
-diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
-index 50e9dd6..3a24f77 100644
---- a/sound/pci/hda/patch_cirrus.c
-+++ b/sound/pci/hda/patch_cirrus.c
-@@ -1001,9 +1001,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
- 
- 	spec->spdif_present = spdif_present;
- 	/* SPDIF TX on/off */
--	if (spdif_present)
--		snd_hda_set_pin_ctl(codec, spdif_pin,
--				    spdif_present ? PIN_OUT : 0);
-+	snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
- 
- 	cs_automute(codec);
- }
-diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index 590bcfb0..1e99f07 100644
---- a/sound/pci/hda/patch_realtek.c
-+++ b/sound/pci/hda/patch_realtek.c
-@@ -5118,6 +5118,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
-+	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
- 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
-diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
-index 477e13d..e7ba557 100644
---- a/sound/soc/codecs/pcm1681.c
-+++ b/sound/soc/codecs/pcm1681.c
-@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
- 
- 	if (val != -1) {
- 		regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
--					PCM1681_DEEMPH_RATE_MASK, val);
-+				   PCM1681_DEEMPH_RATE_MASK, val << 3);
- 		enable = 1;
- 	} else
- 		enable = 0;
-diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
-index a984485..f7549cc 100644
---- a/sound/soc/codecs/ssm4567.c
-+++ b/sound/soc/codecs/ssm4567.c
-@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
- 	if (invert_fclk)
- 		ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
- 
--	return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
-+	return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
-+			SSM4567_SAI_CTRL_1_BCLK |
-+			SSM4567_SAI_CTRL_1_FSYNC |
-+			SSM4567_SAI_CTRL_1_LJ |
-+			SSM4567_SAI_CTRL_1_TDM |
-+			SSM4567_SAI_CTRL_1_PDM,
-+			ctrl1);
- }
- 
- static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
-diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
-index 7b50a9d..edc1869 100644
---- a/sound/soc/intel/atom/sst/sst_drv_interface.c
-+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
-@@ -42,6 +42,11 @@
- #define MIN_FRAGMENT_SIZE (50 * 1024)
- #define MAX_FRAGMENT_SIZE (1024 * 1024)
- #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
-+#ifdef CONFIG_PM
-+#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
-+#else
-+#define GET_USAGE_COUNT(dev) 1
-+#endif
- 
- int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
- {
-@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
- 	int ret = 0;
- 	int usage_count = 0;
- 
--#ifdef CONFIG_PM
--	usage_count = atomic_read(&dev->power.usage_count);
--#else
--	usage_count = 1;
--#endif
--
- 	if (state == true) {
- 		ret = pm_runtime_get_sync(dev);
--
-+		usage_count = GET_USAGE_COUNT(dev);
- 		dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
- 		if (ret < 0) {
- 			dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
-@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
- 			}
- 		}
- 	} else {
-+		usage_count = GET_USAGE_COUNT(dev);
- 		dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
- 		return sst_pm_runtime_put(ctx);
- 	}
-diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
-index 158204d..b6c12dc 100644
---- a/sound/soc/soc-dapm.c
-+++ b/sound/soc/soc-dapm.c
-@@ -1811,6 +1811,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
- 					   size_t count, loff_t *ppos)
- {
- 	struct snd_soc_dapm_widget *w = file->private_data;
-+	struct snd_soc_card *card = w->dapm->card;
- 	char *buf;
- 	int in, out;
- 	ssize_t ret;
-@@ -1820,6 +1821,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
- 	if (!buf)
- 		return -ENOMEM;
- 
-+	mutex_lock(&card->dapm_mutex);
-+
- 	/* Supply widgets are not handled by is_connected_{input,output}_ep() */
- 	if (w->is_supply) {
- 		in = 0;
-@@ -1866,6 +1869,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
- 					p->sink->name);
- 	}
- 
-+	mutex_unlock(&card->dapm_mutex);
-+
- 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
- 
- 	kfree(buf);
-@@ -2140,11 +2145,15 @@ static ssize_t dapm_widget_show(struct device *dev,
- 	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
- 	int i, count = 0;
- 
-+	mutex_lock(&rtd->card->dapm_mutex);
-+
- 	for (i = 0; i < rtd->num_codecs; i++) {
- 		struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
- 		count += dapm_widget_show_codec(codec, buf + count);
- 	}
- 
-+	mutex_unlock(&rtd->card->dapm_mutex);
-+
- 	return count;
- }
- 
-@@ -3100,16 +3109,10 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
- 	}
- 
- 	prefix = soc_dapm_prefix(dapm);
--	if (prefix) {
-+	if (prefix)
- 		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
--		if (widget->sname)
--			w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
--					     widget->sname);
--	} else {
-+	else
- 		w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
--		if (widget->sname)
--			w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
--	}
- 	if (w->name == NULL) {
- 		kfree(w);
- 		return NULL;
-@@ -3557,7 +3560,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
- 				break;
- 			}
- 
--			if (!w->sname || !strstr(w->sname, dai_w->name))
-+			if (!w->sname || !strstr(w->sname, dai_w->sname))
- 				continue;
- 
- 			if (dai_w->id == snd_soc_dapm_dai_in) {

diff --git a/4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch b/4.1.6/4420_grsecurity-3.1-4.1.6-201509112213.patch
similarity index 99%
rename from 4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch
rename to 4.1.6/4420_grsecurity-3.1-4.1.6-201509112213.patch
index 61bc4c1..c1cfd1d 100644
--- a/4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch
+++ b/4.1.6/4420_grsecurity-3.1-4.1.6-201509112213.patch
@@ -17690,7 +17690,7 @@ index 904f528..b4d0d24 100644
  
  #ifdef CONFIG_FLATMEM
 diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
-index b3bebf9..b7e1204 100644
+index b3bebf9..cb419e7 100644
 --- a/arch/x86/include/asm/page_64.h
 +++ b/arch/x86/include/asm/page_64.h
 @@ -7,9 +7,9 @@
@@ -17717,9 +17717,9 @@ index b3bebf9..b7e1204 100644
  #define __phys_addr(x)		__phys_addr_nodebug(x)
 -#define __phys_addr_symbol(x) \
 -	((unsigned long)(x) - __START_KERNEL_map + phys_base)
-+static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(const void *x)
++static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long x)
 +{
-+	return (unsigned long)x - __START_KERNEL_map + phys_base;
++	return x - __START_KERNEL_map + phys_base;
 +}
  #endif
  
@@ -34492,7 +34492,7 @@ index 5dc6ca5..25c03f5 100644
  		.callback = fix_broken_hp_bios_irq9,
  		.ident = "HP Pavilion N5400 Series Laptop",
 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
-index 9b83b90..4112152 100644
+index 9b83b90..2c256c5 100644
 --- a/arch/x86/pci/pcbios.c
 +++ b/arch/x86/pci/pcbios.c
 @@ -79,7 +79,7 @@ union bios32 {
@@ -34500,7 +34500,7 @@ index 9b83b90..4112152 100644
  	unsigned long address;
  	unsigned short segment;
 -} bios32_indirect __initdata = { 0, __KERNEL_CS };
-+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
++} bios32_indirect __initdata = { 0, __PCIBIOS_CS };
  
  /*
   * Returns the entry point for the given service, NULL on error
@@ -92195,7 +92195,7 @@ index 0000000..dbe0a6b
 +}
 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
 new file mode 100644
-index 0000000..0e39d8c
+index 0000000..0e39d8c7
 --- /dev/null
 +++ b/grsecurity/grsec_mem.c
 @@ -0,0 +1,48 @@
@@ -111389,7 +111389,7 @@ index 501820c..9612bcf 100644
  	}
  	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
 diff --git a/mm/memory.c b/mm/memory.c
-index 2a9e098..4574079 100644
+index 2a9e098..37435af 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -111497,7 +111497,7 @@ index 2a9e098..4574079 100644
  	if (!pud)
  		return -ENOMEM;
  	do {
-@@ -2040,6 +2066,185 @@ static inline int wp_page_reuse(struct mm_struct *mm,
+@@ -2040,6 +2066,196 @@ static inline int wp_page_reuse(struct mm_struct *mm,
  	return VM_FAULT_WRITE;
  }
  
@@ -111510,11 +111510,22 @@ index 2a9e098..4574079 100644
 +
 +	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 +	entry = *pte;
-+	if (!pte_present(entry)) {
-+		if (!pte_none(entry)) {
-+			free_swap_and_cache(pte_to_swp_entry(entry));
-+			pte_clear_not_present_full(mm, address, pte, 0);
++	if (pte_none(entry))
++		;
++	else if (!pte_present(entry)) {
++		swp_entry_t swapentry;
++
++		swapentry = pte_to_swp_entry(entry);
++		if (!non_swap_entry(swapentry))
++			dec_mm_counter_fast(mm, MM_SWAPENTS);
++		else if (is_migration_entry(swapentry)) {
++			if (PageAnon(migration_entry_to_page(swapentry)))
++				dec_mm_counter_fast(mm, MM_ANONPAGES);
++			else
++				dec_mm_counter_fast(mm, MM_FILEPAGES);
 +		}
++		free_swap_and_cache(swapentry);
++		pte_clear_not_present_full(mm, address, pte, 0);
 +	} else {
 +		struct page *page;
 +
@@ -111683,7 +111694,7 @@ index 2a9e098..4574079 100644
  /*
   * Handle the case of a page which we actually need to copy to a new page.
   *
-@@ -2093,6 +2298,12 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2093,6 +2309,12 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
  	 */
  	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -111696,7 +111707,7 @@ index 2a9e098..4574079 100644
  		if (old_page) {
  			if (!PageAnon(old_page)) {
  				dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2147,6 +2358,10 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2147,6 +2369,10 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
  			page_remove_rmap(old_page);
  		}
  
@@ -111707,7 +111718,7 @@ index 2a9e098..4574079 100644
  		/* Free the old page.. */
  		new_page = old_page;
  		page_copied = 1;
-@@ -2578,6 +2793,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2578,6 +2804,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	swap_free(entry);
  	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  		try_to_free_swap(page);
@@ -111719,7 +111730,7 @@ index 2a9e098..4574079 100644
  	unlock_page(page);
  	if (page != swapcache) {
  		/*
-@@ -2601,6 +2821,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2601,6 +2832,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -111731,7 +111742,7 @@ index 2a9e098..4574079 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  out:
-@@ -2620,40 +2845,6 @@ out_release:
+@@ -2620,40 +2856,6 @@ out_release:
  }
  
  /*
@@ -111772,7 +111783,7 @@ index 2a9e098..4574079 100644
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
   * We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -2663,31 +2854,29 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2663,31 +2865,29 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  		unsigned int flags)
  {
  	struct mem_cgroup *memcg;
@@ -111812,7 +111823,7 @@ index 2a9e098..4574079 100644
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  	page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -2711,6 +2900,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2711,6 +2911,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (!pte_none(*page_table))
  		goto release;
  
@@ -111824,7 +111835,7 @@ index 2a9e098..4574079 100644
  	inc_mm_counter_fast(mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, address);
  	mem_cgroup_commit_charge(page, memcg, false);
-@@ -2720,6 +2914,12 @@ setpte:
+@@ -2720,6 +2925,12 @@ setpte:
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -111837,7 +111848,7 @@ index 2a9e098..4574079 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  	return 0;
-@@ -2952,6 +3152,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2952,6 +3163,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  		return ret;
  	}
  	do_set_pte(vma, address, fault_page, pte, false, false);
@@ -111849,7 +111860,7 @@ index 2a9e098..4574079 100644
  	unlock_page(fault_page);
  unlock_out:
  	pte_unmap_unlock(pte, ptl);
-@@ -3003,7 +3208,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3003,7 +3219,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  		}
  		goto uncharge_out;
  	}
@@ -111868,7 +111879,7 @@ index 2a9e098..4574079 100644
  	mem_cgroup_commit_charge(new_page, memcg, false);
  	lru_cache_add_active_or_unevictable(new_page, vma);
  	pte_unmap_unlock(pte, ptl);
-@@ -3061,6 +3277,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3061,6 +3288,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  		return ret;
  	}
  	do_set_pte(vma, address, fault_page, pte, true, false);
@@ -111880,7 +111891,7 @@ index 2a9e098..4574079 100644
  	pte_unmap_unlock(pte, ptl);
  
  	if (set_page_dirty(fault_page))
-@@ -3286,6 +3507,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+@@ -3286,6 +3518,12 @@ static int handle_pte_fault(struct mm_struct *mm,
  		if (flags & FAULT_FLAG_WRITE)
  			flush_tlb_fix_spurious_fault(vma, address);
  	}
@@ -111893,7 +111904,7 @@ index 2a9e098..4574079 100644
  unlock:
  	pte_unmap_unlock(pte, ptl);
  	return 0;
-@@ -3305,9 +3532,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3305,9 +3543,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	pmd_t *pmd;
  	pte_t *pte;
  
@@ -111935,7 +111946,7 @@ index 2a9e098..4574079 100644
  	pgd = pgd_offset(mm, address);
  	pud = pud_alloc(mm, pgd, address);
  	if (!pud)
-@@ -3442,6 +3701,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3442,6 +3712,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -111959,7 +111970,7 @@ index 2a9e098..4574079 100644
  #endif /* __PAGETABLE_PUD_FOLDED */
  
  #ifndef __PAGETABLE_PMD_FOLDED
-@@ -3474,6 +3750,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3474,6 +3761,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -111992,7 +112003,7 @@ index 2a9e098..4574079 100644
  #endif /* __PAGETABLE_PMD_FOLDED */
  
  static int __follow_pte(struct mm_struct *mm, unsigned long address,
-@@ -3583,8 +3885,8 @@ out:
+@@ -3583,8 +3896,8 @@ out:
  	return ret;
  }
  
@@ -112003,7 +112014,7 @@ index 2a9e098..4574079 100644
  {
  	resource_size_t phys_addr;
  	unsigned long prot = 0;
-@@ -3610,8 +3912,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -3610,8 +3923,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
   * Access another process' address space as given in mm.  If non-NULL, use the
   * given task for page fault accounting.
   */
@@ -112014,7 +112025,7 @@ index 2a9e098..4574079 100644
  {
  	struct vm_area_struct *vma;
  	void *old_buf = buf;
-@@ -3619,7 +3921,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3619,7 +3932,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  	down_read(&mm->mmap_sem);
  	/* ignore errors, just check how much was successfully transferred */
  	while (len) {
@@ -112023,7 +112034,7 @@ index 2a9e098..4574079 100644
  		void *maddr;
  		struct page *page = NULL;
  
-@@ -3680,8 +3982,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3680,8 +3993,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
   *
   * The caller must hold a reference on @mm.
   */
@@ -112034,7 +112045,7 @@ index 2a9e098..4574079 100644
  {
  	return __access_remote_vm(NULL, mm, addr, buf, len, write);
  }
-@@ -3691,11 +3993,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3691,11 +4004,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
   * Source/target buffer must be kernel space,
   * Do not walk the page table directly, use get_user_pages
   */
@@ -120136,10 +120147,30 @@ index e51fc3e..8f04229 100644
  
  	kfree_skb(skb);
 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
-index f337a90..2a9a9db 100644
+index f337a90..ba0d2a0 100644
 --- a/net/ipv6/xfrm6_policy.c
 +++ b/net/ipv6/xfrm6_policy.c
-@@ -222,11 +222,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+@@ -185,7 +185,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 			return;
+ 
+ 		case IPPROTO_ICMPV6:
+-			if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
++			if (!onlyproto && (nh + offset + 2 < skb->data ||
++			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
+ 				u8 *icmp;
+ 
+ 				nh = skb_network_header(skb);
+@@ -199,7 +200,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ #if IS_ENABLED(CONFIG_IPV6_MIP6)
+ 		case IPPROTO_MH:
+ 			offset += ipv6_optlen(exthdr);
+-			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
++			if (!onlyproto && (nh + offset + 3 < skb->data ||
++			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
+ 				struct ip6_mh *mh;
+ 
+ 				nh = skb_network_header(skb);
+@@ -222,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
  	}
  }
  
@@ -120153,7 +120184,7 @@ index f337a90..2a9a9db 100644
  	return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
  }
  
-@@ -338,19 +338,19 @@ static struct ctl_table xfrm6_policy_table[] = {
+@@ -338,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
  
  static int __net_init xfrm6_net_init(struct net *net)
  {
@@ -120178,7 +120209,7 @@ index f337a90..2a9a9db 100644
  	if (!hdr)
  		goto err_reg;
  
-@@ -358,8 +358,7 @@ static int __net_init xfrm6_net_init(struct net *net)
+@@ -358,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
  	return 0;
  
  err_reg:
@@ -126713,10 +126744,10 @@ index 0000000..0c96d8a
 +}
 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
 new file mode 100644
-index 0000000..c5de280
+index 0000000..b884a56
 --- /dev/null
 +++ b/tools/gcc/constify_plugin.c
-@@ -0,0 +1,568 @@
+@@ -0,0 +1,564 @@
 +/*
 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
 + * Copyright 2011-2015 by PaX Team <pageexec@freemail.hu>
@@ -127084,11 +127115,7 @@ index 0000000..c5de280
 +
 +static void check_global_variables(void *event_data, void *data)
 +{
-+#if BUILDING_GCC_VERSION >= 4009
-+	varpool_node *node;
-+#else
-+	struct varpool_node *node;
-+#endif
++	varpool_node_ptr node;
 +
 +	FOR_EACH_VARIABLE(node) {
 +		tree var = NODE_DECL(node);
@@ -127287,10 +127314,10 @@ index 0000000..c5de280
 +}
 diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
 new file mode 100644
-index 0000000..70924d4
+index 0000000..9cf3947
 --- /dev/null
 +++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,787 @@
+@@ -0,0 +1,789 @@
 +#ifndef GCC_COMMON_H_INCLUDED
 +#define GCC_COMMON_H_INCLUDED
 +
@@ -127463,6 +127490,8 @@ index 0000000..70924d4
 +#define O_BINARY 0
 +#endif
 +
++typedef struct varpool_node *varpool_node_ptr;
++
 +static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
 +{
 +	tree fndecl;
@@ -129781,10 +129810,10 @@ index 0000000..ac6f9b4
 +}
 diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
 new file mode 100644
-index 0000000..40dcfa9
+index 0000000..06a039c
 --- /dev/null
 +++ b/tools/gcc/randomize_layout_plugin.c
-@@ -0,0 +1,922 @@
+@@ -0,0 +1,930 @@
 +/*
 + * Copyright 2014,2015 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
 + *                   and PaX Team <pageexec@freemail.hu>
@@ -130088,6 +130117,7 @@ index 0000000..40dcfa9
 +	unsigned long i;
 +	tree list;
 +	tree variant;
++	tree main_variant;
 +	expanded_location xloc;
 +
 +	if (TYPE_FIELDS(type) == NULL_TREE)
@@ -130150,15 +130180,22 @@ index 0000000..40dcfa9
 +		TREE_CHAIN(newtree[i]) = newtree[i+1];
 +	TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
 +
-+	for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
++	main_variant = TYPE_MAIN_VARIANT(type);
++	for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant)) {
 +		TYPE_FIELDS(variant) = list;
 +		TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
 +		TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
-+		// force a re-layout
-+		TYPE_SIZE(variant) = NULL_TREE;
-+		layout_type(variant);
 +	}
 +
++	/*
++	 * force a re-layout of the main variant
++	 * the TYPE_SIZE for all variants will be recomputed
++	 * by finalize_type_size()
++	 */
++	TYPE_SIZE(main_variant) = NULL_TREE;
++	layout_type(main_variant);
++	gcc_assert(TYPE_SIZE(main_variant) != NULL_TREE);
++
 +	return 1;
 +}
 +

