* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-08-06 19:34 Mike Pagano
From: Mike Pagano @ 2017-08-06 19:34 UTC
To: gentoo-commits
commit: 0f23b605fb69e470f285ce960cb4fd7e0492050e
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 6 19:34:27 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 6 19:34:27 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f23b605
Linux patch 4.12.5
0000_README | 4 +
1004_linux-4.12.5.patch | 997 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1001 insertions(+)
diff --git a/0000_README b/0000_README
index 09d6e6c..29e1ca2 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-4.12.4.patch
From: http://www.kernel.org
Desc: Linux 4.12.4
+Patch: 1004_linux-4.12.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1004_linux-4.12.5.patch b/1004_linux-4.12.5.patch
new file mode 100644
index 0000000..0b6a672
--- /dev/null
+++ b/1004_linux-4.12.5.patch
@@ -0,0 +1,997 @@
+diff --git a/Makefile b/Makefile
+index bfdc92c2e47a..382e967b0792 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index c32a09095216..85a92db70afc 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -453,8 +453,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ before it can be accessed through the kernel mapping. */
+ preempt_disable();
+ flush_dcache_page_asm(__pa(vfrom), vaddr);
+- preempt_enable();
+ copy_page_asm(vto, vfrom);
++ preempt_enable();
+ }
+ EXPORT_SYMBOL(copy_user_page);
+
+@@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm)
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
++ /* Flush the TLB to avoid speculation if coherency is required. */
++ if (parisc_requires_coherency())
++ flush_tlb_all();
++
+ /* Flushing the whole cache on each cpu takes forever on
+ rp3440, etc. So, avoid it if the mm isn't too big. */
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+@@ -577,33 +581,22 @@ void flush_cache_mm(struct mm_struct *mm)
+ void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+- unsigned long addr;
+- pgd_t *pgd;
+-
+ BUG_ON(!vma->vm_mm->context);
+
++ /* Flush the TLB to avoid speculation if coherency is required. */
++ if (parisc_requires_coherency())
++ flush_tlb_range(vma, start, end);
++
+ if ((end - start) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+- if (vma->vm_mm->context == mfsp(3)) {
+- flush_user_dcache_range_asm(start, end);
+- if (vma->vm_flags & VM_EXEC)
+- flush_user_icache_range_asm(start, end);
+- return;
+- }
++ BUG_ON(vma->vm_mm->context != mfsp(3));
+
+- pgd = vma->vm_mm->pgd;
+- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+- unsigned long pfn;
+- pte_t *ptep = get_ptep(pgd, addr);
+- if (!ptep)
+- continue;
+- pfn = pte_pfn(*ptep);
+- if (pfn_valid(pfn))
+- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+- }
++ flush_user_dcache_range_asm(start, end);
++ if (vma->vm_flags & VM_EXEC)
++ flush_user_icache_range_asm(start, end);
+ }
+
+ void
+@@ -612,7 +605,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
+ BUG_ON(!vma->vm_mm->context);
+
+ if (pfn_valid(pfn)) {
+- flush_tlb_page(vma, vmaddr);
++ if (parisc_requires_coherency())
++ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
+ }
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index b64d7d21646e..a45a67d526f8 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -53,6 +53,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/rcupdate.h>
+ #include <linux/random.h>
++#include <linux/nmi.h>
+
+ #include <asm/io.h>
+ #include <asm/asm-offsets.h>
+@@ -145,6 +146,7 @@ void machine_power_off(void)
+
+ /* prevent soft lockup/stalled CPU messages for endless loop. */
+ rcu_sysrq_start();
++ lockup_detector_suspend();
+ for (;;);
+ }
+
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+index 710e491206ed..1c10e26cebbb 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -164,8 +164,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
+ goto out;
+ }
+
+- if (kvm->arch.hpt.virt)
++ if (kvm->arch.hpt.virt) {
+ kvmppc_free_hpt(&kvm->arch.hpt);
++ kvmppc_rmap_reset(kvm);
++ }
+
+ err = kvmppc_allocate_hpt(&info, order);
+ if (err < 0)
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 8d1a365b8edc..1d3602f7ec22 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2938,6 +2938,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
++ /* Enable TM so we can read the TM SPRs */
++ mtmsr(mfmsr() | MSR_TM);
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
+index e5bf1e84047f..011ef2180fe6 100644
+--- a/arch/powerpc/platforms/pseries/reconfig.c
++++ b/arch/powerpc/platforms/pseries/reconfig.c
+@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
+
+ of_detach_node(np);
+ of_node_put(parent);
+- of_node_put(np); /* Must decrement the refcount */
+ return 0;
+ }
+
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index 6f8f6b86bfe2..0cf5fefdb859 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
+ u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+ u32 tmp[2];
+
++ if (!authsize)
++ goto decrypt;
++
+ /* Move high-order bits of sequence number back. */
+ scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+@@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
+ if (crypto_memneq(ihash, ohash, authsize))
+ return -EBADMSG;
+
++decrypt:
++
+ sg_init_table(areq_ctx->dst, 2);
+ dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
+
+diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
+index d165af8abe36..4161d9961a24 100644
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -1163,10 +1163,11 @@ static int wdog_reboot_handler(struct notifier_block *this,
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+- /* Set a long timer to let the reboot happens, but
+- reboot if it hangs, but only if the watchdog
++ /* Set a long timer to let the reboot happen or
++ reset if it hangs, but only if the watchdog
+ timer was already running. */
+- timeout = 120;
++ if (timeout < 120)
++ timeout = 120;
+ pretimeout = 0;
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index fb1e60f5002e..778fc1bcccee 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -629,7 +629,7 @@ source "drivers/crypto/virtio/Kconfig"
+ config CRYPTO_DEV_BCM_SPU
+ tristate "Broadcom symmetric crypto/hash acceleration support"
+ depends on ARCH_BCM_IPROC
+- depends on BCM_PDC_MBOX
++ depends on MAILBOX
+ default m
+ select CRYPTO_DES
+ select CRYPTO_MD5
+diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
+index ef04c9748317..bf7ac621c591 100644
+--- a/drivers/crypto/bcm/spu2.c
++++ b/drivers/crypto/bcm/spu2.c
+@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
+ break;
+ case HASH_ALG_SHA3_512:
+ *spu2_type = SPU2_HASH_TYPE_SHA3_512;
++ break;
+ case HASH_ALG_LAST:
+ default:
+ err = -EINVAL;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 9106ea32b048..881df8843e66 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9085,6 +9085,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+ u64 power_domain_mask;
+ bool active;
+
++ if (INTEL_GEN(dev_priv) >= 9) {
++ intel_crtc_init_scalers(crtc, pipe_config);
++
++ pipe_config->scaler_state.scaler_id = -1;
++ pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
++ }
++
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+@@ -9113,13 +9120,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+ pipe_config->gamma_mode =
+ I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+
+- if (INTEL_GEN(dev_priv) >= 9) {
+- intel_crtc_init_scalers(crtc, pipe_config);
+-
+- pipe_config->scaler_state.scaler_id = -1;
+- pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+- }
+-
+ power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ power_domain_mask |= BIT_ULL(power_domain);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+index 1e1de6bfe85a..5893be9788d3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+@@ -27,7 +27,7 @@ struct nv50_disp {
+ u8 type[3];
+ } pior;
+
+- struct nv50_disp_chan *chan[17];
++ struct nv50_disp_chan *chan[21];
+ };
+
+ int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+index c794b2c2d21e..6d8f21290aa2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
+
+ if (bar->bar[0].mem) {
+ addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
++ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index c7b53d987f06..fefb9d995d2c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+ {
+- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
++ return -EINVAL;
+ }
+
+ static int vmw_cmd_ok(struct vmw_private *dev_priv,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 50be1f034f9e..5284e8d2f7ba 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+ * something arbitrarily large and we will reject any layout
+ * that doesn't fit prim_bb_mem later
+ */
+- dev->mode_config.max_width = 16384;
+- dev->mode_config.max_height = 16384;
++ dev->mode_config.max_width = 8192;
++ dev->mode_config.max_height = 8192;
+ }
+
+ vmw_kms_create_implicit_placement_property(dev_priv, false);
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index 9b856e1890d1..e4c43a17b333 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
+ if (arg) {
+ if (copy_from_user(bname, argp, sizeof(bname) - 1))
+ return -EFAULT;
++ bname[sizeof(bname)-1] = 0;
+ } else
+ return -EINVAL;
+ ret = mutex_lock_interruptible(&dev->mtx);
+diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
+index c151c6daa67e..f63a110b7bcb 100644
+--- a/drivers/isdn/i4l/isdn_net.c
++++ b/drivers/isdn/i4l/isdn_net.c
+@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
+ char newname[10];
+
+ if (p) {
+- /* Slave-Name MUST not be empty */
+- if (!strlen(p + 1))
++ /* Slave-Name MUST not be empty or overflow 'newname' */
++ if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
+ return NULL;
+- strcpy(newname, p + 1);
+ *p = 0;
+ /* Master must already exist */
+ if (!(n = isdn_net_findif(parm)))
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 93b181088168..b68e21c25a17 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1587,16 +1587,18 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ if (likely(ic->mode == 'J')) {
+ if (dio->write) {
+ unsigned next_entry, i, pos;
+- unsigned ws, we;
++ unsigned ws, we, range_sectors;
+
+- dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
++ dio->range.n_sectors = min(dio->range.n_sectors,
++ ic->free_sectors << ic->sb->log2_sectors_per_block);
+ if (unlikely(!dio->range.n_sectors))
+ goto sleep;
+- ic->free_sectors -= dio->range.n_sectors;
++ range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
++ ic->free_sectors -= range_sectors;
+ journal_section = ic->free_section;
+ journal_entry = ic->free_section_entry;
+
+- next_entry = ic->free_section_entry + dio->range.n_sectors;
++ next_entry = ic->free_section_entry + range_sectors;
+ ic->free_section_entry = next_entry % ic->journal_section_entries;
+ ic->free_section += next_entry / ic->journal_section_entries;
+ ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
+@@ -3019,6 +3021,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ ti->error = "Block size doesn't match the information in superblock";
+ goto bad;
+ }
++ if (!le32_to_cpu(ic->sb->journal_sections)) {
++ r = -EINVAL;
++ ti->error = "Corrupted superblock, journal_sections is 0";
++ goto bad;
++ }
+ /* make sure that ti->max_io_len doesn't overflow */
+ if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
+ ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 63d342d560b8..33611a91b1d9 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -733,7 +733,6 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
+
+ /* for managing resync I/O pages */
+ struct resync_pages {
+- unsigned idx; /* for get/put page from the pool */
+ void *raid_bio;
+ struct page *pages[RESYNC_PAGES];
+ };
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7866563338fa..5de4b3d04eb5 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -170,7 +170,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+ resync_get_all_pages(rp);
+ }
+
+- rp->idx = 0;
+ rp->raid_bio = r1_bio;
+ bio->bi_private = rp;
+ }
+@@ -492,10 +491,6 @@ static void raid1_end_write_request(struct bio *bio)
+ }
+
+ if (behind) {
+- /* we release behind master bio when all write are done */
+- if (r1_bio->behind_master_bio == bio)
+- to_put = NULL;
+-
+ if (test_bit(WriteMostly, &rdev->flags))
+ atomic_dec(&r1_bio->behind_remaining);
+
+@@ -1088,7 +1083,7 @@ static void unfreeze_array(struct r1conf *conf)
+ wake_up(&conf->wait_barrier);
+ }
+
+-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
++static void alloc_behind_master_bio(struct r1bio *r1_bio,
+ struct bio *bio)
+ {
+ int size = bio->bi_iter.bi_size;
+@@ -1098,11 +1093,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+
+ behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
+ if (!behind_bio)
+- goto fail;
++ return;
+
+ /* discard op, we don't support writezero/writesame yet */
+- if (!bio_has_data(bio))
++ if (!bio_has_data(bio)) {
++ behind_bio->bi_iter.bi_size = size;
+ goto skip_copy;
++ }
+
+ while (i < vcnt && size) {
+ struct page *page;
+@@ -1123,14 +1120,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+ r1_bio->behind_master_bio = behind_bio;;
+ set_bit(R1BIO_BehindIO, &r1_bio->state);
+
+- return behind_bio;
++ return;
+
+ free_pages:
+ pr_debug("%dB behind alloc failed, doing sync I/O\n",
+ bio->bi_iter.bi_size);
+ bio_free_pages(behind_bio);
+-fail:
+- return behind_bio;
++ bio_put(behind_bio);
+ }
+
+ struct raid1_plug_cb {
+@@ -1483,7 +1479,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ (atomic_read(&bitmap->behind_writes)
+ < mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait)) {
+- mbio = alloc_behind_master_bio(r1_bio, bio);
++ alloc_behind_master_bio(r1_bio, bio);
+ }
+
+ bitmap_startwrite(bitmap, r1_bio->sector,
+@@ -1493,14 +1489,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ first_clone = 0;
+ }
+
+- if (!mbio) {
+- if (r1_bio->behind_master_bio)
+- mbio = bio_clone_fast(r1_bio->behind_master_bio,
+- GFP_NOIO,
+- mddev->bio_set);
+- else
+- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+- }
++ if (r1_bio->behind_master_bio)
++ mbio = bio_clone_fast(r1_bio->behind_master_bio,
++ GFP_NOIO, mddev->bio_set);
++ else
++ mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+
+ if (r1_bio->behind_master_bio) {
+ if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
+@@ -2368,8 +2361,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
+ wbio = bio_clone_fast(r1_bio->behind_master_bio,
+ GFP_NOIO,
+ mddev->bio_set);
+- /* We really need a _all clone */
+- wbio->bi_iter = (struct bvec_iter){ 0 };
+ } else {
+ wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
+ mddev->bio_set);
+@@ -2621,6 +2612,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ int good_sectors = RESYNC_SECTORS;
+ int min_bad = 0; /* number of sectors that are bad in all devices */
+ int idx = sector_to_idx(sector_nr);
++ int page_idx = 0;
+
+ if (!conf->r1buf_pool)
+ if (init_resync(conf))
+@@ -2848,7 +2840,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ bio = r1_bio->bios[i];
+ rp = get_resync_pages(bio);
+ if (bio->bi_end_io) {
+- page = resync_fetch_page(rp, rp->idx++);
++ page = resync_fetch_page(rp, page_idx);
+
+ /*
+ * won't fail because the vec table is big
+@@ -2860,7 +2852,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ nr_sectors += len>>9;
+ sector_nr += len>>9;
+ sync_blocks -= (len>>9);
+- } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
++ } while (++page_idx < RESYNC_PAGES);
+
+ r1_bio->sectors = nr_sectors;
+
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 52acffa7a06a..bfc6db236348 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -221,7 +221,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
+ resync_get_all_pages(rp);
+ }
+
+- rp->idx = 0;
+ rp->raid_bio = r10_bio;
+ bio->bi_private = rp;
+ if (rbio) {
+@@ -2853,6 +2852,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ sector_t sectors_skipped = 0;
+ int chunks_skipped = 0;
+ sector_t chunk_mask = conf->geo.chunk_mask;
++ int page_idx = 0;
+
+ if (!conf->r10buf_pool)
+ if (init_resync(conf))
+@@ -3355,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ break;
+ for (bio= biolist ; bio ; bio=bio->bi_next) {
+ struct resync_pages *rp = get_resync_pages(bio);
+- page = resync_fetch_page(rp, rp->idx++);
++ page = resync_fetch_page(rp, page_idx);
+ /*
+ * won't fail because the vec table is big enough
+ * to hold all these pages
+@@ -3364,7 +3364,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ }
+ nr_sectors += len>>9;
+ sector_nr += len>>9;
+- } while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
++ } while (++page_idx < RESYNC_PAGES);
+ r10_bio->sectors = nr_sectors;
+
+ while (biolist) {
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index d524daddc630..e92dd2dc4b5a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6237,6 +6237,8 @@ static void raid5_do_work(struct work_struct *work)
+ pr_debug("%d stripes handled\n", handled);
+
+ spin_unlock_irq(&conf->device_lock);
++
++ async_tx_issue_pending_all();
+ blk_finish_plug(&plug);
+
+ pr_debug("--- raid5worker inactive\n");
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index d6fa2214aaae..0fb4e4c119e1 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
+ }
+ mmc_writel(host, REG_CLKCR, rval);
+
+- if (host->cfg->needs_new_timings)
+- mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
++ if (host->cfg->needs_new_timings) {
++ /* Don't touch the delay bits */
++ rval = mmc_readl(host, REG_SD_NTSR);
++ rval |= SDXC_2X_TIMING_MODE;
++ mmc_writel(host, REG_SD_NTSR, rval);
++ }
+
+ ret = sunxi_mmc_clk_set_phase(host, ios, rate);
+ if (ret)
+diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
+index a2d92f10501b..a3d20e39e5b5 100644
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -404,30 +404,29 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
+ * Transfer the data
+ */
+ if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
+- u8 data[4] = { };
++ u32 data = 0;
++ u32 *buf32 = (u32 *)buf;
+
+ if (is_read)
+- sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
++ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
+ count >> 2);
+ else
+- sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
++ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
+ count >> 2);
+
+ /* if count was multiple of 4 */
+ if (!(count & 0x3))
+ return;
+
+- buf8 = (u8 *)(buf + (count >> 2));
++ buf32 += count >> 2;
+ count %= 4;
+
+ if (is_read) {
+- sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
+- (u32 *)data, 1);
+- memcpy(buf8, data, count);
++ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
++ memcpy(buf32, &data, count);
+ } else {
+- memcpy(data, buf8, count);
+- sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
+- (u32 *)data, 1);
++ memcpy(&data, buf32, count);
++ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
+ }
+
+ return;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 1161390f4935..736754c5ab63 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -277,6 +277,33 @@ static inline int dname_external(const struct dentry *dentry)
+ return dentry->d_name.name != dentry->d_iname;
+ }
+
++void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
++{
++ spin_lock(&dentry->d_lock);
++ if (unlikely(dname_external(dentry))) {
++ struct external_name *p = external_name(dentry);
++ atomic_inc(&p->u.count);
++ spin_unlock(&dentry->d_lock);
++ name->name = p->name;
++ } else {
++ memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
++ spin_unlock(&dentry->d_lock);
++ name->name = name->inline_name;
++ }
++}
++EXPORT_SYMBOL(take_dentry_name_snapshot);
++
++void release_dentry_name_snapshot(struct name_snapshot *name)
++{
++ if (unlikely(name->name != name->inline_name)) {
++ struct external_name *p;
++ p = container_of(name->name, struct external_name, name[0]);
++ if (unlikely(atomic_dec_and_test(&p->u.count)))
++ kfree_rcu(p, u.head);
++ }
++}
++EXPORT_SYMBOL(release_dentry_name_snapshot);
++
+ static inline void __d_set_inode_and_type(struct dentry *dentry,
+ struct inode *inode,
+ unsigned type_flags)
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index e892ae7d89f8..acd3be2cc691 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -766,7 +766,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ {
+ int error;
+ struct dentry *dentry = NULL, *trap;
+- const char *old_name;
++ struct name_snapshot old_name;
+
+ trap = lock_rename(new_dir, old_dir);
+ /* Source or destination directories don't exist? */
+@@ -781,19 +781,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
+ goto exit;
+
+- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
++ take_dentry_name_snapshot(&old_name, old_dentry);
+
+ error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
+ dentry, 0);
+ if (error) {
+- fsnotify_oldname_free(old_name);
++ release_dentry_name_snapshot(&old_name);
+ goto exit;
+ }
+ d_move(old_dentry, dentry);
+- fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name,
++ fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name,
+ d_is_dir(old_dentry),
+ NULL, old_dentry);
+- fsnotify_oldname_free(old_name);
++ release_dentry_name_snapshot(&old_name);
+ unlock_rename(new_dir, old_dir);
+ dput(dentry);
+ return old_dentry;
+diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
+index 7bc186f4ed4d..1be45c8d460d 100644
+--- a/fs/jfs/acl.c
++++ b/fs/jfs/acl.c
+@@ -77,13 +77,6 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
+- if (acl) {
+- rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+- if (rc)
+- return rc;
+- inode->i_ctime = current_time(inode);
+- mark_inode_dirty(inode);
+- }
+ break;
+ case ACL_TYPE_DEFAULT:
+ ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
+@@ -118,9 +111,17 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+
+ tid = txBegin(inode->i_sb, 0);
+ mutex_lock(&JFS_IP(inode)->commit_mutex);
++ if (type == ACL_TYPE_ACCESS && acl) {
++ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++ if (rc)
++ goto end_tx;
++ inode->i_ctime = current_time(inode);
++ mark_inode_dirty(inode);
++ }
+ rc = __jfs_set_acl(tid, inode, type, acl);
+ if (!rc)
+ rc = txCommit(tid, 1, &inode, 0);
++end_tx:
+ txEnd(tid);
+ mutex_unlock(&JFS_IP(inode)->commit_mutex);
+ return rc;
+diff --git a/fs/namei.c b/fs/namei.c
+index 6571a5f5112e..281c1f7fa983 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -4362,11 +4362,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ {
+ int error;
+ bool is_dir = d_is_dir(old_dentry);
+- const unsigned char *old_name;
+ struct inode *source = old_dentry->d_inode;
+ struct inode *target = new_dentry->d_inode;
+ bool new_is_dir = false;
+ unsigned max_links = new_dir->i_sb->s_max_links;
++ struct name_snapshot old_name;
+
+ if (source == target)
+ return 0;
+@@ -4413,7 +4413,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (error)
+ return error;
+
+- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
++ take_dentry_name_snapshot(&old_name, old_dentry);
+ dget(new_dentry);
+ if (!is_dir || (flags & RENAME_EXCHANGE))
+ lock_two_nondirectories(source, target);
+@@ -4468,14 +4468,14 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ inode_unlock(target);
+ dput(new_dentry);
+ if (!error) {
+- fsnotify_move(old_dir, new_dir, old_name, is_dir,
++ fsnotify_move(old_dir, new_dir, old_name.name, is_dir,
+ !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
+ if (flags & RENAME_EXCHANGE) {
+ fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
+ new_is_dir, NULL, new_dentry);
+ }
+ }
+- fsnotify_oldname_free(old_name);
++ release_dentry_name_snapshot(&old_name);
+
+ return error;
+ }
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 5713eb32a45e..d264363559db 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -750,7 +750,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
+ */
+ nfs_sync_mapping(filp->f_mapping);
+ if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+- nfs_zap_mapping(inode, filp->f_mapping);
++ nfs_zap_caches(inode);
+ out:
+ return status;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index dbfa18900e25..f5a7faac39a7 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6441,7 +6441,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+- freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT);
++ freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
+ }
+
+ finish_wait(q, &wait);
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 01a9f0f007d4..0c4583b61717 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -161,16 +161,20 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
+ if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+ __fsnotify_update_child_dentry_flags(p_inode);
+ else if (p_inode->i_fsnotify_mask & mask) {
++ struct name_snapshot name;
++
+ /* we are notifying a parent so come up with the new mask which
+ * specifies these are events which came from a child. */
+ mask |= FS_EVENT_ON_CHILD;
+
++ take_dentry_name_snapshot(&name, dentry);
+ if (path)
+ ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
+- dentry->d_name.name, 0);
++ name.name, 0);
+ else
+ ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
+- dentry->d_name.name, 0);
++ name.name, 0);
++ release_dentry_name_snapshot(&name);
+ }
+
+ dput(parent);
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index d2e38dc6172c..025727bf6797 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -591,5 +591,11 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
+ return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
+ }
+
++struct name_snapshot {
++ const char *name;
++ char inline_name[DNAME_INLINE_LEN];
++};
++void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
++void release_dentry_name_snapshot(struct name_snapshot *);
+
+ #endif /* __LINUX_DCACHE_H */
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index b43d3f5bd9ea..b78aa7ac77ce 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -293,35 +293,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+ }
+ }
+
+-#if defined(CONFIG_FSNOTIFY) /* notify helpers */
+-
+-/*
+- * fsnotify_oldname_init - save off the old filename before we change it
+- */
+-static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+-{
+- return kstrdup(name, GFP_KERNEL);
+-}
+-
+-/*
+- * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
+- */
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+- kfree(old_name);
+-}
+-
+-#else /* CONFIG_FSNOTIFY */
+-
+-static inline const char *fsnotify_oldname_init(const unsigned char *name)
+-{
+- return NULL;
+-}
+-
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+-}
+-
+-#endif /* CONFIG_FSNOTIFY */
+-
+ #endif /* _LINUX_FS_NOTIFY_H */
+diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
+index ec47f95991a3..586cccea46ce 100755
+--- a/scripts/dtc/dtx_diff
++++ b/scripts/dtc/dtx_diff
+@@ -321,7 +321,7 @@ fi
+ cpp_flags="\
+ -nostdinc \
+ -I${srctree}/arch/${ARCH}/boot/dts \
+- -I${srctree}/arch/${ARCH}/boot/dts/include \
++ -I${srctree}/scripts/dtc/include-prefixes \
+ -I${srctree}/drivers/of/testcase-data \
+ -undef -D__DTS__"
+
+diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
+index c47287d79306..a178e0d03088 100644
+--- a/sound/pci/fm801.c
++++ b/sound/pci/fm801.c
+@@ -1235,8 +1235,6 @@ static int snd_fm801_create(struct snd_card *card,
+ }
+ }
+
+- snd_fm801_chip_init(chip);
+-
+ if ((chip->tea575x_tuner & TUNER_ONLY) == 0) {
+ if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip)) {
+@@ -1248,6 +1246,8 @@ static int snd_fm801_create(struct snd_card *card,
+ pci_set_master(pci);
+ }
+
++ snd_fm801_chip_init(chip);
++
+ if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ snd_fm801_free(chip);
+ return err;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 63bc894ddf5e..8c1289963c80 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -933,6 +933,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 90e4ff87445e..c87ff8e5d1d5 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3757,11 +3757,15 @@ HDA_CODEC_ENTRY(0x1002aa01, "R6xx HDMI", patch_atihdmi),
+ HDA_CODEC_ENTRY(0x10951390, "SiI1390 HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x10951392, "SiI1392 HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x17e80047, "Chrontel HDMI", patch_generic_hdmi),
++HDA_CODEC_ENTRY(0x10de0001, "MCP73 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de0002, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0003, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
++HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x),
++HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi),
+@@ -3788,17 +3792,40 @@ HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0043, "GPU 43 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0044, "GPU 44 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0045, "GPU 45 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0050, "GPU 50 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0051, "GPU 51 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0052, "GPU 52 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0060, "GPU 60 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0061, "GPU 61 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0062, "GPU 62 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0067, "MCP67 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0073, "GPU 73 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0074, "GPU 74 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0076, "GPU 76 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007b, "GPU 7b HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007c, "GPU 7c HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007e, "GPU 7e HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0081, "GPU 81 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0084, "GPU 84 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0090, "GPU 90 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0091, "GPU 91 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0092, "GPU 92 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0093, "GPU 93 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0094, "GPU 94 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
++HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi),
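
The fs/dcache.c hunk above introduces take_dentry_name_snapshot()/release_dentry_name_snapshot(), which pin a dentry's name under d_lock (either bumping the external-name refcount or copying the inline name), replacing the kstrdup()-based fsnotify_oldname_init() that read d_name.name without the lock. Below is a minimal userspace sketch of the same pin-or-copy pattern; the types and helper names are simplified stand-ins, not the kernel API:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define INLINE_LEN 32			/* stand-in for DNAME_INLINE_LEN */

	/* Long names live out-of-line and are refcounted, as in the
	 * kernel's struct external_name. */
	struct ext_name {
		int refcount;			/* atomic_t under d_lock in the kernel */
		char name[];
	};

	struct dentry_like {
		struct ext_name *ext;		/* NULL when the name fits inline */
		char inline_name[INLINE_LEN];
	};

	struct snapshot {
		const char *name;
		char inline_name[INLINE_LEN];
	};

	/* take_dentry_name_snapshot() analog: copy short names, pin long ones. */
	static void take_snapshot(struct snapshot *s, struct dentry_like *d)
	{
		if (d->ext) {			/* kernel: under spin_lock(&dentry->d_lock) */
			d->ext->refcount++;
			s->name = d->ext->name;
		} else {
			memcpy(s->inline_name, d->inline_name, INLINE_LEN);
			s->name = s->inline_name;
		}
	}

	/* release_dentry_name_snapshot() analog: drop the pin if we took one. */
	static void release_snapshot(struct snapshot *s)
	{
		if (s->name != s->inline_name) {
			struct ext_name *p = (struct ext_name *)
				((char *)s->name - offsetof(struct ext_name, name));
			if (--p->refcount == 0)
				free(p);
		}
	}

	int main(void)
	{
		const char *long_name = "a-name-too-long-for-the-inline-buffer";
		struct ext_name *p = malloc(sizeof(*p) + strlen(long_name) + 1);
		struct dentry_like d = { .ext = p };
		struct snapshot snap;

		p->refcount = 1;
		strcpy(p->name, long_name);

		take_snapshot(&snap, &d);	/* refcount: 2 */

		/* A concurrent rename may drop the dentry's own reference... */
		if (--d.ext->refcount == 0)
			free(d.ext);
		d.ext = NULL;

		/* ...but the snapshot keeps the name alive. */
		printf("still valid: %s\n", snap.name);
		release_snapshot(&snap);	/* last reference: freed here */
		return 0;
	}

This is also why the vfs_rename() and debugfs_rename() hunks take the snapshot before the rename and release it only after the fsnotify calls: the snapshot keeps the old name valid even if the rename retargets or frees the dentry's name.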
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-08-11 17:40 Mike Pagano
From: Mike Pagano @ 2017-08-11 17:40 UTC
To: gentoo-commits
commit: c20ad5072b7e3f69f4ff535dd534453ad3d7b8ec
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 11 17:39:51 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 11 17:39:51 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c20ad507
Linux patch 4.12.6
0000_README | 4 +
1005_linux-4.12.6.patch | 3935 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3939 insertions(+)
diff --git a/0000_README b/0000_README
index 29e1ca2..b88e1e0 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-4.12.5.patch
From: http://www.kernel.org
Desc: Linux 4.12.5
+Patch: 1005_linux-4.12.6.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1005_linux-4.12.6.patch b/1005_linux-4.12.6.patch
new file mode 100644
index 0000000..461714c
--- /dev/null
+++ b/1005_linux-4.12.6.patch
@@ -0,0 +1,3935 @@
+diff --git a/Makefile b/Makefile
+index 382e967b0792..c8d80b50495a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
+index 895fa6cfa15a..563901e0ec07 100644
+--- a/arch/arm/boot/dts/armada-388-gp.dts
++++ b/arch/arm/boot/dts/armada-388-gp.dts
+@@ -75,7 +75,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pca0_pins>;
+ interrupt-parent = <&gpio0>;
+- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+@@ -87,7 +87,7 @@
+ compatible = "nxp,pca9555";
+ pinctrl-names = "default";
+ interrupt-parent = <&gpio0>;
+- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
+index 86d8df98802f..13bcc460bcb2 100644
+--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
++++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
+@@ -22,7 +22,7 @@
+ };
+
+ &eth0 {
+- phy-connection-type = "rgmii";
++ phy-connection-type = "rgmii-id";
+ phy-handle = <&eth0_phy>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
+index e62273aacb43..4ffbbd217e82 100644
+--- a/arch/arm/mach-mvebu/platsmp.c
++++ b/arch/arm/mach-mvebu/platsmp.c
+@@ -211,7 +211,7 @@ static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
+ return PTR_ERR(base);
+
+ writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG);
+- writel(virt_to_phys(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
++ writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
+
+ iounmap(base);
+
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index bc179efb10ef..b69e4a4ecdd8 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -219,7 +219,7 @@
+ reg = <0x18800 0x100>, <0x18C00 0x20>;
+ gpiosb: gpio {
+ #gpio-cells = <2>;
+- gpio-ranges = <&pinctrl_sb 0 0 29>;
++ gpio-ranges = <&pinctrl_sb 0 0 30>;
+ gpio-controller;
+ interrupts =
+ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
+index 9df1a53bcb36..b4e7dfa214eb 100644
+--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
++++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
+@@ -13,6 +13,8 @@
+ #ifndef _RALINK_REGS_H_
+ #define _RALINK_REGS_H_
+
++#include <linux/io.h>
++
+ enum ralink_soc_type {
+ RALINK_UNKNOWN = 0,
+ RT2880_SOC,
+diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
+index 88fe0aad4390..bc208136bbb2 100644
+--- a/arch/parisc/include/asm/thread_info.h
++++ b/arch/parisc/include/asm/thread_info.h
+@@ -34,7 +34,7 @@ struct thread_info {
+
+ /* thread information allocation */
+
+-#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */
++#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */
+ /* Be sure to hunt all references to this down when you change the size of
+ * the kernel stack */
+ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 85a92db70afc..19c0c141bc3f 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -587,13 +587,12 @@ void flush_cache_range(struct vm_area_struct *vma,
+ if (parisc_requires_coherency())
+ flush_tlb_range(vma, start, end);
+
+- if ((end - start) >= parisc_cache_flush_threshold) {
++ if ((end - start) >= parisc_cache_flush_threshold
++ || vma->vm_mm->context != mfsp(3)) {
+ flush_cache_all();
+ return;
+ }
+
+- BUG_ON(vma->vm_mm->context != mfsp(3));
+-
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index ba5e1c7b1f17..ef9a4eea662f 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr)
+ /*
+ * IRQ STACK - used for irq handler
+ */
+-#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
++#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
+
+ union irq_stack_union {
+ unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 5c291df30fe3..40d8b552d15a 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -145,6 +145,19 @@ notrace unsigned int __check_irq_replay(void)
+
+ /* Clear bit 0 which we wouldn't clear otherwise */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
++ if (happened & PACA_IRQ_HARD_DIS) {
++ /*
++ * We may have missed a decrementer interrupt if hard disabled.
++ * Check the decrementer register in case we had a rollover
++ * while hard disabled.
++ */
++ if (!(happened & PACA_IRQ_DEC)) {
++ if (decrementer_check_overflow()) {
++ local_paca->irq_happened |= PACA_IRQ_DEC;
++ happened |= PACA_IRQ_DEC;
++ }
++ }
++ }
+
+ /*
+ * Force the delivery of pending soft-disabled interrupts on PS3.
+@@ -170,7 +183,7 @@ notrace unsigned int __check_irq_replay(void)
+ * in case we also had a rollover while hard disabled
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_DEC;
+- if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
++ if (happened & PACA_IRQ_DEC)
+ return 0x900;
+
+ /* Finally check if an external interrupt happened */
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 925a4ef90559..660ed39e9c9a 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
+ * If task is not current, it will have been flushed already to
+ * it's thread_struct during __switch_to().
+ *
+- * A reclaim flushes ALL the state.
++ * A reclaim flushes ALL the state; if not in TM, the live TM SPRs
++ * are saved into the appropriate thread structures instead.
+ */
+
+- if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
+- tm_reclaim_current(TM_CAUSE_SIGNAL);
++ if (tsk != current)
++ return;
+
++ if (MSR_TM_SUSPENDED(mfmsr())) {
++ tm_reclaim_current(TM_CAUSE_SIGNAL);
++ } else {
++ tm_enable();
++ tm_save_sprs(&(tsk->thread));
++ }
+ }
+ #else
+ static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
+index 2cddcda4f85f..87841d687f8d 100644
+--- a/arch/sparc/include/asm/mmu_context_64.h
++++ b/arch/sparc/include/asm/mmu_context_64.h
+@@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm);
+ void __tsb_context_switch(unsigned long pgd_pa,
+ struct tsb_config *tsb_base,
+ struct tsb_config *tsb_huge,
+- unsigned long tsb_descr_pa);
++ unsigned long tsb_descr_pa,
++ unsigned long secondary_ctx);
+
+-static inline void tsb_context_switch(struct mm_struct *mm)
++static inline void tsb_context_switch_ctx(struct mm_struct *mm,
++ unsigned long ctx)
+ {
+ __tsb_context_switch(__pa(mm->pgd),
+ &mm->context.tsb_block[MM_TSB_BASE],
+@@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
+ #else
+ NULL
+ #endif
+- , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
++ , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
++ ctx);
+ }
+
++#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
++
+ void tsb_grow(struct mm_struct *mm,
+ unsigned long tsb_index,
+ unsigned long mm_rss);
+@@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
+ * cpu0 to update it's TSB because at that point the cpu_vm_mask
+ * only had cpu1 set in it.
+ */
+- load_secondary_context(mm);
+- tsb_context_switch(mm);
++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
+
+ /* Any time a processor runs a context on an address space
+ * for the first time, we must flush that context out of the
+diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
+index ec9c04de3664..ff05992dae7a 100644
+--- a/arch/sparc/include/asm/trap_block.h
++++ b/arch/sparc/include/asm/trap_block.h
+@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
+ void init_cur_cpu_trap(struct thread_info *);
+ void setup_tba(void);
+ extern int ncpus_probed;
++extern u64 cpu_mondo_counter[NR_CPUS];
+
+ unsigned long real_hard_smp_processor_id(void);
+
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index fdf31040a7dc..3218bc43302e 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -622,22 +622,48 @@ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ }
+ }
+
+-/* Multi-cpu list version. */
++#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
++#define MONDO_USEC_WAIT_MIN 2
++#define MONDO_USEC_WAIT_MAX 100
++#define MONDO_RETRY_LIMIT 500000
++
++/* Multi-cpu list version.
++ *
++ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
++ * Sometimes not all cpus receive the mondo, requiring us to re-send
++ * it until all cpus have received it, or until cpus are truly stuck
++ * unable to receive the mondo and we time out.
++ * Occasionally a target cpu strand is borrowed briefly by the
++ * hypervisor to perform a guest service, such as PCIe error handling.
++ * Given that service time, an overall wait of 1 second per cpu is
++ * reasonable. Two in-between mondo-check wait times are defined:
++ * 2 usec for a quick single-cpu turnaround, and up to 100 usec for a
++ * large cpu count. Delivering the mondo to many cpus can take longer,
++ * so we adjust the retry count as long as target cpus are making
++ * forward progress.
++ */
+ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ {
+- int retries, this_cpu, prev_sent, i, saw_cpu_error;
++ int this_cpu, tot_cpus, prev_sent, i, rem;
++ int usec_wait, retries, tot_retries;
++ u16 first_cpu = 0xffff;
++ unsigned long xc_rcvd = 0;
+ unsigned long status;
++ int ecpuerror_id = 0;
++ int enocpu_id = 0;
+ u16 *cpu_list;
++ u16 cpu;
+
+ this_cpu = smp_processor_id();
+-
+ cpu_list = __va(tb->cpu_list_pa);
+-
+- saw_cpu_error = 0;
+- retries = 0;
++ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
++ if (usec_wait > MONDO_USEC_WAIT_MAX)
++ usec_wait = MONDO_USEC_WAIT_MAX;
++ retries = tot_retries = 0;
++ tot_cpus = cnt;
+ prev_sent = 0;
++
+ do {
+- int forward_progress, n_sent;
++ int n_sent, mondo_delivered, target_cpu_busy;
+
+ status = sun4v_cpu_mondo_send(cnt,
+ tb->cpu_list_pa,
+@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+
+ /* HV_EOK means all cpus received the xcall, we're done. */
+ if (likely(status == HV_EOK))
+- break;
++ goto xcall_done;
++
++ /* If not these non-fatal errors, panic */
++ if (unlikely((status != HV_EWOULDBLOCK) &&
++ (status != HV_ECPUERROR) &&
++ (status != HV_ENOCPU)))
++ goto fatal_errors;
+
+ /* First, see if we made any forward progress.
++ *
++ * Go through the cpu_list, count the target cpus that have
++ * received our mondo (n_sent), and those that did not (rem).
++ * Re-pack cpu_list with the cpus that remain to be retried in the
++ * front - this simplifies tracking the truly stalled cpus.
+ *
+ * The hypervisor indicates successful sends by setting
+ * cpu list entries to the value 0xffff.
++ *
++ * EWOULDBLOCK means some target cpus did not receive the
++ * mondo and retry usually helps.
++ *
++ * ECPUERROR means at least one target cpu is in error state,
++ * it's usually safe to skip the faulty cpu and retry.
++ *
++ * ENOCPU means one of the target cpus doesn't belong to the
++ * domain, perhaps offlined, which is unexpected but not
++ * fatal, and it's okay to skip the offlined cpu.
+ */
++ rem = 0;
+ n_sent = 0;
+ for (i = 0; i < cnt; i++) {
+- if (likely(cpu_list[i] == 0xffff))
++ cpu = cpu_list[i];
++ if (likely(cpu == 0xffff)) {
+ n_sent++;
++ } else if ((status == HV_ECPUERROR) &&
++ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
++ ecpuerror_id = cpu + 1;
++ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
++ enocpu_id = cpu + 1;
++ } else {
++ cpu_list[rem++] = cpu;
++ }
+ }
+
+- forward_progress = 0;
+- if (n_sent > prev_sent)
+- forward_progress = 1;
++ /* No cpu remained, we're done. */
++ if (rem == 0)
++ break;
+
+- prev_sent = n_sent;
++ /* Otherwise, update the cpu count for retry. */
++ cnt = rem;
+
+- /* If we get a HV_ECPUERROR, then one or more of the cpus
+- * in the list are in error state. Use the cpu_state()
+- * hypervisor call to find out which cpus are in error state.
++ /* Record the overall number of mondos received by the
++ * first of the remaining cpus.
+ */
+- if (unlikely(status == HV_ECPUERROR)) {
+- for (i = 0; i < cnt; i++) {
+- long err;
+- u16 cpu;
++ if (first_cpu != cpu_list[0]) {
++ first_cpu = cpu_list[0];
++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
++ }
+
+- cpu = cpu_list[i];
+- if (cpu == 0xffff)
+- continue;
++ /* Was any mondo delivered successfully? */
++ mondo_delivered = (n_sent > prev_sent);
++ prev_sent = n_sent;
+
+- err = sun4v_cpu_state(cpu);
+- if (err == HV_CPU_STATE_ERROR) {
+- saw_cpu_error = (cpu + 1);
+- cpu_list[i] = 0xffff;
+- }
+- }
+- } else if (unlikely(status != HV_EWOULDBLOCK))
+- goto fatal_mondo_error;
++ /* or, was any target cpu busy processing other mondos? */
++ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
++ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+
+- /* Don't bother rewriting the CPU list, just leave the
+- * 0xffff and non-0xffff entries in there and the
+- * hypervisor will do the right thing.
+- *
+- * Only advance timeout state if we didn't make any
+- * forward progress.
++ /* Retry count is for no progress. If we're making progress,
++ * reset the retry count.
+ */
+- if (unlikely(!forward_progress)) {
+- if (unlikely(++retries > 10000))
+- goto fatal_mondo_timeout;
+-
+- /* Delay a little bit to let other cpus catch up
+- * on their cpu mondo queue work.
+- */
+- udelay(2 * cnt);
++ if (likely(mondo_delivered || target_cpu_busy)) {
++ tot_retries += retries;
++ retries = 0;
++ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
++ goto fatal_mondo_timeout;
+ }
+- } while (1);
+
+- if (unlikely(saw_cpu_error))
+- goto fatal_mondo_cpu_error;
++ /* Delay a little bit to let other cpus catch up on
++ * their cpu mondo queue work.
++ */
++ if (!mondo_delivered)
++ udelay(usec_wait);
+
+- return;
++ retries++;
++ } while (1);
+
+-fatal_mondo_cpu_error:
+- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+- "(including %d) were in error state\n",
+- this_cpu, saw_cpu_error - 1);
++xcall_done:
++ if (unlikely(ecpuerror_id > 0)) {
++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
++ this_cpu, ecpuerror_id - 1);
++ } else if (unlikely(enocpu_id > 0)) {
++ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
++ this_cpu, enocpu_id - 1);
++ }
+ return;
+
++fatal_errors:
++ /* fatal errors include bad alignment, etc */
++ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
++ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
++ panic("Unexpected SUN4V mondo error %lu\n", status);
++
+ fatal_mondo_timeout:
+- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+- " progress after %d retries.\n",
+- this_cpu, retries);
+- goto dump_cpu_list_and_out;
+-
+-fatal_mondo_error:
+- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+- this_cpu, status);
+- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+- "mondo_block_pa(%lx)\n",
+- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+-
+-dump_cpu_list_and_out:
+- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+- for (i = 0; i < cnt; i++)
+- printk("%u ", cpu_list[i]);
+- printk("]\n");
++ /* some cpus being non-responsive to the cpu mondo */
++ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
++ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
++ panic("SUN4V mondo timeout panic\n");
+ }
+
+ static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
+diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
+index 559bc5e9c199..34631995859a 100644
+--- a/arch/sparc/kernel/sun4v_ivec.S
++++ b/arch/sparc/kernel/sun4v_ivec.S
+@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
+ ldxa [%g0] ASI_SCRATCHPAD, %g4
+ sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+
++ /* Get smp_processor_id() into %g3 */
++ sethi %hi(trap_block), %g5
++ or %g5, %lo(trap_block), %g5
++ sub %g4, %g5, %g3
++ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
++
++ /* Increment cpu_mondo_counter[smp_processor_id()] */
++ sethi %hi(cpu_mondo_counter), %g5
++ or %g5, %lo(cpu_mondo_counter), %g5
++ sllx %g3, 3, %g3
++ add %g5, %g3, %g5
++ ldx [%g5], %g3
++ add %g3, 1, %g3
++ stx %g3, [%g5]
++
+ /* Get CPU mondo queue base phys address into %g7. */
+ ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 196ee5eb4d48..ad31af1dd726 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
+ }
+ }
+
++u64 cpu_mondo_counter[NR_CPUS] = {0};
+ struct trap_per_cpu trap_block[NR_CPUS];
+ EXPORT_SYMBOL(trap_block);
+
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index 07c0df924960..db872dbfafe9 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -360,6 +360,7 @@ tsb_flush:
+ * %o1: TSB base config pointer
+ * %o2: TSB huge config pointer, or NULL if none
+ * %o3: Hypervisor TSB descriptor physical address
++ * %o4: Secondary context to load, if non-zero
+ *
+ * We have to run this whole thing with interrupts
+ * disabled so that the current cpu doesn't change
+@@ -372,6 +373,17 @@ __tsb_context_switch:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+
++ brz,pn %o4, 1f
++ mov SECONDARY_CONTEXT, %o5
++
++661: stxa %o4, [%o5] ASI_DMMU
++ .section .sun4v_1insn_patch, "ax"
++ .word 661b
++ stxa %o4, [%o5] ASI_MMU
++ .previous
++ flush %g6
++
++1:
+ TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+
+ stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
+diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
+index 54f98706b03b..5a8cb37f0a3b 100644
+--- a/arch/sparc/lib/U3memcpy.S
++++ b/arch/sparc/lib/U3memcpy.S
+@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
+ ENTRY(U3_retl_o2_and_7_plus_GS)
+ and %o2, 7, %o2
+ retl
+- add %o2, GLOBAL_SPARE, %o2
++ add %o2, GLOBAL_SPARE, %o0
+ ENDPROC(U3_retl_o2_and_7_plus_GS)
+ ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
+ add GLOBAL_SPARE, 8, GLOBAL_SPARE
+ and %o2, 7, %o2
+ retl
+- add %o2, GLOBAL_SPARE, %o2
++ add %o2, GLOBAL_SPARE, %o0
+ ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
+ #endif
+
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 3c40ebd50f92..fed73f14aa49 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -325,6 +325,29 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
+ }
+
+ #ifdef CONFIG_HUGETLB_PAGE
++static void __init add_huge_page_size(unsigned long size)
++{
++ unsigned int order;
++
++ if (size_to_hstate(size))
++ return;
++
++ order = ilog2(size) - PAGE_SHIFT;
++ hugetlb_add_hstate(order);
++}
++
++static int __init hugetlbpage_init(void)
++{
++ add_huge_page_size(1UL << HPAGE_64K_SHIFT);
++ add_huge_page_size(1UL << HPAGE_SHIFT);
++ add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
++ add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
++
++ return 0;
++}
++
++arch_initcall(hugetlbpage_init);
++
+ static int __init setup_hugepagesz(char *string)
+ {
+ unsigned long long hugepage_size;
+@@ -364,7 +387,7 @@ static int __init setup_hugepagesz(char *string)
+ goto out;
+ }
+
+- hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
++ add_huge_page_size(hugepage_size);
+ rc = 1;
+
+ out:
+diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
+index 17bd2e167e07..df707a8ad311 100644
+--- a/arch/sparc/power/hibernate.c
++++ b/arch/sparc/power/hibernate.c
+@@ -35,6 +35,5 @@ void restore_processor_state(void)
+ {
+ struct mm_struct *mm = current->active_mm;
+
+- load_secondary_context(mm);
+- tsb_context_switch(mm);
++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
+ }
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 43e10d6fdbed..44adcde7a0ca 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
+ if (hlist_unhashed(&n.link))
+ break;
+
++ rcu_irq_exit();
++
+ if (!n.halted) {
+ local_irq_enable();
+ schedule();
+@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
+ /*
+ * We cannot reschedule. So halt.
+ */
+- rcu_irq_exit();
+ native_safe_halt();
+ local_irq_disable();
+- rcu_irq_enter();
+ }
++
++ rcu_irq_enter();
+ }
+ if (!n.halted)
+ finish_swait(&n.wq, &wait);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a7421b772d0e..56a7fac71439 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -3307,6 +3307,10 @@ EXPORT_SYMBOL(blk_finish_plug);
+ */
+ void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+ {
++	/* no support for RQF_PM and ->rpm_status in blk-mq yet */
++ if (q->mq_ops)
++ return;
++
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_set_autosuspend_delay(q->dev, -1);
+diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
+index 8e61e8640e17..5eaecd40f701 100644
+--- a/block/blk-mq-cpumap.c
++++ b/block/blk-mq-cpumap.c
+@@ -35,7 +35,6 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
+ {
+ unsigned int *map = set->mq_map;
+ unsigned int nr_queues = set->nr_hw_queues;
+- const struct cpumask *online_mask = cpu_online_mask;
+ unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
+ cpumask_var_t cpus;
+
+@@ -44,7 +43,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
+
+ cpumask_clear(cpus);
+ nr_cpus = nr_uniq_cpus = 0;
+- for_each_cpu(i, online_mask) {
++ for_each_present_cpu(i) {
+ nr_cpus++;
+ first_sibling = get_first_sibling(i);
+ if (!cpumask_test_cpu(first_sibling, cpus))
+@@ -54,7 +53,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
+
+ queue = 0;
+ for_each_possible_cpu(i) {
+- if (!cpumask_test_cpu(i, online_mask)) {
++ if (!cpumask_test_cpu(i, cpu_present_mask)) {
+ map[i] = 0;
+ continue;
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 958cedaff8b8..7353e0080062 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -37,9 +37,6 @@
+ #include "blk-wbt.h"
+ #include "blk-mq-sched.h"
+
+-static DEFINE_MUTEX(all_q_mutex);
+-static LIST_HEAD(all_q_list);
+-
+ static void blk_mq_poll_stats_start(struct request_queue *q);
+ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+ static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
+@@ -1975,8 +1972,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
+ INIT_LIST_HEAD(&__ctx->rq_list);
+ __ctx->queue = q;
+
+- /* If the cpu isn't online, the cpu is mapped to first hctx */
+- if (!cpu_online(i))
++		/* If the cpu isn't present, the cpu is mapped to the first hctx */
++ if (!cpu_present(i))
+ continue;
+
+ hctx = blk_mq_map_queue(q, i);
+@@ -2019,8 +2016,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
+ }
+ }
+
+-static void blk_mq_map_swqueue(struct request_queue *q,
+- const struct cpumask *online_mask)
++static void blk_mq_map_swqueue(struct request_queue *q)
+ {
+ unsigned int i, hctx_idx;
+ struct blk_mq_hw_ctx *hctx;
+@@ -2038,13 +2034,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
+ }
+
+ /*
+- * Map software to hardware queues
++ * Map software to hardware queues.
++ *
++	 * If the cpu isn't present, the cpu is mapped to the first hctx.
+ */
+- for_each_possible_cpu(i) {
+- /* If the cpu isn't online, the cpu is mapped to first hctx */
+- if (!cpumask_test_cpu(i, online_mask))
+- continue;
+-
++ for_each_present_cpu(i) {
+ hctx_idx = q->mq_map[i];
+ /* unmapped hw queue can be remapped after CPU topo changed */
+ if (!set->tags[hctx_idx] &&
+@@ -2340,16 +2334,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ blk_queue_softirq_done(q, set->ops->complete);
+
+ blk_mq_init_cpu_queues(q, set->nr_hw_queues);
+-
+- get_online_cpus();
+- mutex_lock(&all_q_mutex);
+-
+- list_add_tail(&q->all_q_node, &all_q_list);
+ blk_mq_add_queue_tag_set(set, q);
+- blk_mq_map_swqueue(q, cpu_online_mask);
+-
+- mutex_unlock(&all_q_mutex);
+- put_online_cpus();
++ blk_mq_map_swqueue(q);
+
+ if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
+ int ret;
+@@ -2375,18 +2361,12 @@ void blk_mq_free_queue(struct request_queue *q)
+ {
+ struct blk_mq_tag_set *set = q->tag_set;
+
+- mutex_lock(&all_q_mutex);
+- list_del_init(&q->all_q_node);
+- mutex_unlock(&all_q_mutex);
+-
+ blk_mq_del_queue_tag_set(q);
+-
+ blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+ }
+
+ /* Basically redo blk_mq_init_queue with queue frozen */
+-static void blk_mq_queue_reinit(struct request_queue *q,
+- const struct cpumask *online_mask)
++static void blk_mq_queue_reinit(struct request_queue *q)
+ {
+ WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
+
+@@ -2399,76 +2379,12 @@ static void blk_mq_queue_reinit(struct request_queue *q,
+ 	 * involves freeing and re-allocating memory, worth doing?)
+ */
+
+- blk_mq_map_swqueue(q, online_mask);
++ blk_mq_map_swqueue(q);
+
+ blk_mq_sysfs_register(q);
+ blk_mq_debugfs_register_hctxs(q);
+ }
+
+-/*
+- * New online cpumask which is going to be set in this hotplug event.
+- * Declare this cpumasks as global as cpu-hotplug operation is invoked
+- * one-by-one and dynamically allocating this could result in a failure.
+- */
+-static struct cpumask cpuhp_online_new;
+-
+-static void blk_mq_queue_reinit_work(void)
+-{
+- struct request_queue *q;
+-
+- mutex_lock(&all_q_mutex);
+- /*
+- * We need to freeze and reinit all existing queues. Freezing
+- * involves synchronous wait for an RCU grace period and doing it
+- * one by one may take a long time. Start freezing all queues in
+- * one swoop and then wait for the completions so that freezing can
+- * take place in parallel.
+- */
+- list_for_each_entry(q, &all_q_list, all_q_node)
+- blk_freeze_queue_start(q);
+- list_for_each_entry(q, &all_q_list, all_q_node)
+- blk_mq_freeze_queue_wait(q);
+-
+- list_for_each_entry(q, &all_q_list, all_q_node)
+- blk_mq_queue_reinit(q, &cpuhp_online_new);
+-
+- list_for_each_entry(q, &all_q_list, all_q_node)
+- blk_mq_unfreeze_queue(q);
+-
+- mutex_unlock(&all_q_mutex);
+-}
+-
+-static int blk_mq_queue_reinit_dead(unsigned int cpu)
+-{
+- cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+- blk_mq_queue_reinit_work();
+- return 0;
+-}
+-
+-/*
+- * Before hotadded cpu starts handling requests, new mappings must be
+- * established. Otherwise, these requests in hw queue might never be
+- * dispatched.
+- *
+- * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
+- * for CPU0, and ctx1 for CPU1).
+- *
+- * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
+- * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
+- *
+- * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
+- * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
+- * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
+- * ignored.
+- */
+-static int blk_mq_queue_reinit_prepare(unsigned int cpu)
+-{
+- cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+- cpumask_set_cpu(cpu, &cpuhp_online_new);
+- blk_mq_queue_reinit_work();
+- return 0;
+-}
+-
+ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+ {
+ int i;
+@@ -2679,7 +2595,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ blk_mq_update_queue_map(set);
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_realloc_hw_ctxs(set, q);
+- blk_mq_queue_reinit(q, cpu_online_mask);
++ blk_mq_queue_reinit(q);
+ }
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+@@ -2895,24 +2811,10 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_poll);
+
+-void blk_mq_disable_hotplug(void)
+-{
+- mutex_lock(&all_q_mutex);
+-}
+-
+-void blk_mq_enable_hotplug(void)
+-{
+- mutex_unlock(&all_q_mutex);
+-}
+-
+ static int __init blk_mq_init(void)
+ {
+ cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
+ blk_mq_hctx_notify_dead);
+-
+- cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
+- blk_mq_queue_reinit_prepare,
+- blk_mq_queue_reinit_dead);
+ return 0;
+ }
+ subsys_initcall(blk_mq_init);
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index cc67b48e3551..558df56544d2 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -56,11 +56,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ bool at_head);
+ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+ struct list_head *list);
+-/*
+- * CPU hotplug helpers
+- */
+-void blk_mq_enable_hotplug(void);
+-void blk_mq_disable_hotplug(void);
+
+ /*
+ * CPU -> queue mappings
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 10347e3d73ad..5bd58bd4ab05 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
+ };
+
+ struct lpss_private_data {
++ struct acpi_device *adev;
+ void __iomem *mmio_base;
+ resource_size_t mmio_size;
+ unsigned int fixed_clk_rate;
+@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
+
+ static void byt_pwm_setup(struct lpss_private_data *pdata)
+ {
++ struct acpi_device *adev = pdata->adev;
++
++ /* Only call pwm_add_table for the first PWM controller */
++ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
++ return;
++
+ if (!acpi_dev_present("INT33FD", NULL, -1))
+ pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+ }
+@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
+
+ static void bsw_pwm_setup(struct lpss_private_data *pdata)
+ {
++ struct acpi_device *adev = pdata->adev;
++
++ /* Only call pwm_add_table for the first PWM controller */
++ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
++ return;
++
+ pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
+ }
+
+@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
+ goto err_out;
+ }
+
++ pdata->adev = adev;
+ pdata->dev_desc = dev_desc;
+
+ if (dev_desc->setup)
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 49ba9834c715..12d59968020f 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3028,10 +3028,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+ if (!sata_pmp_attached(ap)) {
+- if (likely(devno < ata_link_max_devices(&ap->link)))
++ if (likely(devno >= 0 &&
++ devno < ata_link_max_devices(&ap->link)))
+ return &ap->link.device[devno];
+ } else {
+- if (likely(devno < ap->nr_pmp_links))
++ if (likely(devno >= 0 &&
++ devno < ap->nr_pmp_links))
+ return &ap->pmp_link[devno].device[0];
+ }
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
+index 5372bf8be5e6..31d7ffda9aab 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
++++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
+@@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = {
+ .hw.init = CLK_HW_INIT_PARENTS("cpu",
+ cpu_parents,
+ &ccu_mux_ops,
+- CLK_IS_CRITICAL),
++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ }
+ };
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index a42a1eea5714..2e96b3d46e0c 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
+ {
+ struct lineevent_state *le = p;
+ struct gpioevent_data ge;
+- int ret;
++ int ret, level;
+
+ ge.timestamp = ktime_get_real_ns();
++ level = gpiod_get_value_cansleep(le->desc);
+
+ if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
+ && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+- int level = gpiod_get_value_cansleep(le->desc);
+-
+ if (level)
+ /* Emit low-to-high event */
+ ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+ else
+ /* Emit high-to-low event */
+ ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+- } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
++ } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
+ /* Emit low-to-high event */
+ ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+- } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
++ } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
+ /* Emit high-to-low event */
+ ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+ } else {
+diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+index 18fd01f3e4b2..003a131bad47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
++++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+@@ -1,24 +1,25 @@
+-
+ /*
+-***************************************************************************************************
+-*
+-* Trade secret of Advanced Micro Devices, Inc.
+-* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
+-*
+-* All rights reserved. This notice is intended as a precaution against inadvertent publication and
+-* does not imply publication or any waiver of confidentiality. The year included in the foregoing
+-* notice is the year of creation of the work.
+-*
+-***************************************************************************************************
+-*/
+-/**
+-***************************************************************************************************
+-* @brief gfx9 Clearstate Definitions
+-***************************************************************************************************
+-*
+-* Do not edit! This is a machine-generated file!
+-*
+-*/
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
+
+ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index c0b1aabf282f..7dbb7cf47986 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
+ amdgpu_program_register_sequence(adev,
+ pitcairn_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
++ break;
+ case CHIP_VERDE:
+ amdgpu_program_register_sequence(adev,
+ verde_golden_registers,
+@@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
+ amdgpu_program_register_sequence(adev,
+ oland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
++ break;
+ case CHIP_HAINAN:
+ amdgpu_program_register_sequence(adev,
+ hainan_golden_registers,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 1d2db5d912b0..f8a977f86ec7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+
+ hotspot_x = du->hotspot_x;
+ hotspot_y = du->hotspot_y;
++
++ if (plane->fb) {
++ hotspot_x += plane->fb->hot_x;
++ hotspot_y += plane->fb->hot_y;
++ }
++
+ du->cursor_surface = vps->surf;
+ du->cursor_dmabuf = vps->dmabuf;
+
+@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+ vmw_cursor_update_position(dev_priv, true,
+ du->cursor_x + hotspot_x,
+ du->cursor_y + hotspot_y);
++
++ du->core_hotspot_x = hotspot_x - du->hotspot_x;
++ du->core_hotspot_y = hotspot_y - du->hotspot_y;
+ } else {
+ DRM_ERROR("Failed to update cursor image\n");
+ }
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 0f1219fa8561..28fbc81c6e9e 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -4316,6 +4316,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+ /* Setting */
+ irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
+ irte->hi.fields.vector = vcpu_pi_info->vector;
++ irte->lo.fields_vapic.ga_log_intr = 1;
+ irte->lo.fields_vapic.guest_mode = 1;
+ irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
+
+diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
+index b2ff82fa7116..ecfeac5cdbed 100644
+--- a/drivers/media/pci/saa7164/saa7164-bus.c
++++ b/drivers/media/pci/saa7164/saa7164-bus.c
+@@ -389,11 +389,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
+ msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
+ msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
++ memcpy(msg, &msg_tmp, sizeof(*msg));
+
+ /* No need to update the read positions, because this was a peek */
+ 	/* If the caller specifically wants to peek, return */
+ if (peekonly) {
+- memcpy(msg, &msg_tmp, sizeof(*msg));
+ goto peekout;
+ }
+
+@@ -438,21 +438,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ space_rem = bus->m_dwSizeGetRing - curr_grp;
+
+ if (space_rem < sizeof(*msg)) {
+- /* msg wraps around the ring */
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
+- memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
+- sizeof(*msg) - space_rem);
+ if (buf)
+ memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
+ space_rem, buf_size);
+
+ } else if (space_rem == sizeof(*msg)) {
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ if (buf)
+ memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
+ } else {
+ /* Additional data wraps around the ring */
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ if (buf) {
+ memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
+ sizeof(*msg), space_rem - sizeof(*msg));
+@@ -465,15 +459,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+
+ } else {
+ /* No wrapping */
+- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ if (buf)
+ memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
+ buf_size);
+ }
+- /* Convert from little endian to CPU */
+- msg->size = le16_to_cpu((__force __le16)msg->size);
+- msg->command = le32_to_cpu((__force __le32)msg->command);
+- msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
+
+ /* Update the read positions, adjusting the ring */
+ saa7164_writel(bus->m_dwGetReadPos, new_grp);
+diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
+index e3fe3e0635aa..1831bf5ccca5 100644
+--- a/drivers/media/platform/davinci/vpfe_capture.c
++++ b/drivers/media/platform/davinci/vpfe_capture.c
+@@ -1719,27 +1719,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
+
+ switch (cmd) {
+ case VPFE_CMD_S_CCDC_RAW_PARAMS:
++ ret = -EINVAL;
+ v4l2_warn(&vpfe_dev->v4l2_dev,
+- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+- if (ccdc_dev->hw_ops.set_params) {
+- ret = ccdc_dev->hw_ops.set_params(param);
+- if (ret) {
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "Error setting parameters in CCDC\n");
+- goto unlock_out;
+- }
+- ret = vpfe_get_ccdc_image_format(vpfe_dev,
+- &vpfe_dev->fmt);
+- if (ret < 0) {
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "Invalid image format at CCDC\n");
+- goto unlock_out;
+- }
+- } else {
+- ret = -EINVAL;
+- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+- }
++ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+ break;
+ default:
+ ret = -ENOTTY;
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index de85f1d7ce43..c01b655571a2 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
+ if (!dev->rx_resolution)
+ return -ENOTTY;
+
+- val = dev->rx_resolution;
++ val = dev->rx_resolution / 1000;
+ break;
+
+ case LIRC_SET_WIDEBAND_RECEIVER:
+diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
+index c8863f36686a..f39cf8cb639f 100644
+--- a/drivers/media/rc/ir-spi.c
++++ b/drivers/media/rc/ir-spi.c
+@@ -57,10 +57,13 @@ static int ir_spi_tx(struct rc_dev *dev,
+
+ /* convert the pulse/space signal to raw binary signal */
+ for (i = 0; i < count; i++) {
++ unsigned int periods;
+ int j;
+ u16 val = ((i + 1) % 2) ? idata->pulse : idata->space;
+
+- if (len + buffer[i] >= IR_SPI_MAX_BUFSIZE)
++ periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000);
++
++ if (len + periods >= IR_SPI_MAX_BUFSIZE)
+ return -EINVAL;
+
+ /*
+@@ -69,13 +72,13 @@ static int ir_spi_tx(struct rc_dev *dev,
+ * contain a space duration.
+ */
+ val = (i % 2) ? idata->space : idata->pulse;
+- for (j = 0; j < buffer[i]; j++)
++ for (j = 0; j < periods; j++)
+ idata->tx_buf[len++] = val;
+ }
+
+ memset(&xfer, 0, sizeof(xfer));
+
+- xfer.speed_hz = idata->freq;
++ xfer.speed_hz = idata->freq * 16;
+ xfer.len = len * sizeof(*idata->tx_buf);
+ xfer.tx_buf = idata->tx_buf;
+
+diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
+index 1dfc2de1fe77..4767f4341ba9 100644
+--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
++++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
+@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
+ MODULE_LICENSE("GPL");
+
+ static int debug;
+-static int persistent_config = 1;
++static int persistent_config;
+ module_param(debug, int, 0644);
+ module_param(persistent_config, int, 0644);
+ MODULE_PARM_DESC(debug, "debug level (0-1)");
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 3f8c85d5aa09..88fa03142e92 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -176,19 +176,17 @@ static void mmc_retune_timer(unsigned long data)
+ */
+ int mmc_of_parse(struct mmc_host *host)
+ {
+- struct device_node *np;
++ struct device *dev = host->parent;
+ u32 bus_width;
+ int ret;
+ bool cd_cap_invert, cd_gpio_invert = false;
+ bool ro_cap_invert, ro_gpio_invert = false;
+
+- if (!host->parent || !host->parent->of_node)
++ if (!dev || !dev_fwnode(dev))
+ return 0;
+
+- np = host->parent->of_node;
+-
+ /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+- if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
++ if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
+ dev_dbg(host->parent,
+ "\"bus-width\" property is missing, assuming 1 bit.\n");
+ bus_width = 1;
+@@ -210,7 +208,7 @@ int mmc_of_parse(struct mmc_host *host)
+ }
+
+ /* f_max is obtained from the optional "max-frequency" property */
+- of_property_read_u32(np, "max-frequency", &host->f_max);
++ device_property_read_u32(dev, "max-frequency", &host->f_max);
+
+ /*
+ * Configure CD and WP pins. They are both by default active low to
+@@ -225,12 +223,12 @@ int mmc_of_parse(struct mmc_host *host)
+ */
+
+ /* Parse Card Detection */
+- if (of_property_read_bool(np, "non-removable")) {
++ if (device_property_read_bool(dev, "non-removable")) {
+ host->caps |= MMC_CAP_NONREMOVABLE;
+ } else {
+- cd_cap_invert = of_property_read_bool(np, "cd-inverted");
++ cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+
+- if (of_property_read_bool(np, "broken-cd"))
++ if (device_property_read_bool(dev, "broken-cd"))
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+@@ -256,7 +254,7 @@ int mmc_of_parse(struct mmc_host *host)
+ }
+
+ /* Parse Write Protection */
+- ro_cap_invert = of_property_read_bool(np, "wp-inverted");
++ ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+ if (!ret)
+@@ -264,64 +262,64 @@ int mmc_of_parse(struct mmc_host *host)
+ else if (ret != -ENOENT && ret != -ENOSYS)
+ return ret;
+
+- if (of_property_read_bool(np, "disable-wp"))
++ if (device_property_read_bool(dev, "disable-wp"))
+ host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
+ /* See the comment on CD inversion above */
+ if (ro_cap_invert ^ ro_gpio_invert)
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+- if (of_property_read_bool(np, "cap-sd-highspeed"))
++ if (device_property_read_bool(dev, "cap-sd-highspeed"))
+ host->caps |= MMC_CAP_SD_HIGHSPEED;
+- if (of_property_read_bool(np, "cap-mmc-highspeed"))
++ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+ host->caps |= MMC_CAP_MMC_HIGHSPEED;
+- if (of_property_read_bool(np, "sd-uhs-sdr12"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr12"))
+ host->caps |= MMC_CAP_UHS_SDR12;
+- if (of_property_read_bool(np, "sd-uhs-sdr25"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr25"))
+ host->caps |= MMC_CAP_UHS_SDR25;
+- if (of_property_read_bool(np, "sd-uhs-sdr50"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr50"))
+ host->caps |= MMC_CAP_UHS_SDR50;
+- if (of_property_read_bool(np, "sd-uhs-sdr104"))
++ if (device_property_read_bool(dev, "sd-uhs-sdr104"))
+ host->caps |= MMC_CAP_UHS_SDR104;
+- if (of_property_read_bool(np, "sd-uhs-ddr50"))
++ if (device_property_read_bool(dev, "sd-uhs-ddr50"))
+ host->caps |= MMC_CAP_UHS_DDR50;
+- if (of_property_read_bool(np, "cap-power-off-card"))
++ if (device_property_read_bool(dev, "cap-power-off-card"))
+ host->caps |= MMC_CAP_POWER_OFF_CARD;
+- if (of_property_read_bool(np, "cap-mmc-hw-reset"))
++ if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
+ host->caps |= MMC_CAP_HW_RESET;
+- if (of_property_read_bool(np, "cap-sdio-irq"))
++ if (device_property_read_bool(dev, "cap-sdio-irq"))
+ host->caps |= MMC_CAP_SDIO_IRQ;
+- if (of_property_read_bool(np, "full-pwr-cycle"))
++ if (device_property_read_bool(dev, "full-pwr-cycle"))
+ host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
+- if (of_property_read_bool(np, "keep-power-in-suspend"))
++ if (device_property_read_bool(dev, "keep-power-in-suspend"))
+ host->pm_caps |= MMC_PM_KEEP_POWER;
+- if (of_property_read_bool(np, "wakeup-source") ||
+- of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
++ if (device_property_read_bool(dev, "wakeup-source") ||
++ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
+ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+- if (of_property_read_bool(np, "mmc-ddr-3_3v"))
++ if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
+ host->caps |= MMC_CAP_3_3V_DDR;
+- if (of_property_read_bool(np, "mmc-ddr-1_8v"))
++ if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
+ host->caps |= MMC_CAP_1_8V_DDR;
+- if (of_property_read_bool(np, "mmc-ddr-1_2v"))
++ if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
+ host->caps |= MMC_CAP_1_2V_DDR;
+- if (of_property_read_bool(np, "mmc-hs200-1_8v"))
++ if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
+ host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+- if (of_property_read_bool(np, "mmc-hs200-1_2v"))
++ if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
+ host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+- if (of_property_read_bool(np, "mmc-hs400-1_8v"))
++ if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
+ host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+- if (of_property_read_bool(np, "mmc-hs400-1_2v"))
++ if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
+ host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+- if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
++ if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
+ host->caps2 |= MMC_CAP2_HS400_ES;
+- if (of_property_read_bool(np, "no-sdio"))
++ if (device_property_read_bool(dev, "no-sdio"))
+ host->caps2 |= MMC_CAP2_NO_SDIO;
+- if (of_property_read_bool(np, "no-sd"))
++ if (device_property_read_bool(dev, "no-sd"))
+ host->caps2 |= MMC_CAP2_NO_SD;
+- if (of_property_read_bool(np, "no-mmc"))
++ if (device_property_read_bool(dev, "no-mmc"))
+ host->caps2 |= MMC_CAP2_NO_MMC;
+
+- host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
++ host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
+ if (host->dsr_req && (host->dsr & ~0xffff)) {
+ dev_err(host->parent,
+ "device tree specified broken value for DSR: 0x%x, ignoring\n",
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index e45129f48174..efde0f20dd24 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2707,8 +2707,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+ host->slot[id] = slot;
+
+ mmc->ops = &dw_mci_ops;
+- if (of_property_read_u32_array(host->dev->of_node,
+- "clock-freq-min-max", freq, 2)) {
++ if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
++ freq, 2)) {
+ mmc->f_min = DW_MCI_FREQ_MIN;
+ mmc->f_max = DW_MCI_FREQ_MAX;
+ } else {
+@@ -2808,7 +2808,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
+ {
+ int addr_config;
+ struct device *dev = host->dev;
+- struct device_node *np = dev->of_node;
+
+ /*
+ 	 * Check transfer mode from HCON[17:16]
+@@ -2869,8 +2868,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
+ dev_info(host->dev, "Using internal DMA controller.\n");
+ } else {
+ /* TRANS_MODE_EDMAC: check dma bindings again */
+- if ((of_property_count_strings(np, "dma-names") < 0) ||
+- (!of_find_property(np, "dmas", NULL))) {
++ if ((device_property_read_string_array(dev, "dma-names",
++ NULL, 0) < 0) ||
++ !device_property_present(dev, "dmas")) {
+ goto no_dma;
+ }
+ host->dma_ops = &dw_mci_edmac_ops;
+@@ -2937,7 +2937,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+ {
+ struct dw_mci_board *pdata;
+ struct device *dev = host->dev;
+- struct device_node *np = dev->of_node;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ int ret;
+ u32 clock_frequency;
+@@ -2954,20 +2953,21 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+ }
+
+ /* find out number of slots supported */
+- of_property_read_u32(np, "num-slots", &pdata->num_slots);
++ device_property_read_u32(dev, "num-slots", &pdata->num_slots);
+
+- if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
++ if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
+ dev_info(dev,
+ "fifo-depth property not found, using value of FIFOTH register as default\n");
+
+- of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
++ device_property_read_u32(dev, "card-detect-delay",
++ &pdata->detect_delay_ms);
+
+- of_property_read_u32(np, "data-addr", &host->data_addr_override);
++ device_property_read_u32(dev, "data-addr", &host->data_addr_override);
+
+- if (of_get_property(np, "fifo-watermark-aligned", NULL))
++ if (device_property_present(dev, "fifo-watermark-aligned"))
+ host->wm_aligned = true;
+
+- if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
++ if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
+ pdata->bus_hz = clock_frequency;
+
+ if (drv_data && drv_data->parse_dt) {
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index 7611fd679f1a..1485530c3592 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -31,6 +31,7 @@
+
+ #define SDMMC_MC1R 0x204
+ #define SDMMC_MC1R_DDR BIT(3)
++#define SDMMC_MC1R_FCD BIT(7)
+ #define SDMMC_CACR 0x230
+ #define SDMMC_CACR_CAPWREN BIT(0)
+ #define SDMMC_CACR_KEY (0x46 << 8)
+@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
+ struct clk *mainck;
+ };
+
++static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
++{
++ u8 mc1r;
++
++ mc1r = readb(host->ioaddr + SDMMC_MC1R);
++ mc1r |= SDMMC_MC1R_FCD;
++ writeb(mc1r, host->ioaddr + SDMMC_MC1R);
++}
++
+ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+ u16 clk;
+@@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+ sdhci_set_uhs_signaling(host, timing);
+ }
+
++static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
++{
++ sdhci_reset(host, mask);
++
++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++ sdhci_at91_set_force_card_detect(host);
++}
++
+ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+ .set_clock = sdhci_at91_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+- .reset = sdhci_reset,
++ .reset = sdhci_at91_reset,
+ .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
+ .set_power = sdhci_at91_set_power,
+ };
+@@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+
++ /*
++ * If the device attached to the MMC bus is not removable, it is safer
++ * to set the Force Card Detect bit. People often don't connect the
++ * card detect signal and use this pin for another purpose. If the card
++	 * detect pin is not muxed to the SDHCI controller, a default value is
++	 * used. This value can differ from one SoC revision to another.
++	 * Problems arise when this default value does not indicate card present.
++	 * To avoid this case, if the device is non-removable then the card
++	 * detection procedure using the SDMCC_CD signal is bypassed.
++	 * This bit is reset when a software reset for all commands is performed,
++	 * so we need to implement our own reset function to set this bit back.
++ */
++ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++ sdhci_at91_set_force_card_detect(host);
++
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 8ab6bdbe1682..224e93aa6d23 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2047,6 +2047,7 @@ static int bond_miimon_inspect(struct bonding *bond)
+ continue;
+
+ bond_propose_link_state(slave, BOND_LINK_FAIL);
++ commit++;
+ slave->delay = bond->params.downdelay;
+ if (slave->delay) {
+ netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
+@@ -2085,6 +2086,7 @@ static int bond_miimon_inspect(struct bonding *bond)
+ continue;
+
+ bond_propose_link_state(slave, BOND_LINK_BACK);
++ commit++;
+ slave->delay = bond->params.updelay;
+
+ if (slave->delay) {
+@@ -4598,7 +4600,7 @@ static int bond_check_params(struct bond_params *params)
+ }
+ ad_user_port_key = valptr->value;
+
+- if (bond_mode == BOND_MODE_TLB) {
++ if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
+ bond_opt_initstr(&newval, "default");
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
+ &newval);
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index fa0eece21eef..d9cc94a7d44e 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1668,6 +1668,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ .dev_name = "BCM53125",
+ .vlans = 4096,
+ .enabled_ports = 0xff,
++ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index d034d8cd7d22..32864a47c4c1 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3377,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6390_port_pause_config,
++ .port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
+index 5711fbbd6ae3..878cffd37e1f 100644
+--- a/drivers/net/ethernet/aurora/nb8800.c
++++ b/drivers/net/ethernet/aurora/nb8800.c
+@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
+ mac_mode |= HALF_DUPLEX;
+
+ if (gigabit) {
+- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
++ if (phy_interface_is_rgmii(dev->phydev))
+ mac_mode |= RGMII_MODE;
+
+ mac_mode |= GMAC_MODE;
+@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+- pad_mode = PAD_MODE_RGMII;
+- break;
+-
++ case PHY_INTERFACE_MODE_RGMII_ID:
++ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
++ pad_mode = PAD_MODE_RGMII;
+ break;
+
+ default:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 10d282841f5b..ac0a460c006a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -777,6 +777,10 @@ static void cb_timeout_handler(struct work_struct *work)
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ }
+
++static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
++static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
++ struct mlx5_cmd_msg *msg);
++
+ static void cmd_work_handler(struct work_struct *work)
+ {
+ struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
+@@ -786,16 +790,27 @@ static void cmd_work_handler(struct work_struct *work)
+ struct mlx5_cmd_layout *lay;
+ struct semaphore *sem;
+ unsigned long flags;
++ int alloc_ret;
+
+ sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+ down(sem);
+ if (!ent->page_queue) {
+- ent->idx = alloc_ent(cmd);
+- if (ent->idx < 0) {
++ alloc_ret = alloc_ent(cmd);
++ if (alloc_ret < 0) {
+ mlx5_core_err(dev, "failed to allocate command entry\n");
++ if (ent->callback) {
++ ent->callback(-EAGAIN, ent->context);
++ mlx5_free_cmd_msg(dev, ent->out);
++ free_msg(dev, ent->in);
++ free_cmd(ent);
++ } else {
++ ent->ret = -EAGAIN;
++ complete(&ent->done);
++ }
+ up(sem);
+ return;
+ }
++ ent->idx = alloc_ret;
+ } else {
+ ent->idx = cmd->max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+@@ -955,7 +970,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+- goto out_free;
++ goto out;
+
+ ds = ent->ts2 - ent->ts1;
+ op = MLX5_GET(mbox_in, in->first.data, opcode);
+@@ -1419,6 +1434,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+ mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+ ent->idx);
+ free_ent(cmd, ent->idx);
++ free_cmd(ent);
+ }
+ continue;
+ }
+@@ -1477,7 +1493,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+ free_msg(dev, ent->in);
+
+ err = err ? err : ent->status;
+- free_cmd(ent);
++ if (!forced)
++ free_cmd(ent);
+ callback(err, context);
+ } else {
+ complete(&ent->done);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 944fc1742464..3b39dbd97e57 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -261,6 +261,14 @@ struct mlx5e_dcbx {
+ };
+ #endif
+
++#define MAX_PIN_NUM 8
++struct mlx5e_pps {
++ u8 pin_caps[MAX_PIN_NUM];
++ struct work_struct out_work;
++ u64 start[MAX_PIN_NUM];
++ u8 enabled;
++};
++
+ struct mlx5e_tstamp {
+ rwlock_t lock;
+ struct cyclecounter cycles;
+@@ -272,7 +280,7 @@ struct mlx5e_tstamp {
+ struct mlx5_core_dev *mdev;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+- u8 *pps_pin_caps;
++ struct mlx5e_pps pps_info;
+ };
+
+ enum {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+index e706a87fc8b2..80c500f87ab6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+@@ -53,6 +53,15 @@ enum {
+ MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
+ };
+
++enum {
++ MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
++ MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
++ MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
++ MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
++ MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
++ MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
++};
++
+ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
+ struct skb_shared_hwtstamps *hwts)
+ {
+@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
+ return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
+ }
+
++static void mlx5e_pps_out(struct work_struct *work)
++{
++ struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
++ out_work);
++ struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
++ pps_info);
++ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
++ unsigned long flags;
++ int i;
++
++ for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
++ u64 tstart;
++
++ write_lock_irqsave(&tstamp->lock, flags);
++ tstart = tstamp->pps_info.start[i];
++ tstamp->pps_info.start[i] = 0;
++ write_unlock_irqrestore(&tstamp->lock, flags);
++ if (!tstart)
++ continue;
++
++ MLX5_SET(mtpps_reg, in, pin, i);
++ MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
++ MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
++ mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
++ }
++}
++
+ static void mlx5e_timestamp_overflow(struct work_struct *work)
+ {
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
+ overflow_work);
++ struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
+ unsigned long flags;
+
+ write_lock_irqsave(&tstamp->lock, flags);
+ timecounter_read(&tstamp->clock);
+ write_unlock_irqrestore(&tstamp->lock, flags);
+- schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
++ queue_delayed_work(priv->wq, &tstamp->overflow_work,
++ msecs_to_jiffies(tstamp->overflow_period * 1000));
+ }
+
+ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
+@@ -214,18 +252,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+ int neg_adj = 0;
+ struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
+ ptp_info);
+- struct mlx5e_priv *priv =
+- container_of(tstamp, struct mlx5e_priv, tstamp);
+-
+- if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
+- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+-
+- /* For future use need to add a loop for finding all 1PPS out pins */
+- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+- MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
+-
+- mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+- }
+
+ if (delta < 0) {
+ neg_adj = 1;
+@@ -254,12 +280,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
++ u32 field_select = 0;
++ u8 pin_mode = 0;
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+
+- if (!MLX5_CAP_GEN(priv->mdev, pps) ||
+- !MLX5_CAP_GEN(priv->mdev, pps_modify))
++ if (!MLX5_PPS_CAP(priv->mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->extts.index >= tstamp->ptp_info.n_pins)
+@@ -269,15 +296,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
+ pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
++ pin_mode = MLX5E_PIN_MODE_IN;
++ pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
++ field_select = MLX5E_MTPPS_FS_PIN_MODE |
++ MLX5E_MTPPS_FS_PATTERN |
++ MLX5E_MTPPS_FS_ENABLE;
++ } else {
++ pin = rq->extts.index;
++ field_select = MLX5E_MTPPS_FS_ENABLE;
+ }
+
+- if (rq->extts.flags & PTP_FALLING_EDGE)
+- pattern = 1;
+-
+ MLX5_SET(mtpps_reg, in, pin, pin);
+- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
++ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
++ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ if (err)
+@@ -296,14 +329,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+- u64 nsec_now, nsec_delta, time_stamp;
++ u64 nsec_now, nsec_delta, time_stamp = 0;
+ u64 cycles_now, cycles_delta;
+ struct timespec64 ts;
+ unsigned long flags;
++ u32 field_select = 0;
++ u8 pin_mode = 0;
++ u8 pattern = 0;
+ int pin = -1;
++ int err = 0;
+ s64 ns;
+
+- if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
++ if (!MLX5_PPS_CAP(priv->mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= tstamp->ptp_info.n_pins)
+@@ -314,32 +351,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+- }
+
+- ts.tv_sec = rq->perout.period.sec;
+- ts.tv_nsec = rq->perout.period.nsec;
+- ns = timespec64_to_ns(&ts);
+- if (on)
++ pin_mode = MLX5E_PIN_MODE_OUT;
++ pattern = MLX5E_OUT_PATTERN_PERIODIC;
++ ts.tv_sec = rq->perout.period.sec;
++ ts.tv_nsec = rq->perout.period.nsec;
++ ns = timespec64_to_ns(&ts);
++
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+- ts.tv_sec = rq->perout.start.sec;
+- ts.tv_nsec = rq->perout.start.nsec;
+- ns = timespec64_to_ns(&ts);
+- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+- write_lock_irqsave(&tstamp->lock, flags);
+- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+- nsec_delta = ns - nsec_now;
+- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+- tstamp->cycles.mult);
+- write_unlock_irqrestore(&tstamp->lock, flags);
+- time_stamp = cycles_now + cycles_delta;
++
++ ts.tv_sec = rq->perout.start.sec;
++ ts.tv_nsec = rq->perout.start.nsec;
++ ns = timespec64_to_ns(&ts);
++ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
++ write_lock_irqsave(&tstamp->lock, flags);
++ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
++ nsec_delta = ns - nsec_now;
++ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
++ tstamp->cycles.mult);
++ write_unlock_irqrestore(&tstamp->lock, flags);
++ time_stamp = cycles_now + cycles_delta;
++ field_select = MLX5E_MTPPS_FS_PIN_MODE |
++ MLX5E_MTPPS_FS_PATTERN |
++ MLX5E_MTPPS_FS_ENABLE |
++ MLX5E_MTPPS_FS_TIME_STAMP;
++ } else {
++ pin = rq->perout.index;
++ field_select = MLX5E_MTPPS_FS_ENABLE;
++ }
++
+ MLX5_SET(mtpps_reg, in, pin, pin);
+- MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+- MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
++ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
++ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
++ MLX5_SET(mtpps_reg, in, field_select, field_select);
++
++ err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
++ if (err)
++ return err;
+
+- return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
++ return mlx5_set_mtppse(priv->mdev, pin, 0,
++ MLX5E_EVENT_MODE_REPETETIVE & on);
++}
++
++static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
++ struct ptp_clock_request *rq,
++ int on)
++{
++ struct mlx5e_tstamp *tstamp =
++ container_of(ptp, struct mlx5e_tstamp, ptp_info);
++
++ tstamp->pps_info.enabled = !!on;
++ return 0;
+ }
+
+ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
+@@ -351,6 +416,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
+ return mlx5e_extts_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return mlx5e_perout_configure(ptp, rq, on);
++ case PTP_CLK_REQ_PPS:
++ return mlx5e_pps_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+@@ -396,6 +463,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
+ return -ENOMEM;
+ tstamp->ptp_info.enable = mlx5e_ptp_enable;
+ tstamp->ptp_info.verify = mlx5e_ptp_verify;
++ tstamp->ptp_info.pps = 1;
+
+ for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+ snprintf(tstamp->ptp_info.pin_config[i].name,
+@@ -423,22 +491,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
+ tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_out_pins);
+
+- tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+- tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+- tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+- tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+- tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+- tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+- tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+- tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
++ tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
++ tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
++ tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
++ tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
++ tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
++ tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
++ tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
++ tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+ }
+
+ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+ struct ptp_clock_event *event)
+ {
++ struct net_device *netdev = priv->netdev;
+ struct mlx5e_tstamp *tstamp = &priv->tstamp;
++ struct timespec64 ts;
++ u64 nsec_now, nsec_delta;
++ u64 cycles_now, cycles_delta;
++ int pin = event->index;
++ s64 ns;
++ unsigned long flags;
+
+- ptp_clock_event(tstamp->ptp, event);
++ switch (tstamp->ptp_info.pin_config[pin].func) {
++ case PTP_PF_EXTTS:
++ if (tstamp->pps_info.enabled) {
++ event->type = PTP_CLOCK_PPSUSR;
++ event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
++ } else {
++ event->type = PTP_CLOCK_EXTTS;
++ }
++ ptp_clock_event(tstamp->ptp, event);
++ break;
++ case PTP_PF_PEROUT:
++ mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
++ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
++ ts.tv_sec += 1;
++ ts.tv_nsec = 0;
++ ns = timespec64_to_ns(&ts);
++ write_lock_irqsave(&tstamp->lock, flags);
++ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
++ nsec_delta = ns - nsec_now;
++ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
++ tstamp->cycles.mult);
++ tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
++ queue_work(priv->wq, &tstamp->pps_info.out_work);
++ write_unlock_irqrestore(&tstamp->lock, flags);
++ break;
++ default:
++ netdev_err(netdev, "%s: Unhandled event\n", __func__);
++ }
+ }
+
+ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+@@ -474,9 +576,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+ do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ tstamp->overflow_period = ns;
+
++ INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
+ INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
+ if (tstamp->overflow_period)
+- schedule_delayed_work(&tstamp->overflow_work, 0);
++ queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
+ else
+ mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
+
+@@ -485,16 +588,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+ snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
+
+ /* Initialize 1PPS data structures */
+-#define MAX_PIN_NUM 8
+- tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
+- if (tstamp->pps_pin_caps) {
+- if (MLX5_CAP_GEN(priv->mdev, pps))
+- mlx5e_get_pps_caps(priv, tstamp);
+- if (tstamp->ptp_info.n_pins)
+- mlx5e_init_pin_config(tstamp);
+- } else {
+- mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
+- }
++ if (MLX5_PPS_CAP(priv->mdev))
++ mlx5e_get_pps_caps(priv, tstamp);
++ if (tstamp->ptp_info.n_pins)
++ mlx5e_init_pin_config(tstamp);
+
+ tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
+ &priv->mdev->pdev->dev);
+@@ -517,8 +614,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
+ priv->tstamp.ptp = NULL;
+ }
+
+- kfree(tstamp->pps_pin_caps);
+- kfree(tstamp->ptp_info.pin_config);
+-
++ cancel_work_sync(&tstamp->pps_info.out_work);
+ cancel_delayed_work_sync(&tstamp->overflow_work);
++ kfree(tstamp->ptp_info.pin_config);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index 85bf4a389295..986387de13ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
+
+ static bool outer_header_zero(u32 *match_criteria)
+ {
+- int size = MLX5_ST_SZ_BYTES(fte_match_param);
++ int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
+ char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+
+@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
+
+ spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
+ flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
++ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 7819fe9ede22..072aa8a13a0a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -365,7 +365,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ break;
+ case MLX5_DEV_EVENT_PPS:
+ eqe = (struct mlx5_eqe *)param;
+- ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_event.index = eqe->data.pps.pin;
+ ptp_event.timestamp =
+ timecounter_cyc2time(&priv->tstamp.clock,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index 33eae5ad2fb0..58a9f5c96d10 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -690,7 +690,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
+ else
+ mlx5_core_dbg(dev, "port_module_event is not set\n");
+
+- if (MLX5_CAP_GEN(dev, pps))
++ if (MLX5_PPS_CAP(dev))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+
+ err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+index cc1858752e70..6d90e9e3bfd1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+@@ -160,8 +160,6 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
+
+ static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+ {
+- mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
+-
+ mlx5_core_destroy_qp(mdev, qp);
+ }
+
+@@ -176,8 +174,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
+ return err;
+ }
+
+- mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+-
+ err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
+@@ -235,6 +231,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
+
+ static int mlx5i_init_rx(struct mlx5e_priv *priv)
+ {
++ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ err = mlx5e_create_indirect_rqt(priv);
+@@ -253,12 +250,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
+ if (err)
+ goto err_destroy_indirect_tirs;
+
+- err = mlx5i_create_flow_steering(priv);
++ err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+ if (err)
+ goto err_destroy_direct_tirs;
+
++ err = mlx5i_create_flow_steering(priv);
++ if (err)
++ goto err_remove_rx_underlay_qpn;
++
+ return 0;
+
++err_remove_rx_underlay_qpn:
++ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+ err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+ err_destroy_indirect_tirs:
+@@ -272,6 +275,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
+
+ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
+ {
++ struct mlx5i_priv *ipriv = priv->ppriv;
++
++ mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+ mlx5i_destroy_flow_steering(priv);
+ mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index b5d5519542e8..0ca4623bda6b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+ u8 *port1, u8 *port2)
+ {
+- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+- if (tracker->netdev_state[0].tx_enabled) {
+- *port1 = 1;
+- *port2 = 1;
+- } else {
+- *port1 = 2;
+- *port2 = 2;
+- }
+- } else {
+- *port1 = 1;
+- *port2 = 2;
+- if (!tracker->netdev_state[0].link_up)
+- *port1 = 2;
+- else if (!tracker->netdev_state[1].link_up)
+- *port2 = 1;
++ *port1 = 1;
++ *port2 = 2;
++ if (!tracker->netdev_state[0].tx_enabled ||
++ !tracker->netdev_state[0].link_up) {
++ *port1 = 2;
++ return;
+ }
++
++ if (!tracker->netdev_state[1].tx_enabled ||
++ !tracker->netdev_state[1].link_up)
++ *port2 = 1;
+ }
+
+ static void mlx5_activate_lag(struct mlx5_lag *ldev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index fbc6e9e9e305..1874aa96c1a1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -153,6 +153,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
+ int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
+ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+
++#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
++ MLX5_CAP_GEN((mdev), pps_modify) && \
++ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
++ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
++
+ void mlx5e_init(void);
+ void mlx5e_cleanup(void);
+
+diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
+index 6f6ed75b63c9..765de3bedb88 100644
+--- a/drivers/net/irda/mcs7780.c
++++ b/drivers/net/irda/mcs7780.c
+@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
+ static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
+ {
+ struct usb_device *dev = mcs->usbdev;
+- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+- MCS_RD_RTYPE, 0, reg, val, 2,
+- msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++ void *dmabuf;
++ int ret;
++
++ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
++ if (!dmabuf)
++ return -ENOMEM;
++
++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
++ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
++ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++
++ memcpy(val, dmabuf, sizeof(__u16));
++ kfree(dmabuf);
+
+ return ret;
+ }
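+The mcs7780 hunk above replaces a stack-resident val with a kmalloc()'d bounce buffer: usb_control_msg() hands the buffer to DMA, and on-stack memory is not guaranteed to be DMA-capable. A standalone sketch of the bounce-buffer pattern; do_transfer() and read_reg() are hypothetical stand-ins for usb_control_msg() and mcs_get_reg(), not kernel APIs:
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+/* Stand-in for an API that, like usb_control_msg(), hands the buffer
+ * to DMA hardware and therefore requires heap memory. */
+static int do_transfer(void *buf, size_t len)
+{
+	memset(buf, 0xab, len);		/* the "device" fills the buffer */
+	return (int)len;
+}
+
+static int read_reg(uint16_t *val)
+{
+	void *dmabuf;
+	int ret;
+
+	dmabuf = malloc(sizeof(*val));	/* kmalloc(..., GFP_KERNEL) in the patch */
+	if (!dmabuf)
+		return -1;
+
+	ret = do_transfer(dmabuf, sizeof(*val));
+	memcpy(val, dmabuf, sizeof(*val));	/* copy out, then release */
+	free(dmabuf);
+	return ret;
+}
+
+int main(void)
+{
+	uint16_t v = 0;
+
+	if (read_reg(&v) > 0)
+		printf("reg = 0x%04x\n", v);
+	return 0;
+}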
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index eebb0e1c70ff..b30d9ceee8bc 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ phydev->state = PHY_UP;
+ mutex_unlock(&phydev->lock);
++
++ /* Now we can run the state machine synchronously */
++ phy_state_machine(&phydev->state_queue.work);
+ }
+
+ /**
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 6633dd4bb649..acb754eb1ccb 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
+
+ buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ buf += headroom; /* advance address leaving hole at front of pkt */
+- ctx = (void *)(unsigned long)len;
+ get_page(alloc_frag->page);
+ alloc_frag->offset += len + headroom;
+ hole = alloc_frag->size - alloc_frag->offset;
+ if (hole < len + headroom) {
+ /* To avoid internal fragmentation, if there is very likely not
+ * enough space for another buffer, add the remaining space to
+- * the current buffer. This extra space is not included in
+- * the truesize stored in ctx.
++ * the current buffer.
+ */
+ len += hole;
+ alloc_frag->offset += hole;
+ }
+
+ sg_init_one(rq->sg, buf, len);
++ ctx = (void *)(unsigned long)len;
+ err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+ if (err < 0)
+ put_page(virt_to_head_page(buf));
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 5653d6dd38f6..d44f59ef4f72 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4168,11 +4168,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+ goto fail;
+ }
+
+- /* allocate scatter-gather table. sg support
+- * will be disabled upon allocation failure.
+- */
+- brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+-
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 4b97371c3b42..838946d17b59 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+ next_reclaimed);
++ iwlagn_check_ratid_empty(priv, sta_id, tid);
+ }
+
+ iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
+
+- iwlagn_check_ratid_empty(priv, sta_id, tid);
+ freed = 0;
+
+ /* process frames */
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 3c52867dfe28..d145e0d90227 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1241,6 +1241,8 @@ config SCSI_LPFC
+ tristate "Emulex LightPulse Fibre Channel Support"
+ depends on PCI && SCSI
+ depends on SCSI_FC_ATTRS
++ depends on NVME_TARGET_FC || NVME_TARGET_FC=n
++ depends on NVME_FC || NVME_FC=n
+ select CRC_T10DIF
+ ---help---
+ This lpfc driver supports the Emulex LightPulse
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index beb5f098f32d..05804227234d 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -437,7 +437,7 @@ static int scatter_data_area(struct tcmu_dev *udev,
+ to_offset = get_block_offset_user(udev, dbi,
+ block_remaining);
+ offset = DATA_BLOCK_SIZE - block_remaining;
+- to = (void *)(unsigned long)to + offset;
++ to += offset;
+
+ if (*iov_cnt != 0 &&
+ to_offset == iov_tail(udev, *iov)) {
+@@ -510,7 +510,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ copy_bytes = min_t(size_t, sg_remaining,
+ block_remaining);
+ offset = DATA_BLOCK_SIZE - block_remaining;
+- from = (void *)(unsigned long)from + offset;
++ from += offset;
+ tcmu_flush_dcache_range(from, copy_bytes);
+ memcpy(to + sg->length - sg_remaining, from,
+ copy_bytes);
+@@ -699,25 +699,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+
+ entry = (void *) mb + CMDR_OFF + cmd_head;
+- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
+ tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
+ entry->hdr.cmd_id = 0; /* not used for PAD */
+ entry->hdr.kflags = 0;
+ entry->hdr.uflags = 0;
++ tcmu_flush_dcache_range(entry, sizeof(*entry));
+
+ UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
++ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+ WARN_ON(cmd_head != 0);
+ }
+
+ entry = (void *) mb + CMDR_OFF + cmd_head;
+- tcmu_flush_dcache_range(entry, sizeof(*entry));
++ memset(entry, 0, command_size);
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+- entry->hdr.kflags = 0;
+- entry->hdr.uflags = 0;
+
+ /* Handle allocating space from the data area */
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+@@ -736,11 +735,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ entry->req.iov_cnt = iov_cnt;
+- entry->req.iov_dif_cnt = 0;
+
+ /* Handle BIDI commands */
++ iov_cnt = 0;
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+- iov_cnt = 0;
+ iov++;
+ ret = scatter_data_area(udev, tcmu_cmd,
+ se_cmd->t_bidi_data_sg,
+@@ -753,8 +751,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ pr_err("tcmu: alloc and scatter bidi data failed\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+- entry->req.iov_bidi_cnt = iov_cnt;
+ }
++ entry->req.iov_bidi_cnt = iov_cnt;
+
+ /*
+ * Recalculate the command's base size and size according
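+The reordering in tcmu_queue_cmd_ring() matters because the command ring is shared with userspace: flushing the dcache before the entry is written publishes stale bytes, and clearing header fields one by one leaves the rest of the entry uninitialized, so the patch memsets the whole entry and flushes after populating it. A minimal userspace sketch of that publish discipline, with flush_range() as a no-op stand-in for tcmu_flush_dcache_range():
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stddef.h>
+
+struct entry {
+	uint32_t len_op;
+	uint16_t cmd_id;
+	uint8_t  kflags, uflags;
+};
+
+static void flush_range(void *p, size_t n)
+{
+	(void)p; (void)n;	/* no-op stand-in for the dcache flush */
+}
+
+/* Publish a ring entry to a consumer that only sees flushed memory:
+ * clear the whole entry, fill it in, flush last. */
+static void publish(struct entry *e, uint16_t id)
+{
+	memset(e, 0, sizeof(*e));	/* whole entry, not field by field */
+	e->len_op = 1;			/* opcode stand-in for TCMU_OP_CMD */
+	e->cmd_id = id;
+	flush_range(e, sizeof(*e));	/* flush AFTER populating */
+}
+
+int main(void)
+{
+	struct entry e;
+
+	publish(&e, 7);
+	printf("published cmd_id=%u\n", e.cmd_id);
+	return 0;
+}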
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 33d979e9ea2a..83eecd33ad96 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4776,10 +4776,6 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
+ else
+ flush = BTRFS_RESERVE_NO_FLUSH;
+ spin_lock(&space_info->lock);
+- if (can_overcommit(root, space_info, orig, flush)) {
+- spin_unlock(&space_info->lock);
+- break;
+- }
+ if (list_empty(&space_info->tickets) &&
+ list_empty(&space_info->priority_tickets)) {
+ spin_unlock(&space_info->lock);
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index 3ec0e46de95f..22a8d532cca6 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -193,13 +193,6 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+- if (acl) {
+- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+- if (error)
+- return error;
+- inode->i_ctime = current_time(inode);
+- ext4_mark_inode_dirty(handle, inode);
+- }
+ break;
+
+ case ACL_TYPE_DEFAULT:
+@@ -221,8 +214,9 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+ value, size, 0);
+
+ kfree(value);
+- if (!error)
++ if (!error) {
+ set_cached_acl(inode, type, acl);
++ }
+
+ return error;
+ }
+@@ -232,6 +226,8 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ {
+ handle_t *handle;
+ int error, retries = 0;
++ umode_t mode = inode->i_mode;
++ int update_mode = 0;
+
+ error = dquot_initialize(inode);
+ if (error)
+@@ -242,7 +238,20 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
++ if ((type == ACL_TYPE_ACCESS) && acl) {
++ error = posix_acl_update_mode(inode, &mode, &acl);
++ if (error)
++ goto out_stop;
++ update_mode = 1;
++ }
++
+ error = __ext4_set_acl(handle, inode, type, acl);
++ if (!error && update_mode) {
++ inode->i_mode = mode;
++ inode->i_ctime = current_time(inode);
++ ext4_mark_inode_dirty(handle, inode);
++ }
++out_stop:
+ ext4_journal_stop(handle);
+ if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 02ce7e7bbdf5..407fc5aa32a7 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -521,6 +521,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ lastoff = page_offset(page);
+ bh = head = page_buffers(page);
+ do {
++ if (lastoff + bh->b_size <= startoff)
++ goto next;
+ if (buffer_uptodate(bh) ||
+ buffer_unwritten(bh)) {
+ if (whence == SEEK_DATA)
+@@ -535,6 +537,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ unlock_page(page);
+ goto out;
+ }
++next:
+ lastoff += bh->b_size;
+ bh = bh->b_this_page;
+ } while (bh != head);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index c3ed9021b781..035cd3f4785e 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1927,7 +1927,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+ n_desc_blocks = o_desc_blocks +
+ le16_to_cpu(es->s_reserved_gdt_blocks);
+ n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
+- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
++ n_blocks_count = (ext4_fsblk_t)n_group *
++ EXT4_BLOCKS_PER_GROUP(sb);
+ n_group--; /* set to last group number */
+ }
+
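+The resize fix is a 32-bit multiplication overflow: n_group * EXT4_BLOCKS_PER_GROUP(sb) was evaluated in 32 bits even though the result lands in a 64-bit ext4_fsblk_t, so filesystems resized past 2^32 blocks wrapped. Casting one operand first forces a 64-bit multiply. A standalone demonstration with representative (made-up) values:
+
+#include <stdio.h>
+#include <stdint.h>
+
+int main(void)
+{
+	uint32_t n_group = 0x20000;		/* ~131k block groups */
+	uint32_t blocks_per_group = 32768;	/* typical 4K-block ext4 */
+
+	/* 32-bit multiply wraps: 2^17 * 2^15 == 2^32 == 0 (mod 2^32) */
+	uint32_t wrong = n_group * blocks_per_group;
+
+	/* widening one operand first keeps the full product, which is
+	 * what the (ext4_fsblk_t) cast in the patch does */
+	uint64_t right = (uint64_t)n_group * blocks_per_group;
+
+	printf("32-bit product: %u\n", wrong);
+	printf("64-bit product: %llu\n", (unsigned long long)right);
+	return 0;
+}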
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index f5a7faac39a7..074169a54162 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7407,7 +7407,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
+ cdata->res.server_scope = NULL;
+ }
+ /* Save the EXCHANGE_ID verifier session trunk tests */
+- memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
++ memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
+ sizeof(clp->cl_confirm.data));
+ }
+ out:
+@@ -7444,7 +7444,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
+ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ u32 sp4_how, struct rpc_xprt *xprt)
+ {
+- nfs4_verifier verifier;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+ .rpc_cred = cred,
+@@ -7468,8 +7467,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ return -ENOMEM;
+ }
+
+- if (!xprt)
+- nfs4_init_boot_verifier(clp, &verifier);
++ nfs4_init_boot_verifier(clp, &calldata->args.verifier);
+
+ status = nfs4_init_uniform_client_string(clp);
+ if (status)
+@@ -7510,9 +7508,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ task_setup_data.rpc_xprt = xprt;
+ task_setup_data.flags =
+ RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
+- calldata->args.verifier = &clp->cl_confirm;
+- } else {
+- calldata->args.verifier = &verifier;
++ memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
++ sizeof(calldata->args.verifier.data));
+ }
+ calldata->args.client = clp;
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 3aebfdc82b30..b0cbee2b2422 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -1765,7 +1765,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
+ int len = 0;
+
+ encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
+- encode_nfs4_verifier(xdr, args->verifier);
++ encode_nfs4_verifier(xdr, &args->verifier);
+
+ encode_string(xdr, strlen(args->client->cl_owner_id),
+ args->client->cl_owner_id);
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index dc22ba8c710f..e50a387959bf 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -240,18 +240,6 @@ int ocfs2_set_acl(handle_t *handle,
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
+- if (acl) {
+- umode_t mode;
+-
+- ret = posix_acl_update_mode(inode, &mode, &acl);
+- if (ret)
+- return ret;
+-
+- ret = ocfs2_acl_set_mode(inode, di_bh,
+- handle, mode);
+- if (ret)
+- return ret;
+- }
+ break;
+ case ACL_TYPE_DEFAULT:
+ name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+@@ -289,7 +277,19 @@ int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
+ if (had_lock < 0)
+ return had_lock;
++ if (type == ACL_TYPE_ACCESS && acl) {
++ umode_t mode;
++
++ status = posix_acl_update_mode(inode, &mode, &acl);
++ if (status)
++ goto unlock;
++
++ status = ocfs2_acl_set_mode(inode, bh, NULL, mode);
++ if (status)
++ goto unlock;
++ }
+ status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
++unlock:
+ ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
+ brelse(bh);
+ return status;
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 1d622f276e3a..26f9591b04b1 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -851,6 +851,9 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+ __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
+ spin_unlock(&ctx->fault_pending_wqh.lock);
+
++ /* Flush pending events that may still wait on event_wqh */
++ wake_up_all(&ctx->event_wqh);
++
+ wake_up_poll(&ctx->fd_wqh, POLLHUP);
+ userfaultfd_ctx_put(ctx);
+ return 0;
+@@ -1645,6 +1648,8 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
+ ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
+ uffdio_zeropage.range.len);
+ mmput(ctx->mm);
++ } else {
++ return -ENOSPC;
+ }
+ if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
+ return -EFAULT;
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 0f2a80377520..30b86efea2bc 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -58,7 +58,6 @@ enum cpuhp_state {
+ CPUHP_XEN_EVTCHN_PREPARE,
+ CPUHP_ARM_SHMOBILE_SCU_PREPARE,
+ CPUHP_SH_SH3X_PREPARE,
+- CPUHP_BLK_MQ_PREPARE,
+ CPUHP_NET_FLOW_PREPARE,
+ CPUHP_TOPOLOGY_PREPARE,
+ CPUHP_NET_IUCV_PREPARE,
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 119a3f9604b0..898cfe2eeb42 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -18,6 +18,19 @@
+
+ #ifdef CONFIG_CPUSETS
+
++/*
++ * Static branch rewrites can happen in an arbitrary order for a given
++ * key. In code paths where we need to loop with read_mems_allowed_begin() and
++ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
++ * to ensure that begin() always gets rewritten before retry() in the
++ * disabled -> enabled transition. If not, then if local irqs are disabled
++ * around the loop, we can deadlock since retry() would always be
++ * comparing the latest value of the mems_allowed seqcount against 0 as
++ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
++ * transition should happen in reverse order for the same reasons (want to stop
++ * looking at real value of mems_allowed.sequence in retry() first).
++ */
++extern struct static_key_false cpusets_pre_enable_key;
+ extern struct static_key_false cpusets_enabled_key;
+ static inline bool cpusets_enabled(void)
+ {
+@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)
+
+ static inline void cpuset_inc(void)
+ {
++ static_branch_inc(&cpusets_pre_enable_key);
+ static_branch_inc(&cpusets_enabled_key);
+ }
+
+ static inline void cpuset_dec(void)
+ {
+ static_branch_dec(&cpusets_enabled_key);
++ static_branch_dec(&cpusets_pre_enable_key);
+ }
+
+ extern int cpuset_init(void);
+@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void);
+ */
+ static inline unsigned int read_mems_allowed_begin(void)
+ {
+- if (!cpusets_enabled())
++ if (!static_branch_unlikely(&cpusets_pre_enable_key))
+ return 0;
+
+ return read_seqcount_begin(&current->mems_allowed_seq);
+@@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void)
+ */
+ static inline bool read_mems_allowed_retry(unsigned int seq)
+ {
+- if (!cpusets_enabled())
++ if (!static_branch_unlikely(&cpusets_enabled_key))
+ return false;
+
+ return read_seqcount_retry(&current->mems_allowed_seq, seq);
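+The cpuset change splits one static key into two so that, while the branches are rewritten, read_mems_allowed_begin() always flips on before read_mems_allowed_retry(); otherwise retry() could compare a live seqcount against the 0 a still-disabled begin() returned and spin forever with local IRQs off. The read side is the usual seqcount loop, shown here as a simplified single-threaded sketch (not SMP-safe; the real primitives add memory barriers):
+
+#include <stdio.h>
+
+static unsigned int seq;	/* even = stable, odd = write in progress */
+static int mems_allowed;
+
+static unsigned int read_begin(void)
+{
+	return seq;
+}
+
+static int read_retry(unsigned int s)
+{
+	return (s & 1) || s != seq;	/* raced with a writer: try again */
+}
+
+static void writer_update(int v)
+{
+	seq++;			/* odd: readers started now will retry */
+	mems_allowed = v;
+	seq++;			/* even again: value is stable */
+}
+
+int main(void)
+{
+	unsigned int s;
+	int v;
+
+	writer_update(42);
+	do {
+		s = read_begin();
+		v = mems_allowed;	/* speculative read */
+	} while (read_retry(s));
+	printf("read %d\n", v);
+	return 0;
+}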
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index edafedb7b509..e21a0b3d6454 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -7718,8 +7718,10 @@ struct mlx5_ifc_pcam_reg_bits {
+ };
+
+ struct mlx5_ifc_mcam_enhanced_features_bits {
+- u8 reserved_at_0[0x7f];
++ u8 reserved_at_0[0x7d];
+
++ u8 mtpps_enh_out_per_adj[0x1];
++ u8 mtpps_fs[0x1];
+ u8 pcie_performance_group[0x1];
+ };
+
+@@ -8115,7 +8117,8 @@ struct mlx5_ifc_mtpps_reg_bits {
+ u8 reserved_at_78[0x4];
+ u8 cap_pin_4_mode[0x4];
+
+- u8 reserved_at_80[0x80];
++ u8 field_select[0x20];
++ u8 reserved_at_a0[0x60];
+
+ u8 enable[0x1];
+ u8 reserved_at_101[0xb];
+@@ -8130,8 +8133,9 @@ struct mlx5_ifc_mtpps_reg_bits {
+
+ u8 out_pulse_duration[0x10];
+ u8 out_periodic_adjustment[0x10];
++ u8 enhanced_out_periodic_adjustment[0x20];
+
+- u8 reserved_at_1a0[0x60];
++ u8 reserved_at_1c0[0x20];
+ };
+
+ struct mlx5_ifc_mtppse_reg_bits {
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 45cdb27791a3..ab8f7e11c160 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -494,6 +494,10 @@ struct mm_struct {
+ * PROT_NONE or PROT_NUMA mapped page.
+ */
+ bool tlb_flush_pending;
++#endif
++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++ /* See flush_tlb_batched_pending() */
++ bool tlb_flush_batched;
+ #endif
+ struct uprobes_state uprobes_state;
+ #ifdef CONFIG_HUGETLB_PAGE
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index b28c83475ee8..7882a07d973e 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1222,7 +1222,7 @@ struct nfs41_state_protection {
+
+ struct nfs41_exchange_id_args {
+ struct nfs_client *client;
+- nfs4_verifier *verifier;
++ nfs4_verifier verifier;
+ u32 flags;
+ struct nfs41_state_protection state_protect;
+ };
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index c102ef65cb64..db6dc9dc0482 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -323,6 +323,7 @@ enum {
+
+ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
+ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
+ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
++ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
+
+ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
+@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+ #define alloc_ordered_workqueue(fmt, flags, args...) \
+- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
++ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
++ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
+
+ #define create_workqueue(name) \
+ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 069582ee5d7f..06db0c3ec384 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+
+ #define _sctp_walk_params(pos, chunk, end, member)\
+ for (pos.v = chunk->member;\
++ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
++ (void *)chunk + end) &&\
+ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
+@@ -479,6 +481,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+ #define _sctp_walk_errors(err, chunk_hdr, end)\
+ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
+ sizeof(sctp_chunkhdr_t));\
++ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
++ (void *)chunk_hdr + end) &&\
+ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+ ntohs(err->length) >= sizeof(sctp_errhdr_t); \
+ err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
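+Both SCTP walkers shared the flaw that ntohs(pos.p->length) was read before checking that the length field itself lies within the chunk, so a truncated parameter header let the macros read past the buffer; the added clause validates that the header fits before its length is trusted. A standalone sketch of the same defensive TLV walk, where struct paramhdr is a simplified stand-in for sctp_paramhdr_t:
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <arpa/inet.h>
+
+struct paramhdr {
+	uint16_t type;
+	uint16_t length;	/* network order; covers this header too */
+};
+
+/* Before trusting hdr->length, verify the header itself fits in the
+ * remaining buffer; this is the check the patch adds to both macros. */
+static void walk(const uint8_t *buf, size_t buflen)
+{
+	size_t off = 0;
+
+	while (off + sizeof(struct paramhdr) <= buflen) {
+		const struct paramhdr *hdr = (const void *)(buf + off);
+		uint16_t len = ntohs(hdr->length);
+
+		if (len < sizeof(*hdr) || off + len > buflen)
+			break;	/* malformed or truncated parameter */
+		printf("param type=%u len=%u\n", ntohs(hdr->type), len);
+		off += (len + 3u) & ~3u;	/* pad to 4, like SCTP_PAD4 */
+	}
+}
+
+int main(void)
+{
+	uint8_t buf[12] = { 0 };
+	struct paramhdr hdr = { htons(1), htons(8) };
+
+	memcpy(buf, &hdr, sizeof(hdr));
+	walk(buf, sizeof(buf));
+	return 0;
+}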
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 3391dbd73959..1933442cf1a6 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -265,6 +265,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
+ }
+
+ void udp_v4_early_demux(struct sk_buff *skb);
++void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+ int udp_get_port(struct sock *sk, unsigned short snum,
+ int (*saddr_cmp)(const struct sock *,
+ const struct sock *));
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 5170fd81e1fd..375893d8d4a5 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -795,10 +795,6 @@ struct snd_soc_component_driver {
+ int (*suspend)(struct snd_soc_component *);
+ int (*resume)(struct snd_soc_component *);
+
+- /* pcm creation and destruction */
+- int (*pcm_new)(struct snd_soc_pcm_runtime *);
+- void (*pcm_free)(struct snd_pcm *);
+-
+ /* DT */
+ int (*of_xlate_dai_name)(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+@@ -872,8 +868,6 @@ struct snd_soc_component {
+ void (*remove)(struct snd_soc_component *);
+ int (*suspend)(struct snd_soc_component *);
+ int (*resume)(struct snd_soc_component *);
+- int (*pcm_new)(struct snd_soc_pcm_runtime *);
+- void (*pcm_free)(struct snd_pcm *);
+
+ /* machine specific init */
+ int (*init)(struct snd_soc_component *component);
+diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
+index 00f4d6bf048f..7a01568e5e22 100644
+--- a/kernel/cgroup/cgroup-internal.h
++++ b/kernel/cgroup/cgroup-internal.h
+@@ -33,6 +33,9 @@ struct cgroup_taskset {
+ struct list_head src_csets;
+ struct list_head dst_csets;
+
++ /* the number of tasks in the set */
++ int nr_tasks;
++
+ /* the subsys currently being processed */
+ int ssid;
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 8d4e85eae42c..2c62e4b3f198 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1948,6 +1948,8 @@ static void cgroup_migrate_add_task(struct task_struct *task,
+ if (!cset->mg_src_cgrp)
+ return;
+
++ mgctx->tset.nr_tasks++;
++
+ list_move_tail(&task->cg_list, &cset->mg_tasks);
+ if (list_empty(&cset->mg_node))
+ list_add_tail(&cset->mg_node,
+@@ -2036,21 +2038,19 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
+ struct css_set *cset, *tmp_cset;
+ int ssid, failed_ssid, ret;
+
+- /* methods shouldn't be called if no task is actually migrating */
+- if (list_empty(&tset->src_csets))
+- return 0;
+-
+ /* check that we can legitimately attach to the cgroup */
+- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+- if (ss->can_attach) {
+- tset->ssid = ssid;
+- ret = ss->can_attach(tset);
+- if (ret) {
+- failed_ssid = ssid;
+- goto out_cancel_attach;
++ if (tset->nr_tasks) {
++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++ if (ss->can_attach) {
++ tset->ssid = ssid;
++ ret = ss->can_attach(tset);
++ if (ret) {
++ failed_ssid = ssid;
++ goto out_cancel_attach;
++ }
+ }
+- }
+- } while_each_subsys_mask();
++ } while_each_subsys_mask();
++ }
+
+ /*
+ * Now that we're guaranteed success, proceed to move all tasks to
+@@ -2077,25 +2077,29 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
+ */
+ tset->csets = &tset->dst_csets;
+
+- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+- if (ss->attach) {
+- tset->ssid = ssid;
+- ss->attach(tset);
+- }
+- } while_each_subsys_mask();
++ if (tset->nr_tasks) {
++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++ if (ss->attach) {
++ tset->ssid = ssid;
++ ss->attach(tset);
++ }
++ } while_each_subsys_mask();
++ }
+
+ ret = 0;
+ goto out_release_tset;
+
+ out_cancel_attach:
+- do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+- if (ssid == failed_ssid)
+- break;
+- if (ss->cancel_attach) {
+- tset->ssid = ssid;
+- ss->cancel_attach(tset);
+- }
+- } while_each_subsys_mask();
++ if (tset->nr_tasks) {
++ do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++ if (ssid == failed_ssid)
++ break;
++ if (ss->cancel_attach) {
++ tset->ssid = ssid;
++ ss->cancel_attach(tset);
++ }
++ } while_each_subsys_mask();
++ }
+ out_release_tset:
+ spin_lock_irq(&css_set_lock);
+ list_splice_init(&tset->dst_csets, &tset->src_csets);
+@@ -2917,11 +2921,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
+ cgrp->subtree_control &= ~disable;
+
+ ret = cgroup_apply_control(cgrp);
+-
+ cgroup_finalize_control(cgrp, ret);
++ if (ret)
++ goto out_unlock;
+
+ kernfs_activate(cgrp->kn);
+- ret = 0;
+ out_unlock:
+ cgroup_kn_unlock(of->kn);
+ return ret ?: nbytes;
+@@ -4574,6 +4578,10 @@ int __init cgroup_init(void)
+
+ if (ss->bind)
+ ss->bind(init_css_set.subsys[ssid]);
++
++ mutex_lock(&cgroup_mutex);
++ css_populate_dir(init_css_set.subsys[ssid]);
++ mutex_unlock(&cgroup_mutex);
+ }
+
+ /* init_css_set.subsys[] has been updated, re-hash */
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index ae643412948a..8f26927f16a1 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -63,6 +63,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+
++DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
+ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
+
+ /* See "Frequency meter" comments, below. */
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 152a706ef8b8..d3f33020a06b 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1495,7 +1495,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+ base->is_idle = false;
+ } else {
+ if (!is_max_delta)
+- expires = basem + (nextevt - basej) * TICK_NSEC;
++ expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
+ /*
+ * If we expect to sleep more than a tick, mark the base idle:
+ */
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index c74bf39ef764..6effbcb7a3d6 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3744,8 +3744,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+ return -EINVAL;
+
+ /* creating multiple pwqs breaks ordering guarantee */
+- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+- return -EINVAL;
++ if (!list_empty(&wq->pwqs)) {
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
++ return -EINVAL;
++
++ wq->flags &= ~__WQ_ORDERED;
++ }
+
+ ctx = apply_wqattrs_prepare(wq, attrs);
+ if (!ctx)
+@@ -3929,6 +3933,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+ struct workqueue_struct *wq;
+ struct pool_workqueue *pwq;
+
++ /*
++ * Unbound && max_active == 1 used to imply ordered, which is no
++ * longer the case on NUMA machines due to per-node pools. While
++ * alloc_ordered_workqueue() is the right way to create an ordered
++ * workqueue, keep the previous behavior to avoid subtle breakages
++ * on NUMA.
++ */
++ if ((flags & WQ_UNBOUND) && max_active == 1)
++ flags |= __WQ_ORDERED;
++
+ /* see the comment above the definition of WQ_POWER_EFFICIENT */
+ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
+ flags |= WQ_UNBOUND;
+@@ -4119,13 +4133,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+ struct pool_workqueue *pwq;
+
+ /* disallow meddling with max_active for ordered workqueues */
+- if (WARN_ON(wq->flags & __WQ_ORDERED))
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+ return;
+
+ max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
+
+ mutex_lock(&wq->mutex);
+
++ wq->flags &= ~__WQ_ORDERED;
+ wq->saved_max_active = max_active;
+
+ for_each_pwq(pwq, wq)
+@@ -5253,7 +5268,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
+ * attributes breaks ordering guarantee. Disallow exposing ordered
+ * workqueues.
+ */
+- if (WARN_ON(wq->flags & __WQ_ORDERED))
++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+ return -EINVAL;
+
+ wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
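+Taken together, these workqueue hunks let unbound max_active==1 queues keep their implicit ordering while still accepting NUMA attribute changes (which silently drop ordering), and reserve the hard refusal for queues created via alloc_ordered_workqueue(). A small sketch of the flag discipline; note every internal flag needs a distinct bit, which is why __WQ_ORDERED_EXPLICIT takes 1 << 19 rather than sharing 1 << 18 with __WQ_LEGACY:
+
+#include <stdio.h>
+
+enum {
+	WQ_UNBOUND		= 1 << 1,
+	__WQ_ORDERED		= 1 << 17,
+	__WQ_LEGACY		= 1 << 18,
+	__WQ_ORDERED_EXPLICIT	= 1 << 19,	/* must not reuse 1 << 18 */
+};
+
+int main(void)
+{
+	unsigned int flags = WQ_UNBOUND | __WQ_ORDERED | __WQ_ORDERED_EXPLICIT;
+
+	/* apply_workqueue_attrs_locked(): explicitly ordered queues keep
+	 * ordering and reject new attrs; implicitly ordered ones just
+	 * drop __WQ_ORDERED and accept them. */
+	if (flags & __WQ_ORDERED_EXPLICIT)
+		printf("explicitly ordered: reject attribute change\n");
+	else
+		flags &= ~__WQ_ORDERED;
+	return 0;
+}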
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3eedb187e549..cc289933f462 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4095,6 +4095,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long vaddr = *position;
+ unsigned long remainder = *nr_pages;
+ struct hstate *h = hstate_vma(vma);
++ int err = -EFAULT;
+
+ while (vaddr < vma->vm_end && remainder) {
+ pte_t *pte;
+@@ -4170,11 +4171,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ }
+ ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
+ if (ret & VM_FAULT_ERROR) {
+- int err = vm_fault_to_errno(ret, flags);
+-
+- if (err)
+- return err;
+-
++ err = vm_fault_to_errno(ret, flags);
+ remainder = 0;
+ break;
+ }
+@@ -4229,7 +4226,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ */
+ *position = vaddr;
+
+- return i ? i : -EFAULT;
++ return i ? i : err;
+ }
+
+ #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+diff --git a/mm/internal.h b/mm/internal.h
+index 0e4f558412fb..9c8a2bfb975c 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -498,6 +498,7 @@ extern struct workqueue_struct *mm_percpu_wq;
+ #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ void try_to_unmap_flush(void);
+ void try_to_unmap_flush_dirty(void);
++void flush_tlb_batched_pending(struct mm_struct *mm);
+ #else
+ static inline void try_to_unmap_flush(void)
+ {
+@@ -505,7 +506,9 @@ static inline void try_to_unmap_flush(void)
+ static inline void try_to_unmap_flush_dirty(void)
+ {
+ }
+-
++static inline void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++}
+ #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
+ extern const struct trace_print_flags pageflag_names[];
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 25b78ee4fc2c..75d2cffbe61d 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -320,6 +320,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
+
+ tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+ orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ flush_tlb_batched_pending(mm);
+ arch_enter_lazy_mmu_mode();
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+diff --git a/mm/memory.c b/mm/memory.c
+index bb11c474857e..b0c3d1556a94 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1197,6 +1197,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
+ init_rss_vec(rss);
+ start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte = start_pte;
++ flush_tlb_batched_pending(mm);
+ arch_enter_lazy_mmu_mode();
+ do {
+ pte_t ptent = *pte;
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 8edd0d576254..f42749e6bf4e 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -66,6 +66,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ atomic_read(&vma->vm_mm->mm_users) == 1)
+ target_node = numa_node_id();
+
++ flush_tlb_batched_pending(vma->vm_mm);
+ arch_enter_lazy_mmu_mode();
+ do {
+ oldpte = *pte;
+diff --git a/mm/mremap.c b/mm/mremap.c
+index cd8a1b199ef9..3f23715d3c69 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -152,6 +152,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+ new_ptl = pte_lockptr(mm, new_pmd);
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
++ flush_tlb_batched_pending(vma->vm_mm);
+ arch_enter_lazy_mmu_mode();
+
+ for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
+@@ -428,6 +429,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
+ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+ unsigned long new_addr, unsigned long new_len, bool *locked,
+ struct vm_userfaultfd_ctx *uf,
++ struct list_head *uf_unmap_early,
+ struct list_head *uf_unmap)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -446,7 +448,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+ if (addr + old_len > new_addr && new_addr + new_len > addr)
+ goto out;
+
+- ret = do_munmap(mm, new_addr, new_len, NULL);
++ ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
+ if (ret)
+ goto out;
+
+@@ -514,6 +516,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ unsigned long charged = 0;
+ bool locked = false;
+ struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
++ LIST_HEAD(uf_unmap_early);
+ LIST_HEAD(uf_unmap);
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+@@ -541,7 +544,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+
+ if (flags & MREMAP_FIXED) {
+ ret = mremap_to(addr, old_len, new_addr, new_len,
+- &locked, &uf, &uf_unmap);
++ &locked, &uf, &uf_unmap_early, &uf_unmap);
+ goto out;
+ }
+
+@@ -621,6 +624,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ up_write(&current->mm->mmap_sem);
+ if (locked && new_len > old_len)
+ mm_populate(new_addr + old_len, new_len - old_len);
++ userfaultfd_unmap_complete(mm, &uf_unmap_early);
+ mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
+ userfaultfd_unmap_complete(mm, &uf_unmap);
+ return ret;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index d405f0e0ee96..9835d19fe143 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+ cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+ tlb_ubc->flush_required = true;
+
++ /*
++ * Ensure compiler does not re-order the setting of tlb_flush_batched
++ * before the PTE is cleared.
++ */
++ barrier();
++ mm->tlb_flush_batched = true;
++
+ /*
+ * If the PTE was dirty then it's best to assume it's writable. The
+ * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+
+ return should_defer;
+ }
++
++/*
++ * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
++ * operation such as mprotect or munmap to race between reclaim unmapping
++ * the page and flushing the page. If this race occurs, it potentially allows
++ * access to data via a stale TLB entry. Tracking all mm's that have TLB
++ * batching in flight would be expensive during reclaim so instead track
++ * whether TLB batching occurred in the past and if so then do a flush here
++ * if required. This will cost one additional flush per reclaim cycle paid
++ * by the first operation at risk such as mprotect and munmap.
++ *
++ * This must be called under the PTL so that an access to tlb_flush_batched
++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
++ * via the PTL.
++ */
++void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++ if (mm->tlb_flush_batched) {
++ flush_tlb_mm(mm);
++
++ /*
++ * Do not allow the compiler to re-order the clearing of
++ * tlb_flush_batched before the tlb is flushed.
++ */
++ barrier();
++ mm->tlb_flush_batched = false;
++ }
++}
+ #else
+ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+ {
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index 27fad31784a8..18f9cb9aa87d 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
+
+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+ return -EFAULT;
++ ifr.ifr_name[IFNAMSIZ-1] = 0;
+
+ error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
+ if (error)
+@@ -423,6 +424,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ if (copy_from_user(&iwr, arg, sizeof(iwr)))
+ return -EFAULT;
+
++ iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
++
+ return wext_handle_ioctl(net, &iwr, cmd, arg);
+ }
+
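+Both dev_ioctl() hunks close the same hole: copy_from_user() copies the raw bytes of a fixed-size name field, and userspace can fill every byte with no terminating NUL, after which any string operation runs off the end of the struct. Forcing the final byte to zero bounds it, as this standalone sketch shows:
+
+#include <stdio.h>
+#include <string.h>
+
+#define IFNAMSIZ 16
+
+/* Userspace may fill all IFNAMSIZ bytes with no terminating NUL; any
+ * later strlen()/strcpy() would run off the end of the struct. */
+static void sanitize_name(char name[IFNAMSIZ])
+{
+	name[IFNAMSIZ - 1] = '\0';	/* the one-line fix in the patch */
+}
+
+int main(void)
+{
+	char name[IFNAMSIZ];
+
+	memset(name, 'A', sizeof(name));	/* hostile input, no NUL */
+	sanitize_name(name);
+	printf("bounded length: %zu\n", strlen(name));	/* 15 */
+	return 0;
+}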
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 467a2f4510a7..52bfeb60c886 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1977,7 +1977,8 @@ static int do_setlink(const struct sk_buff *skb,
+ struct sockaddr *sa;
+ int len;
+
+- len = sizeof(sa_family_t) + dev->addr_len;
++ len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
++ sizeof(*sa));
+ sa = kmalloc(len, GFP_KERNEL);
+ if (!sa) {
+ err = -ENOMEM;
+@@ -4165,6 +4166,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
+
+ switch (event) {
+ case NETDEV_REBOOT:
++ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGENAME:
+ case NETDEV_FEAT_CHANGE:
+ case NETDEV_BONDING_FAILOVER:
+diff --git a/net/dccp/feat.c b/net/dccp/feat.c
+index 1704948e6a12..f227f002c73d 100644
+--- a/net/dccp/feat.c
++++ b/net/dccp/feat.c
+@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
+ * singleton values (which always leads to failure).
+ * These settings can still (later) be overridden via sockopts.
+ */
+- if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
+- ccid_get_builtin_ccids(&rx.val, &rx.len))
++ if (ccid_get_builtin_ccids(&tx.val, &tx.len))
+ return -ENOBUFS;
++ if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
++ kfree(tx.val);
++ return -ENOBUFS;
++ }
+
+ if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
+ !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
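+Splitting the || is not cosmetic: when the first ccid_get_builtin_ccids() call succeeds and the second fails, the old combined condition returned -ENOBUFS while leaking tx.val. A standalone sketch of the error-path pattern, with get_list() as a hypothetical stand-in for ccid_get_builtin_ccids():
+
+#include <stdlib.h>
+
+struct list { unsigned char *val; int len; };
+
+/* hypothetical stand-in for ccid_get_builtin_ccids() */
+static int get_list(struct list *l)
+{
+	l->len = 4;
+	l->val = malloc(l->len);
+	return l->val ? 0 : -1;
+}
+
+static int init_pair(struct list *tx, struct list *rx)
+{
+	if (get_list(tx))
+		return -1;
+	if (get_list(rx)) {
+		free(tx->val);	/* release the earlier allocation */
+		tx->val = NULL;
+		return -1;
+	}
+	return 0;
+}
+
+int main(void)
+{
+	struct list tx = { 0 }, rx = { 0 };
+
+	if (init_pair(&tx, &rx) == 0) {
+		free(tx.val);
+		free(rx.val);
+	}
+	return 0;
+}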
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index f75482bdee9a..97368f229876 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ goto drop_and_free;
+
+ inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
++ reqsk_put(req);
+ return 0;
+
+ drop_and_free:
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 992621172220..cf3e40df4765 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ goto drop_and_free;
+
+ inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
++ reqsk_put(req);
+ return 0;
+
+ drop_and_free:
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 83e3ed258467..3acc8261477c 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1327,13 +1327,14 @@ static struct pernet_operations fib_net_ops = {
+
+ void __init ip_fib_init(void)
+ {
+- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
++ fib_trie_init();
+
+ register_pernet_subsys(&fib_net_ops);
++
+ register_netdevice_notifier(&fib_netdev_notifier);
+ register_inetaddr_notifier(&fib_inetaddr_notifier);
+
+- fib_trie_init();
++ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
++ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
++ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index ad9ad4aab5da..ce7bc2e5175a 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1372,7 +1372,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
+ return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
+ &info.info);
+ case FIB_EVENT_NH_DEL:
+- if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
++ if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+ fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
+ (fib_nh->nh_flags & RTNH_F_DEAD))
+ return call_fib_notifiers(dev_net(fib_nh->nh_dev),
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 0257d965f111..4a97fe20f59e 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -332,6 +332,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
+ treq->rcv_isn = ntohl(th->seq) - 1;
+ treq->snt_isn = cookie;
+ treq->ts_off = 0;
++ treq->txhash = net_tx_rndhash();
+ req->mss = mss;
+ ireq->ir_num = ntohs(th->dest);
+ ireq->ir_rmt_port = th->source;
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index b89bce4c721e..96c95c8d981e 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -113,7 +113,8 @@ struct bbr {
+ cwnd_gain:10, /* current gain for setting cwnd */
+ full_bw_cnt:3, /* number of rounds without large bw gains */
+ cycle_idx:3, /* current index in pacing_gain cycle array */
+- unused_b:6;
++ has_seen_rtt:1, /* have we seen an RTT sample yet? */
++ unused_b:5;
+ u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
+ u32 full_bw; /* recent bw, to estimate if pipe is full */
+ };
+@@ -212,6 +213,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+ return rate >> BW_SCALE;
+ }
+
++/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
++static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
++{
++ u64 rate = bw;
++
++ rate = bbr_rate_bytes_per_sec(sk, rate, gain);
++ rate = min_t(u64, rate, sk->sk_max_pacing_rate);
++ return rate;
++}
++
++/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
++static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct bbr *bbr = inet_csk_ca(sk);
++ u64 bw;
++ u32 rtt_us;
++
++ if (tp->srtt_us) { /* any RTT sample yet? */
++ rtt_us = max(tp->srtt_us >> 3, 1U);
++ bbr->has_seen_rtt = 1;
++ } else { /* no RTT sample yet */
++ rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
++ }
++ bw = (u64)tp->snd_cwnd * BW_UNIT;
++ do_div(bw, rtt_us);
++ sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
++}
++
+ /* Pace using current bw estimate and a gain factor. In order to help drive the
+ * network toward lower queues while maintaining high utilization and low
+ * latency, the average pacing rate aims to be slightly (~1%) lower than the
+@@ -221,12 +251,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+ */
+ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
+ {
++ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+- u64 rate = bw;
++ u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
+
+- rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+- rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+- if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
++ if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
++ bbr_init_pacing_rate_from_rtt(sk);
++ if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
+ sk->sk_pacing_rate = rate;
+ }
+
+@@ -799,7 +830,6 @@ static void bbr_init(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+- u64 bw;
+
+ bbr->prior_cwnd = 0;
+ bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
+@@ -815,11 +845,8 @@ static void bbr_init(struct sock *sk)
+
+ minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
+
+- /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+- bw = (u64)tp->snd_cwnd * BW_UNIT;
+- do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
+- sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
+- bbr_set_pacing_rate(sk, bw, bbr_high_gain);
++ bbr->has_seen_rtt = 0;
++ bbr_init_pacing_rate_from_rtt(sk);
+
+ bbr->restore_cwnd = 0;
+ bbr->round_start = 0;
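+bbr_init() previously divided by srtt_us once at connection setup, when there is often no RTT sample yet, freezing the pacing rate on a guess; the refactor falls back to a nominal 1 ms RTT and recomputes once has_seen_rtt flips on the first real sample. A standalone sketch of the scaled-bandwidth arithmetic, mirroring the driver's BW_SCALE fixed-point convention and tcp_sock's srtt stored left-shifted by 3:
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define BW_SCALE	24
+#define BW_UNIT		(1ULL << BW_SCALE)
+#define USEC_PER_MSEC	1000u
+
+/* bw = cwnd * BW_UNIT / rtt_us, computed in 64 bits so a large cwnd
+ * cannot overflow the scaled product. */
+static uint64_t init_bw(uint32_t snd_cwnd, uint32_t srtt_us)
+{
+	uint32_t rtt_us;
+
+	if (srtt_us)				/* any RTT sample yet? */
+		rtt_us = srtt_us >> 3 ? srtt_us >> 3 : 1;
+	else					/* no: nominal 1ms default */
+		rtt_us = USEC_PER_MSEC;
+	return (uint64_t)snd_cwnd * BW_UNIT / rtt_us;
+}
+
+int main(void)
+{
+	printf("no sample:  %llu\n", (unsigned long long)init_bw(10, 0));
+	printf("srtt ~50us: %llu\n", (unsigned long long)init_bw(10, 8 * 50));
+	return 0;
+}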
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 1d6219bf2d6b..b9a84eba60b8 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1762,7 +1762,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ /* For TCP sockets, sk_rx_dst is protected by socket lock
+ * For UDP, we use xchg() to guard against concurrent changes.
+ */
+-static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
++void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+ struct dst_entry *old;
+
+@@ -2120,6 +2120,7 @@ void udp_destroy_sock(struct sock *sk)
+ encap_destroy(sk);
+ }
+ }
++EXPORT_SYMBOL(udp_sk_rx_dst_set);
+
+ /*
+ * Socket option code for UDP
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 1699acb2fa2c..be0306778938 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -673,8 +673,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ *prevhdr = NEXTHDR_FRAGMENT;
+ tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
+ if (!tmp_hdr) {
+- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+- IPSTATS_MIB_FRAGFAILS);
+ err = -ENOMEM;
+ goto fail;
+ }
+@@ -793,8 +791,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+ hroom + troom, GFP_ATOMIC);
+ if (!frag) {
+- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+- IPSTATS_MIB_FRAGFAILS);
+ err = -ENOMEM;
+ goto fail;
+ }
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index e9065b8d3af8..abb2c307fbe8 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
+
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+- u16 offset = sizeof(struct ipv6hdr);
++ unsigned int offset = sizeof(struct ipv6hdr);
+ unsigned int packet_len = skb_tail_pointer(skb) -
+ skb_network_header(skb);
+ int found_rhdr = 0;
+@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
++ unsigned int len;
+
+ switch (**nexthdr) {
+
+@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+
+ exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+ offset);
+- offset += ipv6_optlen(exthdr);
++ len = ipv6_optlen(exthdr);
++ if (len + offset >= IPV6_MAXPLEN)
++ return -EINVAL;
++ offset += len;
+ *nexthdr = &exthdr->nexthdr;
+ }
+
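+ip6_find_1stfragopt() accumulated extension-header lengths in a u16, so a crafted header chain summing past 65535 wrapped the offset and the parser revisited the same options forever; widening the accumulator and rejecting anything reaching IPV6_MAXPLEN closes both the wrap and the loop. A standalone demonstration of the wrap:
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define IPV6_MAXPLEN 65535
+
+int main(void)
+{
+	uint16_t off16 = 65528;		/* headers parsed so far */
+	unsigned int off = 65528;
+	unsigned int len = 24;		/* next extension header */
+
+	off16 += len;			/* wraps to 16: endless loop */
+	off += len;			/* 65552: detectably oversized */
+
+	printf("u16 offset:  %u\n", off16);
+	printf("uint offset: %u (>= IPV6_MAXPLEN, reject)\n", off);
+	return 0;
+}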
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 5abc3692b901..ca7895454cec 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -215,6 +215,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ treq->rcv_isn = ntohl(th->seq) - 1;
+ treq->snt_isn = cookie;
+ treq->ts_off = 0;
++ treq->txhash = net_tx_rndhash();
+
+ /*
+ * We need to lookup the dst_entry to get the correct window size.
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 75703fda23e7..592270c310f4 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -291,11 +291,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
+ struct udp_table *udptable)
+ {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+- struct sock *sk;
+
+- sk = skb_steal_sock(skb);
+- if (unlikely(sk))
+- return sk;
+ return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
+ &iph->daddr, dport, inet6_iif(skb),
+ udptable, skb);
+@@ -798,6 +794,24 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ if (udp6_csum_init(skb, uh, proto))
+ goto csum_error;
+
++ /* Check if the socket is already available, e.g. due to early demux */
++ sk = skb_steal_sock(skb);
++ if (sk) {
++ struct dst_entry *dst = skb_dst(skb);
++ int ret;
++
++ if (unlikely(sk->sk_rx_dst != dst))
++ udp_sk_rx_dst_set(sk, dst);
++
++ ret = udpv6_queue_rcv_skb(sk, skb);
++ sock_put(sk);
++
++ /* a return value > 0 means to resubmit the input */
++ if (ret > 0)
++ return ret;
++ return 0;
++ }
++
+ /*
+ * Multicast receive code
+ */
+@@ -806,11 +820,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ saddr, daddr, udptable, proto);
+
+ /* Unicast */
+-
+- /*
+- * check socket cache ... must talk to Alan about his plans
+- * for sock caches... i'll skip this for now.
+- */
+ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ if (sk) {
+ int ret;
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 08679ebb3068..b3bf66bbf4dc 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1289,8 +1289,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+
+ nla_for_each_nested(a, attr, rem) {
+ int type = nla_type(a);
+- int maxlen = ovs_ct_attr_lens[type].maxlen;
+- int minlen = ovs_ct_attr_lens[type].minlen;
++ int maxlen;
++ int minlen;
+
+ if (type > OVS_CT_ATTR_MAX) {
+ OVS_NLERR(log,
+@@ -1298,6 +1298,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+ type, OVS_CT_ATTR_MAX);
+ return -EINVAL;
+ }
++
++ maxlen = ovs_ct_attr_lens[type].maxlen;
++ minlen = ovs_ct_attr_lens[type].minlen;
+ if (nla_len(a) < minlen || nla_len(a) > maxlen) {
+ OVS_NLERR(log,
+ "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index e3eeed19cc7a..0880e0a9d151 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4334,7 +4334,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ register_prot_hook(sk);
+ }
+ spin_unlock(&po->bind_lock);
+- if (closing && (po->tp_version > TPACKET_V2)) {
++ if (pg_vec && (po->tp_version > TPACKET_V2)) {
+ /* Because we don't support block-based V3 on tx-ring */
+ if (!tx_ring)
+ prb_shutdown_retire_blk_timer(po, rb_queue);
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 92e332e17391..961a6f81ae64 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ sctp_adaptation_ind_param_t aiparam;
+ sctp_supported_ext_param_t ext_param;
+ int num_ext = 0;
+- __u8 extensions[3];
++ __u8 extensions[4];
+ sctp_paramhdr_t *auth_chunks = NULL,
+ *auth_hmacs = NULL;
+
+@@ -396,7 +396,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+ sctp_adaptation_ind_param_t aiparam;
+ sctp_supported_ext_param_t ext_param;
+ int num_ext = 0;
+- __u8 extensions[3];
++ __u8 extensions[4];
+ sctp_paramhdr_t *auth_chunks = NULL,
+ *auth_hmacs = NULL,
+ *auth_random = NULL;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a808332d02d0..606d5333ff98 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2296,6 +2296,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++ SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 754e3ef8d7ae..d05acc8eed1f 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -3139,8 +3139,6 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
+ component->remove = component->driver->remove;
+ component->suspend = component->driver->suspend;
+ component->resume = component->driver->resume;
+- component->pcm_new = component->driver->pcm_new;
+- component->pcm_free = component->driver->pcm_free;
+
+ dapm = &component->dapm;
+ dapm->dev = dev;
+@@ -3328,25 +3326,6 @@ static void snd_soc_platform_drv_remove(struct snd_soc_component *component)
+ platform->driver->remove(platform);
+ }
+
+-static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
+-{
+- struct snd_soc_platform *platform = rtd->platform;
+-
+- if (platform->driver->pcm_new)
+- return platform->driver->pcm_new(rtd);
+- else
+- return 0;
+-}
+-
+-static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
+-{
+- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+- struct snd_soc_platform *platform = rtd->platform;
+-
+- if (platform->driver->pcm_free)
+- platform->driver->pcm_free(pcm);
+-}
+-
+ /**
+ * snd_soc_add_platform - Add a platform to the ASoC core
+ * @dev: The parent device for the platform
+@@ -3370,10 +3349,6 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
+ platform->component.probe = snd_soc_platform_drv_probe;
+ if (platform_drv->remove)
+ platform->component.remove = snd_soc_platform_drv_remove;
+- if (platform_drv->pcm_new)
+- platform->component.pcm_new = snd_soc_platform_drv_pcm_new;
+- if (platform_drv->pcm_free)
+- platform->component.pcm_free = snd_soc_platform_drv_pcm_free;
+
+ #ifdef CONFIG_DEBUG_FS
+ platform->component.debugfs_prefix = "platform";
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index efc5831f205d..8ff7cd3b8c1f 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+ dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
+ be->dai_link->name, event, dir);
+
++ if ((event == SND_SOC_DAPM_STREAM_STOP) &&
++ (be->dpcm[dir].users >= 1))
++ continue;
++
+ snd_soc_dapm_stream_event(be, dir, event);
+ }
+
+@@ -2628,25 +2632,12 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
+ return ret;
+ }
+
+-static void soc_pcm_free(struct snd_pcm *pcm)
+-{
+- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+- struct snd_soc_component *component;
+-
+- list_for_each_entry(component, &rtd->card->component_dev_list,
+- card_list) {
+- if (component->pcm_free)
+- component->pcm_free(pcm);
+- }
+-}
+-
+ /* create a new pcm */
+ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
+ {
+ struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+- struct snd_soc_component *component;
+ struct snd_pcm *pcm;
+ char new_name[64];
+ int ret = 0, playback = 0, capture = 0;
+@@ -2755,18 +2746,17 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
+ if (capture)
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
+
+- list_for_each_entry(component, &rtd->card->component_dev_list, card_list) {
+- if (component->pcm_new) {
+- ret = component->pcm_new(rtd);
+- if (ret < 0) {
+- dev_err(component->dev,
+- "ASoC: pcm constructor failed: %d\n",
+- ret);
+- return ret;
+- }
++ if (platform->driver->pcm_new) {
++ ret = platform->driver->pcm_new(rtd);
++ if (ret < 0) {
++ dev_err(platform->dev,
++ "ASoC: pcm constructor failed: %d\n",
++ ret);
++ return ret;
+ }
+ }
+- pcm->private_free = soc_pcm_free;
++
++ pcm->private_free = platform->driver->pcm_free;
+ out:
+ dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
+ (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
+diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
+index b50f68a439ce..ba9fc099cf67 100644
+--- a/sound/soc/ux500/mop500.c
++++ b/sound/soc/ux500/mop500.c
+@@ -33,6 +33,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
+ .stream_name = "ab8500_0",
+ .cpu_dai_name = "ux500-msp-i2s.1",
+ .codec_dai_name = "ab8500-codec-dai.0",
++ .platform_name = "ux500-msp-i2s.1",
+ .codec_name = "ab8500-codec.0",
+ .init = mop500_ab8500_machine_init,
+ .ops = mop500_ab8500_ops,
+@@ -42,6 +43,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
+ .stream_name = "ab8500_1",
+ .cpu_dai_name = "ux500-msp-i2s.3",
+ .codec_dai_name = "ab8500-codec-dai.1",
++ .platform_name = "ux500-msp-i2s.3",
+ .codec_name = "ab8500-codec.0",
+ .init = NULL,
+ .ops = mop500_ab8500_ops,
+@@ -85,6 +87,8 @@ static int mop500_of_probe(struct platform_device *pdev,
+ for (i = 0; i < 2; i++) {
+ mop500_dai_links[i].cpu_of_node = msp_np[i];
+ mop500_dai_links[i].cpu_dai_name = NULL;
++ mop500_dai_links[i].platform_of_node = msp_np[i];
++ mop500_dai_links[i].platform_name = NULL;
+ mop500_dai_links[i].codec_of_node = codec_np;
+ mop500_dai_links[i].codec_name = NULL;
+ }
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index e2e5effba2a9..db1c7b25a44c 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1665,12 +1665,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
+
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+ {
++ if (!kvm->arch.pgd)
++ return 0;
+ trace_kvm_age_hva(start, end);
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+ }
+
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+ {
++ if (!kvm->arch.pgd)
++ return 0;
+ trace_kvm_test_age_hva(hva);
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+ }
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-08-13 16:37 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-08-13 16:37 UTC (permalink / raw
To: gentoo-commits
commit: ae3dadabd5058e845ed5bf34cbad53c580cfc6f9
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 13 16:36:48 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 13 16:36:48 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ae3dadab
Linux patch 4.12.7
0000_README | 4 +
1006_linux-4.12.7.patch | 648 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 652 insertions(+)
diff --git a/0000_README b/0000_README
index b88e1e0..3a1bafb 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch: 1005_linux-4.12.6.patch
From: http://www.kernel.org
Desc: Linux 4.12.6
+Patch: 1006_linux-4.12.7.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.7
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1006_linux-4.12.7.patch b/1006_linux-4.12.7.patch
new file mode 100644
index 0000000..b130ed5
--- /dev/null
+++ b/1006_linux-4.12.7.patch
@@ -0,0 +1,648 @@
+diff --git a/Makefile b/Makefile
+index c8d80b50495a..ebe69a704bca 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 6e97a2e3fd8d..8cea684f1f53 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
+ insn_count = bpf_jit_insn(jit, fp, i);
+ if (insn_count < 0)
+ return -1;
+- jit->addrs[i + 1] = jit->prg; /* Next instruction address */
++ /* Next instruction address */
++ jit->addrs[i + insn_count] = jit->prg;
+ }
+ bpf_jit_epilogue(jit);
+
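The JIT hunk above records the next native address at index i + insn_count rather than i + 1, because a single eBPF instruction can occupy two source slots. A simplified standalone model of that bookkeeping, with invented slot widths and native sizes:

#include <stdio.h>

/* Toy program: each source instruction consumes 'slots' source slots
 * (2 models a double-slot insn such as a 64-bit immediate load) and
 * emits 'native_bytes' of target code. */
static const struct { int slots; int native_bytes; } insn[] = {
        { 1, 4 }, { 2, 6 }, { 1, 4 },
};

int main(void)
{
        int addrs[8] = { 0 };   /* addrs[i] = native offset of slot i */
        int prg = 0;            /* native code emitted so far */
        int i = 0, k;

        for (k = 0; k < 3; k++) {
                prg += insn[k].native_bytes;
                /* Record where the *next* source slot lands in native
                 * code: i + slots, not i + 1, or a double-slot insn
                 * would leave a stale entry for branch resolution. */
                addrs[i + insn[k].slots] = prg;
                i += insn[k].slots;
        }
        for (k = 0; k <= i; k++)
                printf("addrs[%d] = %d\n", k, addrs[k]);
        return 0;
}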
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 77abd1813047..802f0e8bff3a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -572,16 +572,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
+ * header, the HW adds it. To address that, we are subtracting the pseudo
+ * header checksum from the checksum value provided by the HW.
+ */
+-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+- struct iphdr *iph)
++static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
++ struct iphdr *iph)
+ {
+ __u16 length_for_csum = 0;
+ __wsum csum_pseudo_header = 0;
++ __u8 ipproto = iph->protocol;
++
++ if (unlikely(ipproto == IPPROTO_SCTP))
++ return -1;
+
+ length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
+ csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+- length_for_csum, iph->protocol, 0);
++ length_for_csum, ipproto, 0);
+ skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
++ return 0;
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -592,17 +597,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct ipv6hdr *ipv6h)
+ {
++ __u8 nexthdr = ipv6h->nexthdr;
+ __wsum csum_pseudo_hdr = 0;
+
+- if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
+- ipv6h->nexthdr == IPPROTO_HOPOPTS))
++ if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
++ nexthdr == IPPROTO_HOPOPTS ||
++ nexthdr == IPPROTO_SCTP))
+ return -1;
+- hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
++ hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
+
+ csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
+ sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
+- csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
++ csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
++ (__force __wsum)htons(nexthdr));
+
+ skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
+ skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
+@@ -625,11 +633,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+ }
+
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
+- get_fixed_ipv4_csum(hw_checksum, skb, hdr);
++ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+ #if IS_ENABLED(CONFIG_IPV6)
+- else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+- if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
+- return -1;
++ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
++ return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
+ #endif
+ return 0;
+ }
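The mlx4 hunks above subtract a pseudo-header checksum from the value the hardware reports. A minimal userspace sketch of the one's-complement arithmetic involved; the addresses, protocol, and length are example inputs, and the helpers only mirror the kernel's csum routines in spirit:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* One's-complement sum of the IPv4 pseudo header: saddr, daddr,
 * the zero/protocol word, and the L4 length, as 16-bit words. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                               uint8_t proto, uint16_t l4len)
{
        uint32_t sum = 0;

        sum += saddr >> 16;  sum += saddr & 0xffff;
        sum += daddr >> 16;  sum += daddr & 0xffff;
        sum += proto;        sum += l4len;
        return fold(sum);
}

int main(void)
{
        /* Example inputs only (192.0.2.1 -> 192.0.2.2, UDP, 100 bytes). */
        uint16_t pseudo = pseudo_hdr_sum(0xC0000201, 0xC0000202, 17, 100);
        uint16_t hw = 0x1234;           /* checksum as reported by HW */

        /* Subtract in one's complement: a - b == fold(a + ~b). */
        uint16_t fixed = fold((uint32_t)hw + (uint16_t)~pseudo);

        printf("pseudo=0x%04x fixed=0x%04x\n", pseudo, fixed);
        return 0;
}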
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index f9c0e62716ea..18fb00d55aa1 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -120,6 +120,7 @@ struct ppp {
+ int n_channels; /* how many channels are attached 54 */
+ spinlock_t rlock; /* lock for receive side 58 */
+ spinlock_t wlock; /* lock for transmit side 5c */
++ int *xmit_recursion __percpu; /* xmit recursion detect */
+ int mru; /* max receive unit 60 */
+ unsigned int flags; /* control bits 64 */
+ unsigned int xstate; /* transmit state bits 68 */
+@@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+ struct ppp *ppp = netdev_priv(dev);
+ int indx;
+ int err;
++ int cpu;
+
+ ppp->dev = dev;
+ ppp->ppp_net = src_net;
+@@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+ INIT_LIST_HEAD(&ppp->channels);
+ spin_lock_init(&ppp->rlock);
+ spin_lock_init(&ppp->wlock);
++
++ ppp->xmit_recursion = alloc_percpu(int);
++ if (!ppp->xmit_recursion) {
++ err = -ENOMEM;
++ goto err1;
++ }
++ for_each_possible_cpu(cpu)
++ (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
++
+ #ifdef CONFIG_PPP_MULTILINK
+ ppp->minseq = -1;
+ skb_queue_head_init(&ppp->mrq);
+@@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+
+ err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
+ if (err < 0)
+- return err;
++ goto err2;
+
+ conf->file->private_data = &ppp->file;
+
+ return 0;
++err2:
++ free_percpu(ppp->xmit_recursion);
++err1:
++ return err;
+ }
+
+ static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
+@@ -1398,18 +1413,16 @@ static void __ppp_xmit_process(struct ppp *ppp)
+ ppp_xmit_unlock(ppp);
+ }
+
+-static DEFINE_PER_CPU(int, ppp_xmit_recursion);
+-
+ static void ppp_xmit_process(struct ppp *ppp)
+ {
+ local_bh_disable();
+
+- if (unlikely(__this_cpu_read(ppp_xmit_recursion)))
++ if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
+ goto err;
+
+- __this_cpu_inc(ppp_xmit_recursion);
++ (*this_cpu_ptr(ppp->xmit_recursion))++;
+ __ppp_xmit_process(ppp);
+- __this_cpu_dec(ppp_xmit_recursion);
++ (*this_cpu_ptr(ppp->xmit_recursion))--;
+
+ local_bh_enable();
+
+@@ -1900,23 +1913,23 @@ static void __ppp_channel_push(struct channel *pch)
+ spin_unlock_bh(&pch->downl);
+ /* see if there is anything from the attached unit to be sent */
+ if (skb_queue_empty(&pch->file.xq)) {
+- read_lock_bh(&pch->upl);
+ ppp = pch->ppp;
+ if (ppp)
+ __ppp_xmit_process(ppp);
+- read_unlock_bh(&pch->upl);
+ }
+ }
+
+ static void ppp_channel_push(struct channel *pch)
+ {
+- local_bh_disable();
+-
+- __this_cpu_inc(ppp_xmit_recursion);
+- __ppp_channel_push(pch);
+- __this_cpu_dec(ppp_xmit_recursion);
+-
+- local_bh_enable();
++ read_lock_bh(&pch->upl);
++ if (pch->ppp) {
++ (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
++ __ppp_channel_push(pch);
++ (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
++ } else {
++ __ppp_channel_push(pch);
++ }
++ read_unlock_bh(&pch->upl);
+ }
+
+ /*
+@@ -3055,6 +3068,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
+ #endif /* CONFIG_PPP_FILTER */
+
+ kfree_skb(ppp->xmit_pending);
++ free_percpu(ppp->xmit_recursion);
+
+ free_netdev(ppp->dev);
+ }
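The ppp hunks above move the recursion flag from a global per-CPU variable into each ppp device, so recursion on one device no longer blocks transmission through an unrelated one. A single-threaded sketch of the per-device guard; the kernel version is per-CPU and the names here are illustrative:

#include <stdio.h>

struct dev {
        const char *name;
        int xmit_recursion;     /* per-device, not global */
};

static struct dev *reentry_target;  /* set to force a nested xmit */

static int dev_xmit(struct dev *d)
{
        if (d->xmit_recursion) {
                printf("%s: recursion detected, dropping\n", d->name);
                return -1;
        }
        d->xmit_recursion++;
        /* A channel below us may loop a packet back into xmit. */
        if (reentry_target) {
                struct dev *t = reentry_target;

                reentry_target = NULL;
                dev_xmit(t);
        }
        d->xmit_recursion--;
        return 0;
}

int main(void)
{
        struct dev a = { "ppp0", 0 }, b = { "ppp1", 0 };

        reentry_target = &a;    /* ppp0 loops back into itself: caught */
        dev_xmit(&a);
        reentry_target = &b;    /* ppp0 feeding ppp1 is fine: distinct flags */
        dev_xmit(&a);
        return 0;
}

With a single global flag, the second case would also have been treated as recursion; per-device state is what the fix restores.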
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 32a22f4e8356..c42153a985be 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1340,10 +1340,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ static void qmi_wwan_disconnect(struct usb_interface *intf)
+ {
+ struct usbnet *dev = usb_get_intfdata(intf);
+- struct qmi_wwan_state *info = (void *)&dev->data;
++ struct qmi_wwan_state *info;
+ struct list_head *iter;
+ struct net_device *ldev;
+
++ /* called twice if separate control and data intf */
++ if (!dev)
++ return;
++ info = (void *)&dev->data;
+ if (info->flags & QMI_WWAN_FLAG_MUX) {
+ if (!rtnl_trylock()) {
+ restart_syscall();
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 82c33a6edbea..aa6f1debeaa7 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -751,29 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+ return count;
+ }
+
+-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
+-{
+- switch (hp->dxfer_direction) {
+- case SG_DXFER_NONE:
+- if (hp->dxferp || hp->dxfer_len > 0)
+- return false;
+- return true;
+- case SG_DXFER_TO_DEV:
+- case SG_DXFER_FROM_DEV:
+- case SG_DXFER_TO_FROM_DEV:
+- if (!hp->dxferp || hp->dxfer_len == 0)
+- return false;
+- return true;
+- case SG_DXFER_UNKNOWN:
+- if ((!hp->dxferp && hp->dxfer_len) ||
+- (hp->dxferp && hp->dxfer_len == 0))
+- return false;
+- return true;
+- default:
+- return false;
+- }
+-}
+-
+ static int
+ sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ unsigned char *cmnd, int timeout, int blocking)
+@@ -794,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
+ (int) cmnd[0], (int) hp->cmd_len));
+
+- if (!sg_is_valid_dxfer(hp))
++ if (hp->dxfer_len >= SZ_256M)
+ return -EINVAL;
+
+ k = sg_start_req(srp, cmnd);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 3ef90e91d8be..0c7b61f72478 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4463,29 +4463,25 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+ }
+
+ /*
+- * Sanity check for fiemap cache
++ * Emit last fiemap cache
+ *
+- * All fiemap cache should be submitted by emit_fiemap_extent()
+- * Iteration should be terminated either by last fiemap extent or
+- * fieinfo->fi_extents_max.
+- * So no cached fiemap should exist.
++ * The last fiemap cache may still be cached in the following case:
++ * 0 4k 8k
++ * |<- Fiemap range ->|
++ * |<------------ First extent ----------->|
++ *
++ * In this case, the first extent range will be cached but not emitted.
++ * So we must emit it before ending extent_fiemap().
+ */
+-static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
+- struct fiemap_extent_info *fieinfo,
+- struct fiemap_cache *cache)
++static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
++ struct fiemap_extent_info *fieinfo,
++ struct fiemap_cache *cache)
+ {
+ int ret;
+
+ if (!cache->cached)
+ return 0;
+
+- /* Small and recoverbale problem, only to info developer */
+-#ifdef CONFIG_BTRFS_DEBUG
+- WARN_ON(1);
+-#endif
+- btrfs_warn(fs_info,
+- "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
+- cache->offset, cache->phys, cache->len, cache->flags);
+ ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+ cache->len, cache->flags);
+ cache->cached = false;
+@@ -4701,7 +4697,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ }
+ out_free:
+ if (!ret)
+- ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
++ ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache);
+ free_extent_map(em);
+ out:
+ btrfs_free_path(path);
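The btrfs hunk above turns check_fiemap_cache into emit_last_fiemap_cache because a merged extent can legitimately remain cached when iteration ends inside it. A standalone sketch of the merge-and-flush pattern, simplified to logical offsets only (the kernel also checks physical contiguity and flags); the extent values are examples:

#include <stdint.h>
#include <stdio.h>

struct cache {
        uint64_t off, len;
        int cached;
};

static void flush(struct cache *c)
{
        if (!c->cached)
                return;
        printf("emit extent: off=%llu len=%llu\n",
               (unsigned long long)c->off, (unsigned long long)c->len);
        c->cached = 0;
}

/* Cache one extent at a time, merging logically adjacent ones. */
static void emit(struct cache *c, uint64_t off, uint64_t len)
{
        if (c->cached && off == c->off + c->len) {
                c->len += len;          /* contiguous: extend the cache */
                return;
        }
        flush(c);                       /* gap: emit what we held */
        c->off = off;
        c->len = len;
        c->cached = 1;
}

int main(void)
{
        struct cache c = { 0 };

        emit(&c, 0, 4096);
        emit(&c, 4096, 4096);   /* merges with the first extent */
        emit(&c, 16384, 4096);  /* gap: the merged pair is emitted */
        flush(&c);              /* the fix: emit the final cached extent */
        return 0;
}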
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 25f9461eff3f..528edc68a64a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2670,7 +2670,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+ {
+ if (tx_path)
+ return skb->ip_summed != CHECKSUM_PARTIAL &&
+- skb->ip_summed != CHECKSUM_NONE;
++ skb->ip_summed != CHECKSUM_UNNECESSARY;
+
+ return skb->ip_summed == CHECKSUM_NONE;
+ }
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 58925b6597de..ab8ebd440423 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net)
+ net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
+ #endif
+
++ /* Some igmp sysctl, whose values are always used */
++ net->ipv4.sysctl_igmp_max_memberships = 20;
++ net->ipv4.sysctl_igmp_max_msf = 10;
++ /* IGMP reports for link-local multicast groups are enabled by default */
++ net->ipv4.sysctl_igmp_llm_reports = 1;
++ net->ipv4.sysctl_igmp_qrv = 2;
++
+ return 0;
+ }
+
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index ec9a396fa466..3db1adb6b7a0 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
+ goto out_sock;
+ }
+
+- /* Sysctl initialization */
+- net->ipv4.sysctl_igmp_max_memberships = 20;
+- net->ipv4.sysctl_igmp_max_msf = 10;
+- /* IGMP reports for link-local multicast groups are enabled by default */
+- net->ipv4.sysctl_igmp_llm_reports = 1;
+- net->ipv4.sysctl_igmp_qrv = 2;
+ return 0;
+
+ out_sock:
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 532b36e9ce2a..e5948c0c9759 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -964,11 +964,12 @@ static int __ip_append_data(struct sock *sk,
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+- (skb && skb_is_gso(skb))) &&
++ if ((skb && skb_is_gso(skb)) ||
++ (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
++ (skb_queue_len(queue) <= 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
+- (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
++ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ hh_len, fragheaderlen, transhdrlen,
+ maxfraglen, flags);
+@@ -1287,6 +1288,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+ return -EINVAL;
+
+ if ((size + skb->len > mtu) &&
++ (skb_queue_len(&sk->sk_write_queue) == 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO)) {
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 174d4376baa5..57bcae81fe42 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2517,8 +2517,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
+ return;
+
+ /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
+- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
++ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
++ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
+ tp->snd_cwnd = tp->snd_ssthresh;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 4858e190f6ac..8963b8c5fb41 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3361,6 +3361,9 @@ int tcp_connect(struct sock *sk)
+ struct sk_buff *buff;
+ int err;
+
++ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
++ return -EHOSTUNREACH; /* Routing failure or similar. */
++
+ tcp_connect_init(sk);
+
+ if (unlikely(tp->repair)) {
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 14672543cf0b..0733ea7e17cd 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -654,7 +654,8 @@ static void tcp_keepalive_timer (unsigned long data)
+ goto death;
+ }
+
+- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
++ if (!sock_flag(sk, SOCK_KEEPOPEN) ||
++ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
+ goto out;
+
+ elapsed = keepalive_time_when(tp);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b9a84eba60b8..c991b97cbb28 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
+ if (is_udplite) /* UDP-Lite */
+ csum = udplite_csum(skb);
+
+- else if (sk->sk_no_check_tx) { /* UDP csum disabled */
++ else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */
+
+ skb->ip_summed = CHECKSUM_NONE;
+ goto send;
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 781250151d40..0932c85b42af 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+- skb->ip_summed = CHECKSUM_NONE;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* If there is no outer header we can fake a checksum offload
+ * due to the fact that we have already done the checksum in
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index be0306778938..365d5108a326 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1386,11 +1386,12 @@ static int __ip6_append_data(struct sock *sk,
+ */
+
+ cork->length += length;
+- if ((((length + (skb ? skb->len : headersize)) > mtu) ||
+- (skb && skb_is_gso(skb))) &&
++ if ((skb && skb_is_gso(skb)) ||
++ (((length + (skb ? skb->len : headersize)) > mtu) &&
++ (skb_queue_len(queue) <= 1) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
+- (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
++ (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
+ err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
+ hh_len, fragheaderlen, exthdrlen,
+ transhdrlen, mtu, flags, fl6);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index bc49f9a82994..aeb7097acc0a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2366,6 +2366,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
+ if (on_link)
+ nrt->rt6i_flags &= ~RTF_GATEWAY;
+
++ nrt->rt6i_protocol = RTPROT_REDIRECT;
+ nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
+
+ if (ip6_ins_rt(nrt))
+@@ -2470,6 +2471,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
+ .fc_dst_len = prefixlen,
+ .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
+ RTF_UP | RTF_PREF(pref),
++ .fc_protocol = RTPROT_RA,
+ .fc_nlinfo.portid = 0,
+ .fc_nlinfo.nlh = NULL,
+ .fc_nlinfo.nl_net = net,
+@@ -2522,6 +2524,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
+ .fc_ifindex = dev->ifindex,
+ .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
+ RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
++ .fc_protocol = RTPROT_RA,
+ .fc_nlinfo.portid = 0,
+ .fc_nlinfo.nlh = NULL,
+ .fc_nlinfo.nl_net = dev_net(dev),
+@@ -3434,14 +3437,6 @@ static int rt6_fill_node(struct net *net,
+ rtm->rtm_flags = 0;
+ rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+ rtm->rtm_protocol = rt->rt6i_protocol;
+- if (rt->rt6i_flags & RTF_DYNAMIC)
+- rtm->rtm_protocol = RTPROT_REDIRECT;
+- else if (rt->rt6i_flags & RTF_ADDRCONF) {
+- if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
+- rtm->rtm_protocol = RTPROT_RA;
+- else
+- rtm->rtm_protocol = RTPROT_KERNEL;
+- }
+
+ if (rt->rt6i_flags & RTF_CACHE)
+ rtm->rtm_flags |= RTM_F_CLONED;
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index a2267f80febb..e7d378c032cb 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+- skb->ip_summed = CHECKSUM_NONE;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* If there is no outer header we can fake a checksum offload
+ * due to the fact that we have already done the checksum in
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 0880e0a9d151..aa2d4000bafc 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3705,14 +3705,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+- return -EBUSY;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
+- po->tp_reserve = val;
+- return 0;
++ lock_sock(sk);
++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++ ret = -EBUSY;
++ } else {
++ po->tp_reserve = val;
++ ret = 0;
++ }
++ release_sock(sk);
++ return ret;
+ }
+ case PACKET_LOSS:
+ {
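The PACKET_RESERVE hunk above closes a window in which the ring could be mapped between the pg_vec check and the tp_reserve store. A pthread sketch of the same check-then-set-under-lock shape; the variable names are illustrative stand-ins for the socket state:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static int ring_mapped;         /* stands in for rx/tx pg_vec */
static unsigned int tp_reserve;

/* Both the check and the store happen under the lock, so a concurrent
 * ring setup cannot slip in between them. */
static int set_reserve(unsigned int val)
{
        int ret;

        pthread_mutex_lock(&sk_lock);
        if (ring_mapped) {
                ret = -EBUSY;
        } else {
                tp_reserve = val;
                ret = 0;
        }
        pthread_mutex_unlock(&sk_lock);
        return ret;
}

int main(void)
{
        printf("set_reserve(128) -> %d\n", set_reserve(128));
        ring_mapped = 1;
        printf("set_reserve(256) -> %d\n", set_reserve(256));
        return 0;
}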
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 36f0ced9e60c..d516ba8178b8 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
+ static unsigned int xt_net_id;
+ static struct tc_action_ops act_xt_ops;
+
+-static int ipt_init_target(struct xt_entry_target *t, char *table,
+- unsigned int hook)
++static int ipt_init_target(struct net *net, struct xt_entry_target *t,
++ char *table, unsigned int hook)
+ {
+ struct xt_tgchk_param par;
+ struct xt_target *target;
+@@ -49,8 +49,9 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
+ return PTR_ERR(target);
+
+ t->u.kernel.target = target;
++ memset(&par, 0, sizeof(par));
++ par.net = net;
+ par.table = table;
+- par.entryinfo = NULL;
+ par.target = target;
+ par.targinfo = t->data;
+ par.hook_mask = hook;
+@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
+ [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
+ };
+
+-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
++static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int ovr, int bind)
+ {
++ struct tc_action_net *tn = net_generic(net, id);
+ struct nlattr *tb[TCA_IPT_MAX + 1];
+ struct tcf_ipt *ipt;
+ struct xt_entry_target *td, *t;
+@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
+ if (unlikely(!t))
+ goto err2;
+
+- err = ipt_init_target(t, tname, hook);
++ err = ipt_init_target(net, t, tname, hook);
+ if (err < 0)
+ goto err3;
+
+@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a, int ovr,
+ int bind)
+ {
+- struct tc_action_net *tn = net_generic(net, ipt_net_id);
+-
+- return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
++ return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
++ bind);
+ }
+
+ static int tcf_xt_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a, int ovr,
+ int bind)
+ {
+- struct tc_action_net *tn = net_generic(net, xt_net_id);
+-
+- return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
++ return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
++ bind);
+ }
+
+ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
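The act_ipt hunk above replaces a lone par.entryinfo = NULL with memset(&par, 0, sizeof(par)) before filling in the fields it cares about, so any member added to the struct later (here par.net) starts from zero rather than stack garbage. A tiny sketch of that convention; the struct and field names are invented for illustration:

#include <stdio.h>
#include <string.h>

struct tgchk_param {
        const void *net;        /* newer field a caller might forget */
        const char *table;
        unsigned int hook_mask;
};

static void check_target(const struct tgchk_param *par)
{
        /* With zero-init, unset pointers are reliably NULL. */
        printf("net=%p table=%s hooks=0x%x\n",
               par->net, par->table ? par->table : "(none)", par->hook_mask);
}

int main(void)
{
        struct tgchk_param par;

        memset(&par, 0, sizeof(par));   /* zero everything first */
        par.table = "mangle";
        par.hook_mask = 1u << 3;
        check_target(&par);
        return 0;
}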
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-08-16 22:28 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-08-16 22:28 UTC (permalink / raw
To: gentoo-commits
commit: bc0d79f2cfab2f521dd63b86f9cc0b8077823e50
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 16 22:28:16 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 16 22:28:16 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bc0d79f2
Linux patch 4.12.8
0000_README | 4 +
1007_linux-4.12.8.patch | 2849 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2853 insertions(+)
diff --git a/0000_README b/0000_README
index 3a1bafb..47efe0d 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-4.12.7.patch
From: http://www.kernel.org
Desc: Linux 4.12.7
+Patch: 1007_linux-4.12.8.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1007_linux-4.12.8.patch b/1007_linux-4.12.8.patch
new file mode 100644
index 0000000..560efc0
--- /dev/null
+++ b/1007_linux-4.12.8.patch
@@ -0,0 +1,2849 @@
+diff --git a/Makefile b/Makefile
+index ebe69a704bca..6da481d08441 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
+index 1910223a9c02..cea2bb1621e6 100644
+--- a/arch/mips/dec/int-handler.S
++++ b/arch/mips/dec/int-handler.S
+@@ -147,23 +147,12 @@
+ * Find irq with highest priority
+ */
+ # open coded PTR_LA t1, cpu_mask_nr_tbl
+-#if (_MIPS_SZPTR == 32)
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ # open coded la t1, cpu_mask_nr_tbl
+ lui t1, %hi(cpu_mask_nr_tbl)
+ addiu t1, %lo(cpu_mask_nr_tbl)
+-
+-#endif
+-#if (_MIPS_SZPTR == 64)
+- # open coded dla t1, cpu_mask_nr_tbl
+- .set push
+- .set noat
+- lui t1, %highest(cpu_mask_nr_tbl)
+- lui AT, %hi(cpu_mask_nr_tbl)
+- daddiu t1, t1, %higher(cpu_mask_nr_tbl)
+- daddiu AT, AT, %lo(cpu_mask_nr_tbl)
+- dsll t1, 32
+- daddu t1, t1, AT
+- .set pop
++#else
++#error GCC `-msym32' option required for 64-bit DECstation builds
+ #endif
+ 1: lw t2,(t1)
+ nop
+@@ -214,23 +203,12 @@
+ * Find irq with highest priority
+ */
+ # open coded PTR_LA t1,asic_mask_nr_tbl
+-#if (_MIPS_SZPTR == 32)
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ # open coded la t1, asic_mask_nr_tbl
+ lui t1, %hi(asic_mask_nr_tbl)
+ addiu t1, %lo(asic_mask_nr_tbl)
+-
+-#endif
+-#if (_MIPS_SZPTR == 64)
+- # open coded dla t1, asic_mask_nr_tbl
+- .set push
+- .set noat
+- lui t1, %highest(asic_mask_nr_tbl)
+- lui AT, %hi(asic_mask_nr_tbl)
+- daddiu t1, t1, %higher(asic_mask_nr_tbl)
+- daddiu AT, AT, %lo(asic_mask_nr_tbl)
+- dsll t1, 32
+- daddu t1, t1, AT
+- .set pop
++#else
++#error GCC `-msym32' option required for 64-bit DECstation builds
+ #endif
+ 2: lw t2,(t1)
+ nop
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index fc67947ed658..8b14c2706aa5 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,6 +9,8 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
+
++#include <kmalloc.h>
++
+ #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+index d045973ddb33..3ea84acf1814 100644
+--- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
++++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+@@ -33,6 +33,10 @@
+ #define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
+ #define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
+ #define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
++#define CVMX_L2C_ERR_TDTX(block_id) \
++ (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
++#define CVMX_L2C_ERR_TTGX(block_id) \
++ (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
+ #define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
+ #define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
+ #define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
+@@ -66,9 +70,40 @@
+ ((offset) & 1) * 8)
+ #define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
+ ((offset) & 31) * 8)
+-#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
++union cvmx_l2c_err_tdtx {
++ uint64_t u64;
++ struct cvmx_l2c_err_tdtx_s {
++ __BITFIELD_FIELD(uint64_t dbe:1,
++ __BITFIELD_FIELD(uint64_t sbe:1,
++ __BITFIELD_FIELD(uint64_t vdbe:1,
++ __BITFIELD_FIELD(uint64_t vsbe:1,
++ __BITFIELD_FIELD(uint64_t syn:10,
++ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
++ __BITFIELD_FIELD(uint64_t wayidx:18,
++ __BITFIELD_FIELD(uint64_t reserved_2_3:2,
++ __BITFIELD_FIELD(uint64_t type:2,
++ ;)))))))))
++ } s;
++};
++
++union cvmx_l2c_err_ttgx {
++ uint64_t u64;
++ struct cvmx_l2c_err_ttgx_s {
++ __BITFIELD_FIELD(uint64_t dbe:1,
++ __BITFIELD_FIELD(uint64_t sbe:1,
++ __BITFIELD_FIELD(uint64_t noway:1,
++ __BITFIELD_FIELD(uint64_t reserved_56_60:5,
++ __BITFIELD_FIELD(uint64_t syn:6,
++ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
++ __BITFIELD_FIELD(uint64_t wayidx:15,
++ __BITFIELD_FIELD(uint64_t reserved_2_6:5,
++ __BITFIELD_FIELD(uint64_t type:2,
++ ;)))))))))
++ } s;
++};
++
+ union cvmx_l2c_cfg {
+ uint64_t u64;
+ struct cvmx_l2c_cfg_s {
+diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
+new file mode 100644
+index 000000000000..a951ad5d65ad
+--- /dev/null
++++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
+@@ -0,0 +1,60 @@
++/***********************license start***************
++ * Author: Cavium Networks
++ *
++ * Contact: support@caviumnetworks.com
++ * This file is part of the OCTEON SDK
++ *
++ * Copyright (c) 2003-2017 Cavium, Inc.
++ *
++ * This file is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License, Version 2, as
++ * published by the Free Software Foundation.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
++ * NONINFRINGEMENT. See the GNU General Public License for more
++ * details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this file; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ * or visit http://www.gnu.org/licenses/.
++ *
++ * This file may also be available under a different license from Cavium.
++ * Contact Cavium Networks for more information
++ ***********************license end**************************************/
++
++#ifndef __CVMX_L2D_DEFS_H__
++#define __CVMX_L2D_DEFS_H__
++
++#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
++#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
++
++
++union cvmx_l2d_err {
++ uint64_t u64;
++ struct cvmx_l2d_err_s {
++ __BITFIELD_FIELD(uint64_t reserved_6_63:58,
++ __BITFIELD_FIELD(uint64_t bmhclsel:1,
++ __BITFIELD_FIELD(uint64_t ded_err:1,
++ __BITFIELD_FIELD(uint64_t sec_err:1,
++ __BITFIELD_FIELD(uint64_t ded_intena:1,
++ __BITFIELD_FIELD(uint64_t sec_intena:1,
++ __BITFIELD_FIELD(uint64_t ecc_ena:1,
++ ;)))))))
++ } s;
++};
++
++union cvmx_l2d_fus3 {
++ uint64_t u64;
++ struct cvmx_l2d_fus3_s {
++ __BITFIELD_FIELD(uint64_t reserved_40_63:24,
++ __BITFIELD_FIELD(uint64_t ema_ctl:3,
++ __BITFIELD_FIELD(uint64_t reserved_34_36:3,
++ __BITFIELD_FIELD(uint64_t q3fus:34,
++ ;))))
++ } s;
++};
++
++#endif
+diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
+index 9742202f2a32..e638735cc3ac 100644
+--- a/arch/mips/include/asm/octeon/cvmx.h
++++ b/arch/mips/include/asm/octeon/cvmx.h
+@@ -62,6 +62,7 @@ enum cvmx_mips_space {
+ #include <asm/octeon/cvmx-iob-defs.h>
+ #include <asm/octeon/cvmx-ipd-defs.h>
+ #include <asm/octeon/cvmx-l2c-defs.h>
++#include <asm/octeon/cvmx-l2d-defs.h>
+ #include <asm/octeon/cvmx-l2t-defs.h>
+ #include <asm/octeon/cvmx-led-defs.h>
+ #include <asm/octeon/cvmx-mio-defs.h>
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 857129acf960..94a948207cd2 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -335,6 +335,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ maj = ((pvr >> 8) & 0xFF) - 1;
+ min = pvr & 0xFF;
+ break;
++ case 0x004e: /* POWER9 bits 12-15 give chip type */
++ maj = (pvr >> 8) & 0x0F;
++ min = pvr & 0xFF;
++ break;
+ default:
+ maj = (pvr >> 8) & 0xFF;
+ min = pvr & 0xFF;
+diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
+index d159e9b9c018..672391003e40 100644
+--- a/arch/xtensa/kernel/xtensa_ksyms.c
++++ b/arch/xtensa/kernel/xtensa_ksyms.c
+@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+ }
+ EXPORT_SYMBOL(__sync_fetch_and_or_4);
+
+-#ifdef CONFIG_NET
+ /*
+ * Networking support
+ */
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(csum_partial_copy_generic);
+-#endif /* CONFIG_NET */
+
+ /*
+ * Architecture-specific symbols
+diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
+index 1a804a2f9a5b..3c75c4e597da 100644
+--- a/arch/xtensa/mm/cache.c
++++ b/arch/xtensa/mm/cache.c
+@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
+ clear_page_alias(kvaddr, paddr);
+ preempt_enable();
+ }
++EXPORT_SYMBOL(clear_user_highpage);
+
+ void copy_user_highpage(struct page *dst, struct page *src,
+ unsigned long vaddr, struct vm_area_struct *vma)
+@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
+ copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+ preempt_enable();
+ }
+-
+-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+-
+-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
++EXPORT_SYMBOL(copy_user_highpage);
+
+ /*
+ * Any time the kernel writes to a user page cache page, or it is about to
+@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
+
+ /* There shouldn't be an entry in the cache for this page anymore. */
+ }
+-
++EXPORT_SYMBOL(flush_dcache_page);
+
+ /*
+ * For now, flush the whole cache. FIXME??
+@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
+ __flush_invalidate_dcache_all();
+ __invalidate_icache_all();
+ }
++EXPORT_SYMBOL(local_flush_cache_range);
+
+ /*
+ * Remove any entry in the cache for this page.
+@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+ __flush_invalidate_dcache_page_alias(virt, phys);
+ __invalidate_icache_page_alias(virt, phys);
+ }
++EXPORT_SYMBOL(local_flush_cache_page);
+
+-#endif
++#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+
+ void
+ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+
+ flush_tlb_page(vma, addr);
+
+-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
++#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+ if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
+ unsigned long phys = page_to_phys(page);
+@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+ * flush_dcache_page() on the page.
+ */
+
+-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
++#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 7353e0080062..2414e0cd3a02 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -620,8 +620,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+ unsigned long msecs)
+ {
+- kblockd_schedule_delayed_work(&q->requeue_work,
+- msecs_to_jiffies(msecs));
++ kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
++ msecs_to_jiffies(msecs));
+ }
+ EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
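The blk-mq hunk above switches from kblockd_schedule_delayed_work, which is a no-op while the work is still pending, to kblockd_mod_delayed_work_on, which re-arms the timer to the new deadline. A toy model of the difference; there is no real workqueue here, a stored deadline stands in for the timer:

#include <stdio.h>

struct dwork {
        int pending;
        long deadline;
};

/* schedule: only queues if not already pending (old behaviour). */
static void schedule_delayed(struct dwork *w, long now, long delay)
{
        if (w->pending)
                return;                 /* keeps the old, later deadline */
        w->pending = 1;
        w->deadline = now + delay;
}

/* mod: always (re)arms to the new deadline (the fix). */
static void mod_delayed(struct dwork *w, long now, long delay)
{
        w->pending = 1;
        w->deadline = now + delay;
}

int main(void)
{
        struct dwork a = { 0 }, b = { 0 };

        schedule_delayed(&a, 0, 1000);
        schedule_delayed(&a, 10, 1);    /* ignored: still fires at t=1000 */
        mod_delayed(&b, 0, 1000);
        mod_delayed(&b, 10, 1);         /* re-armed: fires at t=11 */
        printf("schedule: t=%ld, mod: t=%ld\n", a.deadline, b.deadline);
        return 0;
}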
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index ac350c518e0c..31c0586d9b13 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -30,7 +30,6 @@
+ #include <linux/syscore_ops.h>
+ #include <linux/reboot.h>
+ #include <linux/security.h>
+-#include <linux/swait.h>
+
+ #include <generated/utsrelease.h>
+
+@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
+ * state of the firmware loading.
+ */
+ struct fw_state {
+- struct swait_queue_head wq;
++ struct completion completion;
+ enum fw_status status;
+ };
+
+ static void fw_state_init(struct fw_state *fw_st)
+ {
+- init_swait_queue_head(&fw_st->wq);
++ init_completion(&fw_st->completion);
+ fw_st->status = FW_STATUS_UNKNOWN;
+ }
+
+@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+ {
+ long ret;
+
+- ret = swait_event_interruptible_timeout(fw_st->wq,
+- __fw_state_is_done(READ_ONCE(fw_st->status)),
+- timeout);
++ ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+- swake_up(&fw_st->wq);
++ complete_all(&fw_st->completion);
+ }
+
+ #define fw_state_start(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_LOADING)
+ #define fw_state_done(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_DONE)
++#define fw_state_aborted(fw_st) \
++ __fw_state_set(fw_st, FW_STATUS_ABORTED)
+ #define fw_state_wait(fw_st) \
+ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
+
+-#ifndef CONFIG_FW_LOADER_USER_HELPER
+-
+-#define fw_state_is_aborted(fw_st) false
+-
+-#else /* CONFIG_FW_LOADER_USER_HELPER */
+-
+ static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+ {
+ return fw_st->status == status;
+ }
+
++#define fw_state_is_aborted(fw_st) \
++ __fw_state_check(fw_st, FW_STATUS_ABORTED)
++
++#ifdef CONFIG_FW_LOADER_USER_HELPER
++
+ #define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
+ #define fw_state_is_done(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_DONE)
+ #define fw_state_is_loading(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_LOADING)
+-#define fw_state_is_aborted(fw_st) \
+- __fw_state_check(fw_st, FW_STATUS_ABORTED)
+ #define fw_state_wait_timeout(fw_st, timeout) \
+ __fw_state_wait_common(fw_st, timeout)
+
+@@ -1163,6 +1159,28 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
+ return 0;
+ }
+
++/*
++ * Batched requests need only one wake, we need to do this step last due to the
++ * fallback mechanism. The buf is protected with kref_get(), and it won't be
++ * released until the last user calls release_firmware().
++ *
++ * Failed batched requests are possible as well, in such cases we just share
++ * the struct firmware_buf and won't release it until all requests are woken
++ * and have gone through this same path.
++ */
++static void fw_abort_batch_reqs(struct firmware *fw)
++{
++ struct firmware_buf *buf;
++
++ /* Loaded directly? */
++ if (!fw || !fw->priv)
++ return;
++
++ buf = fw->priv;
++ if (!fw_state_is_aborted(&buf->fw_st))
++ fw_state_aborted(&buf->fw_st);
++}
++
+ /* called from request_firmware() and request_firmware_work_func() */
+ static int
+ _request_firmware(const struct firmware **firmware_p, const char *name,
+@@ -1224,6 +1242,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+
+ out:
+ if (ret < 0) {
++ fw_abort_batch_reqs(fw);
+ release_firmware(fw);
+ fw = NULL;
+ }
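The firmware hunks above swap a swait queue for a completion so that one complete_all() releases every batched waiter, including any that arrive after the state latches. A pthread sketch of that primitive, simplified to omit the timeout and abort paths:

#include <pthread.h>
#include <stdio.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static void init_completion(struct completion *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
}

/* Latches 'done', so even waiters arriving later return immediately. */
static void complete_all(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static struct completion fw_done;

static void *waiter(void *arg)
{
        wait_for_completion(&fw_done);
        printf("waiter %ld released\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[2];
        long i;

        init_completion(&fw_done);
        for (i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, waiter, (void *)i);
        complete_all(&fw_done);         /* wakes every batched waiter */
        for (i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        return 0;
}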
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 5c26488e7a2d..0529e500c534 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+
+ /* port@2 is the output port */
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
+- if (ret)
++ if (ret && ret != -ENODEV)
+ return ret;
+
+ /* Shut down GPIO is optional */
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index 1013765274da..0ceed22187df 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
+ if (ret)
+ return ret;
+
+- if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
+- DRM_ERROR("relocation %u outside object", i);
++ if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
++ DRM_ERROR("relocation %u outside object\n", i);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
+index 306c6b06b330..17c4ae7e4e7c 100644
+--- a/drivers/gpu/drm/i915/intel_color.c
++++ b/drivers/gpu/drm/i915/intel_color.c
+@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+ }
+
+ /* Program the max register to clamp values > 1.0. */
++ i = lut_size - 1;
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
+ drm_color_lut_extract(lut[i].red, 16));
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index d1263b82d646..0703da1d946a 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -254,6 +254,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ }
+
+ acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
++ /* Some broken DSTDs use 1MiHz instead of 1MHz */
++ if (acpi_speed == 1048576)
++ acpi_speed = 1000000;
+ /*
+ * Find bus speed from the "clock-frequency" device property, ACPI
+ * or by using fast mode if neither is set.
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index 6b5d3be283c4..807299dd45eb 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -193,7 +193,6 @@ struct bmc150_accel_data {
+ struct regmap *regmap;
+ int irq;
+ struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
+- atomic_t active_intr;
+ struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
+ struct mutex mutex;
+ u8 fifo_mode, watermark;
+@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
+ goto out_fix_power_state;
+ }
+
+- if (state)
+- atomic_inc(&data->active_intr);
+- else
+- atomic_dec(&data->active_intr);
+-
+ return 0;
+
+ out_fix_power_state:
+@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+- if (atomic_read(&data->active_intr))
+- bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
++ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ bmc150_accel_fifo_set_mode(data);
+ mutex_unlock(&data->mutex);
+
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index 784670e2736b..2ee3ae11eb2a 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .mask_ihl = 0x02,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
++ .sim = {
++ .addr = 0x23,
++ .value = BIT(0),
++ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .mask_od = 0x40,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
++ .sim = {
++ .addr = 0x23,
++ .value = BIT(0),
++ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .en_mask = 0x08,
+ },
+ },
++ .sim = {
++ .addr = 0x24,
++ .value = BIT(0),
++ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
+@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .mask_int1 = 0x04,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
++ .sim = {
++ .addr = 0x21,
++ .value = BIT(1),
++ },
+ .multi_read_bit = true,
+ .bootime = 2, /* guess */
+ },
+@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .mask_od = 0x40,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
++ .sim = {
++ .addr = 0x21,
++ .value = BIT(7),
++ },
+ .multi_read_bit = false,
+ .bootime = 2, /* guess */
+ },
+@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x80,
+ },
++ .sim = {
++ .addr = 0x23,
++ .value = BIT(0),
++ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .mask_int1 = 0x04,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
++ .sim = {
++ .addr = 0x21,
++ .value = BIT(1),
++ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
+@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ .mask_ihl = 0x02,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
++ .sim = {
++ .addr = 0x23,
++ .value = BIT(0),
++ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
+index 62670cbfa2bb..87fd6e0ce5ee 100644
+--- a/drivers/iio/adc/aspeed_adc.c
++++ b/drivers/iio/adc/aspeed_adc.c
+@@ -22,6 +22,7 @@
+
+ #include <linux/iio/iio.h>
+ #include <linux/iio/driver.h>
++#include <linux/iopoll.h>
+
+ #define ASPEED_RESOLUTION_BITS 10
+ #define ASPEED_CLOCKS_PER_SAMPLE 12
+@@ -38,11 +39,17 @@
+
+ #define ASPEED_ENGINE_ENABLE BIT(0)
+
++#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
++
++#define ASPEED_ADC_INIT_POLLING_TIME 500
++#define ASPEED_ADC_INIT_TIMEOUT 500000
++
+ struct aspeed_adc_model_data {
+ const char *model_name;
+ unsigned int min_sampling_rate; // Hz
+ unsigned int max_sampling_rate; // Hz
+ unsigned int vref_voltage; // mV
++ bool wait_init_sequence;
+ };
+
+ struct aspeed_adc_data {
+@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
+ goto scaler_error;
+ }
+
++ model_data = of_device_get_match_data(&pdev->dev);
++
++ if (model_data->wait_init_sequence) {
++ /* Enable engine in normal mode. */
++ writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
++ data->base + ASPEED_REG_ENGINE_CONTROL);
++
++ /* Wait for initial sequence complete. */
++ ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
++ adc_engine_control_reg_val,
++ adc_engine_control_reg_val &
++ ASPEED_ADC_CTRL_INIT_RDY,
++ ASPEED_ADC_INIT_POLLING_TIME,
++ ASPEED_ADC_INIT_TIMEOUT);
++ if (ret)
++ goto scaler_error;
++ }
++
+ /* Start all channels in normal mode. */
+ clk_prepare_enable(data->clk_scaler->clk);
+ adc_engine_control_reg_val = GENMASK(31, 16) |
+@@ -270,6 +295,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
+ .vref_voltage = 1800, // mV
+ .min_sampling_rate = 1,
+ .max_sampling_rate = 1000000,
++ .wait_init_sequence = true,
+ };
+
+ static const struct of_device_id aspeed_adc_matches[] = {
+diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
+index 64799ad7ebad..7fd24949c0c1 100644
+--- a/drivers/iio/adc/axp288_adc.c
++++ b/drivers/iio/adc/axp288_adc.c
+@@ -28,6 +28,8 @@
+ #include <linux/iio/driver.h>
+
+ #define AXP288_ADC_EN_MASK 0xF1
++#define AXP288_ADC_TS_PIN_GPADC 0xF2
++#define AXP288_ADC_TS_PIN_ON 0xF3
+
+ enum axp288_adc_id {
+ AXP288_ADC_TS,
+@@ -121,6 +123,16 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
+ return IIO_VAL_INT;
+ }
+
++static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
++ unsigned long address)
++{
++ /* channels other than GPADC do not need to switch TS pin */
++ if (address != AXP288_GP_ADC_H)
++ return 0;
++
++ return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
++}
++
+ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+@@ -131,7 +143,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ mutex_lock(&indio_dev->mlock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
++ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
++ chan->address)) {
++ dev_err(&indio_dev->dev, "GPADC mode\n");
++ ret = -EINVAL;
++ break;
++ }
+ ret = axp288_adc_read_channel(val, chan->address, info->regmap);
++ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
++ chan->address))
++ dev_err(&indio_dev->dev, "TS pin restore\n");
+ break;
+ default:
+ ret = -EINVAL;
+@@ -141,6 +162,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ return ret;
+ }
+
++static int axp288_adc_set_state(struct regmap *regmap)
++{
++ /* ADC should be always enabled for internal FG to function */
++ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
++ return -EIO;
++
++ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
++}
++
+ static const struct iio_info axp288_adc_iio_info = {
+ .read_raw = &axp288_adc_read_raw,
+ .driver_module = THIS_MODULE,
+@@ -169,7 +199,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
+ * Set ADC to enabled state at all time, including system suspend.
+ * otherwise internal fuel gauge functionality may be affected.
+ */
+- ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
++ ret = axp288_adc_set_state(axp20x->regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable ADC device\n");
+ return ret;
+diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
+index 01fc76f7d660..c168e0db329a 100644
+--- a/drivers/iio/adc/vf610_adc.c
++++ b/drivers/iio/adc/vf610_adc.c
+@@ -77,7 +77,7 @@
+ #define VF610_ADC_ADSTS_MASK 0x300
+ #define VF610_ADC_ADLPC_EN 0x80
+ #define VF610_ADC_ADHSC_EN 0x400
+-#define VF610_ADC_REFSEL_VALT 0x100
++#define VF610_ADC_REFSEL_VALT 0x800
+ #define VF610_ADC_REFSEL_VBG 0x1000
+ #define VF610_ADC_ADTRG_HARD 0x2000
+ #define VF610_ADC_AVGS_8 0x4000
+diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
+index 79c8c7cd70d5..6e6a1ecc99dd 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_core.c
++++ b/drivers/iio/common/st_sensors/st_sensors_core.c
+@@ -550,6 +550,31 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ }
+ EXPORT_SYMBOL(st_sensors_read_info_raw);
+
++static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
++ const struct st_sensor_settings *sensor_settings)
++{
++ struct st_sensor_data *sdata = iio_priv(indio_dev);
++ struct device_node *np = sdata->dev->of_node;
++ struct st_sensors_platform_data *pdata;
++
++ pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
++ if (((np && of_property_read_bool(np, "spi-3wire")) ||
++ (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
++ int err;
++
++ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
++ sensor_settings->sim.addr,
++ sensor_settings->sim.value);
++ if (err < 0) {
++ dev_err(&indio_dev->dev,
++ "failed to init interface mode\n");
++ return err;
++ }
++ }
++
++ return 0;
++}
++
+ int st_sensors_check_device_support(struct iio_dev *indio_dev,
+ int num_sensors_list,
+ const struct st_sensor_settings *sensor_settings)
+@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
+ return -ENODEV;
+ }
+
++ err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
++ if (err < 0)
++ return err;
++
+ if (sensor_settings[i].wai_addr) {
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sensor_settings[i].wai_addr, &wai);
+diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
+index e7d4ea75e007..7599693f7fe9 100644
+--- a/drivers/iio/light/tsl2563.c
++++ b/drivers/iio/light/tsl2563.c
+@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
+ struct tsl2563_chip *chip = iio_priv(dev_info);
+
+ iio_push_event(dev_info,
+- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
++ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
+index fd0edca0e656..99448012b47f 100644
+--- a/drivers/iio/pressure/st_pressure_core.c
++++ b/drivers/iio/pressure/st_pressure_core.c
+@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ .mask_od = 0x40,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+- .multi_read_bit = true,
++ .multi_read_bit = false,
+ .bootime = 2,
+ },
+ };
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 2c87dede5841..f451094acb8d 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1288,7 +1288,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+ static int mmc_select_hs400es(struct mmc_card *card)
+ {
+ struct mmc_host *host = card->host;
+- int err = 0;
++ int err = -EINVAL;
+ u8 val;
+
+ if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
+index 55a8ee5306ea..8c210a5776bc 100644
+--- a/drivers/mtd/nand/atmel/pmecc.c
++++ b/drivers/mtd/nand/atmel/pmecc.c
+@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
+ */
+ struct platform_device *pdev = to_platform_device(userdev);
+ const struct atmel_pmecc_caps *caps;
++ const struct of_device_id *match;
+
+ /* No PMECC engine available. */
+ if (!of_property_read_bool(userdev->of_node,
+@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
+
+ caps = &at91sam9g45_caps;
+
+- /*
+- * Try to find the NFC subnode and extract the associated caps
+- * from there.
+- */
+- np = of_find_compatible_node(userdev->of_node, NULL,
+- "atmel,sama5d3-nfc");
+- if (np) {
+- const struct of_device_id *match;
+-
+- match = of_match_node(atmel_pmecc_legacy_match, np);
+- if (match && match->data)
+- caps = match->data;
+-
+- of_node_put(np);
+- }
++ /* Find the caps associated to the NAND dev node. */
++ match = of_match_node(atmel_pmecc_legacy_match,
++ userdev->of_node);
++ if (match && match->data)
++ caps = match->data;
+
+ pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
+ }
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index b1dd12729f19..6f9771e82476 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+
+ if (!section) {
+ oobregion->offset = 0;
+- oobregion->length = 4;
++ if (mtd->oobsize == 16)
++ oobregion->length = 4;
++ else
++ oobregion->length = 3;
+ } else {
++ if (mtd->oobsize == 8)
++ return -ERANGE;
++
+ oobregion->offset = 6;
+ oobregion->length = ecc->total - 4;
+ }
+@@ -1102,7 +1108,9 @@ static int nand_setup_data_interface(struct nand_chip *chip)
+ * Ensure the timing mode has been changed on the chip side
+ * before changing timings on the controller side.
+ */
+- if (chip->onfi_version) {
++ if (chip->onfi_version &&
++ (le16_to_cpu(chip->onfi_params.opt_cmd) &
++ ONFI_OPT_CMD_SET_GET_FEATURES)) {
+ u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
+ chip->onfi_timing_mode_default,
+ };
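
The first hunk carries real arithmetic: on small-page NAND the region at offset 0 is 4 bytes only when the OOB area is 16 bytes; an 8-byte OOB gets 3 bytes and has no second region at all. (The second hunk is a capability gate: SET/GET_FEATURES is only issued when the ONFI parameter page advertises it.) A standalone sketch of the section logic (the struct and the ecc_total value are illustrative, not the mtd types):

#include <errno.h>
#include <stdio.h>

struct oob_region { int offset, length; };

/* Model of nand_ooblayout_ecc_sp() after the fix. */
static int ecc_sp_region(int section, int oobsize, int ecc_total,
                         struct oob_region *r)
{
        if (!section) {
                r->offset = 0;
                r->length = (oobsize == 16) ? 4 : 3;
                return 0;
        }
        if (oobsize == 8)
                return -ERANGE; /* an 8-byte OOB has no second ECC region */
        r->offset = 6;
        r->length = ecc_total - 4;
        return 0;
}

int main(void)
{
        struct oob_region r;

        for (int oobsize = 8; oobsize <= 16; oobsize += 8)
                for (int s = 0; s < 2; s++)
                        if (!ecc_sp_region(s, oobsize, 6, &r))
                                printf("oobsize %2d section %d -> off %d len %d\n",
                                       oobsize, s, r.offset, r.length);
        return 0;
}
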
+diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
+index f06312df3669..7e36d7d13c26 100644
+--- a/drivers/mtd/nand/nand_timings.c
++++ b/drivers/mtd/nand/nand_timings.c
+@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+- timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
+- timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
+- timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
++ timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
++ timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
++ timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
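
The UL to ULL switch is an overflow fix: tPROG_max and friends hold picoseconds in 64 bits, but on a 32-bit kernel 1000000UL * le16_to_cpu(...) is computed in 32-bit arithmetic and wraps before the assignment widens it. The nanosecond conversions below it stay UL because 65535 * 1000 still fits in 32 bits. A standalone demonstration, forcing the 32-bit product with uint32_t so it misbehaves the same way on any host:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t t_prog = 65535;        /* worst-case ONFI field value */

        /* What a 32-bit build computed with 1000000UL: the product wraps
         * modulo 2^32 before being stored into the 64-bit field. */
        uint64_t wrong = (uint32_t)(1000000UL * t_prog);

        /* With ULL the multiplication itself is done in 64 bits. */
        uint64_t right = 1000000ULL * t_prog;

        printf("wrong: %" PRIu64 " ps\n", wrong);  /* 1110490560 */
        printf("right: %" PRIu64 " ps\n", right);  /* 65535000000 */
        return 0;
}
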
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 563901cd9c06..9e5483780c97 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4069,40 +4069,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+ return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
+ }
+
+-static int __pci_dev_reset(struct pci_dev *dev, int probe)
+-{
+- int rc;
+-
+- might_sleep();
+-
+- rc = pci_dev_specific_reset(dev, probe);
+- if (rc != -ENOTTY)
+- goto done;
+-
+- if (pcie_has_flr(dev)) {
+- if (!probe)
+- pcie_flr(dev);
+- rc = 0;
+- goto done;
+- }
+-
+- rc = pci_af_flr(dev, probe);
+- if (rc != -ENOTTY)
+- goto done;
+-
+- rc = pci_pm_reset(dev, probe);
+- if (rc != -ENOTTY)
+- goto done;
+-
+- rc = pci_dev_reset_slot_function(dev, probe);
+- if (rc != -ENOTTY)
+- goto done;
+-
+- rc = pci_parent_bus_reset(dev, probe);
+-done:
+- return rc;
+-}
+-
+ static void pci_dev_lock(struct pci_dev *dev)
+ {
+ pci_cfg_access_lock(dev);
+@@ -4141,6 +4107,12 @@ static void pci_reset_notify(struct pci_dev *dev, bool prepare)
+ {
+ const struct pci_error_handlers *err_handler =
+ dev->driver ? dev->driver->err_handler : NULL;
++
++ /*
++ * dev->driver->err_handler->reset_notify() is protected against
++ * races with ->remove() by the device lock, which must be held by
++ * the caller.
++ */
+ if (err_handler && err_handler->reset_notify)
+ err_handler->reset_notify(dev, prepare);
+ }
+@@ -4173,21 +4145,6 @@ static void pci_dev_restore(struct pci_dev *dev)
+ pci_reset_notify(dev, false);
+ }
+
+-static int pci_dev_reset(struct pci_dev *dev, int probe)
+-{
+- int rc;
+-
+- if (!probe)
+- pci_dev_lock(dev);
+-
+- rc = __pci_dev_reset(dev, probe);
+-
+- if (!probe)
+- pci_dev_unlock(dev);
+-
+- return rc;
+-}
+-
+ /**
+ * __pci_reset_function - reset a PCI device function
+ * @dev: PCI device to reset
+@@ -4207,7 +4164,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
+ */
+ int __pci_reset_function(struct pci_dev *dev)
+ {
+- return pci_dev_reset(dev, 0);
++ int ret;
++
++ pci_dev_lock(dev);
++ ret = __pci_reset_function_locked(dev);
++ pci_dev_unlock(dev);
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(__pci_reset_function);
+
+@@ -4232,7 +4195,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function);
+ */
+ int __pci_reset_function_locked(struct pci_dev *dev)
+ {
+- return __pci_dev_reset(dev, 0);
++ int rc;
++
++ might_sleep();
++
++ rc = pci_dev_specific_reset(dev, 0);
++ if (rc != -ENOTTY)
++ return rc;
++ if (pcie_has_flr(dev)) {
++ pcie_flr(dev);
++ return 0;
++ }
++ rc = pci_af_flr(dev, 0);
++ if (rc != -ENOTTY)
++ return rc;
++ rc = pci_pm_reset(dev, 0);
++ if (rc != -ENOTTY)
++ return rc;
++ rc = pci_dev_reset_slot_function(dev, 0);
++ if (rc != -ENOTTY)
++ return rc;
++ return pci_parent_bus_reset(dev, 0);
+ }
+ EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
+
+@@ -4249,7 +4232,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
+ */
+ int pci_probe_reset_function(struct pci_dev *dev)
+ {
+- return pci_dev_reset(dev, 1);
++ int rc;
++
++ might_sleep();
++
++ rc = pci_dev_specific_reset(dev, 1);
++ if (rc != -ENOTTY)
++ return rc;
++ if (pcie_has_flr(dev))
++ return 0;
++ rc = pci_af_flr(dev, 1);
++ if (rc != -ENOTTY)
++ return rc;
++ rc = pci_pm_reset(dev, 1);
++ if (rc != -ENOTTY)
++ return rc;
++ rc = pci_dev_reset_slot_function(dev, 1);
++ if (rc != -ENOTTY)
++ return rc;
++
++ return pci_parent_bus_reset(dev, 1);
+ }
+
+ /**
+@@ -4272,20 +4274,57 @@ int pci_reset_function(struct pci_dev *dev)
+ {
+ int rc;
+
+- rc = pci_dev_reset(dev, 1);
++ rc = pci_probe_reset_function(dev);
+ if (rc)
+ return rc;
+
++ pci_dev_lock(dev);
+ pci_dev_save_and_disable(dev);
+
+- rc = pci_dev_reset(dev, 0);
++ rc = __pci_reset_function_locked(dev);
+
+ pci_dev_restore(dev);
++ pci_dev_unlock(dev);
+
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(pci_reset_function);
+
++/**
++ * pci_reset_function_locked - quiesce and reset a PCI device function
++ * @dev: PCI device to reset
++ *
++ * Some devices allow an individual function to be reset without affecting
++ * other functions in the same device. The PCI device must be responsive
++ * to PCI config space in order to use this function.
++ *
++ * This function does not just reset the PCI portion of a device, but
++ * clears all the state associated with the device. This function differs
++ * from __pci_reset_function() in that it saves and restores device state
++ * over the reset. It also differs from pci_reset_function() in that it
++ * requires the PCI device lock to be held.
++ *
++ * Returns 0 if the device function was successfully reset or negative if the
++ * device doesn't support resetting a single function.
++ */
++int pci_reset_function_locked(struct pci_dev *dev)
++{
++ int rc;
++
++ rc = pci_probe_reset_function(dev);
++ if (rc)
++ return rc;
++
++ pci_dev_save_and_disable(dev);
++
++ rc = __pci_reset_function_locked(dev);
++
++ pci_dev_restore(dev);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(pci_reset_function_locked);
++
+ /**
+ * pci_try_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+@@ -4296,20 +4335,18 @@ int pci_try_reset_function(struct pci_dev *dev)
+ {
+ int rc;
+
+- rc = pci_dev_reset(dev, 1);
++ rc = pci_probe_reset_function(dev);
+ if (rc)
+ return rc;
+
+- pci_dev_save_and_disable(dev);
++ if (!pci_dev_trylock(dev))
++ return -EAGAIN;
+
+- if (pci_dev_trylock(dev)) {
+- rc = __pci_dev_reset(dev, 0);
+- pci_dev_unlock(dev);
+- } else
+- rc = -EAGAIN;
++ pci_dev_save_and_disable(dev);
++ rc = __pci_reset_function_locked(dev);
++ pci_dev_unlock(dev);
+
+ pci_dev_restore(dev);
+-
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(pci_try_reset_function);
+@@ -4459,7 +4496,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus)
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
++ pci_dev_lock(dev);
+ pci_dev_save_and_disable(dev);
++ pci_dev_unlock(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+@@ -4474,7 +4513,9 @@ static void pci_bus_restore(struct pci_bus *bus)
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
++ pci_dev_lock(dev);
+ pci_dev_restore(dev);
++ pci_dev_unlock(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
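
The refactor removes the probe-flag plumbing (__pci_dev_reset() and pci_dev_reset()) and writes the method cascade out explicitly, once executing in __pci_reset_function_locked() and once probing in pci_probe_reset_function(), with locking pushed out to the callers. The contract between the methods is unchanged: -ENOTTY means "this mechanism does not apply, try the next one", and anything else is final. A stub model of that cascade (plain C, not the PCI code; pm_reset is arbitrarily chosen as the first supported method):

#include <errno.h>
#include <stdio.h>

/* Each stub mimics one reset method: -ENOTTY means "unsupported, fall
 * through"; 0 or any other error code ends the cascade. */
static int dev_specific(void) { return -ENOTTY; }
static int flr(void)          { return -ENOTTY; }
static int af_flr(void)       { return -ENOTTY; }
static int pm_reset(void)     { return 0; }
static int slot_reset(void)   { return -ENOTTY; }
static int parent_bus(void)   { return -ENOTTY; }

static int reset_cascade(void)
{
        int rc;

        rc = dev_specific();
        if (rc != -ENOTTY)
                return rc;
        rc = flr();
        if (rc != -ENOTTY)
                return rc;
        rc = af_flr();
        if (rc != -ENOTTY)
                return rc;
        rc = pm_reset();
        if (rc != -ENOTTY)
                return rc;
        rc = slot_reset();
        if (rc != -ENOTTY)
                return rc;
        return parent_bus();    /* the last resort */
}

int main(void)
{
        printf("reset rc = %d\n", reset_cascade()); /* 0, via pm_reset */
        return 0;
}
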
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 20f1b4493994..04e929fd0ffe 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1547,6 +1547,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
+ },
+ },
++ {
++ .ident = "HP Chromebook 11 G5 (Setzer)",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
++ },
++ },
+ {
+ .ident = "Acer Chromebook R11 (Cyan)",
+ .matches = {
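
The hunk only appends a table entry, but the mechanism behind it is worth sketching: chv_no_valid_mask is a dmi_system_id list, and the quirk fires when every DMI_MATCH field of an entry is found in the firmware's DMI strings (DMI_MATCH is a substring test in the kernel). A toy matcher with the same semantics, using plain strings in place of the DMI fields (the Acer entry's match strings are guessed for illustration):

#include <stdio.h>
#include <string.h>

struct quirk { const char *ident, *vendor, *product; };

static const struct quirk no_valid_mask[] = {
        { "HP Chromebook 11 G5 (Setzer)", "HP",   "Setzer" },
        { "Acer Chromebook R11 (Cyan)",   "Acer", "Cyan"   }, /* guessed */
        { 0 }
};

/* Every field of an entry must be a substring hit, like DMI_MATCH. */
static const struct quirk *match(const char *vendor, const char *product)
{
        for (const struct quirk *q = no_valid_mask; q->ident; q++)
                if (strstr(vendor, q->vendor) && strstr(product, q->product))
                        return q;
        return NULL;
}

int main(void)
{
        const struct quirk *q = match("HP", "Setzer");
        printf("%s\n", q ? q->ident : "no quirk");
        return 0;
}
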
+diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
+index 4d4ef42a39b5..86c4b3fab7b0 100644
+--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
+@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
+
+ static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
+ static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
+-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
+-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
+-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
++static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
++static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
++static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
+ static const unsigned int mrfld_pwm0_pins[] = { 144 };
+ static const unsigned int mrfld_pwm1_pins[] = { 145 };
+ static const unsigned int mrfld_pwm2_pins[] = { 132 };
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+index 9b00be15d258..df942272ba54 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+@@ -85,6 +85,7 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
+ MESON_PIN(GPIODV_15, EE_OFF),
+ MESON_PIN(GPIODV_16, EE_OFF),
+ MESON_PIN(GPIODV_17, EE_OFF),
++ MESON_PIN(GPIODV_18, EE_OFF),
+ MESON_PIN(GPIODV_19, EE_OFF),
+ MESON_PIN(GPIODV_20, EE_OFF),
+ MESON_PIN(GPIODV_21, EE_OFF),
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+index 998210eacf37..3046fd732155 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+@@ -89,6 +89,7 @@ static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = {
+ MESON_PIN(GPIODV_15, EE_OFF),
+ MESON_PIN(GPIODV_16, EE_OFF),
+ MESON_PIN(GPIODV_17, EE_OFF),
++ MESON_PIN(GPIODV_18, EE_OFF),
+ MESON_PIN(GPIODV_19, EE_OFF),
+ MESON_PIN(GPIODV_20, EE_OFF),
+ MESON_PIN(GPIODV_21, EE_OFF),
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+index 5c96f5558310..6aaeb0e9360e 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -176,7 +176,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
+ };
+
+ const struct armada_37xx_pin_data armada_37xx_pin_sb = {
+- .nr_pins = 29,
++ .nr_pins = 30,
+ .name = "GPIO2",
+ .groups = armada_37xx_sb_groups,
+ .ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index 7b0e6cc35e04..2ea8b1505138 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -205,8 +205,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
+
+ spin_unlock_irqrestore(&bank->slock, flags);
+
+- exynos_irq_unmask(irqd);
+-
+ return 0;
+ }
+
+@@ -226,8 +224,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
+ shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
+ mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+
+- exynos_irq_mask(irqd);
+-
+ spin_lock_irqsave(&bank->slock, flags);
+
+ con = readl(bank->eint_base + reg_con);
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+index fb30b86a97ee..5fbbdbf349b8 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+@@ -811,6 +811,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */
+ SUNXI_FUNCTION(0x3, "pata"), /* ATAD12 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
++ SUNXI_FUNCTION(0x5, "sim"), /* DET */
+ SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
+diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+index 706effe0a492..ad73db8d067b 100644
+--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
++++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+@@ -508,57 +508,71 @@ static const unsigned usb1_pins[] = {48, 49};
+ static const int usb1_muxvals[] = {0, 0};
+ static const unsigned usb2_pins[] = {50, 51};
+ static const int usb2_muxvals[] = {0, 0};
+-static const unsigned port_range_pins[] = {
++static const unsigned port_range0_pins[] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, /* PORT0x */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */
+- 16, 17, 18, -1, -1, -1, -1, -1, /* PORT3x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */
+- -1, -1, -1, 46, 47, 48, 49, 50, /* PORT5x */
+- 51, -1, -1, 54, 55, 56, 57, 58, /* PORT6x */
++ 16, 17, 18, /* PORT30-32 */
++};
++static const int port_range0_muxvals[] = {
++ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
++ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
++ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
++ 15, 15, 15, /* PORT30-32 */
++};
++static const unsigned port_range1_pins[] = {
++ 46, 47, 48, 49, 50, /* PORT53-57 */
++ 51, /* PORT60 */
++};
++static const int port_range1_muxvals[] = {
++ 15, 15, 15, 15, 15, /* PORT53-57 */
++ 15, /* PORT60 */
++};
++static const unsigned port_range2_pins[] = {
++ 54, 55, 56, 57, 58, /* PORT63-67 */
+ 59, 60, 69, 70, 71, 72, 73, 74, /* PORT7x */
+ 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */
+ 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */
+ 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+- 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
+- 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
+- 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+- 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+- 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
+- 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
+- 139, 140, 141, 142, -1, -1, -1, -1, /* PORT22x */
+- 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
+- 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */
+ };
+-static const int port_range_muxvals[] = {
+- 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+- 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+- 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+- 15, 15, 15, -1, -1, -1, -1, -1, /* PORT3x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */
+- -1, -1, -1, 15, 15, 15, 15, 15, /* PORT5x */
+- 15, -1, -1, 15, 15, 15, 15, 15, /* PORT6x */
++static const int port_range2_muxvals[] = {
++ 15, 15, 15, 15, 15, /* PORT63-67 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
++};
++static const unsigned port_range3_pins[] = {
++ 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
++ 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
++ 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
++};
++static const int port_range3_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
++};
++static const unsigned port_range4_pins[] = {
++ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
++};
++static const int port_range4_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
++};
++static const unsigned port_range5_pins[] = {
++ 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
++ 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
++ 139, 140, 141, 142, /* PORT220-223 */
++};
++static const int port_range5_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
+- 15, 15, 15, 15, -1, -1, -1, -1, /* PORT22x */
++ 15, 15, 15, 15, /* PORT220-223 */
++};
++static const unsigned port_range6_pins[] = {
++ 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
++ 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */
++};
++static const int port_range6_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
+ };
+@@ -607,147 +621,153 @@ static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+- UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range4),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range5),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range6),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range1, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range1, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range1, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range1, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range1, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range1, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range2, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range2, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range2, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range2, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range2, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range2, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range2, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range2, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range2, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range2, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range2, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range2, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range2, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range2, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range2, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range2, 15),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range2, 16),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range2, 17),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range2, 18),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range2, 19),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range2, 20),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range2, 21),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range2, 22),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range2, 23),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range2, 24),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range2, 25),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range2, 26),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range2, 27),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range2, 28),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range2, 29),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range2, 30),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range2, 31),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range2, 32),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range2, 33),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range2, 34),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range2, 35),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range2, 36),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range3, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range3, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range3, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range3, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range3, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range3, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range3, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range3, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range3, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range3, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range3, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range3, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range3, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range3, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range3, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range3, 15),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range3, 16),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range3, 17),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range3, 18),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range3, 19),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range3, 20),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range3, 21),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range3, 22),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range3, 23),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range4, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range4, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range4, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range4, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range4, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range4, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range4, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range4, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range5, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range5, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range5, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range5, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range5, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range5, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range5, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range5, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range5, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range5, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range5, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range5, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range5, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range5, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range5, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range5, 15),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range5, 16),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range5, 17),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range5, 18),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range5, 19),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range6, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range6, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range6, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range6, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range6, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range6, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range6, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range6, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range6, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range6, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range6, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range6, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range6, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range6, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range6, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range6, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+index c8d18a2d3a88..93006626028d 100644
+--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
++++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+@@ -597,7 +597,7 @@ static const unsigned usb2_pins[] = {50, 51};
+ static const int usb2_muxvals[] = {0, 0};
+ static const unsigned usb3_pins[] = {52, 53};
+ static const int usb3_muxvals[] = {0, 0};
+-static const unsigned port_range_pins[] = {
++static const unsigned port_range0_pins[] = {
+ 168, 169, 170, 171, 172, 173, 174, 175, /* PORT0x */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */
+@@ -609,23 +609,8 @@ static const unsigned port_range_pins[] = {
+ 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */
+ 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */
+ 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+- 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
+- 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
+- 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+- 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+- 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
+- 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
+- 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */
+- 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
+- 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */
+- 163, 164, 165, 166, 167, /* PORT25x */
+ };
+-static const int port_range_muxvals[] = {
++static const int port_range0_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+@@ -637,21 +622,38 @@ static const int port_range_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
++};
++static const unsigned port_range1_pins[] = {
++ 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
++ 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
++ 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
++};
++static const int port_range1_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
++};
++static const unsigned port_range2_pins[] = {
++ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
++};
++static const int port_range2_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
+- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
++};
++static const unsigned port_range3_pins[] = {
++ 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
++ 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
++ 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */
++ 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
++ 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */
++ 163, 164, 165, 166, 167, /* PORT250-254 */
++};
++static const int port_range3_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
+- 15, 15, 15, 15, 15, /* PORT25x */
++ 15, 15, 15, 15, 15, /* PORT250-254 */
+ };
+ static const unsigned xirq_pins[] = {
+ 149, 150, 151, 152, 153, 154, 155, 156, /* XIRQ0-7 */
+@@ -695,174 +697,177 @@ static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP(usb3),
+- UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
++ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range, 27),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range, 28),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range, 29),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range, 30),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range, 31),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range, 32),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range, 33),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range, 34),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range, 35),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range, 36),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range, 37),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range, 38),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range, 39),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range, 40),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range, 41),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range, 42),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range, 49),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range, 50),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range, 180),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range, 181),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range, 182),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range, 183),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range, 200),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range, 201),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range, 202),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range, 203),
+- UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range, 204),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range2, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range2, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range2, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range2, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range2, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range2, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range2, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range2, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range3, 0),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range3, 1),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range3, 2),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range3, 3),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range3, 4),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range3, 5),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range3, 6),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range3, 7),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range3, 8),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range3, 9),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range3, 10),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range3, 11),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range3, 12),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range3, 13),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range3, 14),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range3, 15),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range3, 16),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range3, 17),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range3, 18),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range3, 19),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range3, 20),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range3, 21),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range3, 22),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range3, 23),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range3, 24),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range3, 25),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range3, 26),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range3, 27),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range3, 28),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range3, 29),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range3, 30),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range3, 31),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range3, 32),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range3, 33),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range3, 34),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range3, 35),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range3, 36),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range3, 37),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range3, 38),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range3, 39),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range3, 40),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range3, 41),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range3, 42),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range3, 43),
++ UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range3, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 4ed485a99c68..11d809780ee0 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+ continue;
+ }
+
++ set_current_state(TASK_RUNNING);
+ wp = async->buf_write_ptr;
+ n1 = min(n, async->prealloc_bufsz - wp);
+ n2 = n - n1;
+@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ }
+ continue;
+ }
++
++ set_current_state(TASK_RUNNING);
+ rp = async->buf_read_ptr;
+ n1 = min(n, async->prealloc_bufsz - rp);
+ n2 = n - n1;
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index a6a8393d6664..3e00df74b18c 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
+ long m)
+ {
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+- bool negative;
++ u16 negative;
+ int ret = 0;
+ u16 pos;
+ s16 vel;
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 3fdca2cdd8da..db843e3f355a 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
+ return 0;
+ }
+ np->np_thread_state = ISCSI_NP_THREAD_RESET;
++ atomic_inc(&np->np_reset_count);
+
+ if (np->np_thread) {
+ spin_unlock_bh(&np->np_thread_lock);
+@@ -2173,6 +2174,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
+ cmd->data_direction = DMA_NONE;
++ kfree(cmd->text_in_ptr);
+ cmd->text_in_ptr = NULL;
+
+ return 0;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 92b96b51d506..e491cf75e92d 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1237,9 +1237,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ flush_signals(current);
+
+ spin_lock_bh(&np->np_thread_lock);
+- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
++ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
++ spin_unlock_bh(&np->np_thread_lock);
+ complete(&np->np_restart_comp);
++ return 1;
+ } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+ spin_unlock_bh(&np->np_thread_lock);
+ goto exit;
+@@ -1272,7 +1274,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ goto exit;
+ } else if (rc < 0) {
+ spin_lock_bh(&np->np_thread_lock);
+- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
++ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
++ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ spin_unlock_bh(&np->np_thread_lock);
+ complete(&np->np_restart_comp);
+ iscsit_put_transport(conn->conn_transport);
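The two login-thread hunks above replace a racy state check with an atomic np_reset_count that is bumped once per reset request and consumed with atomic_dec_if_positive(), so concurrent resets are each acknowledged exactly once. A standalone C11 sketch of the dec-if-positive primitive (illustrative only, not the kernel's implementation):

/* dec_if_positive(): decrement only while the counter is positive;
 * return the old value minus one either way, so a negative result
 * means "nothing to consume" and the counter was left untouched. */
#include <stdatomic.h>
#include <stdio.h>

static int dec_if_positive(atomic_int *v)
{
    int old = atomic_load(v);

    do {
        if (old <= 0)
            return old - 1;   /* negative: no pending reset */
    } while (!atomic_compare_exchange_weak(v, &old, old - 1));

    return old - 1;
}

int main(void)
{
    atomic_int reset_count = 1;

    printf("%d\n", dec_if_positive(&reset_count));  /* 0: consumed */
    printf("%d\n", dec_if_positive(&reset_count));  /* -1: none left */
    return 0;
}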
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 310d9e55c6eb..2d9ad10de3b3 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
+ mutex_lock(&tpg->acl_node_mutex);
+ if (acl->dynamic_node_acl)
+ acl->dynamic_node_acl = 0;
+- list_del(&acl->acl_list);
++ list_del_init(&acl->acl_list);
+ mutex_unlock(&tpg->acl_node_mutex);
+
+ target_shutdown_sessions(acl);
+@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
+ * in transport_deregister_session().
+ */
+ list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
+- list_del(&nacl->acl_list);
++ list_del_init(&nacl->acl_list);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+ core_free_device_list_for_node(nacl, se_tpg);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 019763561e52..884780d2ec69 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
+ }
+
+ mutex_lock(&se_tpg->acl_node_mutex);
+- list_del(&nacl->acl_list);
++ list_del_init(&nacl->acl_list);
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+ if (se_nacl->dynamic_stop)
+- list_del(&se_nacl->acl_list);
++ list_del_init(&se_nacl->acl_list);
+ }
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
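The target_core hunks above consistently switch list_del() to list_del_init() on acl_list, so a node ACL can be unlinked from several paths without one of them chasing poisoned pointers. A minimal userspace sketch of the difference, not the kernel's <linux/list.h>:

/* list_del() poisons the entry's links; list_del_init() leaves it as
 * a valid empty list, so it may be tested or unlinked again safely. */
#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_POISON1 ((struct list_head *)0x100)
#define LIST_POISON2 ((struct list_head *)0x200)

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void __list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

static void list_del(struct list_head *e)      /* unlink and poison */
{
    __list_del(e);
    e->next = LIST_POISON1;
    e->prev = LIST_POISON2;
}

static void list_del_init(struct list_head *e) /* unlink and re-init */
{
    __list_del(e);
    list_init(e);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
    struct list_head head, node;

    list_init(&head);
    list_add_tail(&node, &head);
    list_del(&node);                 /* node now holds poison values */
    assert(node.next == LIST_POISON1);

    list_init(&head);
    list_add_tail(&node, &head);
    list_del_init(&node);            /* node is a valid empty list */
    assert(list_empty(&node));
    list_del_init(&node);            /* idempotent; a second list_del()
                                      * would write through the poison */
    return 0;
}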
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 5dea98358c05..cc4121605c53 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1878,7 +1878,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
+ /* No more submits can occur */
+ spin_lock_irq(&hcd_urb_list_lock);
+ rescan:
+- list_for_each_entry (urb, &ep->urb_list, urb_list) {
++ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
+ int is_in;
+
+ if (urb->unlinked)
+@@ -2475,6 +2475,8 @@ void usb_hc_died (struct usb_hcd *hcd)
+ }
+ if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
+ hcd = hcd->shared_hcd;
++ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
++ set_bit(HCD_FLAG_DEAD, &hcd->flags);
+ if (hcd->rh_registered) {
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index b8bb20d7acdb..0881a3e8131c 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4730,7 +4730,8 @@ hub_power_remaining(struct usb_hub *hub)
+ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ u16 portchange)
+ {
+- int status, i;
++ int status = -ENODEV;
++ int i;
+ unsigned unit_load;
+ struct usb_device *hdev = hub->hdev;
+ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+@@ -4934,9 +4935,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+
+ done:
+ hub_port_disable(hub, port1, 1);
+- if (hcd->driver->relinquish_port && !hub->hdev->parent)
+- hcd->driver->relinquish_port(hcd, port1);
+-
++ if (hcd->driver->relinquish_port && !hub->hdev->parent) {
++ if (status != -ENOTCONN && status != -ENODEV)
++ hcd->driver->relinquish_port(hcd, port1);
++ }
+ }
+
+ /* Handle physical or logical connection change events.
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 3116edfcdc18..574da2b4529c 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
++ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
++
+ /* Avision AV600U */
+ { USB_DEVICE(0x0638, 0x0a13), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Optical Mouse M90/M100 */
+ { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index cd4c88529721..9f3addfca744 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -758,21 +758,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
+ return usb3_req;
+ }
+
+-static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+- struct renesas_usb3_request *usb3_req, int status)
++static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
++ struct renesas_usb3_request *usb3_req,
++ int status)
+ {
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+- unsigned long flags;
+
+ dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
+ usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
+ status);
+ usb3_req->req.status = status;
+- spin_lock_irqsave(&usb3->lock, flags);
+ usb3_ep->started = false;
+ list_del_init(&usb3_req->queue);
+- spin_unlock_irqrestore(&usb3->lock, flags);
++ spin_unlock(&usb3->lock);
+ usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
++ spin_lock(&usb3->lock);
++}
++
++static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
++ struct renesas_usb3_request *usb3_req, int status)
++{
++ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
++ unsigned long flags;
++
++ spin_lock_irqsave(&usb3->lock, flags);
++ __usb3_request_done(usb3_ep, usb3_req, status);
++ spin_unlock_irqrestore(&usb3->lock, flags);
+ }
+
+ static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
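The renesas_usb3 hunk above splits request completion so usb3->lock is held for the bookkeeping but released across usb_gadget_giveback_request(), whose callback may re-enter the driver and take the same lock. A hypothetical pthread sketch of that unlock-call-relock pattern:

/* Completion callbacks may call back into the driver and take the
 * same lock, so they must run with the lock dropped. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

struct request {
    void (*complete)(struct request *req);
    int status;
};

/* Re-enters "driver" code that takes dev_lock again. */
static void complete_cb(struct request *req)
{
    pthread_mutex_lock(&dev_lock);      /* would self-deadlock if the */
    printf("status=%d\n", req->status); /* caller still held the lock */
    pthread_mutex_unlock(&dev_lock);
}

static void request_done_locked(struct request *req, int status)
{
    /* caller holds dev_lock */
    req->status = status;
    pthread_mutex_unlock(&dev_lock);    /* drop before calling out */
    req->complete(req);
    pthread_mutex_lock(&dev_lock);      /* reacquire for the caller */
}

int main(void)
{
    struct request req = { .complete = complete_cb };

    pthread_mutex_lock(&dev_lock);
    request_done_locked(&req, 0);
    pthread_mutex_unlock(&dev_lock);
    return 0;
}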
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index c8989c62a262..c8f38649f749 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -98,6 +98,7 @@ enum amd_chipset_gen {
+ AMD_CHIPSET_HUDSON2,
+ AMD_CHIPSET_BOLTON,
+ AMD_CHIPSET_YANGTZE,
++ AMD_CHIPSET_TAISHAN,
+ AMD_CHIPSET_UNKNOWN,
+ };
+
+@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+ else if (rev >= 0x40 && rev <= 0x4f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB800;
++ }
++ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
++ 0x145c, NULL);
++ if (pinfo->smbus_dev) {
++ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
+ {
+ /* Make sure amd chipset type has already been initialized */
+ usb_amd_find_chipset_info();
+- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
+- return 0;
+-
+- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+- return 1;
++ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
++ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
++ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
++ return 1;
++ }
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
+
+@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
+ }
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
++
++bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
++{
++ /*
++ * Our dear uPD72020{1,2} friend only partially resets when
++ * asked to via the XHCI interface, and may end up doing DMA
++ * at the wrong addresses, as it keeps the top 32 bits of some
++ * addresses from its previous programming under obscure
++ * circumstances.
++ * Give it a good whack at probe time. Unfortunately, this
++ * needs to happen before we've had a chance to discover any
++ * quirk, or the system will be in a rather bad state.
++ */
++ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
++ (pdev->device == 0x0014 || pdev->device == 0x0015))
++ return true;
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
+diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
+index 655994480198..5582cbafecd4 100644
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
+ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
+ void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
+ void sb800_prefetch(struct device *dev, int on);
++bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
+ #else
+ struct pci_dev;
+ static inline void usb_amd_quirk_pll_disable(void) {}
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 1ef622ededfd..cefa223f9f08 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -285,6 +285,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+
+ driver = (struct hc_driver *)id->driver_data;
+
++	/* For some HW implementations, an xHCI reset is just not enough... */
++ if (usb_xhci_needs_pci_reset(dev)) {
++ dev_info(&dev->dev, "Resetting\n");
++ if (pci_reset_function_locked(dev))
++ dev_warn(&dev->dev, "Reset failed");
++ }
++
+ /* Prevent runtime suspending between USB-2 and USB-3 initialization */
+ pm_runtime_get_noresume(&dev->dev);
+
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index dbe617a735d8..8bb57f76829d 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+ "Could not flush host TX%d fifo: csr: %04x\n",
+ ep->epnum, csr))
+ return;
++ mdelay(1);
+ }
+ }
+
+diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
+index d544b331c9f2..02b67abfc2a1 100644
+--- a/drivers/usb/renesas_usbhs/rcar3.c
++++ b/drivers/usb/renesas_usbhs/rcar3.c
+@@ -20,9 +20,13 @@
+ /* Low Power Status register (LPSTS) */
+ #define LPSTS_SUSPM 0x4000
+
+-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
++/*
++ * USB General control register 2 (UGCTRL2)
++ * Remarks: bit[31:11] and bit[9:6] should be 0
++ */
+ #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
+ #define UGCTRL2_USB0SEL_OTG 0x00000030
++#define UGCTRL2_VBUSSEL 0x00000400
+
+ static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
+ {
+@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
+ {
+ struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+
+- usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
++ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
++ UGCTRL2_VBUSSEL);
+
+ if (enable) {
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f64e914a8985..2d945c9f975c 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
++ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index ebe51f11105d..fe123153b1a5 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index c9ebefd8f35f..a585b477415d 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
++ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
++ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
+ { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 09d9be88209e..3b5a15d1dc0d 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -27,6 +27,7 @@
+ #define ATEN_VENDOR_ID 0x0557
+ #define ATEN_VENDOR_ID2 0x0547
+ #define ATEN_PRODUCT_ID 0x2008
++#define ATEN_PRODUCT_UC485 0x2021
+ #define ATEN_PRODUCT_ID2 0x2118
+
+ #define IODATA_VENDOR_ID 0x04bb
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index cbea9f329e71..cde115359793 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
+ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
+ "Initio Corporation",
+- "",
++ "INIC-3069",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+- US_FL_NO_ATA_1X),
++ US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
+
+ /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
+ UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 06615934fed1..0dceb9fa3a06 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
+ {
+ struct us_data *us = (struct us_data *)__us;
+ struct Scsi_Host *host = us_to_host(us);
++ struct scsi_cmnd *srb;
+
+ for (;;) {
+ usb_stor_dbg(us, "*** thread sleeping\n");
+@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
+ scsi_lock(host);
+
+ /* When we are called with no command pending, we're done */
++ srb = us->srb;
+ if (us->srb == NULL) {
+ scsi_unlock(host);
+ mutex_unlock(&us->dev_mutex);
+@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
+ /* lock access to the state */
+ scsi_lock(host);
+
+- /* indicate that the command is done */
+- if (us->srb->result != DID_ABORT << 16) {
+- usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+- us->srb->result);
+- us->srb->scsi_done(us->srb);
+- } else {
++ /* was the command aborted? */
++ if (us->srb->result == DID_ABORT << 16) {
+ SkipForAbort:
+ usb_stor_dbg(us, "scsi command aborted\n");
++ srb = NULL; /* Don't call srb->scsi_done() */
+ }
+
+ /*
+@@ -429,6 +428,13 @@ static int usb_stor_control_thread(void * __us)
+
+ /* unlock the device pointers */
+ mutex_unlock(&us->dev_mutex);
++
++ /* now that the locks are released, notify the SCSI core */
++ if (srb) {
++ usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
++ srb->result);
++ srb->scsi_done(srb);
++ }
+ } /* for (;;) */
+
+ /* Wait until we are told to stop */
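The usb-storage hunks above snapshot us->srb while the host lock is held, clear the snapshot if the command was aborted, and only invoke ->scsi_done() after both locks are dropped. A simplified userspace sketch of the capture-under-lock, notify-after-unlock pattern (the names and the abort marker are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct cmd { int result; void (*done)(struct cmd *); };

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *pending;

static void cmd_done(struct cmd *c) { printf("result=%d\n", c->result); }

static void service_one(void)
{
    struct cmd *c;

    pthread_mutex_lock(&host_lock);
    c = pending;                  /* snapshot while locked */
    pending = NULL;
    if (c && c->result == -1)     /* aborted: skip the callback */
        c = NULL;
    pthread_mutex_unlock(&host_lock);

    if (c)                        /* notify only after unlocking */
        c->done(c);
}

int main(void)
{
    struct cmd c = { .result = 0, .done = cmd_done };

    pthread_mutex_lock(&host_lock);
    pending = &c;
    pthread_mutex_unlock(&host_lock);
    service_one();
    return 0;
}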
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 3ee4fdc3da9e..76eac2a554c4 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
+ {
+ struct fuse_file *ff;
+
+- ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
++ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ if (unlikely(!ff))
+ return NULL;
+
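The one-line fuse fix above swaps kmalloc() for kzalloc() so every field of the new fuse_file starts out zeroed rather than holding stale heap contents. A tiny userspace analogue using calloc(), with an invented two-field struct:

#include <stdlib.h>
#include <assert.h>

struct fuse_file_like {
    void *reserved_lock;   /* must read as NULL until assigned */
    unsigned flags;        /* must read as 0 until set */
};

int main(void)
{
    /* malloc() would leave both members indeterminate */
    struct fuse_file_like *ff = calloc(1, sizeof(*ff));
    if (!ff)
        return 1;
    assert(ff->reserved_lock == NULL && ff->flags == 0);
    free(ff);
    return 0;
}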
+diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
+index 69d02cf8cf37..5f93cfacb3d1 100644
+--- a/fs/nfs/Kconfig
++++ b/fs/nfs/Kconfig
+@@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT
+ config PNFS_BLOCK
+ tristate
+ depends on NFS_V4_1 && BLK_DEV_DM
++ depends on 64BIT || LBDAF
+ default NFS_V4
+
+ config PNFS_FLEXFILE_LAYOUT
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 6df7a0cf5660..f32c58bbe556 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -32,6 +32,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
+ {
+ nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
+ nfs4_pnfs_ds_put(mirror_ds->ds);
++ kfree(mirror_ds->ds_versions);
+ kfree_rcu(mirror_ds, id_node.rcu);
+ }
+
+diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
+index 82f1cbcc4de1..1f53dc23aebe 100644
+--- a/fs/xfs/xfs_log_cil.c
++++ b/fs/xfs/xfs_log_cil.c
+@@ -519,6 +519,7 @@ xlog_discard_endio(
+
+ INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
+ queue_work(xfs_discard_wq, &ctx->discard_endio_work);
++ bio_put(bio);
+ }
+
+ static void
+diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
+index 497f2b3a5a62..97f1b465d04f 100644
+--- a/include/linux/iio/common/st_sensors.h
++++ b/include/linux/iio/common/st_sensors.h
+@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
+ struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
+ };
+
++struct st_sensor_sim {
++ u8 addr;
++ u8 value;
++};
++
+ /**
+ * struct st_sensor_bdu - ST sensor device block data update
+ * @addr: address of the register.
+@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
+ * @bdu: Block data update register.
+ * @das: Data Alignment Selection register.
+ * @drdy_irq: Data ready register of the sensor.
++ * @sim: SPI serial interface mode register of the sensor.
+ * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
+ * @bootime: samples to discard when sensor passing from power-down to power-up.
+ */
+@@ -213,6 +219,7 @@ struct st_sensor_settings {
+ struct st_sensor_bdu bdu;
+ struct st_sensor_das das;
+ struct st_sensor_data_ready_irq drdy_irq;
++ struct st_sensor_sim sim;
+ bool multi_read_bit;
+ unsigned int bootime;
+ };
+diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
+index 8f67b1581683..a3ebb64b1cf4 100644
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -638,10 +638,10 @@ struct nand_buffers {
+ * @tWW_min: WP# transition to WE# low
+ */
+ struct nand_sdr_timings {
+- u32 tBERS_max;
++ u64 tBERS_max;
+ u32 tCCS_min;
+- u32 tPROG_max;
+- u32 tR_max;
++ u64 tPROG_max;
++ u64 tR_max;
+ u32 tALH_min;
+ u32 tADL_min;
+ u32 tALS_min;
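The nand_sdr_timings hunk above widens tBERS_max, tPROG_max and tR_max from u32 to u64: these maxima are kept in picoseconds, and a block erase of a few milliseconds already exceeds the 32-bit range. A quick arithmetic check (the 10 ms figure is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t ps_per_ms = 1000000000ULL;     /* 1 ms in picoseconds */
    uint64_t tBERS_max = 10 * ps_per_ms;    /* e.g. a 10 ms block erase */

    printf("UINT32_MAX = %u ps (~%.2f ms)\n",
           UINT32_MAX, UINT32_MAX / (double)ps_per_ms);
    printf("tBERS_max  = %llu ps -> %s\n",
           (unsigned long long)tBERS_max,
           tBERS_max > UINT32_MAX ? "needs u64" : "fits u32");
    return 0;
}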
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 8039f9f0ca05..16be18678ca1 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1049,6 +1049,7 @@ void pcie_flr(struct pci_dev *dev);
+ int __pci_reset_function(struct pci_dev *dev);
+ int __pci_reset_function_locked(struct pci_dev *dev);
+ int pci_reset_function(struct pci_dev *dev);
++int pci_reset_function_locked(struct pci_dev *dev);
+ int pci_try_reset_function(struct pci_dev *dev);
+ int pci_probe_reset_slot(struct pci_slot *slot);
+ int pci_reset_slot(struct pci_slot *slot);
+diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
+index 79b0e4cdb814..f8274b0c6888 100644
+--- a/include/linux/platform_data/st_sensors_pdata.h
++++ b/include/linux/platform_data/st_sensors_pdata.h
+@@ -17,10 +17,12 @@
+ * Available only for accelerometer and pressure sensors.
+ * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * @open_drain: set the interrupt line to be open drain if possible.
++ * @spi_3wire: enable spi-3wire mode.
+ */
+ struct st_sensors_platform_data {
+ u8 drdy_int_pin;
+ bool open_drain;
++ bool spi_3wire;
+ };
+
+ #endif /* ST_SENSORS_PDATA_H */
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index 1628cc34b357..ed766dcb9cb7 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -787,6 +787,7 @@ struct iscsi_np {
+ int np_sock_type;
+ enum np_thread_state_table np_thread_state;
+ bool enabled;
++ atomic_t np_reset_count;
+ enum iscsi_timer_flags_table np_login_timer_flags;
+ u32 np_exports;
+ enum np_flags_table np_flags;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 357348a6cf6b..bb8b5a9fcdd5 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -670,13 +670,14 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ * this reference was taken by ihold under the page lock
+ * pinning the inode in place so i_lock was unnecessary. The
+ * only way for this check to fail is if the inode was
+- * truncated in parallel so warn for now if this happens.
++ * truncated in parallel, which is almost certainly an
++ * application bug. In such a case, just retry.
+ *
+ * We are not calling into get_futex_key_refs() in file-backed
+ * cases, therefore a successful atomic_inc return below will
+ * guarantee that get_futex_key() will still imply smp_mb(); (B).
+ */
+- if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
++ if (!atomic_inc_not_zero(&inode->i_count)) {
+ rcu_read_unlock();
+ put_page(page);
+
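The futex hunk above downgrades a WARN_ON_ONCE to a plain retry: if atomic_inc_not_zero() on i_count fails, the inode is already being freed and the lookup is simply redone. A self-contained C11 sketch of the inc-if-not-zero refcount primitive:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *count)
{
    int old = atomic_load(count);

    while (old != 0) {
        if (atomic_compare_exchange_weak(count, &old, old + 1))
            return true;      /* took a reference */
        /* old was reloaded by the failed CAS; retry */
    }
    return false;             /* object already dying: caller retries */
}

int main(void)
{
    atomic_int refs = 1;

    printf("%d\n", inc_not_zero(&refs));  /* 1: refs now 2 */
    atomic_store(&refs, 0);
    printf("%d\n", inc_not_zero(&refs));  /* 0: must redo the lookup */
    return 0;
}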
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2302f250d6b1..07569fa25760 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -7567,7 +7567,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
+
+ /* Make sure the range is really isolated. */
+ if (test_pages_isolated(outer_start, end, false)) {
+- pr_info("%s: [%lx, %lx) PFNs busy\n",
++ pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
+ __func__, outer_start, end);
+ ret = -EBUSY;
+ goto done;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index e67d6ba4e98e..1183e898743b 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1021,7 +1021,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
+ */
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+ spin_lock(&sbinfo->shrinklist_lock);
+- if (list_empty(&info->shrinklist)) {
++ /*
++	 * Use list_empty_careful() to defend against unlocked access to
++	 * ->shrinklist in shmem_unused_huge_shrink()
++ */
++ if (list_empty_careful(&info->shrinklist)) {
+ list_add_tail(&info->shrinklist,
+ &sbinfo->shrinklist);
+ sbinfo->shrinklist_len++;
+@@ -1817,7 +1821,11 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
+ * to shrink under memory pressure.
+ */
+ spin_lock(&sbinfo->shrinklist_lock);
+- if (list_empty(&info->shrinklist)) {
++ /*
++	 * Use list_empty_careful() to defend against unlocked access to
++	 * ->shrinklist in shmem_unused_huge_shrink()
++ */
++ if (list_empty_careful(&info->shrinklist)) {
+ list_add_tail(&info->shrinklist,
+ &sbinfo->shrinklist);
+ sbinfo->shrinklist_len++;
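The shmem hunks above use list_empty_careful(), which inspects both link pointers, so a concurrent half-finished deletion in shmem_unused_huge_shrink() is not misread as an empty list. A userspace re-implementation of the check (simplified to a single snapshot of ->next):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int list_empty(const struct list_head *h)
{
    return h->next == h;
}

static int list_empty_careful(const struct list_head *h)
{
    const struct list_head *next = h->next;
    /* empty only if both links point back at the head */
    return next == h && next == h->prev;
}

int main(void)
{
    /* simulate a deletion caught mid-flight: ->next already reset,
     * ->prev still pointing elsewhere */
    struct list_head other;
    struct list_head node;
    node.next = &node;
    node.prev = &other;

    printf("empty=%d careful=%d\n",
           list_empty(&node), list_empty_careful(&node));  /* 1 0 */
    return 0;
}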
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-08-25 11:00 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-08-25 11:00 UTC (permalink / raw
To: gentoo-commits
commit: 8911ed4b99ac4a15838eeaaf9bc095b327cb3d23
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 25 11:00:07 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 25 11:00:07 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8911ed4b
Linux patch 4.12.9
0000_README | 4 +
1008_linux-4.12.9.patch | 1644 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1648 insertions(+)
diff --git a/0000_README b/0000_README
index 47efe0d..90242d0 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-4.12.8.patch
From: http://www.kernel.org
Desc: Linux 4.12.8
+Patch: 1008_linux-4.12.9.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-4.12.9.patch b/1008_linux-4.12.9.patch
new file mode 100644
index 0000000..21e964c
--- /dev/null
+++ b/1008_linux-4.12.9.patch
@@ -0,0 +1,1644 @@
+diff --git a/Makefile b/Makefile
+index 6da481d08441..a6c2a5e7a48d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+index 559da17297ef..651299c242ec 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+@@ -507,7 +507,7 @@
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ /* PCIe reset */
+- MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0
++ MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x030b0
+ MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x030b0
+ >;
+ };
+@@ -668,7 +668,7 @@
+ &pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+- reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
++ reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
+index 4e6e88a6b2f4..2244a94ed9c9 100644
+--- a/arch/arm/include/asm/bug.h
++++ b/arch/arm/include/asm/bug.h
+@@ -37,7 +37,7 @@ do { \
+ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+ "2:\t.asciz " #__file "\n" \
+ ".popsection\n" \
+- ".pushsection __bug_table,\"a\"\n" \
++ ".pushsection __bug_table,\"aw\"\n" \
+ ".align 2\n" \
+ "3:\t.word 1b, 2b\n" \
+ "\t.hword " #__line ", 0\n" \
+diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
+index 366448eb0fb7..a02a57186f56 100644
+--- a/arch/arm64/include/asm/bug.h
++++ b/arch/arm64/include/asm/bug.h
+@@ -36,7 +36,7 @@
+ #ifdef CONFIG_GENERIC_BUG
+
+ #define __BUG_ENTRY(flags) \
+- ".pushsection __bug_table,\"a\"\n\t" \
++ ".pushsection __bug_table,\"aw\"\n\t" \
+ ".align 2\n\t" \
+ "0: .long 1f - 0b\n\t" \
+ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index acae781f7359..3288c2b36731 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -114,10 +114,10 @@
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+-#define ELF_ET_DYN_BASE 0x100000000UL
++#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
+index 8d9b1eba89c4..76b2e82ee730 100644
+--- a/arch/blackfin/include/asm/bug.h
++++ b/arch/blackfin/include/asm/bug.h
+@@ -21,7 +21,7 @@
+ #define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+- " .section __bug_table,\"a\",@progbits\n" \
++ " .section __bug_table,\"aw\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .long %1\n" \
+ " .short %2\n" \
+@@ -38,7 +38,7 @@
+ #define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+- " .section __bug_table,\"a\",@progbits\n" \
++ " .section __bug_table,\"aw\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .short %1\n" \
+ " .org 2b + %2\n" \
+diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h
+index aa6a38886391..811414fb002d 100644
+--- a/arch/mn10300/include/asm/bug.h
++++ b/arch/mn10300/include/asm/bug.h
+@@ -21,7 +21,7 @@ do { \
+ asm volatile( \
+ " syscall 15 \n" \
+ "0: \n" \
+- " .section __bug_table,\"a\" \n" \
++ " .section __bug_table,\"aw\" \n" \
+ " .long 0b,%0,%1 \n" \
+ " .previous \n" \
+ : \
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index d2742273a685..07ea467f22fc 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -27,7 +27,7 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+ "\t.short %c1, %c2\n" \
+ "\t.org 2b+%c3\n" \
+@@ -50,7 +50,7 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+ "\t.short %c1, %c2\n" \
+ "\t.org 2b+%c3\n" \
+@@ -64,7 +64,7 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t" ASM_WORD_INSN "1b\n" \
+ "\t.short %c0\n" \
+ "\t.org 2b+%c1\n" \
+diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
+index 0151af6c2a50..87fcc1948817 100644
+--- a/arch/powerpc/include/asm/bug.h
++++ b/arch/powerpc/include/asm/bug.h
+@@ -18,7 +18,7 @@
+ #include <asm/asm-offsets.h>
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+- .section __bug_table,"a"
++ .section __bug_table,"aw"
+ 5001: PPC_LONG \addr, 5002f
+ .short \line, \flags
+ .org 5001b+BUG_ENTRY_SIZE
+@@ -29,7 +29,7 @@
+ .endm
+ #else
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+- .section __bug_table,"a"
++ .section __bug_table,"aw"
+ 5001: PPC_LONG \addr
+ .short \flags
+ .org 5001b+BUG_ENTRY_SIZE
+@@ -42,14 +42,14 @@
+ sizeof(struct bug_entry), respectively */
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define _EMIT_BUG_ENTRY \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "2:\t" PPC_LONG "1b, %0\n" \
+ "\t.short %1, %2\n" \
+ ".org 2b+%3\n" \
+ ".previous\n"
+ #else
+ #define _EMIT_BUG_ENTRY \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "2:\t" PPC_LONG "1b\n" \
+ "\t.short %2\n" \
+ ".org 2b+%3\n" \
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 2ad725ef4368..318738f3aa05 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
+
+ cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+- if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
++ if (current->thread.regs &&
++ (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
+ check_if_tm_restore_required(current);
+ /*
+ * If a thread has already been reclaimed then the
+@@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
+ {
+ if (tsk->thread.regs) {
+ preempt_disable();
+- if (tsk->thread.regs->msr & MSR_VSX) {
++ if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
+ BUG_ON(tsk != current);
+ giveup_vsx(tsk);
+ }
+diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
+index 1bbd9dbfe4e0..ce9cc123988b 100644
+--- a/arch/s390/include/asm/bug.h
++++ b/arch/s390/include/asm/bug.h
+@@ -14,7 +14,7 @@
+ ".section .rodata.str,\"aMS\",@progbits,1\n" \
+ "2: .asciz \""__FILE__"\"\n" \
+ ".previous\n" \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "3: .long 1b-3b,2b-3b\n" \
+ " .short %0,%1\n" \
+ " .org 3b+%2\n" \
+@@ -30,7 +30,7 @@
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+- ".section __bug_table,\"a\"\n" \
++ ".section __bug_table,\"aw\"\n" \
+ "2: .long 1b-2b\n" \
+ " .short %0\n" \
+ " .org 2b+%1\n" \
+diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
+index 1b77f068be2b..986c8781d89f 100644
+--- a/arch/sh/include/asm/bug.h
++++ b/arch/sh/include/asm/bug.h
+@@ -24,14 +24,14 @@
+ */
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define _EMIT_BUG_ENTRY \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t.long 1b, %O1\n" \
+ "\t.short %O2, %O3\n" \
+ "\t.org 2b+%O4\n" \
+ "\t.popsection\n"
+ #else
+ #define _EMIT_BUG_ENTRY \
+- "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.pushsection __bug_table,\"aw\"\n" \
+ "2:\t.long 1b\n" \
+ "\t.short %O3\n" \
+ "\t.org 2b+%O4\n" \
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 0efb4c9497bc..ae1d55548f5a 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -94,6 +94,7 @@ config X86
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select GENERIC_TIME_VSYSCALL
++ select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
+ select HAVE_ACPI_APEI if ACPI
+ select HAVE_ACPI_APEI_NMI if ACPI
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+index 1cd792db15ef..1eab79c9ac48 100644
+--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
++++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+@@ -117,11 +117,10 @@
+ .set T1, REG_T1
+ .endm
+
+-#define K_BASE %r8
+ #define HASH_PTR %r9
++#define BLOCKS_CTR %r8
+ #define BUFFER_PTR %r10
+ #define BUFFER_PTR2 %r13
+-#define BUFFER_END %r11
+
+ #define PRECALC_BUF %r14
+ #define WK_BUF %r15
+@@ -205,14 +204,14 @@
+ * blended AVX2 and ALU instruction scheduling
+ * 1 vector iteration per 8 rounds
+ */
+- vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
++ vmovdqu (i * 2)(BUFFER_PTR), W_TMP
+ .elseif ((i & 7) == 1)
+- vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
++ vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
+ WY_TMP, WY_TMP
+ .elseif ((i & 7) == 2)
+ vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
+ .elseif ((i & 7) == 4)
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ .elseif ((i & 7) == 7)
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+@@ -255,7 +254,7 @@
+ vpxor WY, WY_TMP, WY_TMP
+ .elseif ((i & 7) == 7)
+ vpxor WY_TMP2, WY_TMP, WY
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+ PRECALC_ROTATE_WY
+@@ -291,7 +290,7 @@
+ vpsrld $30, WY, WY
+ vpor WY, WY_TMP, WY
+ .elseif ((i & 7) == 7)
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+ PRECALC_ROTATE_WY
+@@ -446,6 +445,16 @@
+
+ .endm
+
++/* Add constant %4 to %1 only if the (%2 >= %3) condition is met
++ * (uses RTA as temp): %1 = (%2 >= %3) ? %1 + %4 : %1
++ */
++.macro ADD_IF_GE a, b, c, d
++ mov \a, RTA
++ add $\d, RTA
++ cmp $\c, \b
++ cmovge RTA, \a
++.endm
++
+ /*
+ * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
+ */
+@@ -463,13 +472,16 @@
+ lea (2*4*80+32)(%rsp), WK_BUF
+
+ # Precalc WK for first 2 blocks
+- PRECALC_OFFSET = 0
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
+ .set i, 0
+ .rept 160
+ PRECALC i
+ .set i, i + 1
+ .endr
+- PRECALC_OFFSET = 128
++
++ /* Go to next block if needed */
++ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+ xchg WK_BUF, PRECALC_BUF
+
+ .align 32
+@@ -479,8 +491,8 @@ _loop:
+ * we use K_BASE value as a signal of a last block,
+ * it is set below by: cmovae BUFFER_PTR, K_BASE
+ */
+- cmp K_BASE, BUFFER_PTR
+- jne _begin
++ test BLOCKS_CTR, BLOCKS_CTR
++ jnz _begin
+ .align 32
+ jmp _end
+ .align 32
+@@ -512,10 +524,10 @@ _loop0:
+ .set j, j+2
+ .endr
+
+- add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
+- cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
+- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
+-
++ /* Update Counter */
++ sub $1, BLOCKS_CTR
++	/* Move to the next block only if needed */
++ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
+ /*
+ * rounds
+ * 60,62,64,66,68
+@@ -532,8 +544,8 @@ _loop0:
+ UPDATE_HASH 12(HASH_PTR), D
+ UPDATE_HASH 16(HASH_PTR), E
+
+- cmp K_BASE, BUFFER_PTR /* is current block the last one? */
+- je _loop
++ test BLOCKS_CTR, BLOCKS_CTR
++ jz _loop
+
+ mov TB, B
+
+@@ -575,10 +587,10 @@ _loop2:
+ .set j, j+2
+ .endr
+
+- add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
+-
+- cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
+- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
++ /* update counter */
++ sub $1, BLOCKS_CTR
++	/* Move to the next block only if needed */
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+
+ jmp _loop3
+ _loop3:
+@@ -641,19 +653,12 @@ _loop3:
+
+ avx2_zeroupper
+
+- lea K_XMM_AR(%rip), K_BASE
+-
++ /* Setup initial values */
+ mov CTX, HASH_PTR
+ mov BUF, BUFFER_PTR
+- lea 64(BUF), BUFFER_PTR2
+-
+- shl $6, CNT /* mul by 64 */
+- add BUF, CNT
+- add $64, CNT
+- mov CNT, BUFFER_END
+
+- cmp BUFFER_END, BUFFER_PTR2
+- cmovae K_BASE, BUFFER_PTR2
++ mov BUF, BUFFER_PTR2
++ mov CNT, BLOCKS_CTR
+
+ xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
+
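The ADD_IF_GE macro introduced above is a branchless conditional advance: the mov/add/cmp/cmovge sequence leaves the pointer untouched unless enough blocks remain. Its C equivalent, with illustrative operand values matching the 128-byte two-block stride:

#include <stdio.h>
#include <stdint.h>

static uintptr_t add_if_ge(uintptr_t a, long b, long c, long d)
{
    /* a = (b >= c) ? a + d : a  -- mov/add/cmp/cmovge in the macro */
    return b >= c ? a + d : a;
}

int main(void)
{
    uintptr_t buf = 0x1000;
    long blocks = 3;

    buf = add_if_ge(buf, blocks, 4, 128);  /* 3 < 4: unchanged */
    printf("%#lx\n", (unsigned long)buf);
    blocks = 4;
    buf = add_if_ge(buf, blocks, 4, 128);  /* 4 >= 4: advance 128 */
    printf("%#lx\n", (unsigned long)buf);
    return 0;
}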
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index f960a043cdeb..fc61739150e7 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
+
+ static bool avx2_usable(void)
+ {
+- if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
++ if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ && boot_cpu_has(X86_FEATURE_BMI1)
+ && boot_cpu_has(X86_FEATURE_BMI2))
+ return true;
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 4a4c0834f965..22f2281b942b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1209,6 +1209,8 @@ ENTRY(nmi)
+ * other IST entries.
+ */
+
++ ASM_CLAC
++
+ /* Use %rdx as our temp variable throughout */
+ pushq %rdx
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 580b60f5ac83..c138835c5547 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2105,7 +2105,7 @@ static void refresh_pce(void *ignored)
+ load_mm_cr4(current->active_mm);
+ }
+
+-static void x86_pmu_event_mapped(struct perf_event *event)
++static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+ {
+ if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ return;
+@@ -2120,22 +2120,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
+ * For now, this can't happen because all callers hold mmap_sem
+ * for write. If this changes, we'll need a different solution.
+ */
+-	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
++ lockdep_assert_held_exclusive(&mm->mmap_sem);
+
+-	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
+- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
++ if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
++ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ }
+
+-static void x86_pmu_event_unmapped(struct perf_event *event)
++static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+ {
+- if (!current->mm)
+- return;
+
+ if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ return;
+
+-	if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
+- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
++ if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
++ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ }
+
+ static int x86_pmu_event_idx(struct perf_event *event)
+diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
+index 39e702d90cdb..aa6b2023d8f8 100644
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -35,7 +35,7 @@
+ #define _BUG_FLAGS(ins, flags) \
+ do { \
+ asm volatile("1:\t" ins "\n" \
+- ".pushsection __bug_table,\"a\"\n" \
++ ".pushsection __bug_table,\"aw\"\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
+ "\t.word %c1" "\t# bug_entry::line\n" \
+@@ -52,7 +52,7 @@ do { \
+ #define _BUG_FLAGS(ins, flags) \
+ do { \
+ asm volatile("1:\t" ins "\n" \
+- ".pushsection __bug_table,\"a\"\n" \
++ ".pushsection __bug_table,\"aw\"\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t.word %c0" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c1\n" \
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 1c18d83d3f09..9aeb91935ce0 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -247,11 +247,11 @@ extern int force_personality32;
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+- 0x100000000UL)
++ (TASK_SIZE / 3 * 2))
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
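A worked evaluation of the new ELF_ET_DYN_BASE formula: two thirds of TASK_SIZE lands well above 4 GiB and, on x86-64, at the familiar 0x5555... PIE base. The TASK_SIZE value below assumes 4-level paging (47-bit user space):

#include <stdio.h>

int main(void)
{
    unsigned long task_size = 0x00007ffffffff000UL;  /* x86-64 */
    unsigned long base = task_size / 3 * 2;

    printf("ELF_ET_DYN_BASE = %#lx\n", base);  /* 0x555555554aaa */
    return 0;
}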
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 19ad095b41df..81db3e92dc76 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void)
+ static unsigned long stack_maxrandom_size(unsigned long task_size)
+ {
+ unsigned long max = 0;
+- if ((current->flags & PF_RANDOMIZE) &&
+- !(current->personality & ADDR_NO_RANDOMIZE)) {
++ if (current->flags & PF_RANDOMIZE) {
+ max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
+ max <<= PAGE_SHIFT;
+ }
+@@ -82,13 +81,13 @@ static int mmap_is_legacy(void)
+
+ static unsigned long arch_rnd(unsigned int rndbits)
+ {
++ if (!(current->flags & PF_RANDOMIZE))
++ return 0;
+ return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
+ }
+
+ unsigned long arch_mmap_rnd(void)
+ {
+- if (!(current->flags & PF_RANDOMIZE))
+- return 0;
+ return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
+ }
+
+diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
+index 0c3354cf3552..76944e3271bf 100644
+--- a/block/blk-mq-pci.c
++++ b/block/blk-mq-pci.c
+@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = pci_irq_get_affinity(pdev, queue);
+ if (!mask)
+- return -EINVAL;
++ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
++
++fallback:
++ WARN_ON_ONCE(set->nr_hw_queues > 1);
++ for_each_possible_cpu(cpu)
++ set->mq_map[cpu] = 0;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
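Rather than failing the probe when pci_irq_get_affinity() returns no mask, the blk-mq-pci hunk above falls back to mapping every CPU onto queue 0. A standalone sketch of that mapping logic, with made-up CPU counts and bitmask-style affinity masks:

#include <stdio.h>

#define NR_CPUS 8

static int map_queues(int nr_queues, const unsigned long *affinity,
                      int *mq_map)
{
    int queue, cpu;

    for (queue = 0; queue < nr_queues; queue++) {
        if (!affinity[queue])
            goto fallback;          /* missing mask: don't error out */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (affinity[queue] & (1UL << cpu))
                mq_map[cpu] = queue;
    }
    return 0;

fallback:
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        mq_map[cpu] = 0;            /* a single shared queue still works */
    return 0;
}

int main(void)
{
    unsigned long affinity[2] = { 0x0fUL, 0 };  /* queue 1 lost its mask */
    int mq_map[NR_CPUS];

    map_queues(2, affinity, mq_map);
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d -> q%d\n", cpu, mq_map[cpu]);
    return 0;
}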
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 39459631667c..b49547c5f2c2 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -2119,9 +2119,9 @@ static int blkfront_resume(struct xenbus_device *dev)
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
+- req_op(shadow[i].request) == REQ_OP_DISCARD ||
+- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
++ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
++ req_op(shadow[j].request) == REQ_OP_DISCARD ||
++ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
+ shadow[j].request->cmd_flags & REQ_FUA) {
+ /*
+ * Flush operations don't contain bios, so
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 771dd26c7076..6719e346b790 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -1074,7 +1074,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+ &crypt->icv_rev_aes);
+ if (unlikely(!req_ctx->hmac_virt))
+- goto free_buf_src;
++ goto free_buf_dst;
+ if (!encrypt) {
+ scatterwalk_map_and_copy(req_ctx->hmac_virt,
+ req->src, cryptlen, authsize, 0);
+@@ -1089,10 +1089,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ BUG_ON(qmgr_stat_overflow(SEND_QID));
+ return -EINPROGRESS;
+
+-free_buf_src:
+- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_dst:
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
++free_buf_src:
++ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+ return -ENOMEM;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index ed814e6d0207..28c1112e520c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct dma_fence *f = e->fence;
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
++ if (dma_fence_is_signaled(f)) {
++ hash_del(&e->node);
++ dma_fence_put(f);
++ kmem_cache_free(amdgpu_sync_slab, e);
++ continue;
++ }
+ if (ring && s_fence) {
+ /* For fences from the same ring it is sufficient
+ * when they are scheduled.
+@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ }
+ }
+
+- if (dma_fence_is_signaled(f)) {
+- hash_del(&e->node);
+- dma_fence_put(f);
+- kmem_cache_free(amdgpu_sync_slab, e);
+- continue;
+- }
+-
+ return f;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
+index 7032c542a9b1..4dd4c2159a92 100644
+--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
++++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
+@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+ goto err_unpin;
+ }
+
++ ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
++ if (ret)
++ goto err_unpin;
++
+ ret = req->engine->emit_bb_start(req,
+ so->batch_offset, so->batch_size,
+ I915_DISPATCH_SECURE);
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index d5ab9ddef3e3..3b0e9fb33afe 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1224,6 +1224,10 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN0605", 0 },
++ { "ELAN0608", 0 },
++ { "ELAN0605", 0 },
++ { "ELAN0609", 0 },
++ { "ELAN060B", 0 },
+ { "ELAN1000", 0 },
+ { }
+ };
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index 28b26c80f4cf..056507099725 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -142,9 +142,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
+ struct device_node *np;
+ void __iomem *regs;
+
+- np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
++ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
+ if (!np)
+- np = of_find_compatible_node(root, NULL,
++ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9x5-rtc");
+
+ if (!np)
+@@ -196,7 +196,6 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
+ return;
+
+ match = of_match_node(matches, root);
+- of_node_put(root);
+
+ if (match) {
+ void (*fixup)(struct device_node *) = match->data;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d7847014821a..caca5d689cdc 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7979,7 +7979,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
+ if (mddev->safemode == 1)
+ mddev->safemode = 0;
+ /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
+- if (mddev->in_sync || !mddev->sync_checkers) {
++ if (mddev->in_sync || mddev->sync_checkers) {
+ spin_lock(&mddev->lock);
+ if (mddev->in_sync) {
+ mddev->in_sync = 0;
+@@ -8639,6 +8639,9 @@ void md_check_recovery(struct mddev *mddev)
+ if (mddev_trylock(mddev)) {
+ int spares = 0;
+
++ if (!mddev->external && mddev->safemode == 1)
++ mddev->safemode = 0;
++
+ if (mddev->ro) {
+ struct md_rdev *rdev;
+ if (!mddev->external && mddev->in_sync)
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index c42153a985be..473f91322368 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
++ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 5c63b920b471..ed92c1254cff 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)
+
+ dino_dev->hba.dev = dev;
+ dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+- dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
++ dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
+ spin_lock_init(&dino_dev->dinosaur_pen);
+ dino_dev->hba.iommu = ccio_get_iommu(dev);
+
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 2776cfe64c09..ef9cf4a21afe 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -127,6 +127,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
+ */
+ #define USB_ACPI_LOCATION_VALID (1 << 31)
+
++static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
++ int raw)
++{
++ struct acpi_device *adev;
++
++ if (!parent)
++ return NULL;
++
++ list_for_each_entry(adev, &parent->children, node) {
++ if (acpi_device_adr(adev) == raw)
++ return adev;
++ }
++
++ return acpi_find_child_device(parent, raw, false);
++}
++
+ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ {
+ struct usb_device *udev;
+@@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ int raw;
+
+ raw = usb_hcd_find_raw_port_number(hcd, port1);
+- adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
+- raw, false);
++
++ adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
++ raw);
++
+ if (!adev)
+ return NULL;
+ } else {
+@@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ return NULL;
+
+ acpi_bus_get_device(parent_handle, &adev);
+- adev = acpi_find_child_device(adev, port1, false);
++
++ adev = usb_acpi_find_port(adev, port1);
++
+ if (!adev)
+ return NULL;
+ }
+diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
+index 4da69dbf7dca..1bdd02a6d6ac 100644
+--- a/drivers/xen/biomerge.c
++++ b/drivers/xen/biomerge.c
+@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+
+- return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
+- ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
++ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
+ #else
+ /*
+ * XXX: Add support for merging bio_vec when using different page
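
The merge test above reduces to frame arithmetic: two segments are contiguous in the backend iff the first ends exactly at the frame where the second begins, which also handles segments spanning several frames (the old check only accepted equal or adjacent start frames). A rough userspace illustration, with PFN_DOWN and the frame numbers as simplified stand-ins for the kernel's:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
        /* Hypothetical backend frame numbers for two buffer segments. */
        unsigned long bfn1 = 100, off1 = 0, len1 = 8192; /* frames 100-101 */
        unsigned long bfn2 = 102;                        /* starts at 102 */

        /* Mergeable iff vec2 starts in the frame right after vec1 ends. */
        int mergeable = (bfn1 + PFN_DOWN(off1 + len1) == bfn2);
        printf("mergeable: %d\n", mergeable); /* prints 1 */
        return 0;
    }
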
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 7465c3ea5dd5..9867eda73769 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -666,8 +666,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+ unsigned long random_variable = 0;
+
+- if ((current->flags & PF_RANDOMIZE) &&
+- !(current->personality & ADDR_NO_RANDOMIZE)) {
++ if (current->flags & PF_RANDOMIZE) {
+ random_variable = get_random_long();
+ random_variable &= STACK_RND_MASK;
+ random_variable <<= PAGE_SHIFT;
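
The remaining logic is a masked random value scaled to pages; the dropped personality test is redundant because setup_new_exec() only sets PF_RANDOMIZE when ADDR_NO_RANDOMIZE is clear in the first place. A sketch of the arithmetic, assuming x86-64's 22-bit STACK_RND_MASK and 4 KiB pages:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define STACK_RND_MASK 0x3fffffUL /* assumed x86-64 value: 22 bits */

    int main(void)
    {
        unsigned long off =
            ((unsigned long)random() & STACK_RND_MASK) << PAGE_SHIFT;

        /* 22 random bits shifted by 12 give offsets up to ~16 GiB. */
        printf("max offset: %#lx\n", STACK_RND_MASK << PAGE_SHIFT);
        printf("sample offset: %#lx\n", off);
        return 0;
    }
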
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 8098695e5d8d..2526c501622f 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -65,6 +65,7 @@ extern bool movable_node_enabled;
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+ #define __init_memblock __meminit
+ #define __initdata_memblock __meminitdata
++void memblock_discard(void);
+ #else
+ #define __init_memblock
+ #define __initdata_memblock
+@@ -78,8 +79,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
+ int nid, ulong flags);
+ phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align);
+-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
+-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
+ void memblock_allow_resize(void);
+ int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+ int memblock_add(phys_addr_t base, phys_addr_t size);
+@@ -114,6 +113,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+ phys_addr_t *out_end);
+
++void __memblock_free_early(phys_addr_t base, phys_addr_t size);
++void __memblock_free_late(phys_addr_t base, phys_addr_t size);
++
+ /**
+ * for_each_mem_range - iterate through memblock areas from type_a and not
+ * included in type_b. Or just type_a if type_b is NULL.
+diff --git a/include/linux/nmi.h b/include/linux/nmi.h
+index aa3cd0878270..a8d4fc3356d2 100644
+--- a/include/linux/nmi.h
++++ b/include/linux/nmi.h
+@@ -155,6 +155,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
+ #define sysctl_softlockup_all_cpu_backtrace 0
+ #define sysctl_hardlockup_all_cpu_backtrace 0
+ #endif
++
++#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
++ defined(CONFIG_HARDLOCKUP_DETECTOR)
++void watchdog_update_hrtimer_threshold(u64 period);
++#else
++static inline void watchdog_update_hrtimer_threshold(u64 period) { }
++#endif
++
+ extern bool is_hardlockup(void);
+ struct ctl_table;
+ extern int proc_watchdog(struct ctl_table *, int ,
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 24a635887f28..fc32347473a9 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -310,8 +310,8 @@ struct pmu {
+ * Notification that the event was mapped or unmapped. Called
+ * in the context of the mapping task.
+ */
+- void (*event_mapped) (struct perf_event *event); /*optional*/
+- void (*event_unmapped) (struct perf_event *event); /*optional*/
++ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
++ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
+
+ /*
+ * Flags for ->add()/->del()/ ->start()/->stop(). There are
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 4d179316e431..719582744a2e 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -8,7 +8,9 @@ enum pid_type
+ PIDTYPE_PID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+- PIDTYPE_MAX
++ PIDTYPE_MAX,
++ /* only valid to __task_pid_nr_ns() */
++ __PIDTYPE_TGID
+ };
+
+ /*
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 7f2a1eff2997..35f4517eeba9 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1132,13 +1132,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
+ return tsk->tgid;
+ }
+
+-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+-
+-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+-{
+- return pid_vnr(task_tgid(tsk));
+-}
+-
+ /**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+@@ -1154,23 +1147,6 @@ static inline int pid_alive(const struct task_struct *p)
+ return p->pids[PIDTYPE_PID].pid != NULL;
+ }
+
+-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+-{
+- pid_t pid = 0;
+-
+- rcu_read_lock();
+- if (pid_alive(tsk))
+- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+- rcu_read_unlock();
+-
+- return pid;
+-}
+-
+-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+-{
+- return task_ppid_nr_ns(tsk, &init_pid_ns);
+-}
+-
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+ {
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+@@ -1192,6 +1168,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+ }
+
++static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
++{
++ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
++}
++
++static inline pid_t task_tgid_vnr(struct task_struct *tsk)
++{
++ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
++}
++
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
++{
++ pid_t pid = 0;
++
++ rcu_read_lock();
++ if (pid_alive(tsk))
++ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
++ rcu_read_unlock();
++
++ return pid;
++}
++
++static inline pid_t task_ppid_nr(const struct task_struct *tsk)
++{
++ return task_ppid_nr_ns(tsk, &init_pid_ns);
++}
++
+ /* Obsolete, do not use: */
+ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+ {
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 62d686d96581..ed748ee40029 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule)
+ list_del(&krule->rlist);
+
+ if (list_empty(&watch->rules)) {
++ /*
++ * audit_remove_watch() drops our reference to 'parent' which
++ * can get freed. Grab our own reference to be safe.
++ */
++ audit_get_parent(parent);
+ audit_remove_watch(watch);
+-
+- if (list_empty(&parent->watches)) {
+- audit_get_parent(parent);
++ if (list_empty(&parent->watches))
+ fsnotify_destroy_mark(&parent->mark, audit_watch_group);
+- audit_put_parent(parent);
+- }
++ audit_put_parent(parent);
+ }
+ }
+
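
The ordering above is the general rule for reference-counted teardown: before calling a function that may drop the last reference to an object you still need (audit_remove_watch() can free 'parent'), pin the object with a reference of your own and release it when done. A minimal userspace sketch of the idiom, with illustrative names rather than audit's:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refcount; };

    static void get_obj(struct obj *o) { o->refcount++; }

    static void put_obj(struct obj *o)
    {
        if (--o->refcount == 0) {
            printf("freeing object\n");
            free(o);
        }
    }

    /* May drop the caller's only reference as a side effect. */
    static void remove_child(struct obj *parent) { put_obj(parent); }

    int main(void)
    {
        struct obj *parent = calloc(1, sizeof(*parent));
        parent->refcount = 1;

        get_obj(parent);      /* pin it across the call below */
        remove_child(parent); /* may drop the original reference */
        /* still safe to touch parent here */
        put_obj(parent);      /* drop our pin; frees it now */
        return 0;
    }
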
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index d7f726747341..dbb3d273d497 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5084,7 +5084,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
+ atomic_inc(&event->rb->aux_mmap_count);
+
+ if (event->pmu->event_mapped)
+- event->pmu->event_mapped(event);
++ event->pmu->event_mapped(event, vma->vm_mm);
+ }
+
+ static void perf_pmu_output_stop(struct perf_event *event);
+@@ -5107,7 +5107,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ unsigned long size = perf_data_size(rb);
+
+ if (event->pmu->event_unmapped)
+- event->pmu->event_unmapped(event);
++ event->pmu->event_unmapped(event, vma->vm_mm);
+
+ /*
+ * rb->aux_mmap_count will always drop before rb->mmap_count and
+@@ -5405,7 +5405,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ vma->vm_ops = &perf_mmap_vmops;
+
+ if (event->pmu->event_mapped)
+- event->pmu->event_mapped(event);
++ event->pmu->event_mapped(event, vma->vm_mm);
+
+ return ret;
+ }
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index c94da688ee9b..cdf94ce959d8 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -898,13 +898,15 @@ EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
+
+ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+ {
+- unsigned long flags;
++ unsigned long flags, trigger, tmp;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+ if (!desc)
+ return;
+ irq_settings_clr_and_set(desc, clr, set);
+
++ trigger = irqd_get_trigger_type(&desc->irq_data);
++
+ irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+ IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+ if (irq_settings_has_no_balance_set(desc))
+@@ -916,7 +918,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+ if (irq_settings_is_level(desc))
+ irqd_set(&desc->irq_data, IRQD_LEVEL);
+
+- irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
++ tmp = irq_settings_get_trigger_mask(desc);
++ if (tmp != IRQ_TYPE_NONE)
++ trigger = tmp;
++
++ irqd_set(&desc->irq_data, trigger);
+
+ irq_put_desc_unlock(desc, flags);
+ }
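
Previously the trigger bits were rewritten unconditionally from the settings mask, so a caller that supplied no trigger information (IRQ_TYPE_NONE) wiped a trigger that was already programmed. The decision the fix implements is small enough to state directly:

    #include <stdio.h>

    #define IRQ_TYPE_NONE 0

    /* Keep the previously programmed trigger unless the new
     * settings actually specify one. */
    static unsigned long pick_trigger(unsigned long old, unsigned long new)
    {
        return new != IRQ_TYPE_NONE ? new : old;
    }

    int main(void)
    {
        printf("%lu\n", pick_trigger(2 /* falling */, IRQ_TYPE_NONE)); /* 2 */
        printf("%lu\n", pick_trigger(2, 4 /* level-high */));          /* 4 */
        return 0;
    }
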
+diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
+index 1a9abc1c8ea0..259a22aa9934 100644
+--- a/kernel/irq/ipi.c
++++ b/kernel/irq/ipi.c
+@@ -165,7 +165,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+
+- if (!data || !ipimask || cpu > nr_cpu_ids)
++ if (!data || !ipimask || cpu >= nr_cpu_ids)
+ return INVALID_HWIRQ;
+
+ if (!cpumask_test_cpu(cpu, ipimask))
+@@ -195,7 +195,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+ if (!chip->ipi_send_single && !chip->ipi_send_mask)
+ return -EINVAL;
+
+- if (cpu > nr_cpu_ids)
++ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (dest) {
+diff --git a/kernel/pid.c b/kernel/pid.c
+index fd1cde1e4576..eeb892e728f8 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -527,8 +527,11 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ if (!ns)
+ ns = task_active_pid_ns(current);
+ if (likely(pid_alive(task))) {
+- if (type != PIDTYPE_PID)
++ if (type != PIDTYPE_PID) {
++ if (type == __PIDTYPE_TGID)
++ type = PIDTYPE_PID;
+ task = task->group_leader;
++ }
+ nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
+ }
+ rcu_read_unlock();
+@@ -537,12 +540,6 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ }
+ EXPORT_SYMBOL(__task_pid_nr_ns);
+
+-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+-{
+- return pid_nr_ns(task_tgid(tsk), ns);
+-}
+-EXPORT_SYMBOL(task_tgid_nr_ns);
+-
+ struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
+ {
+ return ns_of_pid(task_pid(tsk));
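
__PIDTYPE_TGID never indexes task->pids[]; __task_pid_nr_ns() rewrites it to a PIDTYPE_PID lookup on the thread-group leader while still inside the rcu_read_lock() section, which is what makes the new task_tgid_nr_ns() safe against the leader being reaped. The dispatch, reduced to a self-contained sketch with made-up pid values:

    #include <stdio.h>

    enum pid_type { PIDTYPE_PID, PIDTYPE_PGID, PIDTYPE_SID, PIDTYPE_MAX,
                    __PIDTYPE_TGID }; /* never a valid pids[] index */

    struct task { struct task *group_leader; int pids[PIDTYPE_MAX]; };

    /* A TGID query becomes a PID query on the thread-group leader. */
    static int pid_nr(struct task *t, enum pid_type type)
    {
        if (type != PIDTYPE_PID) {
            if (type == __PIDTYPE_TGID)
                type = PIDTYPE_PID;
            t = t->group_leader;
        }
        return t->pids[type];
    }

    int main(void)
    {
        struct task leader = { .group_leader = &leader,
                               .pids = { 100, 100, 100 } };
        struct task thread = { .group_leader = &leader,
                               .pids = { 101, 100, 100 } };
        printf("tid %d tgid %d\n", pid_nr(&thread, PIDTYPE_PID),
               pid_nr(&thread, __PIDTYPE_TGID)); /* tid 101 tgid 100 */
        return 0;
    }
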
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 03e0b69bb5bf..b8e938c7273f 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -161,6 +161,7 @@ static void set_sample_period(void)
+ * hardlockup detector generates a warning
+ */
+ sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
++ watchdog_update_hrtimer_threshold(sample_period);
+ }
+
+ /* Commands for resetting the watchdog */
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 54a427d1f344..cd0986b69cbc 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -70,6 +70,62 @@ void touch_nmi_watchdog(void)
+ }
+ EXPORT_SYMBOL(touch_nmi_watchdog);
+
++#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
++static DEFINE_PER_CPU(ktime_t, last_timestamp);
++static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
++static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
++
++void watchdog_update_hrtimer_threshold(u64 period)
++{
++ /*
++ * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
++ *
++ * So it runs effectively with 2.5 times the rate of the NMI
++ * watchdog. That means the hrtimer should fire 2-3 times before
++ * the NMI watchdog expires. The NMI watchdog on x86 is based on
++ * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
++ * might run way faster than expected and the NMI fires in a
++ * smaller period than the one deduced from the nominal CPU
++ * frequency. Depending on the Turbo-Mode factor this might be fast
++ * enough to get the NMI period smaller than the hrtimer watchdog
++ * period and trigger false positives.
++ *
++ * The sample threshold is used to check in the NMI handler whether
++ * the minimum time between two NMI samples has elapsed. That
++ * prevents false positives.
++ *
++ * Set this to 4/5 of the actual watchdog threshold period so the
++ * hrtimer is guaranteed to fire at least once within the real
++ * watchdog threshold.
++ */
++ watchdog_hrtimer_sample_threshold = period * 2;
++}
++
++static bool watchdog_check_timestamp(void)
++{
++ ktime_t delta, now = ktime_get_mono_fast_ns();
++
++ delta = now - __this_cpu_read(last_timestamp);
++ if (delta < watchdog_hrtimer_sample_threshold) {
++ /*
++ * If ktime is jiffies based, a stalled timer would prevent
++ * jiffies from being incremented and the filter would look
++ * at a stale timestamp and never trigger.
++ */
++ if (__this_cpu_inc_return(nmi_rearmed) < 10)
++ return false;
++ }
++ __this_cpu_write(nmi_rearmed, 0);
++ __this_cpu_write(last_timestamp, now);
++ return true;
++}
++#else
++static inline bool watchdog_check_timestamp(void)
++{
++ return true;
++}
++#endif
++
+ static struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+@@ -94,6 +150,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
+ return;
+ }
+
++ if (!watchdog_check_timestamp())
++ return;
++
+ /* check for a hardlockup
+ * This is done by making sure our timer interrupt
+ * is incrementing. The timer interrupt should have
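
The numbers behave as the comment above promises. With the default 10 s hard-lockup threshold, the softlockup threshold is 20 s, the hrtimer period comes out at 4 s, and the NMI low-pass threshold at 8 s, i.e. 4/5 of the 10 s window, so at least one hrtimer tick always fits between two accepted NMI samples:

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        unsigned long long watchdog_thresh = 10;               /* default, s */
        unsigned long long soft_thresh = watchdog_thresh * 2;  /* 20 s */
        unsigned long long sample_period =
            soft_thresh * (NSEC_PER_SEC / 5);                  /* 4 s */
        unsigned long long nmi_threshold = sample_period * 2;  /* 8 s */

        printf("hrtimer period: %llu s\n", sample_period / NSEC_PER_SEC);
        printf("NMI sample threshold: %llu s (4/5 of %llu s)\n",
               nmi_threshold / NSEC_PER_SEC, watchdog_thresh);
        return 0;
    }
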
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index e4587ebe52c7..1f1cb51005de 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -344,6 +344,13 @@ config SECTION_MISMATCH_WARN_ONLY
+
+ If unsure, say Y.
+
++#
++# Enables a timestamp based low pass filter to compensate for perf based
++# hard lockup detection which runs too fast due to turbo modes.
++#
++config HARDLOCKUP_CHECK_TIMESTAMP
++ bool
++
+ #
+ # Select this config option from the architecture Kconfig, if it
+ # is preferred to always offer frame pointers as a config
+diff --git a/mm/cma_debug.c b/mm/cma_debug.c
+index 595b757bef72..c03ccbc405a0 100644
+--- a/mm/cma_debug.c
++++ b/mm/cma_debug.c
+@@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
+ char name[16];
+ int u32s;
+
+- sprintf(name, "cma-%s", cma->name);
++ scnprintf(name, sizeof(name), "cma-%s", cma->name);
+
+ tmp = debugfs_create_dir(name, cma_debugfs_root);
+
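
Nothing bounds cma->name to the 16-byte stack buffer, so the plain sprintf() could overflow it; scnprintf() truncates at the buffer size instead. The userspace analogue is snprintf():

    #include <stdio.h>

    int main(void)
    {
        char name[16];
        const char *cma_name = "a-rather-long-region-name";

        /* Truncates safely instead of writing past name[15]. */
        snprintf(name, sizeof(name), "cma-%s", cma_name);
        printf("%s\n", name); /* "cma-a-rather-lo" */
        return 0;
    }
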
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 7b8a5db76a2f..7087d5578866 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -288,31 +288,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
+ }
+
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+-
+-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+- phys_addr_t *addr)
+-{
+- if (memblock.reserved.regions == memblock_reserved_init_regions)
+- return 0;
+-
+- *addr = __pa(memblock.reserved.regions);
+-
+- return PAGE_ALIGN(sizeof(struct memblock_region) *
+- memblock.reserved.max);
+-}
+-
+-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
+- phys_addr_t *addr)
++/**
++ * Discard memory and reserved arrays if they were allocated
++ */
++void __init memblock_discard(void)
+ {
+- if (memblock.memory.regions == memblock_memory_init_regions)
+- return 0;
++ phys_addr_t addr, size;
+
+- *addr = __pa(memblock.memory.regions);
++ if (memblock.reserved.regions != memblock_reserved_init_regions) {
++ addr = __pa(memblock.reserved.regions);
++ size = PAGE_ALIGN(sizeof(struct memblock_region) *
++ memblock.reserved.max);
++ __memblock_free_late(addr, size);
++ }
+
+- return PAGE_ALIGN(sizeof(struct memblock_region) *
+- memblock.memory.max);
++	if (memblock.memory.regions != memblock_memory_init_regions) {
++ addr = __pa(memblock.memory.regions);
++ size = PAGE_ALIGN(sizeof(struct memblock_region) *
++ memblock.memory.max);
++ __memblock_free_late(addr, size);
++ }
+ }
+-
+ #endif
+
+ /**
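
Both branches follow the same invariant: an array is handed to __memblock_free_late() only when it no longer points at the static bootstrap array, i.e. only when memblock had grown it into allocated memory; freeing the static __initdata storage would be fatal. The shape of the test, in a userspace sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define INIT_REGIONS 4

    static long init_regions[INIT_REGIONS]; /* static bootstrap array */

    struct region_array { long *regions; int max; };

    /* Free the array only if it was grown out of the static storage. */
    static void discard(struct region_array *a)
    {
        if (a->regions != init_regions)
            free(a->regions);
    }

    int main(void)
    {
        struct region_array a = { init_regions, INIT_REGIONS };

        /* Simulate memblock doubling the array into allocated memory. */
        a.regions = malloc(2 * INIT_REGIONS * sizeof(long));
        memcpy(a.regions, init_regions, sizeof(init_regions));
        a.max = 2 * INIT_REGIONS;

        discard(&a); /* frees: the array is no longer the static one */
        return 0;
    }
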
+diff --git a/mm/memory.c b/mm/memory.c
+index b0c3d1556a94..9e50ffcf9639 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3882,8 +3882,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ * further.
+ */
+ if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
+- && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
++ && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
++
++ /*
++ * We are going to enforce SIGBUS but the PF path might have
++ * dropped the mmap_sem already so take it again so that
++ * we do not break expectations of all arch specific PF paths
++ * and g-u-p
++ */
++ if (ret & VM_FAULT_RETRY)
++ down_read(&vma->vm_mm->mmap_sem);
+ ret = VM_FAULT_SIGBUS;
++ }
+
+ return ret;
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 37d0b334bfe9..e0157546e6b5 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -931,11 +931,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
+ *policy |= (pol->flags & MPOL_MODE_FLAGS);
+ }
+
+- if (vma) {
+-	up_read(&current->mm->mmap_sem);
+- vma = NULL;
+- }
+-
+ err = 0;
+ if (nmask) {
+ if (mpol_store_user_nodemask(pol)) {
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 89a0a1707f4c..2586d5ab9b99 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -41,6 +41,7 @@
+ #include <linux/page_idle.h>
+ #include <linux/page_owner.h>
+ #include <linux/sched/mm.h>
++#include <linux/ptrace.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -1649,7 +1650,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+ const int __user *, nodes,
+ int __user *, status, int, flags)
+ {
+- const struct cred *cred = current_cred(), *tcred;
+ struct task_struct *task;
+ struct mm_struct *mm;
+ int err;
+@@ -1673,14 +1673,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+
+ /*
+ * Check if this process has the right to modify the specified
+- * process. The right exists if the process has administrative
+- * capabilities, superuser privileges or the same
+- * userid as the target process.
++ * process. Use the regular "ptrace_may_access()" checks.
+ */
+- tcred = __task_cred(task);
+- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
+- !capable(CAP_SYS_NICE)) {
++ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
+diff --git a/mm/nobootmem.c b/mm/nobootmem.c
+index 487dad610731..ab998125f04d 100644
+--- a/mm/nobootmem.c
++++ b/mm/nobootmem.c
+@@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void)
+ NULL)
+ count += __free_memory_core(start, end);
+
+-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+- {
+- phys_addr_t size;
+-
+- /* Free memblock.reserved array if it was allocated */
+- size = get_allocated_memblock_reserved_regions_info(&start);
+- if (size)
+- count += __free_memory_core(start, start + size);
+-
+- /* Free memblock.memory array if it was allocated */
+- size = get_allocated_memblock_memory_regions_info(&start);
+- if (size)
+- count += __free_memory_core(start, start + size);
+- }
+-#endif
+-
+ return count;
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 07569fa25760..4d16ef9d42a9 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1582,6 +1582,10 @@ void __init page_alloc_init_late(void)
+ /* Reinit limits that are based on free pages after the kernel is up */
+ files_maxfiles_init();
+ #endif
++#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
++ /* Discard memblock private memory */
++ memblock_discard();
++#endif
+
+ for_each_populated_zone(zone)
+ set_zone_contiguous(zone);
+diff --git a/mm/slub.c b/mm/slub.c
+index 8addc535bcdc..a0f3c56611c6 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5637,13 +5637,14 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
+ * A cache is never shut down before deactivation is
+ * complete, so no need to worry about synchronization.
+ */
+- return;
++ goto out;
+
+ #ifdef CONFIG_MEMCG
+ kset_unregister(s->memcg_kset);
+ #endif
+ kobject_uevent(&s->kobj, KOBJ_REMOVE);
+ kobject_del(&s->kobj);
++out:
+ kobject_put(&s->kobj);
+ }
+
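
The bare return skipped the final kobject_put() and leaked a reference to the kobject; funnelling every exit through one label is the standard kernel cleanup shape. In miniature:

    #include <stdio.h>
    #include <stdlib.h>

    static void work(int bail_early)
    {
        char *buf = malloc(64);
        if (!buf)
            return;

        if (bail_early)
            goto out; /* not "return": cleanup still runs */

        printf("doing the real work\n");
    out:
        free(buf);    /* single release point for every path */
    }

    int main(void)
    {
        work(1);
        work(0);
        return 0;
    }
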
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index ecc97f74ab18..104eb720ba43 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1669,7 +1669,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ struct page **pages;
+ unsigned int nr_pages, array_size, i;
+ const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+- const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
++ const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
++ const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
++ 0 :
++ __GFP_HIGHMEM;
+
+ nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+ array_size = (nr_pages * sizeof(struct page *));
+@@ -1677,7 +1680,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ area->nr_pages = nr_pages;
+ /* Please note that the recursion is strictly bounded. */
+ if (array_size > PAGE_SIZE) {
+- pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
++ pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
+ PAGE_KERNEL, node, area->caller);
+ } else {
+ pages = kmalloc_node(array_size, nested_gfp, node);
+@@ -1698,9 +1701,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ }
+
+ if (node == NUMA_NO_NODE)
+- page = alloc_page(alloc_mask);
++ page = alloc_page(alloc_mask|highmem_mask);
+ else
+- page = alloc_pages_node(node, alloc_mask, 0);
++ page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+
+ if (unlikely(!page)) {
+ /* Successfully allocated i pages, free them in __vunmap() */
+@@ -1708,7 +1711,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ goto fail;
+ }
+ area->pages[i] = page;
+- if (gfpflags_allow_blocking(gfp_mask))
++ if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
+ cond_resched();
+ }
+
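
OR-ing __GFP_HIGHMEM in unconditionally contradicted callers that asked for GFP_DMA or GFP_DMA32 memory, since no page can satisfy both zone constraints at once. The mask is now derived from the request; roughly, with illustrative flag values rather than the kernel's:

    #include <stdio.h>

    #define GFP_DMA       0x01u
    #define GFP_DMA32     0x02u
    #define __GFP_HIGHMEM 0x04u

    static unsigned int highmem_mask(unsigned int gfp_mask)
    {
        /* A DMA-zone request must not be widened to highmem. */
        return (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 0 : __GFP_HIGHMEM;
    }

    int main(void)
    {
        printf("%#x\n", highmem_mask(0));       /* 0x4: highmem allowed */
        printf("%#x\n", highmem_mask(GFP_DMA)); /* 0: stay in DMA zone */
        return 0;
    }
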
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index f3b1d7f50b81..67c4c68ce041 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1502,16 +1502,11 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
+ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ {
+ struct snd_seq_queue_info *info = arg;
+- int result;
+ struct snd_seq_queue *q;
+
+- result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+- if (result < 0)
+- return result;
+-
+- q = queueptr(result);
+- if (q == NULL)
+- return -EINVAL;
++ q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
++ if (IS_ERR(q))
++ return PTR_ERR(q);
+
+ info->queue = q->queue;
+ info->locked = q->locked;
+@@ -1521,7 +1516,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ if (!info->name[0])
+ snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+ strlcpy(q->name, info->name, sizeof(q->name));
+- queuefree(q);
++ snd_use_lock_free(&q->use_lock);
+
+ return 0;
+ }
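
Folding the error into the returned pointer with ERR_PTR() also closes the window in the old two-step interface, where the queue looked up again via queueptr(result) could already have been deleted by another thread. The pointer-or-errno encoding relies on the top 4095 values of the address space never being valid pointers; a freestanding rendition:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_queue(int fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM); /* error rides in the pointer */
        return malloc(32);
    }

    int main(void)
    {
        void *q = alloc_queue(1);
        if (IS_ERR(q)) {
            printf("alloc failed: %ld\n", PTR_ERR(q)); /* -12 */
            return 1;
        }
        free(q);
        return 0;
    }
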
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 450c5187eecb..79e0c5604ef8 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
+ static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
+ /* allocate a new queue -
+- * return queue index value or negative value for error
++ * return pointer to new queue or ERR_PTR(-errno) for error
++ * The new queue's use_lock is set to 1. It is the caller's responsibility to
++ * call snd_use_lock_free(&q->use_lock).
+ */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ {
+ struct snd_seq_queue *q;
+
+ q = queue_new(client, locked);
+ if (q == NULL)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+ q->info_flags = info_flags;
+ queue_use(q, client, 1);
++ snd_use_lock_use(&q->use_lock);
+ if (queue_list_add(q) < 0) {
++ snd_use_lock_free(&q->use_lock);
+ queue_delete(q);
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+ }
+- return q->queue;
++ return q;
+ }
+
+ /* delete a queue - queue must be owned by the client */
+diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
+index 30c8111477f6..719093489a2c 100644
+--- a/sound/core/seq/seq_queue.h
++++ b/sound/core/seq/seq_queue.h
+@@ -71,7 +71,7 @@ void snd_seq_queues_delete(void);
+
+
+ /* create new queue (constructor) */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+
+ /* delete queue (destructor) */
+ int snd_seq_queue_delete(int client, int queueid);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 082736c539bc..e630813c5008 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -542,6 +542,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+
+ if (size < sizeof(scale))
+ return -ENOMEM;
++ if (cval->min_mute)
++ scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
+ scale[2] = cval->dBmin;
+ scale[3] = cval->dBmax;
+ if (copy_to_user(_tlv, scale, sizeof(scale)))
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 3417ef347e40..2b4b067646ab 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
+ int cached;
+ int cache_val[MAX_CHANNELS];
+ u8 initialized;
++ u8 min_mute;
+ void *private_data;
+ };
+
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 4fa0053a40af..7fbc90f5c6de 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1878,6 +1878,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+ snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
+ break;
++ /* lowest playback value is muted on C-Media devices */
++ case USB_ID(0x0d8c, 0x000c):
++ case USB_ID(0x0d8c, 0x0014):
++ if (strstr(kctl->id.name, "Playback"))
++ cval->min_mute = 1;
++ break;
+ }
+ }
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index d7b0b0a3a2db..6a03f9697039 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+ case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
+ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+ case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+@@ -1374,6 +1375,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ }
+ }
+ break;
++ case USB_ID(0x16d0, 0x0a23):
++ if (fp->altsetting == 2)
++ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
++ break;
+
+ default:
+ break;
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-08-30 10:05 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-08-30 10:05 UTC (permalink / raw
To: gentoo-commits
commit: 333bb572330727d30ce9e4e2b5563e63819eda44
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 30 10:05:12 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 30 10:05:12 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=333bb572
Linux patch 4.12.10
0000_README | 4 +
1009_linux-4.12.10.patch | 3576 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3580 insertions(+)
diff --git a/0000_README b/0000_README
index 90242d0..a64a189 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-4.12.9.patch
From: http://www.kernel.org
Desc: Linux 4.12.9
+Patch: 1009_linux-4.12.10.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-4.12.10.patch b/1009_linux-4.12.10.patch
new file mode 100644
index 0000000..a2ab6c1
--- /dev/null
+++ b/1009_linux-4.12.10.patch
@@ -0,0 +1,3576 @@
+diff --git a/Makefile b/Makefile
+index a6c2a5e7a48d..6889ec6a091d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
+index 19ebddffb279..02fd1cece6ef 100644
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end;
+ #define ARC_REG_SLC_FLUSH 0x904
+ #define ARC_REG_SLC_INVALIDATE 0x905
+ #define ARC_REG_SLC_RGN_START 0x914
++#define ARC_REG_SLC_RGN_START1 0x915
+ #define ARC_REG_SLC_RGN_END 0x916
++#define ARC_REG_SLC_RGN_END1 0x917
+
+ /* Bit val in SLC_CONTROL */
+ #define SLC_CTRL_DIS 0x001
+diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
+index db7319e9b506..efb79fafff1d 100644
+--- a/arch/arc/include/asm/mmu.h
++++ b/arch/arc/include/asm/mmu.h
+@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
+ return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+ }
+
++extern int pae40_exist_but_not_enab(void);
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index a867575a758b..7db283b46ebd 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ unsigned int ctrl;
++ phys_addr_t end;
+
+ spin_lock_irqsave(&lock, flags);
+
+@@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+ * END needs to be setup before START (latter triggers the operation)
+ * END can't be same as START, so add (l2_line_sz - 1) to sz
+ */
+- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+- write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
++ end = paddr + sz + l2_line_sz - 1;
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
++
++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
++
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
++
++ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
++
++	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
++ read_aux_reg(ARC_REG_SLC_CTRL);
+
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+@@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
+ __dc_enable();
+ }
+
++/*
++ * Cache related boot time checks/setups only needed on master CPU:
++ * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
++ * Assume SMP only, so all cores will have same cache config. A check on
++ * one core suffices for all
++ * - IOC setup / dma callbacks only need to be done once
++ */
+ void __init arc_cache_init_master(void)
+ {
+ unsigned int __maybe_unused cpu = smp_processor_id();
+@@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+- /*
+- * Only master CPU needs to execute rest of function:
+- * - Assume SMP so all cores will have same cache config so
+- * any geomtry checks will be same for all
+- * - IOC setup / dma callbacks only need to be setup once
+- */
+ if (!cpu)
+ arc_cache_init_master();
++
++ /*
++ * In PAE regime, TLB and cache maintenance ops take wider addresses
++ * And even if PAE is not enabled in kernel, the upper 32-bits still need
++ * to be zeroed to keep the ops sane.
++ * As an optimization for more common !PAE enabled case, zero them out
++ * once at init, rather than checking/setting to 0 for every runtime op
++ */
++ if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
++
++ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
++ write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
++
++ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
++ write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
++
++ if (l2_line_sz) {
++ write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
++ write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
++ }
++ }
+ }
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index d0126fdfe2d8..b181f3ee38aa 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -104,6 +104,8 @@
+ /* A copy of the ASID from the PID reg is kept in asid_cache */
+ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
+
++static int __read_mostly pae_exists;
++
+ /*
+ * Utility Routine to erase a J-TLB entry
+ * Caller needs to setup Index Reg (manually or via getIndex)
+@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
+ mmu->u_dtlb = mmu4->u_dtlb * 4;
+ mmu->u_itlb = mmu4->u_itlb * 4;
+ mmu->sasid = mmu4->sasid;
+- mmu->pae = mmu4->pae;
++ pae_exists = mmu->pae = mmu4->pae;
+ }
+ }
+
+@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
+ return buf;
+ }
+
++int pae40_exist_but_not_enab(void)
++{
++ return pae_exists && !is_pae40_enabled();
++}
++
+ void arc_mmu_init(void)
+ {
+ char str[256];
+@@ -859,6 +866,9 @@ void arc_mmu_init(void)
+ /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+ #endif
++
++ if (pae40_exist_but_not_enab())
++ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 06da8ea16bbe..c7b4995868e1 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
+ {
+ if (!system_supports_fpsimd())
+ return;
++ preempt_disable();
++	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+ fpsimd_flush_task_state(current);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
++ preempt_enable();
+ }
+
+ /*
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index da7e9432fa8f..db80b301c080 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -80,9 +80,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct task_struct *tsk)
+ {
+ /* Mark this context has been used on the new CPU */
+- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
++ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
++ /*
++ * This full barrier orders the store to the cpumask above vs
++ * a subsequent operation which allows this CPU to begin loading
++ * translations for next.
++ *
++ * When using the radix MMU that operation is the load of the
++ * MMU context id, which is then moved to SPRN_PID.
++ *
++ * For the hash MMU it is either the first load from slb_cache
++ * in switch_slb(), and/or the store of paca->mm_ctx_id in
++ * copy_mm_to_paca().
++ *
++ * On the read side the barrier is in pte_xchg(), which orders
++ * the store to the PTE vs the load of mm_cpumask.
++ */
++ smp_mb();
++ }
++
+ /* 32-bit keeps track of the current PGDIR in the thread struct */
+ #ifdef CONFIG_PPC32
+ tsk->thread.pgdir = next->pgd;
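
This is the classic store/load pairing: the switching CPU must make its cpumask bit visible before it loads any translation state, and the PTE side must make its store visible before it reads mm_cpumask, otherwise each CPU can miss the other's store and a stale translation survives. As an analogy in C11 atomics (not the kernel's primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cpumask_bit, pte;

    /* Side A: CPU switching into the mm (switch_mm_irqs_off). */
    static int switch_side(void)
    {
        atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* the new smp_mb() */
        return atomic_load_explicit(&pte, memory_order_relaxed);
    }

    /* Side B: CPU storing a PTE, then checking who needs a flush. */
    static int pte_side(void)
    {
        atomic_store_explicit(&pte, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* barrier in pte_xchg() */
        return atomic_load_explicit(&cpumask_bit, memory_order_relaxed);
    }

    int main(void)
    {
        /* With both fences, at least one side must observe the other's
         * store under any interleaving; without them, both loads could
         * return 0 and the new translation would go unflushed. */
        printf("A saw pte=%d, B saw bit=%d\n", switch_side(), pte_side());
        return 0;
    }
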
+diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
+index 9c0f5db5cf46..67e7e3d990f4 100644
+--- a/arch/powerpc/include/asm/pgtable-be-types.h
++++ b/arch/powerpc/include/asm/pgtable-be-types.h
+@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+ unsigned long *p = (unsigned long *)ptep;
+ __be64 prev;
+
++ /* See comment in switch_mm_irqs_off() */
+ prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
+ (__force unsigned long)pte_raw(new));
+
+diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
+index 8bd3b13fe2fb..369a164b545c 100644
+--- a/arch/powerpc/include/asm/pgtable-types.h
++++ b/arch/powerpc/include/asm/pgtable-types.h
+@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+ {
+ unsigned long *p = (unsigned long *)ptep;
+
++ /* See comment in switch_mm_irqs_off() */
+ return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
+ }
+ #endif
+diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
+index 926b5244263e..a2e5c24f47a7 100644
+--- a/arch/s390/kvm/sthyi.c
++++ b/arch/s390/kvm/sthyi.c
+@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
+ "srl %[cc],28\n"
+ : [cc] "=d" (cc)
+ : [code] "d" (code), [addr] "a" (addr)
+- : "memory", "cc");
++ : "3", "memory", "cc");
+ return cc;
+ }
+
+@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+ trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
+- if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
++ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (code & 0xffff) {
+@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ goto out;
+ }
+
++ if (addr & ~PAGE_MASK)
++ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
+ /*
+ * If the page has not yet been faulted in, we want to do that
+ * now and not after all the expensive calculations.
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index 68bec7c97cb8..af6ac9c5d32e 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -1241,8 +1241,6 @@ static int pci_sun4v_probe(struct platform_device *op)
+ * ATU group, but ATU hcalls won't be available.
+ */
+ hv_atu = false;
+- pr_err(PFX "Could not register hvapi ATU err=%d\n",
+- err);
+ } else {
+ pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+ vatu_major, vatu_minor);
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 255645f60ca2..554cdb205d17 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+ return 0;
+ }
+
+-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
++static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
+ {
+ if (use_xsave()) {
+- copy_kernel_to_xregs(&fpstate->xsave, -1);
++ copy_kernel_to_xregs(&fpstate->xsave, mask);
+ } else {
+ if (use_fxsr())
+ copy_kernel_to_fxregs(&fpstate->fxsave);
+@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
+ : : [addr] "m" (fpstate));
+ }
+
+- __copy_kernel_to_fpregs(fpstate);
++ __copy_kernel_to_fpregs(fpstate, -1);
+ }
+
+ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 695605eb1dfb..ed8fdf86acfb 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -486,6 +486,7 @@ struct kvm_vcpu_arch {
+ unsigned long cr4;
+ unsigned long cr4_guest_owned_bits;
+ unsigned long cr8;
++ u32 pkru;
+ u32 hflags;
+ u64 efer;
+ u64 apic_base;
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 68b329d77b3a..8463a136f711 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ mm->context.execute_only_pkey = -1;
+ }
+ #endif
+- init_new_context_ldt(tsk, mm);
+-
+- return 0;
++ return init_new_context_ldt(tsk, mm);
+ }
+ static inline void destroy_context(struct mm_struct *mm)
+ {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 59ca2eea522c..19adbb418443 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* PKU is not yet implemented for shadow paging. */
+- if (!tdp_enabled)
++ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
+ entry->ecx &= ~F(PKU);
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
+diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
+index 762cdf2595f9..e1e89ee4af75 100644
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
+ | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+ }
+
+-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
+-{
+- return kvm_x86_ops->get_pkru(vcpu);
+-}
+-
+ static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hflags |= HF_GUEST_MASK;
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index 330bf3a811fb..b0d36a229d2e 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -182,7 +182,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ * index of the protection domain, so pte_pkey * 2 is
+ * is the index of the first bit for the domain.
+ */
+- pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
++ pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
+
+ /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
+ offset = (pfec & ~1) +
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index ba9891ac5c56..58dbca7f2106 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1725,11 +1725,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ to_svm(vcpu)->vmcb->save.rflags = rflags;
+ }
+
+-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
+-{
+- return 0;
+-}
+-
+ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+ {
+ switch (reg) {
+@@ -5313,8 +5308,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .get_rflags = svm_get_rflags,
+ .set_rflags = svm_set_rflags,
+
+- .get_pkru = svm_get_pkru,
+-
+ .tlb_flush = svm_flush_tlb,
+
+ .run = svm_vcpu_run,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 270d83da090c..2461e1a53f8c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -636,8 +636,6 @@ struct vcpu_vmx {
+
+ u64 current_tsc_ratio;
+
+- bool guest_pkru_valid;
+- u32 guest_pkru;
+ u32 host_pkru;
+
+ /*
+@@ -2368,11 +2366,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ vmcs_writel(GUEST_RFLAGS, rflags);
+ }
+
+-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
+-{
+- return to_vmx(vcpu)->guest_pkru;
+-}
+-
+ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+ {
+ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+@@ -8860,8 +8853,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vmx_set_interrupt_shadow(vcpu, 0);
+
+- if (vmx->guest_pkru_valid)
+- __write_pkru(vmx->guest_pkru);
++ if (static_cpu_has(X86_FEATURE_PKU) &&
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
++ vcpu->arch.pkru != vmx->host_pkru)
++ __write_pkru(vcpu->arch.pkru);
+
+ atomic_switch_perf_msrs(vmx);
+ debugctlmsr = get_debugctlmsr();
+@@ -9009,13 +9004,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ * back on host, so it is safe to read guest PKRU from current
+ * XSAVE.
+ */
+- if (boot_cpu_has(X86_FEATURE_OSPKE)) {
+- vmx->guest_pkru = __read_pkru();
+- if (vmx->guest_pkru != vmx->host_pkru) {
+- vmx->guest_pkru_valid = true;
++ if (static_cpu_has(X86_FEATURE_PKU) &&
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
++ vcpu->arch.pkru = __read_pkru();
++ if (vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vmx->host_pkru);
+- } else
+- vmx->guest_pkru_valid = false;
+ }
+
+ /*
+@@ -11507,8 +11500,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .get_rflags = vmx_get_rflags,
+ .set_rflags = vmx_set_rflags,
+
+- .get_pkru = vmx_get_pkru,
+-
+ .tlb_flush = vmx_flush_tlb,
+
+ .run = vmx_vcpu_run,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0e846f0cb83b..786e47fc6092 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3236,7 +3236,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+- memcpy(dest + offset, src, size);
++ if (feature == XFEATURE_MASK_PKRU)
++ memcpy(dest + offset, &vcpu->arch.pkru,
++ sizeof(vcpu->arch.pkru));
++ else
++ memcpy(dest + offset, src, size);
++
+ }
+
+ valid -= feature;
+@@ -3274,7 +3279,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+- memcpy(dest, src + offset, size);
++ if (feature == XFEATURE_MASK_PKRU)
++ memcpy(&vcpu->arch.pkru, src + offset,
++ sizeof(vcpu->arch.pkru));
++ else
++ memcpy(dest, src + offset, size);
+ }
+
+ valid -= feature;
+@@ -7616,7 +7625,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ */
+ vcpu->guest_fpu_loaded = 1;
+ __kernel_fpu_begin();
+- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
++ /* PKRU is separately restored in kvm_x86_ops->run. */
++ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
++ ~XFEATURE_MASK_PKRU);
+ trace_kvm_fpu(1);
+ }
+
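
The net effect of the hunks above: the guest's PKRU lives in vcpu->arch.pkru and is swapped against the host value around VM entry and exit, but only when the CPU has PKU, the guest runs with CR4.PKE set, and the two values actually differ, since WRPKRU is not free. The guard, distilled into a runnable sketch with stand-in feature flags:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for CPU feature and guest state queries. */
    static int cpu_has_pku = 1;
    static int guest_cr4_pke = 1;

    static uint32_t current_pkru;
    static void write_pkru(uint32_t v) { current_pkru = v; } /* WRPKRU */

    static void enter_guest(uint32_t guest_pkru, uint32_t host_pkru)
    {
        if (cpu_has_pku && guest_cr4_pke && guest_pkru != host_pkru)
            write_pkru(guest_pkru); /* only when it actually differs */
    }

    int main(void)
    {
        current_pkru = 0x55555554;              /* host value */
        enter_guest(0x0, current_pkru);
        printf("pkru now %#x\n", current_pkru); /* guest's 0x0 */
        return 0;
    }
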
+diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
+index fc6c416f8724..d5999eb41c00 100644
+--- a/drivers/acpi/acpi_apd.c
++++ b/drivers/acpi/acpi_apd.c
+@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
+ { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
+ { "BRCM900D", APD_ADDR(vulcan_spi_desc) },
+ { "CAV900D", APD_ADDR(vulcan_spi_desc) },
+- { "HISI0A21", APD_ADDR(hip07_i2c_desc) },
+- { "HISI0A22", APD_ADDR(hip08_i2c_desc) },
++ { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
++ { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
+ #endif
+ { }
+ };
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index cfad5d9a22f3..d8b2779b0140 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1703,7 +1703,7 @@ int __init acpi_ec_dsdt_probe(void)
+ * functioning ECDT EC first in order to handle the events.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=115021
+ */
+-int __init acpi_ec_ecdt_start(void)
++static int __init acpi_ec_ecdt_start(void)
+ {
+ acpi_handle handle;
+
+@@ -1906,20 +1906,17 @@ static inline void acpi_ec_query_exit(void)
+ int __init acpi_ec_init(void)
+ {
+ int result;
++ int ecdt_fail, dsdt_fail;
+
+ /* register workqueue for _Qxx evaluations */
+ result = acpi_ec_query_init();
+ if (result)
+- goto err_exit;
+- /* Now register the driver for the EC */
+- result = acpi_bus_register_driver(&acpi_ec_driver);
+- if (result)
+- goto err_exit;
++ return result;
+
+-err_exit:
+- if (result)
+- acpi_ec_query_exit();
+- return result;
++ /* Drivers must be started after acpi_ec_query_init() */
++ ecdt_fail = acpi_ec_ecdt_start();
++ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
++ return ecdt_fail && dsdt_fail ? -ENODEV : 0;
+ }
+
+ /* EC driver currently not unloadable */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 66229ffa909b..7e66f3c72b81 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
+ int acpi_ec_init(void);
+ int acpi_ec_ecdt_probe(void);
+ int acpi_ec_dsdt_probe(void);
+-int acpi_ec_ecdt_start(void);
+ void acpi_ec_block_transactions(void);
+ void acpi_ec_unblock_transactions(void);
+ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 9364398204e9..6822ac9f106b 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1046,7 +1046,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
+ fwnode_for_each_child_node(fwnode, child) {
+ u32 nr;
+
+- if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
++ if (fwnode_property_read_u32(child, prop_name, &nr))
+ continue;
+
+ if (val == nr)
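
Two bugs in one line: the property was read from the parent fwnode instead of each child, and the test was inverted for a 0-on-success API, skipping the child exactly when a value was found. With functions that return 0 on success, the skip condition is the bare return value:

    #include <stdio.h>

    /* 0-on-success reader, like fwnode_property_read_u32(). */
    static int read_u32(int have, unsigned int value, unsigned int *out)
    {
        if (!have)
            return -1;
        *out = value;
        return 0;
    }

    int main(void)
    {
        unsigned int nr;

        if (read_u32(1, 7, &nr))    /* correct: skip only on failure */
            printf("skip\n");
        else
            printf("got %u\n", nr); /* got 7 */
        return 0;
    }
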
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index d53162997f32..359d16c30002 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2085,7 +2085,6 @@ int __init acpi_scan_init(void)
+
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+- acpi_ec_ecdt_start();
+
+ acpi_scan_initialized = true;
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index aae4d8d4be36..831cdd7d197d 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+- if (target_wait)
+- wake_up_interruptible(target_wait);
++ if (target_wait) {
++ if (reply || !(t->flags & TF_ONE_WAY))
++ wake_up_interruptible_sync(target_wait);
++ else
++ wake_up_interruptible(target_wait);
++ }
+ return;
+
+ err_translate_failed:
+@@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ /*pr_info("binder_ioctl: %d:%d %x %lx\n",
+ proc->pid, current->pid, cmd, arg);*/
+
+- if (unlikely(current->mm != proc->vma_vm_mm)) {
+- pr_err("current mm mismatch proc mm\n");
+- return -EINVAL;
+- }
+ trace_binder_ioctl(cmd, arg);
+
+ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+- if (proc->tsk != current)
++ if (proc->tsk != current->group_leader)
+ return -EINVAL;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+@@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+- get_task_struct(current);
+- proc->tsk = current;
+- proc->vma_vm_mm = current->mm;
++ get_task_struct(current->group_leader);
++ proc->tsk = current->group_leader;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index c0a806280257..f4a4efec8737 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -839,8 +839,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
+
+ mode_info = info->mode_info;
+ if (mode_info) {
+- /* if the displays are off, vblank time is max */
+- mode_info->vblank_time_us = 0xffffffff;
+ /* always set the reference clock */
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ }
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index f32506a7c1d6..422404dbfabb 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -1581,6 +1581,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ if (config->funcs->atomic_check)
+ ret = config->funcs->atomic_check(state->dev, state);
+
++ if (ret)
++ return ret;
++
+ if (!state->allow_modeset) {
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+@@ -1591,7 +1594,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ }
+ }
+
+- return ret;
++ return 0;
+ }
+ EXPORT_SYMBOL(drm_atomic_check_only);
+
+@@ -2093,10 +2096,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_plane *plane;
+- struct drm_out_fence_state *fence_state = NULL;
++ struct drm_out_fence_state *fence_state;
+ unsigned plane_mask;
+ int ret = 0;
+- unsigned int i, j, num_fences = 0;
++ unsigned int i, j, num_fences;
+
+ /* disallow for drivers not supporting atomic: */
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+@@ -2137,6 +2140,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
+ plane_mask = 0;
+ copied_objs = 0;
+ copied_props = 0;
++ fence_state = NULL;
++ num_fences = 0;
+
+ for (i = 0; i < arg->count_objs; i++) {
+ uint32_t obj_id, count_props;
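
fence_state and num_fences are now (re)initialized inside the retry path: after an EDEADLK back-off the ioctl unwinds and loops back, and state surviving from the failed attempt, a freed allocation or a nonzero count, would be used or freed twice on the next pass. The shape of the fix:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int pass = 1;
        char *fence_state;
        int num_fences;

    retry:
        fence_state = NULL; /* must be reset on every pass */
        num_fences = 0;

        if (pass == 1) {
            fence_state = malloc(16);
            num_fences = 1;
            /* contention: unwind and retry from scratch */
            free(fence_state);
            pass = 2;
            goto retry;     /* without the resets, fence_state would
                             * dangle and num_fences double-count */
        }

        printf("pass %d: %d fences\n", pass, num_fences); /* pass 2: 0 */
        free(fence_state); /* free(NULL) is a no-op */
        return 0;
    }
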
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index b1e28c944637..08e6e981104f 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
++ if (dev->driver->gem_close_object)
++ dev->driver->gem_close_object(obj, file_priv);
++
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
+
+- if (dev->driver->gem_close_object)
+- dev->driver->gem_close_object(obj, file_priv);
+-
+ drm_gem_object_handle_put_unlocked(obj);
+
+ return 0;
+diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
+index 5dc8c4350602..e40c12fabbde 100644
+--- a/drivers/gpu/drm/drm_plane.c
++++ b/drivers/gpu/drm/drm_plane.c
+@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
+
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
++ drm_framebuffer_put(fb);
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ return -ENOENT;
+diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+index 41b2c3aaa04a..37258b7d1bce 100644
+--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
++++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+@@ -2754,7 +2754,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+ unmap_src:
+ i915_gem_object_unpin_map(obj);
+ put_obj:
+- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
++ i915_gem_object_put(obj);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 639d45c1dd2e..7ea7fd1e8856 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ uint8_t aux_channel, ddc_pin;
+ /* Each DDI port can have more than one value on the "DVO Port" field,
+- * so look for all the possible values for each port and abort if more
+- * than one is found. */
++ * so look for all the possible values for each port.
++ */
+ int dvo_ports[][3] = {
+ {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+ {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+ };
+
+- /* Find the child device to use, abort if more than one found. */
++ /*
++ * Find the first child device to reference the port, report if more
++ * than one found.
++ */
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ it = dev_priv->vbt.child_dev + i;
+
+@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+
+ if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (child) {
+- DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
++ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
+- return;
++ } else {
++ child = it;
+ }
+- child = it;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index 8ddd72cd5873..05601ab27d7c 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -25,12 +25,20 @@
+ #include "sun4i_framebuffer.h"
+ #include "sun4i_tcon.h"
+
++static void sun4i_drv_lastclose(struct drm_device *dev)
++{
++ struct sun4i_drv *drv = dev->dev_private;
++
++ drm_fbdev_cma_restore_mode(drv->fbdev);
++}
++
+ DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
+
+ static struct drm_driver sun4i_drv_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+
+ /* Generic Operations */
++ .lastclose = sun4i_drv_lastclose,
+ .fops = &sun4i_drv_fops,
+ .name = "sun4i-drm",
+ .desc = "Allwinner sun4i Display Engine",
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 0703da1d946a..eea71c4e969d 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -392,7 +392,7 @@ static void dw_i2c_plat_complete(struct device *dev)
+ #endif
+
+ #ifdef CONFIG_PM
+-static int dw_i2c_plat_suspend(struct device *dev)
++static int dw_i2c_plat_runtime_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+@@ -414,11 +414,21 @@ static int dw_i2c_plat_resume(struct device *dev)
+ return 0;
+ }
+
++#ifdef CONFIG_PM_SLEEP
++static int dw_i2c_plat_suspend(struct device *dev)
++{
++ pm_runtime_resume(dev);
++ return dw_i2c_plat_runtime_suspend(dev);
++}
++#endif
++
+ static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
+ .prepare = dw_i2c_plat_prepare,
+ .complete = dw_i2c_plat_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
++ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
++ dw_i2c_plat_resume,
++ NULL)
+ };
+
+ #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
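The designware hunk untangles runtime PM from system sleep: the same callback used to sit in both slots, so a system suspend could run against a device that was already runtime-suspended. The resulting split is a common pattern; a hedged sketch with illustrative foo_* names:

static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, drop device power */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore power, reinitialise the controller */
	return 0;
}

static int foo_suspend(struct device *dev)
{
	/* System-sleep entry: force a runtime resume first so the full
	 * suspend path always starts from a powered device. */
	pm_runtime_resume(dev);
	return foo_runtime_suspend(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_resume, NULL)
};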
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 0b5dea050239..6dda332f252a 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ s32 poll_value = 0;
+
+ if (state) {
+- if (!atomic_read(&st->user_requested_state))
+- return 0;
+ if (sensor_hub_device_open(st->hsdev))
+ return -EIO;
+
+@@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ &report_val);
+ }
+
++ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
++ st->pdev->name, state_val, report_val);
++
+ sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+ st->power_state.index,
+ sizeof(state_val), &state_val);
+@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ ret = pm_runtime_get_sync(&st->pdev->dev);
+ else {
+ pm_runtime_mark_last_busy(&st->pdev->dev);
++ pm_runtime_use_autosuspend(&st->pdev->dev);
+ ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+ }
+ if (ret < 0) {
+@@ -205,8 +207,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ /* Default to 3 seconds, but can be changed from sysfs */
+ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
+ 3000);
+- pm_runtime_use_autosuspend(&attrb->pdev->dev);
+-
+ return ret;
+ error_unreg_trigger:
+ iio_trigger_unregister(trig);
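Moving pm_runtime_use_autosuspend() out of one-time trigger setup pairs it with the put path. The get/put shape the driver converges on (a sketch; power_on and dev stand in for the driver's own state):

	if (power_on) {
		ret = pm_runtime_get_sync(dev);		/* sync resume + take a ref */
	} else {
		pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
		pm_runtime_use_autosuspend(dev);	/* honour the 3 s delay */
		ret = pm_runtime_put_autosuspend(dev);	/* drop ref, power off later */
	}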
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index 8cf84d3488b2..12898424d838 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(12500),
+- .accel_max_scale = 5,
++ .accel_max_scale = 10,
+ },
+ [ADIS16485] = {
+ .channels = adis16485_channels,
+diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
+index 8e1b0861fbe4..c38563699984 100644
+--- a/drivers/iio/magnetometer/st_magn_core.c
++++ b/drivers/iio/magnetometer/st_magn_core.c
+@@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
+ .drdy_irq = {
+ .addr = 0x62,
+ .mask_int1 = 0x01,
+- .addr_ihl = 0x63,
+- .mask_ihl = 0x04,
+- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
++ .addr_stat_drdy = 0x67,
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index f3f9d0b5dce0..5ea2d80800f9 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
+ cq->uobject = &obj->uobject;
+ cq->comp_handler = ib_uverbs_comp_handler;
+ cq->event_handler = ib_uverbs_cq_event_handler;
+- cq->cq_context = &ev_file->ev_queue;
++ cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
+ atomic_set(&cq->usecnt, 0);
+
+ obj->uobject.object = cq;
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 262d1057c1da..850b00e3ad8e 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+
+ case SS4_PACKET_ID_TWO:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
+ } else {
+- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
+ }
+ f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
+@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+
+ case SS4_PACKET_ID_MULTI:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
++ }
++
+ f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
+ } else {
+- f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
+ no_data_y = SS4_MFPACKET_NO_AY;
+@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
+
+ memset(otp, 0, sizeof(otp));
+
+- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
+- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
++ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
++ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
+ return -1;
+
+ alps_update_device_area_ss4_v2(otp, priv);
+diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
+index ed2d6879fa52..c80a7c76cb76 100644
+--- a/drivers/input/mouse/alps.h
++++ b/drivers/input/mouse/alps.h
+@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
+ ((_b[1 + _i * 3] << 5) & 0x1F00) \
+ )
+
++#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
++ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \
++ )
++
+ #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
+ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
+ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \
+@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
+ ((_b[0 + (_i) * 3] >> 3) & 0x0010) \
+ )
+
++#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
++ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \
++ )
++
+ #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \
+ )
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 3b0e9fb33afe..4f3d3543b2fb 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1223,6 +1223,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
++ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0605", 0 },
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 922ea02edcc3..fb3810d35c44 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
+ if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+ return -1;
+
+- if (param[0] != TP_MAGIC_IDENT)
+ /* Accept the newer TP IDs: TP_MAGIC_IDENT masks 0x1, 0x2 and 0x3. */
++ if (!(param[0] & TP_MAGIC_IDENT))
+ return -1;
+
+ if (firmware_id)
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 5617ed3a7d7a..88055755f82e 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -21,8 +21,9 @@
+ #define TP_COMMAND 0xE2 /* Commands start with this */
+
+ #define TP_READ_ID 0xE1 /* Sent for device identification */
+-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
+ /* by the firmware ID */
++ /* Firmware ID includes 0x1, 0x2, 0x3 */
+
+
+ /*
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 4de8f4160bb8..09f9dd166827 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -571,7 +571,9 @@ struct amd_iommu {
+
+ static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
+ {
+- return container_of(dev, struct amd_iommu, iommu.dev);
++ struct iommu_device *iommu = dev_to_iommu_device(dev);
++
++ return container_of(iommu, struct amd_iommu, iommu);
+ }
+
+ #define ACPIHID_UID_LEN 256
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index fc2765ccdb57..76791fded8a4 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4749,7 +4749,9 @@ static void intel_disable_iommus(void)
+
+ static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+ {
+- return container_of(dev, struct intel_iommu, iommu.dev);
++ struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
++
++ return container_of(iommu_dev, struct intel_iommu, iommu);
+ }
+
+ static ssize_t intel_iommu_show_version(struct device *dev,
+diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
+index c58351ed61c1..36d1a7ce7fc4 100644
+--- a/drivers/iommu/iommu-sysfs.c
++++ b/drivers/iommu/iommu-sysfs.c
+@@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
+ va_list vargs;
+ int ret;
+
+- device_initialize(&iommu->dev);
++ iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
++ if (!iommu->dev)
++ return -ENOMEM;
+
+- iommu->dev.class = &iommu_class;
+- iommu->dev.parent = parent;
+- iommu->dev.groups = groups;
++ device_initialize(iommu->dev);
++
++ iommu->dev->class = &iommu_class;
++ iommu->dev->parent = parent;
++ iommu->dev->groups = groups;
+
+ va_start(vargs, fmt);
+- ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
++ ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
+ va_end(vargs);
+ if (ret)
+ goto error;
+
+- ret = device_add(&iommu->dev);
++ ret = device_add(iommu->dev);
+ if (ret)
+ goto error;
+
++ dev_set_drvdata(iommu->dev, iommu);
++
+ return 0;
+
+ error:
+- put_device(&iommu->dev);
++ put_device(iommu->dev);
+ return ret;
+ }
+
+ void iommu_device_sysfs_remove(struct iommu_device *iommu)
+ {
+- device_unregister(&iommu->dev);
++ dev_set_drvdata(iommu->dev, NULL);
++ device_unregister(iommu->dev);
++ iommu->dev = NULL;
+ }
+ /*
+ * IOMMU drivers can indicate a device is managed by a given IOMMU using
+@@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
+ if (!iommu || IS_ERR(iommu))
+ return -ENODEV;
+
+- ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
++ ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
+ &link->kobj, dev_name(link));
+ if (ret)
+ return ret;
+
+- ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
++ ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
+ if (ret)
+- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
++ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
+ dev_name(link));
+
+ return ret;
+@@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
+ return;
+
+ sysfs_remove_link(&link->kobj, "iommu");
+- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
++ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
+ }
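The two driver hunks above only work because of this one: once the sysfs struct device is allocated separately instead of embedded, container_of() from the device pointer back to its owner is meaningless, so the back-pointer has to travel through drvdata (dev_set_drvdata() here, dev_to_iommu_device() on the read side). A self-contained illustration of why the embedded-member trick stops working:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dev { int id; };
struct owner_embedded { int tag; struct dev d; };	/* container_of is valid */
struct owner_pointer  { int tag; struct dev *d; };	/* container_of is garbage */

int main(void)
{
	struct owner_embedded e = { .tag = 1 };
	struct owner_pointer  p = { .tag = 2, .d = malloc(sizeof(*p.d)) };

	/* Valid: &e.d really lives inside e. */
	printf("%d\n", container_of(&e.d, struct owner_embedded, d)->tag);

	/* p.d points into the heap, nowhere near p; subtracting
	 * offsetof() from it would fabricate a bogus owner pointer.
	 * Hence the explicit drvdata back-pointer in the patch. */
	free(p.d);
	return 0;
}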
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 224e93aa6d23..510a580e0348 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ new_slave->delay = 0;
+ new_slave->link_failure_count = 0;
+
+- if (bond_update_speed_duplex(new_slave))
++ if (bond_update_speed_duplex(new_slave) &&
++ bond_needs_speed_duplex(bond))
+ new_slave->link = BOND_LINK_DOWN;
+
+ new_slave->last_rx = jiffies -
+@@ -2137,11 +2138,13 @@ static void bond_miimon_commit(struct bonding *bond)
+ continue;
+
+ case BOND_LINK_UP:
+- if (bond_update_speed_duplex(slave)) {
++ if (bond_update_speed_duplex(slave) &&
++ bond_needs_speed_duplex(bond)) {
+ slave->link = BOND_LINK_DOWN;
+- netdev_warn(bond->dev,
+- "failed to get link speed/duplex for %s\n",
+- slave->dev->name);
++ if (net_ratelimit())
++ netdev_warn(bond->dev,
++ "failed to get link speed/duplex for %s\n",
++ slave->dev->name);
+ continue;
+ }
+ bond_set_slave_link_state(slave, BOND_LINK_UP,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 83aab1e4c8c8..9f214f9fb48c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ /* Virtual PCI function needs to determine UAR page size from
+ * firmware. Only master PCI function can set the uar page size
+ */
+- if (enable_4k_uar)
++ if (enable_4k_uar || !dev->persist->num_vfs)
+ dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+ else
+ dev->uar_page_shift = PAGE_SHIFT;
+@@ -2275,7 +2275,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
+
+ dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
+
+- if (enable_4k_uar) {
++ if (enable_4k_uar || !dev->persist->num_vfs) {
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
+ init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index 82bd6b0935f1..fd4a785431ac 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -881,8 +881,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+ return NETDEV_TX_OK;
+
+ err_unmap:
+- --f;
+- while (f >= 0) {
++ while (--f >= 0) {
+ frag = &skb_shinfo(skb)->frags[f];
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
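The rewritten cleanup is the canonical reverse-unwind loop: the old form decremented f once up front and, with no decrement in the visible body, could spin on the same frag; folding the decrement into the condition steps backwards each pass and skips the frag that failed (it was never mapped). The general shape (map_one, unmap_one and res are placeholders), noting the counter must be signed for --i >= 0 to ever become false:

	int i;					/* signed on purpose */

	for (i = 0; i < n; i++)
		if (map_one(res[i]) < 0)
			goto err_unmap;
	return 0;

err_unmap:
	while (--i >= 0)			/* undo only what succeeded, in reverse */
		unmap_one(res[i]);
	return -ENOMEM;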
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 9ee7d4275640..5bd954d12541 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1876,6 +1876,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+
+ err_detach:
+ tun_detach_all(dev);
++ /* register_netdevice() already called tun_free_netdev() */
++ goto err_free_dev;
++
+ err_free_flow:
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index 10e5bf460139..f27d1344d198 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -920,10 +920,8 @@ static void ntb_transport_link_work(struct work_struct *work)
+ ntb_free_mw(nt, i);
+
+ /* if there's an actual failure, we should just bail */
+- if (rc < 0) {
+- ntb_link_disable(ndev);
++ if (rc < 0)
+ return;
+- }
+
+ out:
+ if (ntb_link_is_up(ndev, NULL, NULL) == 1)
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index d283341cfe43..56cd4e5e51b2 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 007a4f366086..1c4797e53f68 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ {
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ const char *name = dev_name(&vp_dev->vdev.dev);
++ unsigned flags = PCI_IRQ_MSIX;
+ unsigned i, v;
+ int err = -ENOMEM;
+
+@@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ GFP_KERNEL))
+ goto error;
+
++ if (desc) {
++ flags |= PCI_IRQ_AFFINITY;
++ desc->pre_vectors++; /* virtio config vector */
++ }
++
+ err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+- nvectors, PCI_IRQ_MSIX |
+- (desc ? PCI_IRQ_AFFINITY : 0),
+- desc);
++ nvectors, flags, desc);
+ if (err < 0)
+ goto error;
+ vp_dev->msix_enabled = 1;
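Besides the flag cleanup, the new desc->pre_vectors++ tells the IRQ-affinity spreading code that vector 0 — the virtio config-change interrupt — is not a queue vector and must be left out of the per-CPU spread. Minimal usage of the same PCI API (pdev, min_vecs, max_vecs are the caller's values):

	struct irq_affinity desc = { .pre_vectors = 1 };	/* config vector */
	int nvec;

	nvec = pci_alloc_irq_vectors_affinity(pdev, min_vecs, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &desc);
	if (nvec < 0)
		return nvec;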
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 56366e984076..569d3fb736be 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -194,15 +194,20 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, bool prefix)
+ }
+
+ /*
++ * Don't allow path components longer than the server max.
+ * Don't allow the separator character in a path component.
+ * The VFS will not allow "/", but "\" is allowed by posix.
+ */
+ static int
+-check_name(struct dentry *direntry)
++check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ int i;
+
++ if (unlikely(direntry->d_name.len >
++ tcon->fsAttrInfo.MaxPathNameComponentLength))
++ return -ENAMETOOLONG;
++
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+ for (i = 0; i < direntry->d_name.len; i++) {
+ if (direntry->d_name.name[i] == '\\') {
+@@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ return finish_no_open(file, res);
+ }
+
+- rc = check_name(direntry);
+- if (rc)
+- return rc;
+-
+ xid = get_xid();
+
+ cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+@@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ }
+
+ tcon = tlink_tcon(tlink);
++
++ rc = check_name(direntry, tcon);
++ if (rc)
++ goto out_free_xid;
++
+ server = tcon->ses->server;
+
+ if (server->ops->new_lease_key)
+@@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ }
+ pTcon = tlink_tcon(tlink);
+
+- rc = check_name(direntry);
++ rc = check_name(direntry, pTcon);
+ if (rc)
+ goto lookup_out;
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index e4afdaae743f..c398f393f2b3 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -3195,8 +3195,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+ kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
+ le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
+ kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
+- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
+- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
++ kst->f_bfree = kst->f_bavail =
++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+ return;
+ }
+
+diff --git a/fs/dax.c b/fs/dax.c
+index 9187f3b07f3e..f3ac7674b5cb 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1380,6 +1380,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
+
+ trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
+
++ /*
++ * Make sure that the faulting address's PMD offset (color) matches
++ * the PMD offset from the start of the file. This is necessary so
++ * that a PMD range in the page table overlaps exactly with a PMD
++ * range in the radix tree.
++ */
++ if ((vmf->pgoff & PG_PMD_COLOUR) !=
++ ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
++ goto fallback;
++
+ /* Fall back to PTEs if we're going to COW */
+ if (write && !(vma->vm_flags & VM_SHARED))
+ goto fallback;
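A PMD mapping (2 MiB on x86-64, i.e. 512 pages) can only be used when the fault address and the file offset are congruent modulo the PMD size; otherwise the page-table range and the radix-tree slot would cover different sets of file pages. A self-contained check with the same arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PG_PMD_COLOUR	((1UL << 9) - 1)	/* 512 pages per PMD */

int main(void)
{
	unsigned long pgoff   = 0;			/* first page of the file */
	unsigned long address = 0x200000 + 0x1000;	/* 2 MiB + 4 KiB */

	unsigned long file_colour  = pgoff & PG_PMD_COLOUR;			/* 0 */
	unsigned long vaddr_colour = (address >> PAGE_SHIFT) & PG_PMD_COLOUR;	/* 1 */

	/* Colours differ: one PMD cannot map this file page at this
	 * address, so the fault must fall back to PTEs. */
	printf("%s\n", file_colour == vaddr_colour ? "pmd ok" : "fallback");
	return 0;
}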
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 26780d53a6f9..ed8d6b73d12a 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
+ argp->p = page_address(argp->pagelist[0]);
+ argp->pagelist++;
+ if (argp->pagelen < PAGE_SIZE) {
+- argp->end = argp->p + (argp->pagelen>>2);
++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
+ argp->pagelen = 0;
+ } else {
+ argp->end = argp->p + (PAGE_SIZE>>2);
+@@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+ argp->pagelen -= pages * PAGE_SIZE;
+ len -= pages * PAGE_SIZE;
+
+- argp->p = (__be32 *)page_address(argp->pagelist[0]);
+- argp->pagelist++;
+- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
++ next_decode_page(argp);
+ }
+ argp->p += XDR_QUADLEN(len);
+
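XDR_QUADLEN() rounds a byte count up to whole 4-byte XDR words, where the open-coded pagelen>>2 truncated; with a tail that is not a multiple of four, truncation left argp->end short of the final partial word. The difference in one run:

#include <stdio.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* as in the sunrpc headers */

int main(void)
{
	unsigned int pagelen = 5;	/* one full word plus one byte */

	printf("truncating: %u words\n", pagelen >> 2);		/* 1 */
	printf("rounding:   %u words\n", XDR_QUADLEN(pagelen));	/* 2 */
	return 0;
}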
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 314a0b9219c6..a06342f11259 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -59,6 +59,22 @@
+ /* Align . to a 8 byte boundary equals to maximum function alignment. */
+ #define ALIGN_FUNCTION() . = ALIGN(8)
+
++/*
++ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
++ * generates .data.identifier sections, which need to be pulled in with
++ * .data. We don't want to pull in .data..other sections, which Linux
++ * has defined. Same for text and bss.
++ */
++#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
++#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
++#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
++#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
++#else
++#define TEXT_MAIN .text
++#define DATA_MAIN .data
++#define BSS_MAIN .bss
++#endif
++
+ /*
+ * Align to a 32 byte boundary equal to the
+ * alignment gcc 4.5 uses for a struct
+@@ -199,12 +215,9 @@
+
+ /*
+ * .data section
+- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
+- * .data.identifier which needs to be pulled in with .data, but don't want to
+- * pull in .data..stuff which has its own requirements. Same for bss.
+ */
+ #define DATA_DATA \
+- *(.data .data.[0-9a-zA-Z_]*) \
++ *(DATA_MAIN) \
+ *(.ref.data) \
+ *(.data..shared_aligned) /* percpu related */ \
+ MEM_KEEP(init.data) \
+@@ -435,16 +448,17 @@
+ VMLINUX_SYMBOL(__security_initcall_end) = .; \
+ }
+
+-/* .text section. Map to function alignment to avoid address changes
++/*
++ * .text section. Map to function alignment to avoid address changes
+ * during second ld run in second ld pass when generating System.map
+- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
+- * .text.identifier which needs to be pulled in with .text , but some
+- * architectures define .text.foo which is not intended to be pulled in here.
+- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
+- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
++ *
++ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
++ * code elimination is enabled, so these sections should be converted
++ * to use ".." first.
++ */
+ #define TEXT_TEXT \
+ ALIGN_FUNCTION(); \
+- *(.text.hot .text .text.fixup .text.unlikely) \
++ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.ref.text) \
+ MEM_KEEP(init.text) \
+ MEM_KEEP(exit.text) \
+@@ -613,7 +627,7 @@
+ BSS_FIRST_SECTIONS \
+ *(.bss..page_aligned) \
+ *(.dynbss) \
+- *(.bss .bss.[0-9a-zA-Z_]*) \
++ *(BSS_MAIN) \
+ *(COMMON) \
+ }
+
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index d5093b52b485..88f4289e7eee 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -43,6 +43,7 @@ struct bpf_reg_state {
+ u32 min_align;
+ u32 aux_off;
+ u32 aux_off_align;
++ bool value_from_signed;
+ };
+
+ enum bpf_stack_slot_type {
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 803e5a9b2654..d6d525039496 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -891,9 +891,9 @@ static inline struct file *get_file(struct file *f)
+ /* Page cache limit. The filesystems should put that into their s_maxbytes
+ limits, otherwise bad things can happen in VM. */
+ #if BITS_PER_LONG==32
+-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
++#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
+ #elif BITS_PER_LONG==64
+-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
++#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
+ #endif
+
+ #define FL_POSIX 1
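On 32-bit builds the old bound, (PAGE_SIZE << 31) - 1 = 2^43 - 1, is stricter than what the page cache can index: pgoff_t is an unsigned long, so 2^32 - 1 pages of 4 KiB (about 16 TiB) are addressable, and that is exactly what ULONG_MAX << PAGE_SHIFT expresses. The arithmetic, assuming 4 KiB pages and 32-bit longs:

#include <stdio.h>

int main(void)
{
	unsigned long long page_size = 4096;
	unsigned long long ulong_max = 0xffffffffULL;	/* 32-bit ULONG_MAX */

	unsigned long long old_max = (page_size << 31) - 1;	/* 2^43 - 1, ~8 TiB */
	unsigned long long new_max = ulong_max << 12;		/* 2^44 - 4096, ~16 TiB */

	printf("old %llu\nnew %llu\n", old_max, new_max);
	return 0;
}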
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 2cb54adc4a33..176f7569d874 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -240,7 +240,7 @@ struct iommu_device {
+ struct list_head list;
+ const struct iommu_ops *ops;
+ struct fwnode_handle *fwnode;
+- struct device dev;
++ struct device *dev;
+ };
+
+ int iommu_device_register(struct iommu_device *iommu);
+@@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+ iommu->fwnode = fwnode;
+ }
+
++static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
++{
++ return (struct iommu_device *)dev_get_drvdata(dev);
++}
++
+ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
+ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
+ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
+@@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+ {
+ }
+
++static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
++{
++ return NULL;
++}
++
+ static inline void iommu_device_unregister(struct iommu_device *iommu)
+ {
+ }
+diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
+index 6b2e0dd88569..feff771e8ea0 100644
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -371,9 +371,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
+ __PTR_RING_PEEK_CALL_v; \
+ })
+
+-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
++static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
+ {
+- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
++ return kcalloc(size, sizeof(void *), gfp);
+ }
+
+ static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
+@@ -462,7 +462,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
+ * In particular if you consume ring in interrupt or BH context, you must
+ * disable interrupts/BH when doing so.
+ */
+-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
++static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
++ unsigned int nrings,
+ int size,
+ gfp_t gfp, void (*destroy)(void *))
+ {
+@@ -470,7 +471,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+ void ***queues;
+ int i;
+
+- queues = kmalloc(nrings * sizeof *queues, gfp);
++ queues = kmalloc_array(nrings, sizeof(*queues), gfp);
+ if (!queues)
+ goto noqueues;
+
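kcalloc() and kmalloc_array() exist precisely because an open-coded size * sizeof(void *) can wrap; on overflow they return NULL instead of quietly allocating a tiny buffer that later writes will overrun. What the wrap looks like in 32-bit size arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t n = 0x40000001;	/* caller-controlled element count */
	uint32_t bytes = n * 8;		/* wraps mod 2^32: result is 8 */

	printf("%u elements -> %u bytes\n", n, bytes);
	return 0;
}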
+diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
+index f4dfade428f0..be8b902b5845 100644
+--- a/include/linux/skb_array.h
++++ b/include/linux/skb_array.h
+@@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
+ }
+
+ static inline int skb_array_resize_multiple(struct skb_array **rings,
+- int nrings, int size, gfp_t gfp)
++ int nrings, unsigned int size,
++ gfp_t gfp)
+ {
+ BUILD_BUG_ON(offsetof(struct skb_array, ring));
+ return ptr_ring_resize_multiple((struct ptr_ring **)rings,
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index b00508d22e0a..b2e68657a216 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
+ BOND_MODE(bond) == BOND_MODE_ALB;
+ }
+
++static inline bool bond_needs_speed_duplex(const struct bonding *bond)
++{
++ return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
++}
++
+ static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+ {
+ return (BOND_MODE(bond) == BOND_MODE_TLB) &&
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 821cedcc8e73..0cf7f5a65fe6 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -352,7 +352,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ !forwarding)
+ return dst_mtu(dst);
+
+- return min(dst->dev->mtu, IP_MAX_MTU);
++ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
+
+ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+@@ -364,7 +364,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+ }
+
+- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
++ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ }
+
+ u32 ip_idents_reserve(u32 hash, int segs);
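Without READ_ONCE() the compiler may reload dev->mtu inside the min() expansion, so a concurrent writer on another CPU can make the "clamped" result exceed IP_MAX_MTU (one load for the comparison, a second for the returned value). A minimal model of the annotation:

#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
#define IP_MAX_MTU	0xFFFFU

struct net_device { unsigned int mtu; };

static unsigned int clamp_mtu(const struct net_device *dev)
{
	/* Exactly one load: the value compared is the value returned,
	 * even if dev->mtu is rewritten mid-call. */
	unsigned int mtu = READ_ONCE(dev->mtu);

	return mtu < IP_MAX_MTU ? mtu : IP_MAX_MTU;
}

int main(void)
{
	struct net_device d = { .mtu = 1500 };

	printf("%u\n", clamp_mtu(&d));
	return 0;
}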
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 22e52093bfda..db5b6b6346b3 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -785,8 +785,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ old = *pold;
+ *pold = new;
+ if (old != NULL) {
+- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
++ unsigned int qlen = old->q.qlen;
++ unsigned int backlog = old->qstats.backlog;
++
+ qdisc_reset(old);
++ qdisc_tree_reduce_backlog(old, qlen, backlog);
+ }
+ sch_tree_unlock(sch);
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a8a725697bed..1e64ee3dd650 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
+ {
+ regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+ regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
++ regs[regno].value_from_signed = false;
+ regs[regno].min_align = 0;
+ }
+
+@@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
+ return -EACCES;
+ }
+
+-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
++static bool __is_pointer_value(bool allow_ptr_leaks,
++ const struct bpf_reg_state *reg)
+ {
+- if (env->allow_ptr_leaks)
++ if (allow_ptr_leaks)
+ return false;
+
+- switch (env->cur_state.regs[regno].type) {
++ switch (reg->type) {
+ case UNKNOWN_VALUE:
+ case CONST_IMM:
+ return false;
+@@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+ }
+ }
+
++static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
++{
++ return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
++}
++
+ static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
+ int off, int size, bool strict)
+ {
+@@ -1650,6 +1657,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ return 0;
+ }
+
++static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
++ struct bpf_insn *insn)
++{
++ struct bpf_reg_state *regs = env->cur_state.regs;
++ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
++ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
++ u8 opcode = BPF_OP(insn->code);
++ s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
++
++ /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
++ if (src_reg->imm > 0 && dst_reg->imm) {
++ switch (opcode) {
++ case BPF_ADD:
++ /* dreg += sreg
++ * where both have zero upper bits. Adding them
++ * can only result making one more bit non-zero
++ * in the larger value.
++ * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
++ * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
++ */
++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
++ dst_reg->imm--;
++ break;
++ case BPF_AND:
++ /* dreg &= sreg
++ * AND cannot extend zero bits, only shrink them
++ * Ex. 0x00..00ffffff
++ * & 0x0f..ffffffff
++ * ----------------
++ * 0x00..00ffffff
++ */
++ dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
++ break;
++ case BPF_OR:
++ /* dreg |= sreg
++ * OR can only extend zero bits
++ * Ex. 0x00..00ffffff
++ * | 0x0f..ffffffff
++ * ----------------
++ * 0x0f..00ffffff
++ */
++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
++ break;
++ case BPF_SUB:
++ case BPF_MUL:
++ case BPF_RSH:
++ case BPF_LSH:
++ /* These may be flushed out later */
++ default:
++ mark_reg_unknown_value(regs, insn->dst_reg);
++ }
++ } else {
++ mark_reg_unknown_value(regs, insn->dst_reg);
++ }
++
++ dst_reg->type = UNKNOWN_VALUE;
++ return 0;
++}
++
+ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
+ {
+@@ -1659,6 +1725,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+ u8 opcode = BPF_OP(insn->code);
+ u64 dst_imm = dst_reg->imm;
+
++ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
++ return evaluate_reg_imm_alu_unknown(env, insn);
++
+ /* dst_reg->type == CONST_IMM here. Simulate execution of insns
+ * containing ALU ops. Don't care about overflow or negative
+ * values, just add/sub/... them; registers are in u64.
+@@ -1763,10 +1832,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ dst_align = dst_reg->min_align;
+
+ /* We don't know anything about what was done to this register, mark it
+- * as unknown.
++ * as unknown. Also, if both derived bounds came from signed/unsigned
++ * mixed compares and one side is unbounded, we cannot really do anything
++ * with them as boundaries cannot be trusted. Thus, arithmetic of two
++ * regs of such kind will get invalidated bounds on the dst side.
+ */
+- if (min_val == BPF_REGISTER_MIN_RANGE &&
+- max_val == BPF_REGISTER_MAX_RANGE) {
++ if ((min_val == BPF_REGISTER_MIN_RANGE &&
++ max_val == BPF_REGISTER_MAX_RANGE) ||
++ (BPF_SRC(insn->code) == BPF_X &&
++ ((min_val != BPF_REGISTER_MIN_RANGE &&
++ max_val == BPF_REGISTER_MAX_RANGE) ||
++ (min_val == BPF_REGISTER_MIN_RANGE &&
++ max_val != BPF_REGISTER_MAX_RANGE) ||
++ (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
++ dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
++ (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
++ dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
++ regs[insn->dst_reg].value_from_signed !=
++ regs[insn->src_reg].value_from_signed)) {
+ reset_reg_range_values(regs, insn->dst_reg);
+ return;
+ }
+@@ -1775,10 +1858,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ * do our normal operations to the register, we need to set the values
+ * to the min/max since they are undefined.
+ */
+- if (min_val == BPF_REGISTER_MIN_RANGE)
+- dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+- if (max_val == BPF_REGISTER_MAX_RANGE)
+- dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++ if (opcode != BPF_SUB) {
++ if (min_val == BPF_REGISTER_MIN_RANGE)
++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
++ if (max_val == BPF_REGISTER_MAX_RANGE)
++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++ }
+
+ switch (opcode) {
+ case BPF_ADD:
+@@ -1789,10 +1874,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ dst_reg->min_align = min(src_align, dst_align);
+ break;
+ case BPF_SUB:
++ /* If one of our values was at the end of our ranges, then the
++ * _opposite_ value in the dst_reg goes to the end of our range.
++ */
++ if (min_val == BPF_REGISTER_MIN_RANGE)
++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++ if (max_val == BPF_REGISTER_MAX_RANGE)
++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+- dst_reg->min_value -= min_val;
++ dst_reg->min_value -= max_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+- dst_reg->max_value -= max_val;
++ dst_reg->max_value -= min_val;
+ dst_reg->min_align = min(src_align, dst_align);
+ break;
+ case BPF_MUL:
+@@ -1953,6 +2045,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ regs[insn->dst_reg].max_value = insn->imm;
+ regs[insn->dst_reg].min_value = insn->imm;
+ regs[insn->dst_reg].min_align = calc_align(insn->imm);
++ regs[insn->dst_reg].value_from_signed = false;
+ }
+
+ } else if (opcode > BPF_END) {
+@@ -2128,40 +2221,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
+ struct bpf_reg_state *false_reg, u64 val,
+ u8 opcode)
+ {
++ bool value_from_signed = true;
++ bool is_range = true;
++
+ switch (opcode) {
+ case BPF_JEQ:
+ /* If this is false then we know nothing Jon Snow, but if it is
+ * true then we know for sure.
+ */
+ true_reg->max_value = true_reg->min_value = val;
++ is_range = false;
+ break;
+ case BPF_JNE:
+ /* If this is true we know nothing Jon Snow, but if it is false
+ * we know the value for sure;
+ */
+ false_reg->max_value = false_reg->min_value = val;
++ is_range = false;
+ break;
+ case BPF_JGT:
+- /* Unsigned comparison, the minimum value is 0. */
+- false_reg->min_value = 0;
++ value_from_signed = false;
+ /* fallthrough */
+ case BPF_JSGT:
++ if (true_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(true_reg, 0);
++ if (false_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(false_reg, 0);
++ if (opcode == BPF_JGT) {
++ /* Unsigned comparison, the minimum value is 0. */
++ false_reg->min_value = 0;
++ }
+ /* If this is false then we know the maximum val is val,
+ * otherwise we know the min val is val+1.
+ */
+ false_reg->max_value = val;
++ false_reg->value_from_signed = value_from_signed;
+ true_reg->min_value = val + 1;
++ true_reg->value_from_signed = value_from_signed;
+ break;
+ case BPF_JGE:
+- /* Unsigned comparison, the minimum value is 0. */
+- false_reg->min_value = 0;
++ value_from_signed = false;
+ /* fallthrough */
+ case BPF_JSGE:
++ if (true_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(true_reg, 0);
++ if (false_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(false_reg, 0);
++ if (opcode == BPF_JGE) {
++ /* Unsigned comparison, the minimum value is 0. */
++ false_reg->min_value = 0;
++ }
+ /* If this is false then we know the maximum value is val - 1,
+ * otherwise we know the minimum value is val.
+ */
+ false_reg->max_value = val - 1;
++ false_reg->value_from_signed = value_from_signed;
+ true_reg->min_value = val;
++ true_reg->value_from_signed = value_from_signed;
+ break;
+ default:
+ break;
+@@ -2169,6 +2285,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
+
+ check_reg_overflow(false_reg);
+ check_reg_overflow(true_reg);
++ if (is_range) {
++ if (__is_pointer_value(false, false_reg))
++ reset_reg_range_values(false_reg, 0);
++ if (__is_pointer_value(false, true_reg))
++ reset_reg_range_values(true_reg, 0);
++ }
+ }
+
+ /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
+@@ -2178,41 +2300,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+ struct bpf_reg_state *false_reg, u64 val,
+ u8 opcode)
+ {
++ bool value_from_signed = true;
++ bool is_range = true;
++
+ switch (opcode) {
+ case BPF_JEQ:
+ /* If this is false then we know nothing Jon Snow, but if it is
+ * true then we know for sure.
+ */
+ true_reg->max_value = true_reg->min_value = val;
++ is_range = false;
+ break;
+ case BPF_JNE:
+ /* If this is true we know nothing Jon Snow, but if it is false
+ * we know the value for sure;
+ */
+ false_reg->max_value = false_reg->min_value = val;
++ is_range = false;
+ break;
+ case BPF_JGT:
+- /* Unsigned comparison, the minimum value is 0. */
+- true_reg->min_value = 0;
++ value_from_signed = false;
+ /* fallthrough */
+ case BPF_JSGT:
++ if (true_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(true_reg, 0);
++ if (false_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(false_reg, 0);
++ if (opcode == BPF_JGT) {
++ /* Unsigned comparison, the minimum value is 0. */
++ true_reg->min_value = 0;
++ }
+ /*
+ * If this is false, then the val is <= the register, if it is
+ * true the register is <= the val.
+ */
+ false_reg->min_value = val;
++ false_reg->value_from_signed = value_from_signed;
+ true_reg->max_value = val - 1;
++ true_reg->value_from_signed = value_from_signed;
+ break;
+ case BPF_JGE:
+- /* Unsigned comparison, the minimum value is 0. */
+- true_reg->min_value = 0;
++ value_from_signed = false;
+ /* fallthrough */
+ case BPF_JSGE:
++ if (true_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(true_reg, 0);
++ if (false_reg->value_from_signed != value_from_signed)
++ reset_reg_range_values(false_reg, 0);
++ if (opcode == BPF_JGE) {
++ /* Unsigned comparison, the minimum value is 0. */
++ true_reg->min_value = 0;
++ }
+ /* If this is false then constant < register, if it is true then
+ * the register < constant.
+ */
+ false_reg->min_value = val + 1;
++ false_reg->value_from_signed = value_from_signed;
+ true_reg->max_value = val;
++ true_reg->value_from_signed = value_from_signed;
+ break;
+ default:
+ break;
+@@ -2220,6 +2365,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+
+ check_reg_overflow(false_reg);
+ check_reg_overflow(true_reg);
++ if (is_range) {
++ if (__is_pointer_value(false, false_reg))
++ reset_reg_range_values(false_reg, 0);
++ if (__is_pointer_value(false, true_reg))
++ reset_reg_range_values(true_reg, 0);
++ }
+ }
+
+ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
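All the value_from_signed plumbing above guards one fact: a bound learned from a signed compare (BPF_JSGT/JSGE) constrains nothing about the unsigned view of the value, and vice versa, so bounds from mixed compares must not be combined. The entire bug class in a few lines:

#include <stdio.h>

int main(void)
{
	long long v = -1;

	if (v > -10)	/* signed lower bound (the BPF_JSGT case): holds */
		printf("%llu\n", (unsigned long long)v);
	/* prints 18446744073709551615 - the signed bound gave no usable
	 * unsigned upper bound, which is what the verifier must assume */
	return 0;
}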
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index dbb3d273d497..51ecc01b78ff 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9996,28 +9996,27 @@ SYSCALL_DEFINE5(perf_event_open,
+ goto err_context;
+
+ /*
+- * Do not allow to attach to a group in a different
+- * task or CPU context:
++ * Make sure we're both events for the same CPU;
++ * grouping events for different CPUs is broken, since
++ * you can never concurrently schedule them anyhow.
+ */
+- if (move_group) {
+- /*
+- * Make sure we're both on the same task, or both
+- * per-cpu events.
+- */
+- if (group_leader->ctx->task != ctx->task)
+- goto err_context;
++ if (group_leader->cpu != event->cpu)
++ goto err_context;
+
+- /*
+- * Make sure we're both events for the same CPU;
+- * grouping events for different CPUs is broken; since
+- * you can never concurrently schedule them anyhow.
+- */
+- if (group_leader->cpu != event->cpu)
+- goto err_context;
+- } else {
+- if (group_leader->ctx != ctx)
+- goto err_context;
+- }
++ /*
++ * Make sure we're both on the same task, or both
++ * per-CPU events.
++ */
++ if (group_leader->ctx->task != ctx->task)
++ goto err_context;
++
++ /*
++ * Do not allow to attach to a group in a different task
++ * or CPU context. If we're moving SW events, we'll fix
++ * this up later, so allow that.
++ */
++ if (!move_group && group_leader->ctx != ctx)
++ goto err_context;
+
+ /*
+ * Only a group leader can be exclusive or pinned
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 6440e0b70cad..9a2b4b4f13b4 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -802,6 +802,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ mm_init_cpumask(mm);
+ mm_init_aio(mm);
+ mm_init_owner(mm, p);
++ RCU_INIT_POINTER(mm->exe_file, NULL);
+ mmu_notifier_mm_init(mm);
+ clear_tlb_flush_pending(mm);
+ #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index d3f33020a06b..36cec054b8ae 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -203,6 +203,7 @@ struct timer_base {
+ bool migration_enabled;
+ bool nohz_active;
+ bool is_idle;
++ bool must_forward_clk;
+ DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+ struct hlist_head vectors[WHEEL_SIZE];
+ } ____cacheline_aligned;
+@@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
+
+ static inline void forward_timer_base(struct timer_base *base)
+ {
+- unsigned long jnow = READ_ONCE(jiffies);
++ unsigned long jnow;
+
+ /*
+- * We only forward the base when it's idle and we have a delta between
+- * base clock and jiffies.
++ * We only forward the base when we are idle or have just come out of
++ * idle (must_forward_clk logic), and have a delta between base clock
++ * and jiffies. In the common case, run_timers will take care of it.
+ */
+- if (!base->is_idle || (long) (jnow - base->clk) < 2)
++ if (likely(!base->must_forward_clk))
++ return;
++
++ jnow = READ_ONCE(jiffies);
++ base->must_forward_clk = base->is_idle;
++ if ((long)(jnow - base->clk) < 2)
+ return;
+
+ /*
+@@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ * same array bucket then just return:
+ */
+ if (timer_pending(timer)) {
++ /*
++ * The downside of this optimization is that it can result in
++ * larger granularity than you would get from adding a new
++ * timer with this expiry.
++ */
+ if (timer->expires == expires)
+ return 1;
+
+@@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ * dequeue/enqueue dance.
+ */
+ base = lock_timer_base(timer, &flags);
++ forward_timer_base(base);
+
+ clk = base->clk;
+ idx = calc_wheel_index(expires, clk);
+@@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ }
+ } else {
+ base = lock_timer_base(timer, &flags);
++ forward_timer_base(base);
+ }
+
+ ret = detach_if_pending(timer, base, false);
+@@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ spin_lock(&base->lock);
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | base->cpu);
++ forward_timer_base(base);
+ }
+ }
+
+- /* Try to forward a stale timer base clock */
+- forward_timer_base(base);
+-
+ timer->expires = expires;
+ /*
+ * If 'idx' was calculated above and the base time did not advance
+@@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | cpu);
+ }
++ forward_timer_base(base);
+
+ debug_activate(timer, timer->expires);
+ internal_add_timer(base, timer);
+@@ -1497,10 +1510,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+ if (!is_max_delta)
+ expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
+ /*
+- * If we expect to sleep more than a tick, mark the base idle:
++ * If we expect to sleep more than a tick, mark the base idle.
++ * Also the tick is stopped so any added timer must forward
++ * the base clk itself to keep granularity small. This idle
++ * logic is only maintained for the BASE_STD base, deferrable
++ * timers may still see large granularity skew (by design).
+ */
+- if ((expires - basem) > TICK_NSEC)
++ if ((expires - basem) > TICK_NSEC) {
++ base->must_forward_clk = true;
+ base->is_idle = true;
++ }
+ }
+ spin_unlock(&base->lock);
+
+@@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+ {
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
++ /*
++ * must_forward_clk must be cleared before running timers so that any
++ * timer functions that call mod_timer will not try to forward the
++ * base. Idle tracking / clock forwarding logic is only used with
++ * BASE_STD timers.
++ *
++ * The deferrable base does not do idle tracking at all, so we do
++ * not forward it. This can result in very large variations in
++ * granularity for deferrable timers, but they can be deferred for
++ * long periods due to idle.
++ */
++ base->must_forward_clk = false;
++
+ __run_timers(base);
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+ __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 460a031c77e5..d521b301dee9 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
+ fmt_cnt++;
+ }
+
+- return __trace_printk(1/* fake ip will not be printed */, fmt,
+- mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
+- mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
+- mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
++/* Horrid workaround for getting va_list handling working with different
++ * argument type combinations generically for 32 and 64 bit archs.
++ */
++#define __BPF_TP_EMIT() __BPF_ARG3_TP()
++#define __BPF_TP(...) \
++ __trace_printk(1 /* Fake ip will not be printed. */, \
++ fmt, ##__VA_ARGS__)
++
++#define __BPF_ARG1_TP(...) \
++ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
++ ? __BPF_TP(arg1, ##__VA_ARGS__) \
++ : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
++ ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
++ : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
++
++#define __BPF_ARG2_TP(...) \
++ ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
++ ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
++ : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
++ ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
++ : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
++
++#define __BPF_ARG3_TP(...) \
++ ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
++ ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
++ : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
++ ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
++ : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
++
++ return __BPF_TP_EMIT();
+ }
+
+ static const struct bpf_func_proto bpf_trace_printk_proto = {
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 28e980d2851b..a2bbce575e88 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -878,6 +878,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
+
+ function_profile_call(trace->func, 0, NULL, NULL);
+
++ /* If function graph is shutting down, ret_stack can be NULL */
++ if (!current->ret_stack)
++ return 0;
++
+ if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
+ current->ret_stack[index].subtime = 0;
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 4ae268e687fe..912f62df0279 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
+ * the page that was allocated, with the read page of the buffer.
+ *
+ * Returns:
+- * The page allocated, or NULL on error.
++ * The page allocated, or ERR_PTR
+ */
+ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+ {
+- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
++ struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_data_page *bpage = NULL;
+ unsigned long flags;
+ struct page *page;
+
++ if (!cpumask_test_cpu(cpu, buffer->cpumask))
++ return ERR_PTR(-ENODEV);
++
++ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ arch_spin_lock(&cpu_buffer->lock);
+
+@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_NORETRY, 0);
+ if (!page)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ bpage = page_address(page);
+
+@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
+ *
+ * for example:
+ * rpage = ring_buffer_alloc_read_page(buffer, cpu);
+- * if (!rpage)
+- * return error;
++ * if (IS_ERR(rpage))
++ * return PTR_ERR(rpage);
+ * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
+ * if (ret >= 0)
+ * process_page(rpage, ret);
+diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
+index 9fbcaf567886..68ee79afe31c 100644
+--- a/kernel/trace/ring_buffer_benchmark.c
++++ b/kernel/trace/ring_buffer_benchmark.c
+@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
+ int i;
+
+ bpage = ring_buffer_alloc_read_page(buffer, cpu);
+- if (!bpage)
++ if (IS_ERR(bpage))
+ return EVENT_DROPPED;
+
+ ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
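Returning ERR_PTR() instead of NULL lets ring_buffer_alloc_read_page() report -ENODEV (invalid cpu) and -ENOMEM distinctly, which is why every caller above grows an IS_ERR()/PTR_ERR() pair. A compact userspace model of the kernel convention, which folds small negative errno values into the top of the address range:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *alloc_read_page(int oom)
{
	return oom ? ERR_PTR(-12) : (void *)0x1000;	/* -12 is -ENOMEM */
}

int main(void)
{
	void *page = alloc_read_page(1);

	if (IS_ERR(page)) {		/* replaces the old !page test */
		printf("failed: %ld\n", PTR_ERR(page));
		return 1;
	}
	return 0;
}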
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5764318357de..749a82c6a832 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6403,7 +6403,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+ {
+ struct ftrace_buffer_info *info = filp->private_data;
+ struct trace_iterator *iter = &info->iter;
+- ssize_t ret;
++ ssize_t ret = 0;
+ ssize_t size;
+
+ if (!count)
+@@ -6417,10 +6417,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+ if (!info->spare) {
+ info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+ iter->cpu_file);
+- info->spare_cpu = iter->cpu_file;
++ if (IS_ERR(info->spare)) {
++ ret = PTR_ERR(info->spare);
++ info->spare = NULL;
++ } else {
++ info->spare_cpu = iter->cpu_file;
++ }
+ }
+ if (!info->spare)
+- return -ENOMEM;
++ return ret;
+
+ /* Do we have previous read data to read? */
+ if (info->read < PAGE_SIZE)
+@@ -6595,8 +6600,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ ref->ref = 1;
+ ref->buffer = iter->trace_buffer->buffer;
+ ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
+- if (!ref->page) {
+- ret = -ENOMEM;
++ if (IS_ERR(ref->page)) {
++ ret = PTR_ERR(ref->page);
++ ref->page = NULL;
+ kfree(ref);
+ break;
+ }
+@@ -8110,6 +8116,7 @@ __init static int tracer_alloc_buffers(void)
+ if (ret < 0)
+ goto out_free_cpumask;
+ /* Used for event triggers */
++ ret = -ENOMEM;
+ temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
+ if (!temp_buffer)
+ goto out_rm_hp_state;
+@@ -8224,4 +8231,4 @@ __init static int clear_boot_tracer(void)
+ }
+
+ fs_initcall(tracer_init_tracefs);
+-late_initcall(clear_boot_tracer);
++late_initcall_sync(clear_boot_tracer);
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 59a411ff60c7..181e139a8057 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
+ if (err && set_str)
+ append_filter_err(ps, filter);
+ }
++ if (err && !set_str) {
++ free_event_filter(filter);
++ filter = NULL;
++ }
+ create_filter_finish(ps);
+
+ *filterp = filter;
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index 0a689bbb78ef..305039b122fa 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
+ if (!a)
+ return;
+
+- if (!a->pages) {
+- kfree(a);
+- return;
+- }
++ if (!a->pages)
++ goto free;
+
+ for (i = 0; i < a->n_pages; i++) {
+ if (!a->pages[i])
+ break;
+ free_page((unsigned long)a->pages[i]);
+ }
++
++ kfree(a->pages);
++
++ free:
++ kfree(a);
+ }
+
+ struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 75d2cffbe61d..fc6bfbe19a16 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
+ pte_offset_map_lock(mm, pmd, addr, &ptl);
+ goto out;
+ }
+- put_page(page);
+ unlock_page(page);
++ put_page(page);
+ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte--;
+ addr -= PAGE_SIZE;
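The swap matters because put_page() may drop the final reference and free the page on the spot, while unlock_page() still touches page flags; the lock must therefore be released while our reference keeps the page alive. The safe ordering in general:

	unlock_page(page);	/* 1: drop PG_locked; our ref still pins the page */
	put_page(page);		/* 2: drop the ref; the page may be freed now */
	/* 'page' must not be touched past this point */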
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 7087d5578866..43d0919e29f3 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -302,7 +302,7 @@ void __init memblock_discard(void)
+ __memblock_free_late(addr, size);
+ }
+
+- if (memblock.memory.regions == memblock_memory_init_regions) {
++ if (memblock.memory.regions != memblock_memory_init_regions) {
+ addr = __pa(memblock.memory.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.memory.max);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4d16ef9d42a9..f553b3a6eca8 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -66,6 +66,7 @@
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
+ #include <linux/ftrace.h>
++#include <linux/nmi.h>
+
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -2495,9 +2496,14 @@ void drain_all_pages(struct zone *zone)
+
+ #ifdef CONFIG_HIBERNATION
+
++/*
++ * Touch the watchdog for every WD_PAGE_COUNT pages.
++ */
++#define WD_PAGE_COUNT (128*1024)
++
+ void mark_free_pages(struct zone *zone)
+ {
+- unsigned long pfn, max_zone_pfn;
++ unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
+ unsigned long flags;
+ unsigned int order, t;
+ struct page *page;
+@@ -2512,6 +2518,11 @@ void mark_free_pages(struct zone *zone)
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+
++ if (!--page_count) {
++ touch_nmi_watchdog();
++ page_count = WD_PAGE_COUNT;
++ }
++
+ if (page_zone(page) != zone)
+ continue;
+
+@@ -2525,8 +2536,13 @@ void mark_free_pages(struct zone *zone)
+ unsigned long i;
+
+ pfn = page_to_pfn(page);
+- for (i = 0; i < (1UL << order); i++)
++ for (i = 0; i < (1UL << order); i++) {
++ if (!--page_count) {
++ touch_nmi_watchdog();
++ page_count = WD_PAGE_COUNT;
++ }
+ swsusp_set_page_free(pfn_to_page(pfn + i));
++ }
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 1183e898743b..0474c7a73cfa 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3964,7 +3964,7 @@ int __init shmem_init(void)
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+- if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
++ if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
+ SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
+ else
+ shmem_huge = 0; /* just in case it was patched */
+@@ -4025,7 +4025,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
+ return -EINVAL;
+
+ shmem_huge = huge;
+- if (shmem_huge < SHMEM_HUGE_DENY)
++ if (shmem_huge > SHMEM_HUGE_DENY)
+ SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
+ return count;
+ }
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index fbf251fef70f..4d6b94d7ce5f 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -484,16 +484,16 @@ static int bnep_session(void *arg)
+ struct net_device *dev = s->dev;
+ struct sock *sk = s->sock->sk;
+ struct sk_buff *skb;
+- wait_queue_t wait;
++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ BT_DBG("");
+
+ set_user_nice(current, -15);
+
+- init_waitqueue_entry(&wait, current);
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (1) {
+- set_current_state(TASK_INTERRUPTIBLE);
++ /* Ensure session->terminate is updated */
++ smp_mb__before_atomic();
+
+ if (atomic_read(&s->terminate))
+ break;
+@@ -515,9 +515,8 @@ static int bnep_session(void *arg)
+ break;
+ netif_wake_queue(dev);
+
+- schedule();
++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
+- __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ /* Cleanup session */
+@@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
+ s = __bnep_get_session(req->dst);
+ if (s) {
+ atomic_inc(&s->terminate);
+- wake_up_process(s->task);
++ wake_up_interruptible(sk_sleep(s->sock->sk));
+ } else
+ err = -ENOENT;
+
+diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
+index 9e59b6654126..1152ce34dad4 100644
+--- a/net/bluetooth/cmtp/core.c
++++ b/net/bluetooth/cmtp/core.c
+@@ -280,16 +280,16 @@ static int cmtp_session(void *arg)
+ struct cmtp_session *session = arg;
+ struct sock *sk = session->sock->sk;
+ struct sk_buff *skb;
+- wait_queue_t wait;
++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ BT_DBG("session %p", session);
+
+ set_user_nice(current, -15);
+
+- init_waitqueue_entry(&wait, current);
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (1) {
+- set_current_state(TASK_INTERRUPTIBLE);
++ /* Ensure session->terminate is updated */
++ smp_mb__before_atomic();
+
+ if (atomic_read(&session->terminate))
+ break;
+@@ -306,9 +306,8 @@ static int cmtp_session(void *arg)
+
+ cmtp_process_transmit(session);
+
+- schedule();
++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
+- __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ down_write(&cmtp_session_sem);
+@@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
+ err = cmtp_attach_device(session);
+ if (err < 0) {
+ atomic_inc(&session->terminate);
+- wake_up_process(session->task);
++ wake_up_interruptible(sk_sleep(session->sock->sk));
+ up_write(&cmtp_session_sem);
+ return err;
+ }
+@@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
+
+ /* Stop session thread */
+ atomic_inc(&session->terminate);
+- wake_up_process(session->task);
++
++ /* Ensure session->terminate is updated */
++ smp_mb__after_atomic();
++
++ wake_up_interruptible(sk_sleep(session->sock->sk));
+ } else
+ err = -ENOENT;
+
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 0bec4588c3c8..1fc076420d1e 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -36,6 +36,7 @@
+ #define VERSION "1.2"
+
+ static DECLARE_RWSEM(hidp_session_sem);
++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
+ static LIST_HEAD(hidp_session_list);
+
+ static unsigned char hidp_keycode[256] = {
+@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
+ * Wake up session thread and notify it to stop. This is asynchronous and
+ * returns immediately. Call this whenever a runtime error occurs and you want
+ * the session to stop.
+- * Note: wake_up_process() performs any necessary memory-barriers for us.
++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
+ */
+ static void hidp_session_terminate(struct hidp_session *session)
+ {
+ atomic_inc(&session->terminate);
+- wake_up_process(session->task);
++ wake_up_interruptible(&hidp_session_wq);
+ }
+
+ /*
+@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session)
+ struct sock *ctrl_sk = session->ctrl_sock->sk;
+ struct sock *intr_sk = session->intr_sock->sk;
+ struct sk_buff *skb;
++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
++ add_wait_queue(&hidp_session_wq, &wait);
+ for (;;) {
+ /*
+ * This thread can be woken up two ways:
+@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session)
+ * session->terminate flag and wakes this thread up.
+ * - Via modifying the socket state of ctrl/intr_sock. This
+ * thread is woken up by ->sk_state_changed().
+- *
+- * Note: set_current_state() performs any necessary
+- * memory-barriers for us.
+ */
+- set_current_state(TASK_INTERRUPTIBLE);
+
++ /* Ensure session->terminate is updated */
++ smp_mb__before_atomic();
+ if (atomic_read(&session->terminate))
+ break;
+
+@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session)
+ hidp_process_transmit(session, &session->ctrl_transmit,
+ session->ctrl_sock);
+
+- schedule();
++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
++ remove_wait_queue(&hidp_session_wq, &wait);
+
+ atomic_inc(&session->terminate);
+- set_current_state(TASK_RUNNING);
++
++ /* Ensure session->terminate is updated */
++ smp_mb__after_atomic();
++}
++
++static int hidp_session_wake_function(wait_queue_t *wait,
++ unsigned int mode,
++ int sync, void *key)
++{
++ wake_up_interruptible(&hidp_session_wq);
++ return false;
+ }
+
+ /*
+@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session)
+ static int hidp_session_thread(void *arg)
+ {
+ struct hidp_session *session = arg;
+- wait_queue_t ctrl_wait, intr_wait;
++ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
++ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
+
+ BT_DBG("session %p", session);
+
+@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg)
+ set_user_nice(current, -15);
+ hidp_set_timer(session);
+
+- init_waitqueue_entry(&ctrl_wait, current);
+- init_waitqueue_entry(&intr_wait, current);
+ add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
+ add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
+ /* This memory barrier is paired with wq_has_sleeper(). See
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 9fe25bf63296..b68168fcc06a 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -24,6 +24,7 @@
+ #include <net/checksum.h>
+
+ #include <net/inet_sock.h>
++#include <net/inet_common.h>
+ #include <net/sock.h>
+ #include <net/xfrm.h>
+
+@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
+
+ EXPORT_SYMBOL_GPL(dccp_packet_name);
+
++static void dccp_sk_destruct(struct sock *sk)
++{
++ struct dccp_sock *dp = dccp_sk(sk);
++
++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
++ dp->dccps_hc_tx_ccid = NULL;
++ inet_sock_destruct(sk);
++}
++
+ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
+ {
+ struct dccp_sock *dp = dccp_sk(sk);
+@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
+ icsk->icsk_syn_retries = sysctl_dccp_request_retries;
+ sk->sk_state = DCCP_CLOSED;
+ sk->sk_write_space = dccp_write_space;
++ sk->sk_destruct = dccp_sk_destruct;
+ icsk->icsk_sync_mss = dccp_sync_mss;
+ dp->dccps_mss_cache = 536;
+ dp->dccps_rate_last = jiffies;
+@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
+ {
+ struct dccp_sock *dp = dccp_sk(sk);
+
+- /*
+- * DCCP doesn't use sk_write_queue, just sk_send_head
+- * for retransmissions
+- */
++ __skb_queue_purge(&sk->sk_write_queue);
+ if (sk->sk_send_head != NULL) {
+ kfree_skb(sk->sk_send_head);
+ sk->sk_send_head = NULL;
+@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
+ dp->dccps_hc_rx_ackvec = NULL;
+ }
+ ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
++ dp->dccps_hc_rx_ccid = NULL;
+
+ /* clean up feature negotiation state */
+ dccp_feat_list_purge(&dp->dccps_featneg);
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index ce7bc2e5175a..ac9a8fbbacfd 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1033,15 +1033,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
+ fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+ if (!fi)
+ goto failure;
+- fib_info_cnt++;
+ if (cfg->fc_mx) {
+ fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
+- if (!fi->fib_metrics)
+- goto failure;
++ if (unlikely(!fi->fib_metrics)) {
++ kfree(fi);
++ return ERR_PTR(err);
++ }
+ atomic_set(&fi->fib_metrics->refcnt, 1);
+- } else
++ } else {
+ fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
+-
++ }
++ fib_info_cnt++;
+ fi->fib_net = net;
+ fi->fib_protocol = cfg->fc_protocol;
+ fi->fib_scope = cfg->fc_scope;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 3db1adb6b7a0..abdbe79ee175 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1007,10 +1007,18 @@ int igmp_rcv(struct sk_buff *skb)
+ {
+ /* This basically follows the spec line by line -- see RFC1112 */
+ struct igmphdr *ih;
+- struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
++ struct net_device *dev = skb->dev;
++ struct in_device *in_dev;
+ int len = skb->len;
+ bool dropped = true;
+
++ if (netif_is_l3_master(dev)) {
++ dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
++ if (!dev)
++ goto drop;
++ }
++
++ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ goto drop;
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6883b3d4ba8f..22ba873546c3 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1268,7 +1268,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
+ if (mtu)
+ return mtu;
+
+- mtu = dst->dev->mtu;
++ mtu = READ_ONCE(dst->dev->mtu);
+
+ if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+ if (rt->rt_uses_gateway && mtu > 576)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 57bcae81fe42..fbaac4423a99 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3007,8 +3007,7 @@ void tcp_rearm_rto(struct sock *sk)
+ /* delta may not be positive if the socket is locked
+ * when the retrans timer fires and is rescheduled.
+ */
+- if (delta > 0)
+- rto = delta;
++ rto = max(delta, 1);
+ }
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+ TCP_RTO_MAX);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index e4e9f752ebbf..cd8dd8c4e819 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -912,6 +912,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+ }
+ nsiblings = iter->rt6i_nsiblings;
+ fib6_purge_rt(iter, fn, info->nl_net);
++ if (fn->rr_ptr == iter)
++ fn->rr_ptr = NULL;
+ rt6_release(iter);
+
+ if (nsiblings) {
+@@ -924,6 +926,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+ if (rt6_qualify_for_ecmp(iter)) {
+ *ins = iter->dst.rt6_next;
+ fib6_purge_rt(iter, fn, info->nl_net);
++ if (fn->rr_ptr == iter)
++ fn->rr_ptr = NULL;
+ rt6_release(iter);
+ nsiblings--;
+ } else {
+@@ -1012,7 +1016,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ /* Create subtree root node */
+ sfn = node_alloc();
+ if (!sfn)
+- goto st_failure;
++ goto failure;
+
+ sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
+ atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
+@@ -1028,12 +1032,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+
+ if (IS_ERR(sn)) {
+ /* If it is failed, discard just allocated
+- root, and then (in st_failure) stale node
++ root, and then (in failure) stale node
+ in main tree.
+ */
+ node_free(sfn);
+ err = PTR_ERR(sn);
+- goto st_failure;
++ goto failure;
+ }
+
+ /* Now link new subtree to main tree */
+@@ -1047,7 +1051,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+
+ if (IS_ERR(sn)) {
+ err = PTR_ERR(sn);
+- goto st_failure;
++ goto failure;
+ }
+ }
+
+@@ -1089,22 +1093,22 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ atomic_inc(&pn->leaf->rt6i_ref);
+ }
+ #endif
+- if (!(rt->dst.flags & DST_NOCACHE))
+- dst_free(&rt->dst);
++ goto failure;
+ }
+ return err;
+
+-#ifdef CONFIG_IPV6_SUBTREES
+- /* Subtree creation failed, probably main tree node
+- is orphan. If it is, shoot it.
++failure:
++ /* fn->leaf could be NULL if fn is an intermediate node and we
++ * failed to add the new route to it in both subtree creation
++ * failure and fib6_add_rt2node() failure case.
++ * In both cases, fib6_repair_tree() should be called to fix
++ * fn->leaf.
+ */
+-st_failure:
+ if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
+ fib6_repair_tree(info->nl_net, fn);
+ if (!(rt->dst.flags & DST_NOCACHE))
+ dst_free(&rt->dst);
+ return err;
+-#endif
+ }
+
+ /*
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 8d77ad5cadaf..4cadc29f547c 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -2225,7 +2225,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
+ {
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+- struct irda_device_list list;
++ struct irda_device_list list = { 0 };
+ struct irda_device_info *discoveries;
+ struct irda_ias_set * ias_opt; /* IAS get/query params */
+ struct ias_object * ias_obj; /* Object in IAS */
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index b1432b668033..166e32c93038 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
+ #define BROADCAST_ONE 1
+ #define BROADCAST_REGISTERED 2
+ #define BROADCAST_PROMISC_ONLY 4
+-static int pfkey_broadcast(struct sk_buff *skb,
++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
+ int broadcast_flags, struct sock *one_sk,
+ struct net *net)
+ {
+@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
+ rcu_read_unlock();
+
+ if (one_sk != NULL)
+- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
++ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+
+ kfree_skb(skb2);
+ kfree_skb(skb);
+@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
+ hdr = (struct sadb_msg *) pfk->dump.skb->data;
+ hdr->sadb_msg_seq = 0;
+ hdr->sadb_msg_errno = rc;
+- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ &pfk->sk, sock_net(&pfk->sk));
+ pfk->dump.skb = NULL;
+ }
+@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
+ sizeof(uint64_t));
+
+- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
+
+ return 0;
+ }
+@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
+
+ xfrm_state_put(x);
+
+- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
++ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
+
+ return 0;
+ }
+@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
+ hdr->sadb_msg_seq = c->seq;
+ hdr->sadb_msg_pid = c->portid;
+
+- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
+
+ return 0;
+ }
+@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
+ out_hdr->sadb_msg_reserved = 0;
+ out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
+ out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
+- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+
+ return 0;
+ }
+@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
+ return -ENOBUFS;
+ }
+
+- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
+-
++ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
++ sock_net(sk));
+ return 0;
+ }
+
+@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+
+- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
++ sock_net(sk));
+ }
+
+ static int key_notify_sa_flush(const struct km_event *c)
+@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c)
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+ hdr->sadb_msg_reserved = 0;
+
+- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+
+ return 0;
+ }
+@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
+ out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
+
+ if (pfk->dump.skb)
+- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ &pfk->sk, sock_net(&pfk->sk));
+ pfk->dump.skb = out_skb;
+
+@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
+ new_hdr->sadb_msg_errno = 0;
+ }
+
+- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+ return 0;
+ }
+
+@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
+ out_hdr->sadb_msg_errno = 0;
+ out_hdr->sadb_msg_seq = c->seq;
+ out_hdr->sadb_msg_pid = c->portid;
+- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
+ return 0;
+
+ }
+@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
+ out_hdr->sadb_msg_errno = 0;
+ out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
+ out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
+- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
+ err = 0;
+
+ out:
+@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
+ out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
+
+ if (pfk->dump.skb)
+- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ &pfk->sk, sock_net(&pfk->sk));
+ pfk->dump.skb = out_skb;
+
+@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+ hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+ hdr->sadb_msg_reserved = 0;
+- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
++ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ return 0;
+
+ }
+@@ -2816,7 +2817,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
+ void *ext_hdrs[SADB_EXT_MAX];
+ int err;
+
+- pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
++ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+ BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+
+ memset(ext_hdrs, 0, sizeof(ext_hdrs));
+@@ -3038,7 +3039,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
+ out_hdr->sadb_msg_seq = 0;
+ out_hdr->sadb_msg_pid = 0;
+
+- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
++ xs_net(x));
+ return 0;
+ }
+
+@@ -3228,7 +3230,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
+ xfrm_ctx->ctx_len);
+ }
+
+- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
++ xs_net(x));
+ }
+
+ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
+@@ -3426,7 +3429,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
+ n_port->sadb_x_nat_t_port_port = sport;
+ n_port->sadb_x_nat_t_port_reserved = 0;
+
+- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
++ xs_net(x));
+ }
+
+ #ifdef CONFIG_NET_KEY_MIGRATE
+@@ -3618,7 +3622,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ }
+
+ /* broadcast migrate message to sockets */
+- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
+
+ return 0;
+
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index e03d16ed550d..899c2c36da13 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ h = nf_ct_expect_dst_hash(net, &expect->tuple);
+ hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
+ if (expect_matches(i, expect)) {
+- if (nf_ct_remove_expect(expect))
++ if (nf_ct_remove_expect(i))
+ break;
+ } else if (expect_clash(i, expect)) {
+ ret = -EBUSY;
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 6c72922d20ca..b93a46ef812d 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -222,20 +222,21 @@ find_appropriate_src(struct net *net,
+ .tuple = tuple,
+ .zone = zone
+ };
+- struct rhlist_head *hl;
++ struct rhlist_head *hl, *h;
+
+ hl = rhltable_lookup(&nf_nat_bysource_table, &key,
+ nf_nat_bysource_params);
+- if (!hl)
+- return 0;
+
+- ct = container_of(hl, typeof(*ct), nat_bysource);
++ rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
++ nf_ct_invert_tuplepr(result,
++ &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++ result->dst = tuple->dst;
+
+- nf_ct_invert_tuplepr(result,
+- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+- result->dst = tuple->dst;
++ if (in_range(l3proto, l4proto, result, range))
++ return 1;
++ }
+
+- return in_range(l3proto, l4proto, result, range);
++ return 0;
+ }
+
+ /* For [FUTURE] fragmentation handling, we want the least-used
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 80f5ecf2c3d7..ff1f4ce6fba4 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -463,8 +463,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
+ if (msglen > skb->len)
+ msglen = skb->len;
+
+- if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+- skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
++ if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+ return;
+
+ err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
+@@ -491,7 +490,8 @@ static void nfnetlink_rcv(struct sk_buff *skb)
+ {
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+
+- if (nlh->nlmsg_len < NLMSG_HDRLEN ||
++ if (skb->len < NLMSG_HDRLEN ||
++ nlh->nlmsg_len < NLMSG_HDRLEN ||
+ skb->len < nlh->nlmsg_len)
+ return;
+
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index e4610676299b..a54a556fcdb5 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -1337,6 +1337,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ goto out;
+ }
+
++ OVS_CB(skb)->acts_origlen = acts->orig_len;
+ err = do_execute_actions(dp, skb, key,
+ acts->actions, acts->actions_len);
+
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 7b17da9a94a0..57ce10b6cf6b 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -381,7 +381,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
+ }
+
+ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
+- unsigned int hdrlen)
++ unsigned int hdrlen, int actions_attrlen)
+ {
+ size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
+@@ -398,7 +398,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
+
+ /* OVS_PACKET_ATTR_ACTIONS */
+ if (upcall_info->actions_len)
+- size += nla_total_size(upcall_info->actions_len);
++ size += nla_total_size(actions_attrlen);
+
+ /* OVS_PACKET_ATTR_MRU */
+ if (upcall_info->mru)
+@@ -465,7 +465,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
+ else
+ hlen = skb->len;
+
+- len = upcall_msg_size(upcall_info, hlen - cutlen);
++ len = upcall_msg_size(upcall_info, hlen - cutlen,
++ OVS_CB(skb)->acts_origlen);
+ user_skb = genlmsg_new(len, GFP_ATOMIC);
+ if (!user_skb) {
+ err = -ENOMEM;
+diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
+index da931bdef8a7..98a28f78aff2 100644
+--- a/net/openvswitch/datapath.h
++++ b/net/openvswitch/datapath.h
+@@ -98,12 +98,14 @@ struct datapath {
+ * @input_vport: The original vport packet came in on. This value is cached
+ * when a packet is received by OVS.
+ * @mru: The maximum received fragement size; 0 if the packet is not
++ * @acts_origlen: The netlink size of the flow actions applied to this skb.
+ * @cutlen: The number of bytes from the packet end to be removed.
+ * fragmented.
+ */
+ struct ovs_skb_cb {
+ struct vport *input_vport;
+ u16 mru;
++ u16 acts_origlen;
+ u32 cutlen;
+ };
+ #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index d516ba8178b8..541707802a23 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -41,6 +41,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+ {
+ struct xt_tgchk_param par;
+ struct xt_target *target;
++ struct ipt_entry e = {};
+ int ret = 0;
+
+ target = xt_request_find_target(AF_INET, t->u.user.name,
+@@ -52,6 +53,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+ memset(&par, 0, sizeof(par));
+ par.net = net;
+ par.table = table;
++ par.entryinfo = &e;
+ par.target = target;
+ par.targinfo = t->data;
+ par.hook_mask = hook;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index cfdbfa18a95e..fdbbdfd8e9a8 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+ void qdisc_hash_add(struct Qdisc *q, bool invisible)
+ {
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+- struct Qdisc *root = qdisc_dev(q)->qdisc;
+-
+- WARN_ON_ONCE(root == &noop_qdisc);
+ ASSERT_RTNL();
+ hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
+ if (invisible)
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 332d94be6e1c..22451a9eb89d 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -435,6 +435,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ qdisc_drop(head, sch, to_free);
+
+ slot_queue_add(slot, skb);
++ qdisc_tree_reduce_backlog(sch, 0, delta);
+ return NET_XMIT_CN;
+ }
+
+@@ -466,8 +467,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+ /* Return Congestion Notification only if we dropped a packet
+ * from this flow.
+ */
+- if (qlen != slot->qlen)
++ if (qlen != slot->qlen) {
++ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
+ return NET_XMIT_CN;
++ }
+
+ /* As we dropped a packet, better let upper stack know this */
+ qdisc_tree_reduce_backlog(sch, 1, dropped);
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index f5b45b8b8b16..0de5f5f8ddbc 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ {
+ addr->sa.sa_family = AF_INET6;
+ addr->v6.sin6_port = port;
++ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_addr = *saddr;
++ addr->v6.sin6_scope_id = 0;
+ }
+
+ /* Compare addresses exactly.
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 2b720fa35c4f..e18500151236 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk)
+ dprintk("svc: socket %p(inet %p), busy=%d\n",
+ svsk, sk,
+ test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
++
++ /* Refer to svc_setup_socket() for details. */
++ rmb();
+ svsk->sk_odata(sk);
+ if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
+ svc_xprt_enqueue(&svsk->sk_xprt);
+@@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk)
+ if (svsk) {
+ dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
+ svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
++
++ /* Refer to svc_setup_socket() for details. */
++ rmb();
+ svsk->sk_owspace(sk);
+ svc_xprt_enqueue(&svsk->sk_xprt);
+ }
+@@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
+ dprintk("svc: socket %p TCP (listen) state change %d\n",
+ sk, sk->sk_state);
+
+- if (svsk)
++ if (svsk) {
++ /* Refer to svc_setup_socket() for details. */
++ rmb();
+ svsk->sk_odata(sk);
++ }
++
+ /*
+ * This callback may called twice when a new connection
+ * is established as a child socket inherits everything
+@@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk)
+ if (!svsk)
+ printk("svc: socket %p: no user data\n", sk);
+ else {
++ /* Refer to svc_setup_socket() for details. */
++ rmb();
+ svsk->sk_ostate(sk);
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+@@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
+ return ERR_PTR(err);
+ }
+
+- inet->sk_user_data = svsk;
+ svsk->sk_sock = sock;
+ svsk->sk_sk = inet;
+ svsk->sk_ostate = inet->sk_state_change;
+ svsk->sk_odata = inet->sk_data_ready;
+ svsk->sk_owspace = inet->sk_write_space;
++ /*
++ * This barrier is necessary in order to prevent race condition
++ * with svc_data_ready(), svc_listen_data_ready() and others
++ * when calling callbacks above.
++ */
++ wmb();
++ inet->sk_user_data = svsk;
+
+ /* Initialize the socket */
+ if (sock->type == SOCK_DGRAM)
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 9bfe886ab330..750949dfc1d7 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ arg = nlmsg_new(0, GFP_KERNEL);
+ if (!arg) {
+ kfree_skb(msg->rep);
++ msg->rep = NULL;
+ return -ENOMEM;
+ }
+
+ err = __tipc_nl_compat_dumpit(cmd, msg, arg);
+- if (err)
++ if (err) {
+ kfree_skb(msg->rep);
+-
++ msg->rep = NULL;
++ }
+ kfree_skb(arg);
+
+ return err;
+diff --git a/sound/core/control.c b/sound/core/control.c
+index c109b82eef4b..7b43b0f74b84 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1157,7 +1157,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
+ mutex_lock(&ue->card->user_ctl_lock);
+ change = ue->tlv_data_size != size;
+ if (!change)
+- change = memcmp(ue->tlv_data, new_data, size);
++ change = memcmp(ue->tlv_data, new_data, size) != 0;
+ kfree(ue->tlv_data);
+ ue->tlv_data = new_data;
+ ue->tlv_data_size = size;
+diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
+index f0e4d502d604..066b5df666f4 100644
+--- a/sound/firewire/iso-resources.c
++++ b/sound/firewire/iso-resources.c
+@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
+ */
+ void fw_iso_resources_free(struct fw_iso_resources *r)
+ {
+- struct fw_card *card = fw_parent_device(r->unit)->card;
++ struct fw_card *card;
+ int bandwidth, channel;
+
++ /* Not initialized. */
++ if (r->unit == NULL)
++ return;
++ card = fw_parent_device(r->unit)->card;
++
+ mutex_lock(&r->mutex);
+
+ if (r->allocated) {
+diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
+index bf779cfeef0d..59a270406353 100644
+--- a/sound/firewire/motu/motu.c
++++ b/sound/firewire/motu/motu.c
+@@ -128,6 +128,7 @@ static void do_registration(struct work_struct *work)
+ return;
+ error:
+ snd_motu_transaction_unregister(motu);
++ snd_motu_stream_destroy_duplex(motu);
+ snd_card_free(motu->card);
+ dev_info(&motu->unit->device,
+ "Sound card registration failed: %d\n", err);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 8c1289963c80..a81aacf684b2 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -947,6 +947,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 6a03f9697039..5d2a63248b1d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
+
+- /* Zoom R16/24 needs a tiny delay here, otherwise requests like
+- * get/set frequency return as failed despite actually succeeding.
++ /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
++ * otherwise requests like get/set frequency return as failed despite
++ * actually succeeding.
+ */
+- if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
++ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
++ chip->usb_id == USB_ID(0x046d, 0x0a46) ||
++ chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(1);
+ }
+diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
+index 13f5198ba0ee..b3c48fc6ea4b 100755
+--- a/tools/testing/selftests/ntb/ntb_test.sh
++++ b/tools/testing/selftests/ntb/ntb_test.sh
+@@ -326,6 +326,10 @@ function ntb_tool_tests()
+ link_test $LOCAL_TOOL $REMOTE_TOOL
+ link_test $REMOTE_TOOL $LOCAL_TOOL
+
++ #Ensure the link is up on both sides before continuing
++ write_file Y $LOCAL_TOOL/link_event
++ write_file Y $REMOTE_TOOL/link_event
++
+ for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
+ PT=$(basename $PEER_TRANS)
+ write_file $MW_SIZE $LOCAL_TOOL/$PT
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-09-07 22:45 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-09-07 22:45 UTC (permalink / raw
To: gentoo-commits
commit: b3689468077d82446d7542c21032fc93b7d30202
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 7 22:44:59 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 7 22:44:59 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b3689468
Linux patch 4.12.11
0000_README | 4 +
1010_linux-4.12.11.patch | 1009 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1013 insertions(+)
diff --git a/0000_README b/0000_README
index a64a189..dd06605 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch: 1009_linux-4.12.10.patch
From: http://www.kernel.org
Desc: Linux 4.12.10
+Patch: 1010_linux-4.12.11.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.11
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1010_linux-4.12.11.patch b/1010_linux-4.12.11.patch
new file mode 100644
index 0000000..b43b160
--- /dev/null
+++ b/1010_linux-4.12.11.patch
@@ -0,0 +1,1009 @@
+diff --git a/Makefile b/Makefile
+index 6889ec6a091d..e7b2b54b032c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
+index 4cb4b6d3452c..0bc66e1d3a7e 100644
+--- a/arch/alpha/include/asm/types.h
++++ b/arch/alpha/include/asm/types.h
+@@ -1,6 +1,6 @@
+ #ifndef _ALPHA_TYPES_H
+ #define _ALPHA_TYPES_H
+
+-#include <asm-generic/int-ll64.h>
++#include <uapi/asm/types.h>
+
+ #endif /* _ALPHA_TYPES_H */
+diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
+index 9fd3cd459777..8d1024d7be05 100644
+--- a/arch/alpha/include/uapi/asm/types.h
++++ b/arch/alpha/include/uapi/asm/types.h
+@@ -9,8 +9,18 @@
+ * need to be careful to avoid a name clashes.
+ */
+
+-#ifndef __KERNEL__
++/*
++ * This is here because we used to use l64 for alpha
++ * and we don't want to impact user mode with our change to ll64
++ * in the kernel.
++ *
++ * However, some user programs are fine with this. They can
++ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
++ */
++#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
+ #include <asm-generic/int-l64.h>
++#else
++#include <asm-generic/int-ll64.h>
+ #endif
+
+ #endif /* _UAPI_ALPHA_TYPES_H */
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 37b95dff0b07..3952d5ef8a7a 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -397,8 +397,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
+ * signal first. We do not need to release the mmap_sem because it
+ * would already be released in __lock_page_or_retry in mm/filemap.c.
+ */
+- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++ if (!user_mode(regs))
++ goto no_context;
+ return 0;
++ }
+
+ /*
+ * Major/minor page fault accounting is only done on the initial
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index 7afb0e2f07f4..48febf07e828 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -328,13 +328,13 @@ static inline unsigned type in##bwl##_p(int port) \
+ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+ { \
+ asm volatile("rep; outs" #bwl \
+- : "+S"(addr), "+c"(count) : "d"(port)); \
++ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ } \
+ \
+ static inline void ins##bwl(int port, void *addr, unsigned long count) \
+ { \
+ asm volatile("rep; ins" #bwl \
+- : "+D"(addr), "+c"(count) : "d"(port)); \
++ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ }
+
+ BUILDIO(b, b, char)
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 43839b00fe6c..903605dbc1a5 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -87,8 +87,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
+ }
+ sgl = sreq->tsg;
+ n = sg_nents(sgl);
+- for_each_sg(sgl, sg, n, i)
+- put_page(sg_page(sg));
++ for_each_sg(sgl, sg, n, i) {
++ struct page *page = sg_page(sg);
++
++ /* some SGs may not have a page mapped */
++ if (page && page_ref_count(page))
++ put_page(page);
++ }
+
+ kfree(sreq->tsg);
+ }
+diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
+index 8b3c04d625c3..4a45fa4890c0 100644
+--- a/crypto/chacha20_generic.c
++++ b/crypto/chacha20_generic.c
+@@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req)
+ crypto_chacha20_init(state, ctx, walk.iv);
+
+ while (walk.nbytes > 0) {
++ unsigned int nbytes = walk.nbytes;
++
++ if (nbytes < walk.total)
++ nbytes = round_down(nbytes, walk.stride);
++
+ chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+- walk.nbytes);
+- err = skcipher_walk_done(&walk, 0);
++ nbytes);
++ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index f802bcd94457..de30a822ccab 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1155,8 +1155,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
+ return -ENODEV;
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+- if (msg->size == 0)
+- return msg->size;
+
+ ret = nvkm_i2c_aux_acquire(aux);
+ if (ret)
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 06e564a9ccb2..70ab2eee9b04 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -3618,15 +3618,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ if (dcbe->type == DCB_OUTPUT_DP) {
++ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nvkm_i2c_aux *aux =
+ nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+ if (aux) {
+- nv_encoder->i2c = &nv_connector->aux.ddc;
++ if (disp->disp->oclass < GF110_DISP) {
++ /* HW has no support for address-only
++ * transactions, so we're required to
++ * use custom I2C-over-AUX code.
++ */
++ nv_encoder->i2c = &aux->i2c;
++ } else {
++ nv_encoder->i2c = &nv_connector->aux.ddc;
++ }
+ nv_encoder->aux = aux;
+ }
+
+ /*TODO: Use DP Info Table to check for support. */
+- if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
++ if (disp->disp->oclass >= GF110_DISP) {
+ ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
+ nv_connector->base.base.id,
+ &nv_encoder->dp.mstm);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+index 48f01e40b8fc..b768e66a472b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+@@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o
+
+ nvkm-y += nvkm/subdev/i2c/aux.o
+ nvkm-y += nvkm/subdev/i2c/auxg94.o
++nvkm-y += nvkm/subdev/i2c/auxgf119.o
+ nvkm-y += nvkm/subdev/i2c/auxgm200.o
+
+ nvkm-y += nvkm/subdev/i2c/anx9805.o
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+index d172e42dd228..4c1f547da463 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+@@ -117,6 +117,10 @@ int
+ nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
+ u32 addr, u8 *data, u8 *size)
+ {
++ if (!*size && !aux->func->address_only) {
++ AUX_ERR(aux, "address-only transaction dropped");
++ return -ENOSYS;
++ }
+ return aux->func->xfer(aux, retry, type, addr, data, size);
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+index 27a4a39c87f0..9587ab456d9e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+@@ -3,6 +3,7 @@
+ #include "pad.h"
+
+ struct nvkm_i2c_aux_func {
++ bool address_only;
+ int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
+ u32 addr, u8 *data, u8 *size);
+ int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+@@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+ int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+ u32 addr, u8 *data, u8 *size);
+
++int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
++ int, u8, struct nvkm_i2c_aux **);
++
+ int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
++int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
++int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+ int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+
+ #define AUX_MSG(b,l,f,a...) do { \
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+index ab8cb196c34e..c8ab1b5741a3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+@@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
+ return 0;
+ }
+
+-static int
++int
+ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+ u8 type, u32 addr, u8 *data, u8 *size)
+ {
+@@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+ }
+
+ ctrl = nvkm_rd32(device, 0x00e4e4 + base);
+- ctrl &= ~0x0001f0ff;
++ ctrl &= ~0x0001f1ff;
+ ctrl |= type << 12;
+- ctrl |= *size - 1;
++ ctrl |= (*size ? (*size - 1) : 0x00000100);
+ nvkm_wr32(device, 0x00e4e0 + base, addr);
+
+ /* (maybe) retry transaction a number of times on failure... */
+@@ -160,14 +160,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+ return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+ }
+
+-static const struct nvkm_i2c_aux_func
+-g94_i2c_aux_func = {
+- .xfer = g94_i2c_aux_xfer,
+-};
+-
+ int
+-g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+- struct nvkm_i2c_aux **paux)
++g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
++ struct nvkm_i2c_pad *pad, int index, u8 drive,
++ struct nvkm_i2c_aux **paux)
+ {
+ struct g94_i2c_aux *aux;
+
+@@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ return -ENOMEM;
+ *paux = &aux->base;
+
+- nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
++ nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
+ aux->ch = drive;
+ aux->base.intr = 1 << aux->ch;
+ return 0;
+ }
++
++static const struct nvkm_i2c_aux_func
++g94_i2c_aux = {
++ .xfer = g94_i2c_aux_xfer,
++};
++
++int
++g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
++ struct nvkm_i2c_aux **paux)
++{
++ return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
++}
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
+new file mode 100644
+index 000000000000..dab40cd8fe3a
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2017 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "aux.h"
++
++static const struct nvkm_i2c_aux_func
++gf119_i2c_aux = {
++ .address_only = true,
++ .xfer = g94_i2c_aux_xfer,
++};
++
++int
++gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
++ struct nvkm_i2c_aux **paux)
++{
++ return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
++}
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+index ee091fa79628..7ef60895f43a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+@@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+ }
+
+ ctrl = nvkm_rd32(device, 0x00d954 + base);
+- ctrl &= ~0x0001f0ff;
++ ctrl &= ~0x0001f1ff;
+ ctrl |= type << 12;
+- ctrl |= *size - 1;
++ ctrl |= (*size ? (*size - 1) : 0x00000100);
+ nvkm_wr32(device, 0x00d950 + base, addr);
+
+ /* (maybe) retry transaction a number of times on failure... */
+@@ -162,6 +162,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+
+ static const struct nvkm_i2c_aux_func
+ gm200_i2c_aux_func = {
++ .address_only = true,
+ .xfer = gm200_i2c_aux_xfer,
+ };
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+index d53212f1aa52..3bc4d0310076 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+@@ -28,7 +28,7 @@
+ static const struct nvkm_i2c_pad_func
+ gf119_i2c_pad_s_func = {
+ .bus_new_4 = gf119_i2c_bus_new,
+- .aux_new_6 = g94_i2c_aux_new,
++ .aux_new_6 = gf119_i2c_aux_new,
+ .mode = g94_i2c_pad_mode,
+ };
+
+@@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+ static const struct nvkm_i2c_pad_func
+ gf119_i2c_pad_x_func = {
+ .bus_new_4 = gf119_i2c_bus_new,
+- .aux_new_6 = g94_i2c_aux_new,
++ .aux_new_6 = gf119_i2c_aux_new,
+ };
+
+ int
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index eeddc1e48409..871599826773 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -615,7 +615,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ } else {
+ pr_err("Failed to fill pool (%p)\n", pool);
+ /* If we have any pages left put them to the pool. */
+- list_for_each_entry(p, &pool->list, lru) {
++ list_for_each_entry(p, &new_pages, lru) {
+ ++cpages;
+ }
+ list_splice(&new_pages, &pool->list);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index f8a977f86ec7..a17478028649 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1567,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
+ }
+
+
++/**
++ * vmw_kms_atomic_commit - Perform an atomic state commit
++ *
++ * @dev: DRM device
++ * @state: the driver state object
++ * @nonblock: Whether nonblocking behaviour is requested
++ *
++ * This is a simple wrapper around drm_atomic_helper_commit() for
++ * us to clear the nonblocking value.
++ *
++ * Nonblocking commits currently cause synchronization issues
++ * for vmwgfx.
++ *
++ * RETURNS
++ * Zero for success or negative error code on failure.
++ */
++int vmw_kms_atomic_commit(struct drm_device *dev,
++ struct drm_atomic_state *state,
++ bool nonblock)
++{
++ return drm_atomic_helper_commit(dev, state, false);
++}
++
++
+ static const struct drm_mode_config_funcs vmw_kms_funcs = {
+ .fb_create = vmw_kms_fb_create,
+ .atomic_check = vmw_kms_atomic_check_modeset,
+- .atomic_commit = drm_atomic_helper_commit,
++ .atomic_commit = vmw_kms_atomic_commit,
+ };
+
+ static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index e98e44e584a4..22ffcb73c185 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -341,8 +341,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+- data->block[0] = desc->rxbytes;
++ if (desc->rxbytes != dma_buffer[0] + 1)
++ return -EMSGSIZE;
++
++ memcpy(data->block, dma_buffer, desc->rxbytes);
+ break;
+ }
+ return 0;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index def96cd2479b..68894f7ccb54 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -389,10 +389,21 @@ static const u8 xboxone_hori_init[] = {
+ };
+
+ /*
+- * A rumble packet is required for some PowerA pads to start
++ * A specific rumble packet is required for some PowerA pads to start
+ * sending input reports. One of those pads is (0x24c6:0x543a).
+ */
+-static const u8 xboxone_zerorumble_init[] = {
++static const u8 xboxone_rumblebegin_init[] = {
++ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
++ 0x1D, 0x1D, 0xFF, 0x00, 0x00
++};
++
++/*
++ * A rumble packet with zero FF intensity will immediately
++ * terminate the rumbling required to init PowerA pads.
++ * This should happen fast enough that the motors don't
++ * spin up to enough speed to actually vibrate the gamepad.
++ */
++static const u8 xboxone_rumbleend_init[] = {
+ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+@@ -407,9 +418,12 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
+ XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
+ XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+- XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_zerorumble_init),
+- XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_zerorumble_init),
+- XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_zerorumble_init),
++ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
++ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
++ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
++ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init),
++ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init),
++ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init),
+ };
+
+ struct xpad_output_packet {
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 16c30460ef04..5af0b7d200bc 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -535,16 +535,17 @@ static void synaptics_apply_quirks(struct psmouse *psmouse,
+ }
+ }
+
++static bool synaptics_has_agm(struct synaptics_data *priv)
++{
++ return (SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
++ SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c));
++}
++
+ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
+ {
+ static u8 param = 0xc8;
+- struct synaptics_data *priv = psmouse->private;
+ int error;
+
+- if (!(SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
+- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)))
+- return 0;
+-
+ error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL);
+ if (error)
+ return error;
+@@ -553,9 +554,6 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
+ if (error)
+ return error;
+
+- /* Advanced gesture mode also sends multi finger data */
+- priv->info.capabilities |= BIT(1);
+-
+ return 0;
+ }
+
+@@ -578,7 +576,7 @@ static int synaptics_set_mode(struct psmouse *psmouse)
+ if (error)
+ return error;
+
+- if (priv->absolute_mode) {
++ if (priv->absolute_mode && synaptics_has_agm(priv)) {
+ error = synaptics_set_advanced_gesture_mode(psmouse);
+ if (error) {
+ psmouse_err(psmouse,
+@@ -766,9 +764,7 @@ static int synaptics_parse_hw_state(const u8 buf[],
+ ((buf[0] & 0x04) >> 1) |
+ ((buf[3] & 0x04) >> 2));
+
+- if ((SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
+- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)) &&
+- hw->w == 2) {
++ if (synaptics_has_agm(priv) && hw->w == 2) {
+ synaptics_parse_agm(buf, priv, hw);
+ return 1;
+ }
+@@ -1033,6 +1029,15 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse,
+ synaptics_report_mt_data(psmouse, sgm, num_fingers);
+ }
+
++static bool synaptics_has_multifinger(struct synaptics_data *priv)
++{
++ if (SYN_CAP_MULTIFINGER(priv->info.capabilities))
++ return true;
++
++ /* Advanced gesture mode also sends multi finger data */
++ return synaptics_has_agm(priv);
++}
++
+ /*
+ * called for each full received packet from the touchpad
+ */
+@@ -1079,7 +1084,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
+ if (SYN_CAP_EXTENDED(info->capabilities)) {
+ switch (hw.w) {
+ case 0 ... 1:
+- if (SYN_CAP_MULTIFINGER(info->capabilities))
++ if (synaptics_has_multifinger(priv))
+ num_fingers = hw.w + 2;
+ break;
+ case 2:
+@@ -1123,7 +1128,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
+ input_report_abs(dev, ABS_TOOL_WIDTH, finger_width);
+
+ input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1);
+- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
++ if (synaptics_has_multifinger(priv)) {
+ input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2);
+ input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3);
+ }
+@@ -1283,7 +1288,7 @@ static void set_input_params(struct psmouse *psmouse,
+ __set_bit(BTN_TOUCH, dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, dev->keybit);
+
+- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
++ if (synaptics_has_multifinger(priv)) {
+ __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
+ }
+diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
+index 929f8558bf1c..90e74f9f5f9c 100644
+--- a/drivers/irqchip/irq-mips-gic.c
++++ b/drivers/irqchip/irq-mips-gic.c
+@@ -1022,8 +1022,11 @@ static int __init gic_of_init(struct device_node *node,
+ gic_len = resource_size(&res);
+ }
+
+- if (mips_cm_present())
++ if (mips_cm_present()) {
+ write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
++ /* Ensure GIC region is enabled before trying to access it */
++ __sync();
++ }
+ gic_present = true;
+
+ __gic_init(gic_base, gic_len, cpu_vec, 0, node);
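The __sync() added above emits a MIPS SYNC barrier, so the GCR write that enables the GIC register region is globally performed before __gic_init() starts touching that region; without it a reordered access could hit a still-disabled region. The generic shape of the pattern, as a hedged sketch with made-up names:

    /* Sketch only: enable a device region, fence, then access it. */
    write_enable_reg(base | REGION_ENABLE);  /* hypothetical accessor */
    mb();             /* order the enable before the first access     */
    val = read_region_reg(base + FIRST_REG); /* hypothetical accessor */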
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 3df056b73b66..fa0df4a3e1be 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+ if (queue_dying) {
+ atomic_inc(&m->pg_init_in_progress);
+ activate_or_offline_path(pgpath);
+- return DM_MAPIO_REQUEUE;
+ }
+ return DM_MAPIO_DELAY_REQUEUE;
+ }
+diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
+index bc1781bb070b..c580af05b033 100644
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -210,8 +210,27 @@ static void xenon_set_uhs_signaling(struct sdhci_host *host,
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ }
+
++static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
++{
++ struct mmc_host *mmc = host->mmc;
++ u8 pwr = host->pwr;
++
++ sdhci_set_power_noreg(host, mode, vdd);
++
++ if (host->pwr == pwr)
++ return;
++
++ if (host->pwr == 0)
++ vdd = 0;
++
++ if (!IS_ERR(mmc->supply.vmmc))
++ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
++}
++
+ static const struct sdhci_ops sdhci_xenon_ops = {
+ .set_clock = sdhci_set_clock,
++ .set_power = xenon_set_power,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = xenon_reset,
+ .set_uhs_signaling = xenon_set_uhs_signaling,
+diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
+index bbf7604889b7..1c539c83e8cf 100644
+--- a/drivers/net/wireless/ti/wl1251/main.c
++++ b/drivers/net/wireless/ti/wl1251/main.c
+@@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
+
+ wl->state = WL1251_STATE_OFF;
+ mutex_init(&wl->mutex);
++ spin_lock_init(&wl->wl_lock);
+
+ wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
+ wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 1e71e6ca5ddf..8e03c9ae0bf0 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
+ /*
+ * read a single page, without unlocking it.
+ */
+-static int readpage_nounlock(struct file *filp, struct page *page)
++static int ceph_do_readpage(struct file *filp, struct page *page)
+ {
+ struct inode *inode = file_inode(filp);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
+
+ err = ceph_readpage_from_fscache(inode, page);
+ if (err == 0)
+- goto out;
++ return -EINPROGRESS;
+
+ dout("readpage inode %p file %p page %p index %lu\n",
+ inode, filp, page, page->index);
+@@ -249,8 +249,11 @@ static int readpage_nounlock(struct file *filp, struct page *page)
+
+ static int ceph_readpage(struct file *filp, struct page *page)
+ {
+- int r = readpage_nounlock(filp, page);
+- unlock_page(page);
++ int r = ceph_do_readpage(filp, page);
++ if (r != -EINPROGRESS)
++ unlock_page(page);
++ else
++ r = 0;
+ return r;
+ }
+
+@@ -1240,7 +1243,7 @@ static int ceph_update_writeable_page(struct file *file,
+ goto retry_locked;
+ r = writepage_nounlock(page, NULL);
+ if (r < 0)
+- goto fail_nosnap;
++ goto fail_unlock;
+ goto retry_locked;
+ }
+
+@@ -1268,11 +1271,14 @@ static int ceph_update_writeable_page(struct file *file,
+ }
+
+ /* we need to read it. */
+- r = readpage_nounlock(file, page);
+- if (r < 0)
+- goto fail_nosnap;
++ r = ceph_do_readpage(file, page);
++ if (r < 0) {
++ if (r == -EINPROGRESS)
++ return -EAGAIN;
++ goto fail_unlock;
++ }
+ goto retry_locked;
+-fail_nosnap:
++fail_unlock:
+ unlock_page(page);
+ return r;
+ }
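The return convention introduced here: ceph_do_readpage() returns -EINPROGRESS when fscache accepted the read, meaning the page will be unlocked later by the completion callback, so callers must not unlock it themselves. Restated compactly (illustrative, mirroring the two call sites above):

    r = ceph_do_readpage(filp, page);
    if (r == -EINPROGRESS)
        return 0;          /* async path: completion unlocks the page */
    unlock_page(page);     /* sync path: we still own the page lock   */
    return r;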
+diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
+index 4e7421caf380..bf56392ecec2 100644
+--- a/fs/ceph/cache.c
++++ b/fs/ceph/cache.c
+@@ -240,13 +240,7 @@ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+ }
+ }
+
+-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
+-{
+- if (!error)
+- SetPageUptodate(page);
+-}
+-
+-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
++static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
+ {
+ if (!error)
+ SetPageUptodate(page);
+@@ -274,7 +268,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
+ return -ENOBUFS;
+
+ ret = fscache_read_or_alloc_page(ci->fscache, page,
+- ceph_vfs_readpage_complete, NULL,
++ ceph_readpage_from_fscache_complete, NULL,
+ GFP_KERNEL);
+
+ switch (ret) {
+@@ -303,7 +297,7 @@ int ceph_readpages_from_fscache(struct inode *inode,
+ return -ENOBUFS;
+
+ ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
+- ceph_vfs_readpage_complete_unlock,
++ ceph_readpage_from_fscache_complete,
+ NULL, mapping_gfp_mask(mapping));
+
+ switch (ret) {
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 569d3fb736be..e702d48bd023 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -205,7 +205,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ int i;
+
+ if (unlikely(direntry->d_name.len >
+- tcon->fsAttrInfo.MaxPathNameComponentLength))
++ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+ return -ENAMETOOLONG;
+
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 18700fd25a0b..2826882c81d1 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -84,8 +84,8 @@
+
+ #define NUMBER_OF_SMB2_COMMANDS 0x0013
+
+-/* BB FIXME - analyze following length BB */
+-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
++/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
++#define MAX_SMB2_HDR_SIZE 0x00b0
+
+ #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
+ #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 5420767c9b68..2ca248425e5d 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -600,8 +600,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
+ wait_queue_head_t *whead;
+
+ rcu_read_lock();
+- /* If it is cleared by POLLFREE, it should be rcu-safe */
+- whead = rcu_dereference(pwq->whead);
++ /*
++ * If it is cleared by POLLFREE, it should be rcu-safe.
++ * If we read NULL we need a barrier paired with
++ * smp_store_release() in ep_poll_callback(), otherwise
++ * we rely on whead->lock.
++ */
++ whead = smp_load_acquire(&pwq->whead);
+ if (whead)
+ remove_wait_queue(whead, &pwq->wait);
+ rcu_read_unlock();
+@@ -1086,17 +1091,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
+ struct eventpoll *ep = epi->ep;
+ int ewake = 0;
+
+- if ((unsigned long)key & POLLFREE) {
+- ep_pwq_from_wait(wait)->whead = NULL;
+- /*
+- * whead = NULL above can race with ep_remove_wait_queue()
+- * which can do another remove_wait_queue() after us, so we
+- * can't use __remove_wait_queue(). whead->lock is held by
+- * the caller.
+- */
+- list_del_init(&wait->task_list);
+- }
+-
+ spin_lock_irqsave(&ep->lock, flags);
+
+ ep_set_busy_poll_napi_id(epi);
+@@ -1180,10 +1174,26 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
+ if (pwake)
+ ep_poll_safewake(&ep->poll_wait);
+
+- if (epi->event.events & EPOLLEXCLUSIVE)
+- return ewake;
++ if (!(epi->event.events & EPOLLEXCLUSIVE))
++ ewake = 1;
++
++ if ((unsigned long)key & POLLFREE) {
++ /*
++ * If we race with ep_remove_wait_queue() it can miss
++ * ->whead = NULL and do another remove_wait_queue() after
++ * us, so we can't use __remove_wait_queue().
++ */
++ list_del_init(&wait->task_list);
++ /*
++ * ->whead != NULL protects us from the race with ep_free()
++ * or ep_remove(), ep_remove_wait_queue() takes whead->lock
++ * held by the caller. Once we nullify it, nothing protects
++ * ep/epi or even wait.
++ */
++ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
++ }
+
+- return 1;
++ return ewake;
+ }
+
+ /*
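The smp_store_release() on ->whead above pairs with the smp_load_acquire() added in ep_remove_wait_queue(): the release guarantees the list_del_init() is visible before the NULL store, and the acquire guarantees a reader that still sees a non-NULL whead also sees a wait entry that is safe to hand to remove_wait_queue(). A self-contained userspace sketch of the same publish/consume idiom (C11 atomics; names are illustrative, not the epoll code):

    #include <stdatomic.h>

    struct payload { int data; };
    static _Atomic(struct payload *) slot;

    void publish(struct payload *p)
    {
        p->data = 42;                 /* initialize first ...        */
        atomic_store_explicit(&slot, p, memory_order_release);
    }

    int consume(void)
    {
        struct payload *p =
            atomic_load_explicit(&slot, memory_order_acquire);
        /* a non-NULL p implies p->data = 42 is visible here */
        return p ? p->data : -1;
    }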
+diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
+index fc824e2828f3..5d2add1a6c96 100644
+--- a/include/asm-generic/topology.h
++++ b/include/asm-generic/topology.h
+@@ -48,7 +48,11 @@
+ #define parent_node(node) ((void)(node),0)
+ #endif
+ #ifndef cpumask_of_node
+-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
++ #ifdef CONFIG_NEED_MULTIPLE_NODES
++ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
++ #else
++ #define cpumask_of_node(node) ((void)node, cpu_online_mask)
++ #endif
+ #endif
+ #ifndef pcibus_to_node
+ #define pcibus_to_node(bus) ((void)(bus), -1)
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index b625bacf37ef..f4f9481a0c8a 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -245,7 +245,7 @@ enum {
+ NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
+ NVME_CTRL_VWC_PRESENT = 1 << 0,
+ NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+- NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
++ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
+ };
+
+ struct nvme_lbaf {
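The corrected value follows the NVMe 1.3 layout of the Optional Admin Command Support (OACS) field, where bit 7 is Virtualization Management and bit 8 is Doorbell Buffer Config; with 1 << 7 the driver was probing the wrong capability. Consumers test the bit against the Identify Controller data in the usual way (sketch; the setup hook name is hypothetical):

    if (ctrl->oacs & NVME_CTRL_OACS_DBBUF_SUPP)
        /* controller accepts the Doorbell Buffer Config command */
        setup_dbbuf(ctrl);    /* hypothetical setup hook */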
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 8f26927f16a1..0e2fe5eb6c30 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1907,6 +1907,7 @@ static struct cftype files[] = {
+ {
+ .name = "memory_pressure",
+ .read_u64 = cpuset_read_u64,
++ .private = FILE_MEMORY_PRESSURE,
+ },
+
+ {
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 0e137f98a50c..267f6ef91d97 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1262,8 +1262,6 @@ void uprobe_end_dup_mmap(void)
+
+ void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
+ {
+- newmm->uprobes_state.xol_area = NULL;
+-
+ if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
+ set_bit(MMF_HAS_UPROBES, &newmm->flags);
+ /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 9a2b4b4f13b4..31d2b97a792d 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -781,6 +781,13 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+ #endif
+ }
+
++static void mm_init_uprobes_state(struct mm_struct *mm)
++{
++#ifdef CONFIG_UPROBES
++ mm->uprobes_state.xol_area = NULL;
++#endif
++}
++
+ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ struct user_namespace *user_ns)
+ {
+@@ -808,6 +815,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+ mm->pmd_huge_pte = NULL;
+ #endif
++ mm_init_uprobes_state(mm);
+
+ if (current->mm) {
+ mm->flags = current->mm->flags & MMF_INIT_MASK;
+diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
+index 5a0f75a3bf01..eead4b339466 100644
+--- a/lib/mpi/mpicoder.c
++++ b/lib/mpi/mpicoder.c
+@@ -364,11 +364,11 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
+ }
+
+ miter.consumed = lzeros;
+- sg_miter_stop(&miter);
+
+ nbytes -= lzeros;
+ nbits = nbytes * 8;
+ if (nbits > MAX_EXTERN_MPI_BITS) {
++ sg_miter_stop(&miter);
+ pr_info("MPI: mpi too large (%u bits)\n", nbits);
+ return NULL;
+ }
+@@ -376,6 +376,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
+ if (nbytes > 0)
+ nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
+
++ sg_miter_stop(&miter);
++
+ nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
+ val = mpi_alloc(nlimbs);
+ if (!val)
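The reshuffle above fixes a use-after-unmap: buff points into the page mapped by the scatterlist iterator, and sg_miter_stop() tears that mapping down, so the count_leading_zeros(*buff) read must happen before the stop (and the early-exit path needs its own stop). The lifetime rule for sg_mapping_iter, sketched (process() is a placeholder):

    struct sg_mapping_iter miter;

    sg_miter_start(&miter, sgl, sg_nents(sgl), SG_MITER_FROM_SG);
    while (sg_miter_next(&miter))
        process(miter.addr, miter.length);  /* addr valid only here   */
    sg_miter_stop(&miter);                  /* addr invalid after this */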
+diff --git a/mm/madvise.c b/mm/madvise.c
+index fc6bfbe19a16..738066502ffa 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -610,6 +610,7 @@ static int madvise_inject_error(int behavior,
+ unsigned long start, unsigned long end)
+ {
+ struct page *page;
++ struct zone *zone;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -643,6 +644,11 @@ static int madvise_inject_error(int behavior,
+ if (ret)
+ return ret;
+ }
++
++ /* Ensure that all poisoned pages are removed from per-cpu lists */
++ for_each_populated_zone(zone)
++ drain_all_pages(zone);
++
+ return 0;
+ }
+ #endif
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 643a18f72032..4ba6513e21fc 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3301,9 +3301,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_state *x_new[XFRM_MAX_DEPTH];
+ struct xfrm_migrate *mp;
+
++ /* Stage 0 - sanity checks */
+ if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
+ goto out;
+
++ if (dir >= XFRM_POLICY_MAX) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ /* Stage 1 - find policy */
+ if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
+ err = -ENOENT;
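The new stage-0 check rejects an out-of-range dir before it is used to index per-direction policy tables; this is the hardening that became CVE-2017-11600, where an XFRM_MSG_MIGRATE request could supply an attacker-controlled direction and cause an out-of-bounds access. The idiom, restated generically:

    /* Bounds-check an untrusted index before any array use. */
    if (dir >= XFRM_POLICY_MAX)
        return -EINVAL;   /* valid directions are 0..XFRM_POLICY_MAX-1 */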
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-09-10 14:37 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-09-10 14:37 UTC (permalink / raw
To: gentoo-commits
commit: e6019d54e7ac2d157f240768f137469e7e934f3f
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep 10 14:37:39 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep 10 14:37:39 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e6019d54
Linux patch 4.12.12
0000_README | 4 +
1011_linux-4.12.12.patch | 1428 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1432 insertions(+)
diff --git a/0000_README b/0000_README
index dd06605..c3ac518 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-4.12.11.patch
From: http://www.kernel.org
Desc: Linux 4.12.11
+Patch: 1011_linux-4.12.12.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1011_linux-4.12.12.patch b/1011_linux-4.12.12.patch
new file mode 100644
index 0000000..d3993f4
--- /dev/null
+++ b/1011_linux-4.12.12.patch
@@ -0,0 +1,1428 @@
+diff --git a/Makefile b/Makefile
+index e7b2b54b032c..e96306381ee8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index e6e3b887bee3..683560763238 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -502,7 +502,7 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
+ * In the case that a guest uses storage keys
+ * faults should no longer be backed by zero pages
+ */
+-#define mm_forbids_zeropage mm_use_skey
++#define mm_forbids_zeropage mm_has_pgste
+ static inline int mm_use_skey(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_PGSTE
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 7f6db1e6c048..3ce36f92ba93 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2117,6 +2117,37 @@ static inline void thp_split_mm(struct mm_struct *mm)
+ #endif
+ }
+
++/*
++ * Remove all empty zero pages from the mapping for lazy refaulting
++ * - This must be called after mm->context.has_pgste is set, to avoid
++ * future creation of zero pages
++ * - This must be called after THP was enabled
++ */
++static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
++ unsigned long end, struct mm_walk *walk)
++{
++ unsigned long addr;
++
++ for (addr = start; addr != end; addr += PAGE_SIZE) {
++ pte_t *ptep;
++ spinlock_t *ptl;
++
++ ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++ if (is_zero_pfn(pte_pfn(*ptep)))
++ ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
++ pte_unmap_unlock(ptep, ptl);
++ }
++ return 0;
++}
++
++static inline void zap_zero_pages(struct mm_struct *mm)
++{
++ struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
++
++ walk.mm = mm;
++ walk_page_range(0, TASK_SIZE, &walk);
++}
++
+ /*
+ * switch on pgstes for its userspace process (for kvm)
+ */
+@@ -2134,6 +2165,7 @@ int s390_enable_sie(void)
+ mm->context.has_pgste = 1;
+ /* split thp mappings and disable thp for future mappings */
+ thp_split_mm(mm);
++ zap_zero_pages(mm);
+ up_write(&mm->mmap_sem);
+ return 0;
+ }
+@@ -2146,13 +2178,6 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
+ static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+ {
+- /*
+- * Remove all zero page mappings,
+- * after establishing a policy to forbid zero page mappings
+- * following faults for that page will get fresh anonymous pages
+- */
+- if (is_zero_pfn(pte_pfn(*pte)))
+- ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
+ /* Clear storage key */
+ ptep_zap_key(walk->mm, addr, pte);
+ return 0;
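zap_zero_pages() above is a standard page-table walker: walk_page_range() invokes the .pmd_entry callback for each populated PMD range, and the callback scans PTEs under pte_offset_map_lock(), invalidating any that still map the shared zero page so the next fault allocates a real anonymous page. Skeleton of the 4.12-era API (later kernels split this into walk_page_range() plus a const ops struct):

    static int my_pmd_entry(pmd_t *pmd, unsigned long start,
                            unsigned long end, struct mm_walk *walk)
    {
        /* visit the PTEs of [start, end) here */
        return 0;          /* a non-zero return aborts the walk */
    }

    static void walk_whole_mm(struct mm_struct *mm)
    {
        struct mm_walk walk = { .pmd_entry = my_pmd_entry, .mm = mm };

        /* caller must hold mmap_sem, as s390_enable_sie() does */
        walk_page_range(0, TASK_SIZE, &walk);
    }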
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index b854b1da281a..888bee99fe64 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -119,7 +119,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ return addr;
+
+ check_asce_limit:
+- if (addr + len > current->mm->context.asce_limit) {
++ if (addr + len > current->mm->context.asce_limit &&
++ addr + len <= TASK_SIZE) {
+ rc = crst_table_upgrade(mm);
+ if (rc)
+ return (unsigned long) rc;
+@@ -183,7 +184,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ }
+
+ check_asce_limit:
+- if (addr + len > current->mm->context.asce_limit) {
++ if (addr + len > current->mm->context.asce_limit &&
++ addr + len <= TASK_SIZE) {
+ rc = crst_table_upgrade(mm);
+ if (rc)
+ return (unsigned long) rc;
+diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
+index 832e885349b1..4d4cdc1a6e25 100644
+--- a/drivers/android/Kconfig
++++ b/drivers/android/Kconfig
+@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
+ config ANDROID_BINDER_DEVICES
+ string "Android Binder devices"
+ depends on ANDROID_BINDER_IPC
+- default "binder,hwbinder"
++ default "binder,hwbinder,vndbinder"
+ ---help---
+ Default value for the binder.devices parameter.
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 831cdd7d197d..3db96b79d122 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4215,7 +4215,7 @@ static int __init init_binder_device(const char *name)
+ static int __init binder_init(void)
+ {
+ int ret;
+- char *device_name, *device_names;
++ char *device_name, *device_names, *device_tmp;
+ struct binder_device *device;
+ struct hlist_node *tmp;
+
+@@ -4263,7 +4263,8 @@ static int __init binder_init(void)
+ }
+ strcpy(device_names, binder_devices_param);
+
+- while ((device_name = strsep(&device_names, ","))) {
++ device_tmp = device_names;
++ while ((device_name = strsep(&device_tmp, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
+@@ -4277,6 +4278,9 @@ static int __init binder_init(void)
+ hlist_del(&device->hlist);
+ kfree(device);
+ }
++
++ kfree(device_names);
++
+ err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
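The underlying bug here: strsep() advances the pointer it is handed, so after the parsing loop device_names would no longer point at the start of the allocation, and the kfree(device_names) added to the error path would then act on a moved, possibly NULL, pointer. Iterating with the scratch pointer device_tmp preserves the original address. The same trap, demonstrated in plain C (runnable on glibc/BSD, where strsep lives in <string.h>):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *names = strdup("binder,hwbinder,vndbinder");
        char *tmp = names;            /* strsep() will advance tmp   */
        char *tok;

        while ((tok = strsep(&tmp, ",")))
            printf("device: %s\n", tok);

        free(names);                  /* original pointer still valid */
        return 0;
    }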
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index c69954023c2e..13080f1284e8 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1467,7 +1467,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar,
+ return;
+
+ dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+- dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
++ dev_warn(&pdev->dev,
++ "Switch your BIOS from RAID to AHCI mode to use them.\n");
++
++ /*
++ * Don't rely on the msi-x capability in the remap case,
++ * share the legacy interrupt across ahci and remapped devices.
++ */
++ hpriv->flags |= AHCI_HFLAG_NO_MSI;
+ }
+
+ static int ahci_get_irq_vector(struct ata_host *host, int port)
+diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
+index 8d4d959a821c..8706533db57b 100644
+--- a/drivers/ata/pata_amd.c
++++ b/drivers/ata/pata_amd.c
+@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
+
+ { },
+ };
+diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
+index 6c15a554efbe..dc1255294628 100644
+--- a/drivers/ata/pata_cs5536.c
++++ b/drivers/ata/pata_cs5536.c
+@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+
+ static const struct pci_device_id cs5536[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
+ { },
+ };
+
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 6470eb8088f4..e32a74eb28a3 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -736,7 +736,7 @@ int bus_add_driver(struct device_driver *drv)
+
+ out_unregister:
+ kobject_put(&priv->kobj);
+- kfree(drv->p);
++ /* drv->p is freed in driver_release() */
+ drv->p = NULL;
+ out_put_bus:
+ bus_put(bus);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 7fa373b428f8..ea1612f2b52f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -355,6 +355,7 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
++ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
+
+ /* Additional Realtek 8821AE Bluetooth devices */
+ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index d4a716326f67..e7b9f8695ecb 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -81,40 +81,6 @@
+ #define debug(format, arg...)
+ #endif
+
+-#ifdef DEBUG
+-#include <linux/highmem.h>
+-
+-static void dbg_dump_sg(const char *level, const char *prefix_str,
+- int prefix_type, int rowsize, int groupsize,
+- struct scatterlist *sg, size_t tlen, bool ascii)
+-{
+- struct scatterlist *it;
+- void *it_page;
+- size_t len;
+- void *buf;
+-
+- for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
+- /*
+- * make sure the scatterlist's page
+- * has a valid virtual memory mapping
+- */
+- it_page = kmap_atomic(sg_page(it));
+- if (unlikely(!it_page)) {
+- printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
+- return;
+- }
+-
+- buf = it_page + it->offset;
+- len = min_t(size_t, tlen, it->length);
+- print_hex_dump(level, prefix_str, prefix_type, rowsize,
+- groupsize, buf, len, ascii);
+- tlen -= len;
+-
+- kunmap_atomic(it_page);
+- }
+-}
+-#endif
+-
+ static struct list_head alg_list;
+
+ struct caam_alg_entry {
+@@ -898,10 +864,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+ ablkcipher_unmap(jrdev, edesc, req);
+
+@@ -937,10 +903,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+ ablkcipher_unmap(jrdev, edesc, req);
+
+@@ -1107,10 +1073,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ ivsize, 1);
+ pr_err("asked=%d, nbytes%d\n",
+ (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+- dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+@@ -1164,10 +1130,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+- dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+@@ -1449,11 +1415,9 @@ static int aead_decrypt(struct aead_request *req)
+ u32 *desc;
+ int ret = 0;
+
+-#ifdef DEBUG
+- dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+- req->assoclen + req->cryptlen, 1);
+-#endif
++ caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++ req->assoclen + req->cryptlen, 1);
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index ea0e5b8b9171..5fe768065142 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -791,9 +791,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+- dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
+
+ ablkcipher_unmap(qidev, edesc, req);
+diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
+index 6f44ccb55c63..3d639f3b45aa 100644
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -9,6 +9,46 @@
+ #include "desc.h"
+ #include "error.h"
+
++#ifdef DEBUG
++#include <linux/highmem.h>
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize, struct scatterlist *sg,
++ size_t tlen, bool ascii)
++{
++ struct scatterlist *it;
++ void *it_page;
++ size_t len;
++ void *buf;
++
++ for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
++ /*
++ * make sure the scatterlist's page
++ * has a valid virtual memory mapping
++ */
++ it_page = kmap_atomic(sg_page(it));
++ if (unlikely(!it_page)) {
++ pr_err("caam_dump_sg: kmap failed\n");
++ return;
++ }
++
++ buf = it_page + it->offset;
++ len = min_t(size_t, tlen, it->length);
++ print_hex_dump(level, prefix_str, prefix_type, rowsize,
++ groupsize, buf, len, ascii);
++ tlen -= len;
++
++ kunmap_atomic(it_page);
++ }
++}
++#else
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize, struct scatterlist *sg,
++ size_t tlen, bool ascii)
++{}
++#endif /* DEBUG */
++EXPORT_SYMBOL(caam_dump_sg);
++
+ static const struct {
+ u8 value;
+ const char *error_text;
+diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
+index b6350b0d9153..250e1a21c473 100644
+--- a/drivers/crypto/caam/error.h
++++ b/drivers/crypto/caam/error.h
+@@ -8,4 +8,8 @@
+ #define CAAM_ERROR_H
+ #define CAAM_ERROR_STR_MAX 302
+ void caam_jr_strstatus(struct device *jrdev, u32 status);
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize, struct scatterlist *sg,
++ size_t tlen, bool ascii);
+ #endif /* CAAM_ERROR_H */
+diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
+index 1990ed460c46..53aed5816416 100644
+--- a/drivers/crypto/caam/qi.c
++++ b/drivers/crypto/caam/qi.c
+@@ -55,6 +55,7 @@ struct caam_qi_pcpu_priv {
+ } ____cacheline_aligned;
+
+ static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
++static DEFINE_PER_CPU(int, last_cpu);
+
+ /*
+ * caam_qi_priv - CAAM QI backend private params
+@@ -392,7 +393,6 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
+ dma_addr_t hwdesc;
+ struct caam_drv_ctx *drv_ctx;
+ const cpumask_t *cpus = qman_affine_cpus();
+- static DEFINE_PER_CPU(int, last_cpu);
+
+ num_words = desc_len(sh_desc);
+ if (num_words > MAX_SDLEN) {
+diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
+index 3066b805f2d0..08c0ecb7d109 100644
+--- a/drivers/fpga/altera-hps2fpga.c
++++ b/drivers/fpga/altera-hps2fpga.c
+@@ -66,7 +66,7 @@ static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
+
+ /* The L3 REMAP register is write only, so keep a cached value. */
+ static unsigned int l3_remap_shadow;
+-static spinlock_t l3_remap_lock;
++static DEFINE_SPINLOCK(l3_remap_lock);
+
+ static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
+ bool enable)
+@@ -171,8 +171,6 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
+ return -EBUSY;
+ }
+
+- spin_lock_init(&l3_remap_lock);
+-
+ if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ if (enable > 1) {
+ dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 70ab2eee9b04..0b6e1a1d1398 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -4091,7 +4091,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret)
+- goto done;
++ goto err_cleanup;
+ }
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+@@ -4119,7 +4119,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ if (crtc->state->enable) {
+ if (!drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+- return ret;
++ return 0;
+ }
+ active = true;
+ break;
+@@ -4131,6 +4131,9 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ drm->have_disp_power_ref = false;
+ }
+
++err_cleanup:
++ if (ret)
++ drm_atomic_helper_cleanup_planes(dev, state);
+ done:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+index eb9b278198b2..a4cb82495cee 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+@@ -192,6 +192,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
+ }
+ }
+
++#ifdef __BIG_ENDIAN
++ pci->msi = false;
++#endif
++
+ pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
+ if (pci->msi && func->msi_rearm) {
+ pci->msi = pci_enable_msi(pci->pdev) == 0;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index e274c9dc32f3..b7f920b35ecd 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1767,7 +1767,13 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+- wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
++ /*
++ * Only set up type/code association. Completely mapping
++ * this usage may overwrite the axis resolution and range.
++ */
++ usage->type = EV_ABS;
++ usage->code = ABS_WHEEL;
++ set_bit(EV_ABS, input->evbit);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_BUTTONCONFIG:
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 590cf90dd21a..da40df2ff27d 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -95,6 +95,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+ .driver_data = (kernel_ulong_t)0,
+ },
++ {
++ /* Cannon Lake H */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
++ .driver_data = (kernel_ulong_t)0,
++ },
++ {
++ /* Cannon Lake LP */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
++ .driver_data = (kernel_ulong_t)0,
++ },
+ { 0 },
+ };
+
+diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
+index f76d979fb7e8..cc8e0b55a12e 100644
+--- a/drivers/iio/adc/ti-ads1015.c
++++ b/drivers/iio/adc/ti-ads1015.c
+@@ -81,18 +81,12 @@ static const unsigned int ads1115_data_rate[] = {
+ 8, 16, 32, 64, 128, 250, 475, 860
+ };
+
+-static const struct {
+- int scale;
+- int uscale;
+-} ads1015_scale[] = {
+- {3, 0},
+- {2, 0},
+- {1, 0},
+- {0, 500000},
+- {0, 250000},
+- {0, 125000},
+- {0, 125000},
+- {0, 125000},
++/*
++ * Translation from PGA bits to full-scale positive and negative input voltage
++ * range in mV
++ */
++static int ads1015_fullscale_range[] = {
++ 6144, 4096, 2048, 1024, 512, 256, 256, 256
+ };
+
+ #define ADS1015_V_CHAN(_chan, _addr) { \
+@@ -183,6 +177,12 @@ struct ads1015_data {
+ struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+
+ unsigned int *data_rate;
++ /*
++ * Set to true when the ADC is switched to the continuous-conversion
++ * mode and exits from a power-down state. This flag is used to avoid
++ * getting the stale result from the conversion register.
++ */
++ bool conv_invalid;
+ };
+
+ static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
+@@ -235,33 +235,43 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+- return ret;
++ return ret < 0 ? ret : 0;
+ }
+
+ static
+ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
+ {
+ int ret, pga, dr, conv_time;
+- bool change;
++ unsigned int old, mask, cfg;
+
+ if (chan < 0 || chan >= ADS1015_CHANNELS)
+ return -EINVAL;
+
++ ret = regmap_read(data->regmap, ADS1015_CFG_REG, &old);
++ if (ret)
++ return ret;
++
+ pga = data->channel_data[chan].pga;
+ dr = data->channel_data[chan].data_rate;
++ mask = ADS1015_CFG_MUX_MASK | ADS1015_CFG_PGA_MASK |
++ ADS1015_CFG_DR_MASK;
++ cfg = chan << ADS1015_CFG_MUX_SHIFT | pga << ADS1015_CFG_PGA_SHIFT |
++ dr << ADS1015_CFG_DR_SHIFT;
+
+- ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG,
+- ADS1015_CFG_MUX_MASK |
+- ADS1015_CFG_PGA_MASK,
+- chan << ADS1015_CFG_MUX_SHIFT |
+- pga << ADS1015_CFG_PGA_SHIFT,
+- &change);
+- if (ret < 0)
++ cfg = (old & ~mask) | (cfg & mask);
++
++ ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
++ if (ret)
+ return ret;
+
+- if (change) {
+- conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
++ if (old != cfg || data->conv_invalid) {
++ int dr_old = (old & ADS1015_CFG_DR_MASK) >>
++ ADS1015_CFG_DR_SHIFT;
++
++ conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
++ conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+ usleep_range(conv_time, conv_time + 1);
++ data->conv_invalid = false;
+ }
+
+ return regmap_read(data->regmap, ADS1015_CONV_REG, val);
+@@ -298,17 +308,20 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
+ return IRQ_HANDLED;
+ }
+
+-static int ads1015_set_scale(struct ads1015_data *data, int chan,
++static int ads1015_set_scale(struct ads1015_data *data,
++ struct iio_chan_spec const *chan,
+ int scale, int uscale)
+ {
+ int i, ret, rindex = -1;
++ int fullscale = div_s64((scale * 1000000LL + uscale) <<
++ (chan->scan_type.realbits - 1), 1000000);
+
+- for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++)
+- if (ads1015_scale[i].scale == scale &&
+- ads1015_scale[i].uscale == uscale) {
++ for (i = 0; i < ARRAY_SIZE(ads1015_fullscale_range); i++) {
++ if (ads1015_fullscale_range[i] == fullscale) {
+ rindex = i;
+ break;
+ }
++ }
+ if (rindex < 0)
+ return -EINVAL;
+
+@@ -318,32 +331,23 @@ static int ads1015_set_scale(struct ads1015_data *data, int chan,
+ if (ret < 0)
+ return ret;
+
+- data->channel_data[chan].pga = rindex;
++ data->channel_data[chan->address].pga = rindex;
+
+ return 0;
+ }
+
+ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
+ {
+- int i, ret, rindex = -1;
++ int i;
+
+- for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
++ for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) {
+ if (data->data_rate[i] == rate) {
+- rindex = i;
+- break;
++ data->channel_data[chan].data_rate = i;
++ return 0;
+ }
+- if (rindex < 0)
+- return -EINVAL;
+-
+- ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+- ADS1015_CFG_DR_MASK,
+- rindex << ADS1015_CFG_DR_SHIFT);
+- if (ret < 0)
+- return ret;
+-
+- data->channel_data[chan].data_rate = rindex;
++ }
+
+- return 0;
++ return -EINVAL;
+ }
+
+ static int ads1015_read_raw(struct iio_dev *indio_dev,
+@@ -385,9 +389,9 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
+ }
+ case IIO_CHAN_INFO_SCALE:
+ idx = data->channel_data[chan->address].pga;
+- *val = ads1015_scale[idx].scale;
+- *val2 = ads1015_scale[idx].uscale;
+- ret = IIO_VAL_INT_PLUS_MICRO;
++ *val = ads1015_fullscale_range[idx];
++ *val2 = chan->scan_type.realbits - 1;
++ ret = IIO_VAL_FRACTIONAL_LOG2;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ idx = data->channel_data[chan->address].data_rate;
+@@ -414,7 +418,7 @@ static int ads1015_write_raw(struct iio_dev *indio_dev,
+ mutex_lock(&data->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+- ret = ads1015_set_scale(data, chan->address, val, val2);
++ ret = ads1015_set_scale(data, chan, val, val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ads1015_set_data_rate(data, chan->address, val);
+@@ -446,7 +450,10 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
+ .validate_scan_mask = &iio_validate_scan_mask_onehot,
+ };
+
+-static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
++static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available,
++ "3 2 1 0.5 0.25 0.125");
++static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available,
++ "0.1875 0.125 0.0625 0.03125 0.015625 0.007813");
+
+ static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
+ sampling_frequency_available, "128 250 490 920 1600 2400 3300");
+@@ -454,7 +461,7 @@ static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
+ sampling_frequency_available, "8 16 32 64 128 250 475 860");
+
+ static struct attribute *ads1015_attributes[] = {
+- &iio_const_attr_scale_available.dev_attr.attr,
++ &iio_const_attr_ads1015_scale_available.dev_attr.attr,
+ &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
+ NULL,
+ };
+@@ -464,7 +471,7 @@ static const struct attribute_group ads1015_attribute_group = {
+ };
+
+ static struct attribute *ads1115_attributes[] = {
+- &iio_const_attr_scale_available.dev_attr.attr,
++ &iio_const_attr_ads1115_scale_available.dev_attr.attr,
+ &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
+ NULL,
+ };
+@@ -630,6 +637,15 @@ static int ads1015_probe(struct i2c_client *client,
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
++
++ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
++ ADS1015_CFG_MOD_MASK,
++ ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
++ if (ret)
++ return ret;
++
++ data->conv_invalid = true;
++
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ goto err_buffer_cleanup;
+@@ -685,10 +701,15 @@ static int ads1015_runtime_resume(struct device *dev)
+ {
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ads1015_data *data = iio_priv(indio_dev);
++ int ret;
+
+- return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
++ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
++ if (!ret)
++ data->conv_invalid = true;
++
++ return ret;
+ }
+ #endif
+
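The scale rework above switches reporting to IIO_VAL_FRACTIONAL_LOG2, i.e. userspace sees fullscale_mV / 2^(realbits - 1), and ads1015_set_scale() inverts that to recover the full-scale range. Worked example for the ADS1115 (realbits = 16): a requested scale of 0.0625 mV/LSB gives 0.0625 * 2^15 = 2048 mV, which is ads1015_fullscale_range[2], so the PGA field is programmed to 2; and 2048 / 32768 = 0.0625 is exactly the third entry of ads1115_scale_available. The fixed-point step in the code works out the same way:

    /* (scale * 1000000 + uscale) << (realbits - 1), back to mV:
     *   (0 * 1000000 + 62500) << 15 / 1000000 = 2048
     */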
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index fb3810d35c44..0871010f18d5 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -381,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+ return 0;
+
+ if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
+- psmouse_warn(psmouse, "failed to get extended button data\n");
+- button_info = 0;
++ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
++ button_info = 0x33;
+ }
+
+ psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
+diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
+index d072c088ce73..945091a88354 100644
+--- a/drivers/mcb/mcb-lpc.c
++++ b/drivers/mcb/mcb-lpc.c
+@@ -114,6 +114,12 @@ static struct resource sc24_fpga_resource = {
+ .flags = IORESOURCE_MEM,
+ };
+
++static struct resource sc31_fpga_resource = {
++ .start = 0xf000e000,
++ .end = 0xf000e000 + CHAM_HEADER_SIZE,
++ .flags = IORESOURCE_MEM,
++};
++
+ static struct platform_driver mcb_lpc_driver = {
+ .driver = {
+ .name = "mcb-lpc",
+@@ -132,6 +138,15 @@ static const struct dmi_system_id mcb_lpc_dmi_table[] = {
+ .driver_data = (void *)&sc24_fpga_resource,
+ .callback = mcb_lpc_create_platform_device,
+ },
++ {
++ .ident = "SC31",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
++ },
++ .driver_data = (void *)&sc31_fpga_resource,
++ .callback = mcb_lpc_create_platform_device,
++ },
+ {}
+ };
+ MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 5a0638915874..4e1b30c29751 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -1984,6 +1984,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ goto err_wmi_detach;
+ }
+
++ /* If firmware indicates Full Rx Reorder support it must be used in a
++ * slightly different manner. Let HTT code know.
++ */
++ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
++ ar->wmi.svc_map));
++
+ status = ath10k_htt_rx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
+@@ -2096,12 +2102,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ }
+ }
+
+- /* If firmware indicates Full Rx Reorder support it must be used in a
+- * slightly different manner. Let HTT code know.
+- */
+- ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+- ar->wmi.svc_map));
+-
+ status = ath10k_htt_rx_ring_refill(ar);
+ if (status) {
+ ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index e51760e752d4..93e2fb667633 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -429,6 +429,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
+
+ /* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 025bc06a19d6..9e6b55b9bfc4 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4221,7 +4221,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
+ if (adapter->config_bands & BAND_A)
+ n_channels_a = mwifiex_band_5ghz.n_channels;
+
+- adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
++ adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
+ adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
+ adapter->num_in_chan_stats);
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index ce6936d0c5c0..abd156db08fb 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -2492,6 +2492,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
+ sizeof(struct mwifiex_chan_stats);
+
+ for (i = 0 ; i < num_chan; i++) {
++ if (adapter->survey_idx >= adapter->num_in_chan_stats) {
++ mwifiex_dbg(adapter, WARN,
++ "FW reported too many channel results (max %d)\n",
++ adapter->num_in_chan_stats);
++ return;
++ }
+ chan_stats.chan_num = fw_chan_stats->chan_num;
+ chan_stats.bandcfg = fw_chan_stats->bandcfg;
+ chan_stats.flags = fw_chan_stats->flags;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 2e6b888bd417..264dae5129ab 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -2255,7 +2255,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ /* find adapter */
+ if (!_rtl_pci_find_adapter(pdev, hw)) {
+ err = -ENODEV;
+- goto fail3;
++ goto fail2;
+ }
+
+ /* Init IO handler */
+@@ -2316,10 +2316,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ pci_set_drvdata(pdev, NULL);
+ rtl_deinit_core(hw);
+
++fail2:
+ if (rtlpriv->io.pci_mem_start != 0)
+ pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+
+-fail2:
+ pci_release_regions(pdev);
+ complete(&rtlpriv->firmware_loading_complete);
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+index 7661cfa53032..37489b43bb23 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+@@ -175,6 +175,8 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ rtl_fw_cb);
+ if (err) {
+ pr_info("Failed to request firmware!\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+index bcbb0c60f1f1..38f85bfdf0c7 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+@@ -176,6 +176,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+index 96c923b3feb4..e3eb850bb1de 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+@@ -85,6 +85,10 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ err = request_firmware_nowait(THIS_MODULE, 1,
+ fw_name, rtlpriv->io.dev,
+ GFP_KERNEL, hw, rtl_fw_cb);
++ if (err) {
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
++ }
+ return err;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+index 16132c66e5e1..e38d6f7370aa 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+@@ -183,6 +183,8 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+index 48820bc497d8..6f2d1f053ecd 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+@@ -177,6 +177,8 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+index 2006b09ea74f..1ec20efb9ce1 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+@@ -216,6 +216,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+ rtl92se_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+index 7bf9f2557920..aab86667a7f3 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+@@ -184,6 +184,8 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+ return 0;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+index 8c0ac96b5430..81ef1b312491 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+@@ -187,16 +187,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+- /* Failed to get firmware. Check if old version available */
+- fw_name = "rtlwifi/rtl8723befw.bin";
+- pr_info("Using firmware %s\n", fw_name);
+- err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+- rtlpriv->io.dev, GFP_KERNEL, hw,
+- rtl_fw_cb);
+- if (err) {
+- pr_err("Failed to request firmware!\n");
+- return 1;
+- }
++ pr_err("Failed to request firmware!\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
++ return 1;
+ }
+ return 0;
+ }
+@@ -287,6 +281,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
+ .bar_id = 2,
+ .write_readback = true,
+ .name = "rtl8723be_pci",
++ .alt_fw_name = "rtlwifi/rtl8723befw.bin",
+ .ops = &rtl8723be_hal_ops,
+ .mod_params = &rtl8723be_mod_params,
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+index abaf34cb1433..227d27bed5f6 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+@@ -196,6 +196,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
+ if (!rtlpriv->rtlhal.wowlan_firmware) {
+ pr_err("Can't alloc buffer for wowlan fw.\n");
++ vfree(rtlpriv->rtlhal.pfirmware);
++ rtlpriv->rtlhal.pfirmware = NULL;
+ return 1;
+ }
+
+@@ -214,16 +216,10 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+- /* Failed to get firmware. Check if old version available */
+- fw_name = "rtlwifi/rtl8821aefw.bin";
+- pr_info("Using firmware %s\n", fw_name);
+- err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+- rtlpriv->io.dev, GFP_KERNEL, hw,
+- rtl_fw_cb);
+- if (err) {
+- pr_err("Failed to request normal firmware!\n");
+- return 1;
+- }
++ pr_err("Failed to request normal firmware!\n");
++ vfree(rtlpriv->rtlhal.wowlan_firmware);
++ vfree(rtlpriv->rtlhal.pfirmware);
++ return 1;
+ }
+ /*load wowlan firmware*/
+ pr_info("Using firmware %s\n", wowlan_fw_name);
+@@ -233,6 +229,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ rtl_wowlan_fw_cb);
+ if (err) {
+ pr_err("Failed to request wowlan firmware!\n");
++ vfree(rtlpriv->rtlhal.wowlan_firmware);
++ vfree(rtlpriv->rtlhal.pfirmware);
+ return 1;
+ }
+ return 0;
+@@ -325,6 +323,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+ .bar_id = 2,
+ .write_readback = true,
+ .name = "rtl8821ae_pci",
++ .alt_fw_name = "rtlwifi/rtl8821aefw.bin",
+ .ops = &rtl8821ae_hal_ops,
+ .mod_params = &rtl8821ae_mod_params,
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index 28c38c756f92..9af44f6dc17b 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -274,6 +274,8 @@ ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
+ ssize_t sl = of_device_get_modalias(dev, str, len - 2);
+ if (sl < 0)
+ return sl;
++ if (sl > len - 2)
++ return -ENOMEM;
+
+ str[sl++] = '\n';
+ str[sl] = 0;
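of_device_get_modalias() follows snprintf semantics and returns the length the full string would need, so a result above len - 2 means the buffer was truncated and there is no room left for the '\n' and NUL appended next; the added check reports -ENOMEM instead of writing past the valid region. The generic truncation test looks the same everywhere:

    int n = snprintf(buf, size, "%s", value);
    if (n < 0 || n >= (int)size)
        return -ENOMEM;       /* output did not fit */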
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index aa6f1debeaa7..f0804d5a6db7 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1233,6 +1233,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ unsigned long req_sz, len, sa;
+ Sg_scatter_hold *rsv_schp;
+ int k, length;
++ int ret = 0;
+
+ if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
+ return -ENXIO;
+@@ -1243,8 +1244,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ if (vma->vm_pgoff)
+ return -EINVAL; /* want no offset */
+ rsv_schp = &sfp->reserve;
+- if (req_sz > rsv_schp->bufflen)
+- return -ENOMEM; /* cannot map more than reserved buffer */
++ mutex_lock(&sfp->f_mutex);
++ if (req_sz > rsv_schp->bufflen) {
++ ret = -ENOMEM; /* cannot map more than reserved buffer */
++ goto out;
++ }
+
+ sa = vma->vm_start;
+ length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+@@ -1258,7 +1262,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = sfp;
+ vma->vm_ops = &sg_mmap_vm_ops;
+- return 0;
++out:
++ mutex_unlock(&sfp->f_mutex);
++ return ret;
+ }
+
+ static void
+@@ -1737,9 +1743,12 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
+ sg_link_reserve(sfp, srp, dxfer_len);
+- } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
++ } else if (hp->flags & SG_FLAG_MMAP_IO) {
++ res = -EBUSY; /* sfp->res_in_use == 1 */
++ if (dxfer_len > rsv_schp->bufflen)
++ res = -ENOMEM;
+ mutex_unlock(&sfp->f_mutex);
+- return -EBUSY;
++ return res;
+ } else {
+ res = sg_build_indirect(req_schp, sfp, dxfer_len);
+ if (res) {
+diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
+index a95c5de1aa00..d9a98f0c327f 100644
+--- a/drivers/staging/rts5208/rtsx_scsi.c
++++ b/drivers/staging/rts5208/rtsx_scsi.c
+@@ -414,7 +414,7 @@ void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
+ sense->ascq = ascq;
+ if (sns_key_info0 != 0) {
+ sense->sns_key_info[0] = SKSV | sns_key_info0;
+- sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
++ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
+ sense->sns_key_info[2] = sns_key_info1 & 0x0f;
+ }
+ }
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 8e6ef671be9b..a76e45219f16 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -623,6 +623,8 @@ static void async_completed(struct urb *urb)
+ if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
+ as->status != -ENOENT)
+ cancel_bulk_urbs(ps, as->bulk_addr);
++
++ wake_up(&ps->wait);
+ spin_unlock(&ps->lock);
+
+ if (signr) {
+@@ -630,8 +632,6 @@ static void async_completed(struct urb *urb)
+ put_pid(pid);
+ put_cred(cred);
+ }
+-
+- wake_up(&ps->wait);
+ }
+
+ static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
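Moving the wake_up() above inside the ps->lock section closes a teardown race: once the completed URB is visible on the list and the lock is dropped, a reaping or closing thread can free the usb_dev_state, so a wake_up(&ps->wait) issued after the unlock could touch freed memory. Holding the lock across the wakeup keeps ps alive for the call (hedged sketch of the shape, not the driver's exact code):

    spin_lock(&ps->lock);
    list_move_tail(&as->asynclist, &ps->async_completed);
    wake_up(&ps->wait);   /* ps cannot be freed while its lock is held */
    spin_unlock(&ps->lock);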
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 574da2b4529c..82806e311202 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -57,8 +57,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Microsoft LifeCam-VX700 v2.0 */
+ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech HD Pro Webcams C920 and C930e */
++ /* Logitech HD Pro Webcams C920, C920-C and C930e */
+ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
++ { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+
+ /* Logitech ConferenceCam CC3000e */
+@@ -217,6 +218,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
++ /* Corsair Strafe RGB */
++ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ /* Acer C120 LED Projector */
+ { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index c8f38649f749..658d9d1f9ea3 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -142,29 +142,30 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+ else if (rev >= 0x40 && rev <= 0x4f)
+ pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+- }
+- pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+- 0x145c, NULL);
+- if (pinfo->smbus_dev) {
+- pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+
+- if (!pinfo->smbus_dev) {
+- pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+- return 0;
++ if (pinfo->smbus_dev) {
++ rev = pinfo->smbus_dev->revision;
++ if (rev >= 0x11 && rev <= 0x14)
++ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
++ else if (rev >= 0x15 && rev <= 0x18)
++ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
++ else if (rev >= 0x39 && rev <= 0x3a)
++ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
++ } else {
++ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
++ 0x145c, NULL);
++ if (pinfo->smbus_dev) {
++ rev = pinfo->smbus_dev->revision;
++ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
++ } else {
++ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
++ return 0;
++ }
+ }
+-
+- rev = pinfo->smbus_dev->revision;
+- if (rev >= 0x11 && rev <= 0x14)
+- pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+- else if (rev >= 0x15 && rev <= 0x18)
+- pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+- else if (rev >= 0x39 && rev <= 0x3a)
+- pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ }
+-
+ pinfo->sb_type.rev = rev;
+ return 1;
+ }
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 870da18f5077..6aae19d8de7b 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2668,6 +2668,13 @@ static int musb_suspend(struct device *dev)
+ {
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
++ int ret;
++
++ ret = pm_runtime_get_sync(dev);
++ if (ret < 0) {
++ pm_runtime_put_noidle(dev);
++ return ret;
++ }
+
+ musb_platform_disable(musb);
+ musb_disable_interrupts(musb);
+@@ -2718,14 +2725,6 @@ static int musb_resume(struct device *dev)
+ if ((devctl & mask) != (musb->context.devctl & mask))
+ musb->port1_status = 0;
+
+- /*
+- * The USB HUB code expects the device to be in RPM_ACTIVE once it came
+- * out of suspend
+- */
+- pm_runtime_disable(dev);
+- pm_runtime_set_active(dev);
+- pm_runtime_enable(dev);
+-
+ musb_start(musb);
+
+ spin_lock_irqsave(&musb->lock, flags);
+@@ -2735,6 +2734,9 @@ static int musb_resume(struct device *dev)
+ error);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
++ pm_runtime_mark_last_busy(dev);
++ pm_runtime_put_autosuspend(dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fe123153b1a5..2a9944326210 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2023,6 +2023,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
+diff --git a/fs/dlm/user.c b/fs/dlm/user.c
+index 23488f559cf9..84199151b64b 100644
+--- a/fs/dlm/user.c
++++ b/fs/dlm/user.c
+@@ -355,6 +355,10 @@ static int dlm_device_register(struct dlm_ls *ls, char *name)
+ error = misc_register(&ls->ls_device);
+ if (error) {
+ kfree(ls->ls_device.name);
++ /* this has to be set to NULL
++ * to avoid a double-free in dlm_device_deregister
++ */
++ ls->ls_device.name = NULL;
+ }
+ fail:
+ return error;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 5f6b71d15393..73605fe53097 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -576,6 +576,7 @@
+ #define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
+ #define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
+ #define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
++#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092
+ #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
+ #define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
+ #define PCI_DEVICE_ID_AMD_LX_AES 0x2082
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index db6dc9dc0482..1c49431f3121 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -323,8 +323,8 @@ enum {
+
+ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
+ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
+- __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
+ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
++ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
+
+ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
+ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
+index 51f891fb1b18..7668b5791c91 100644
+--- a/include/uapi/linux/android/binder.h
++++ b/include/uapi/linux/android/binder.h
+@@ -132,6 +132,7 @@ enum {
+
+ /* struct binder_fd_array_object - object describing an array of fds in a buffer
+ * @hdr: common header structure
++ * @pad: padding to ensure correct alignment
+ * @num_fds: number of file descriptors in the buffer
+ * @parent: index in offset array to buffer holding the fd array
+ * @parent_offset: start offset of fd array in the buffer
+@@ -152,6 +153,7 @@ enum {
+ */
+ struct binder_fd_array_object {
+ struct binder_object_header hdr;
++ __u32 pad;
+ binder_size_t num_fds;
+ binder_size_t parent;
+ binder_size_t parent_offset;
+diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
+index 26a06e09a5bd..0b8ba343b478 100644
+--- a/kernel/configs/android-base.config
++++ b/kernel/configs/android-base.config
+@@ -7,6 +7,7 @@
+ # CONFIG_SYSVIPC is not set
+ CONFIG_ANDROID=y
+ CONFIG_ANDROID_BINDER_IPC=y
++CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
+ CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+ CONFIG_ARMV8_DEPRECATED=y
+ CONFIG_ASHMEM=y
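Of the fixes quoted above, the workqueue flag change is worth a closer look: before it, __WQ_ORDERED_EXPLICIT and __WQ_LEGACY both claimed bit 18, so testing a legacy workqueue for the ordered-explicit flag produced a false positive. A minimal userspace sketch of that collision follows; the flag names mirror the kernel's, but the demo itself is illustrative, not kernel code.

#include <stdio.h>

#define WQ_LEGACY               (1u << 18) /* create*_workqueue() */
#define WQ_ORDERED_EXPLICIT_OLD (1u << 18) /* buggy: shares WQ_LEGACY's bit */
#define WQ_ORDERED_EXPLICIT_NEW (1u << 19) /* fixed: a bit of its own */

int main(void)
{
	unsigned int flags = WQ_LEGACY; /* a legacy, non-ordered workqueue */

	/* Before the fix, this test wrongly matched legacy workqueues. */
	printf("old layout: ordered-explicit? %s\n",
	       (flags & WQ_ORDERED_EXPLICIT_OLD) ? "yes (wrong)" : "no");

	/* After the fix, the two flags are distinguishable again. */
	printf("new layout: ordered-explicit? %s\n",
	       (flags & WQ_ORDERED_EXPLICIT_NEW) ? "yes" : "no (correct)");
	return 0;
}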
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-09-13 12:23 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-09-13 12:23 UTC (permalink / raw
To: gentoo-commits
commit: 54b1ba4af883c5b5915f7ef50d6504bae1d1a664
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 12:23:15 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 12:23:15 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54b1ba4a
Validate the output buffer length for L2CAP config reqs and resps to avoid a stack buffer overflow. CVE-2017-1000251. See bug #630840
0000_README | 4 +
2400_BT-check-L2CAP-buffer-length.patch | 357 ++++++++++++++++++++++++++++++++
2 files changed, 361 insertions(+)
diff --git a/0000_README b/0000_README
index c3ac518..bd9f666 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 2300_enable-poweroff-on-Mac-Pro-11.patch
From: http://kernel.ubuntu.com/git/ubuntu/ubuntu-xenial.git/patch/drivers/pci/quirks.c?id=5080ff61a438f3dd80b88b423e1a20791d8a774c
Desc: Workaround to enable poweroff on Mac Pro 11. See bug #601964.
+Patch: 2400_BT-check-L2CAP-buffer-length.patch
+From: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
+Desc: Validate the output buffer length for L2CAP config reqs and resps to avoid stack buffer overflowing. CVE-2017-1000251. See bug #630840
+
Patch: 2600_enable-key-swapping-for-apple-mac.patch
From: https://github.com/free5lot/hid-apple-patched
Desc: This hid-apple patch enables swapping of the FN and left Control keys and some additional keys on some Apple keyboards. See bug #622902
diff --git a/2400_BT-check-L2CAP-buffer-length.patch b/2400_BT-check-L2CAP-buffer-length.patch
new file mode 100644
index 0000000..c6bfdf7
--- /dev/null
+++ b/2400_BT-check-L2CAP-buffer-length.patch
@@ -0,0 +1,357 @@
+From e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 Mon Sep 17 00:00:00 2001
+From: Ben Seri <ben@armis.com>
+Date: Sat, 9 Sep 2017 23:15:59 +0200
+Subject: Bluetooth: Properly check L2CAP config option output buffer length
+
+Validate the output buffer length for L2CAP config requests and responses
+to avoid overflowing the stack buffer used for building the option blocks.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ben Seri <ben@armis.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ net/bluetooth/l2cap_core.c | 80 +++++++++++++++++++++++++---------------------
+ 1 file changed, 43 insertions(+), 37 deletions(-)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 303c779..43ba91c 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ u8 code, u8 ident, u16 dlen, void *data);
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ void *data);
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
+ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+
+ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ return len;
+ }
+
+-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
++static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
+ {
+ struct l2cap_conf_opt *opt = *ptr;
+
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+
++ if (size < L2CAP_CONF_OPT_SIZE + len)
++ return;
++
+ opt->type = type;
+ opt->len = len;
+
+@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+ *ptr += L2CAP_CONF_OPT_SIZE + len;
+ }
+
+-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
++static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
+ {
+ struct l2cap_conf_efs efs;
+
+@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+ }
+
+ l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, size);
+ }
+
+ static void l2cap_ack_timeout(struct work_struct *work)
+@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+ chan->ack_win = chan->tx_win;
+ }
+
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ void *ptr = req->data;
++ void *endptr = data + data_size;
+ u16 size;
+
+ BT_DBG("chan %p", chan);
+@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+
+ done:
+ if (chan->imtu != L2CAP_DEFAULT_MTU)
+- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+@@ -3239,7 +3243,7 @@ done:
+ rfc.max_pdu_size = 0;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+ break;
+
+ case L2CAP_MODE_ERTM:
+@@ -3259,21 +3263,21 @@ done:
+ L2CAP_DEFAULT_TX_WINDOW);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+- l2cap_add_opt_efs(&ptr, chan);
++ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+- chan->tx_win);
++ chan->tx_win, endptr - ptr);
+
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+- chan->fcs);
++ chan->fcs, endptr - ptr);
+ }
+ break;
+
+@@ -3291,17 +3295,17 @@ done:
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+- l2cap_add_opt_efs(&ptr, chan);
++ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+- chan->fcs);
++ chan->fcs, endptr - ptr);
+ }
+ break;
+ }
+@@ -3312,10 +3316,11 @@ done:
+ return ptr - data;
+ }
+
+-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ struct l2cap_conf_rsp *rsp = data;
+ void *ptr = rsp->data;
++ void *endptr = data + data_size;
+ void *req = chan->conf_req;
+ int len = chan->conf_len;
+ int type, hint, olen;
+@@ -3417,7 +3422,7 @@ done:
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+ }
+
+ if (result == L2CAP_CONF_SUCCESS) {
+@@ -3430,7 +3435,7 @@ done:
+ chan->omtu = mtu;
+ set_bit(CONF_MTU_DONE, &chan->conf_state);
+ }
+- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
+
+ if (remote_efs) {
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+@@ -3444,7 +3449,7 @@ done:
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, endptr - ptr);
+ } else {
+ /* Send PENDING Conf Rsp */
+ result = L2CAP_CONF_PENDING;
+@@ -3477,7 +3482,7 @@ done:
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+- sizeof(rfc), (unsigned long) &rfc);
++ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+@@ -3491,7 +3496,7 @@ done:
+ le32_to_cpu(efs.sdu_itime);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, endptr - ptr);
+ }
+ break;
+
+@@ -3505,7 +3510,7 @@ done:
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+
+ break;
+
+@@ -3527,10 +3532,11 @@ done:
+ }
+
+ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+- void *data, u16 *result)
++ void *data, size_t size, u16 *result)
+ {
+ struct l2cap_conf_req *req = data;
+ void *ptr = req->data;
++ void *endptr = data + size;
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ } else
+ chan->imtu = val;
+- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_FLUSH_TO:
+ chan->flush_to = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+- 2, chan->flush_to);
++ 2, chan->flush_to, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_RFC:
+@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ chan->fcs = 0;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+- sizeof(rfc), (unsigned long) &rfc);
++ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_EWS:
+ chan->ack_win = min_t(u16, val, chan->ack_win);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+- chan->tx_win);
++ chan->tx_win, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_EFS:
+@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_FCS:
+@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ return;
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -3900,7 +3906,7 @@ sendresp:
+ u8 buf[128];
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ break;
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, req), req);
++ l2cap_build_conf_req(chan, req, sizeof(req)), req);
+ chan->num_conf_req++;
+ break;
+
+@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ }
+
+ /* Complete config. */
+- len = l2cap_parse_conf_req(chan, rsp);
++ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto unlock;
+@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ u8 buf[64];
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ char buf[64];
+
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+- buf, &result);
++ buf, sizeof(buf), &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ /* throw out any old stored conf requests */
+ result = L2CAP_CONF_SUCCESS;
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+- req, &result);
++ req, sizeof(req), &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+ }
+@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf),
++ l2cap_build_conf_req(chan, buf, sizeof(buf)),
+ buf);
+ chan->num_conf_req++;
+ }
+--
+cgit v1.1
+
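The heart of the patch is threading the remaining output space down to l2cap_add_conf_opt() as an extra size argument and refusing to append an option that would overrun the buffer. A standalone sketch of that pattern follows, written in GNU C as the kernel is (void-pointer arithmetic included); names such as opt_hdr and add_opt are illustrative, not the kernel's.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct opt_hdr {                  /* stand-in for struct l2cap_conf_opt */
	uint8_t type;
	uint8_t len;
	uint8_t val[];
};
#define OPT_HDR_SIZE sizeof(struct opt_hdr)

static void add_opt(void **ptr, uint8_t type, uint8_t len,
		    const void *val, size_t size)
{
	struct opt_hdr *opt = *ptr;

	if (size < OPT_HDR_SIZE + len) /* the new check: never overflow */
		return;

	opt->type = type;
	opt->len = len;
	memcpy(opt->val, val, len);
	*ptr += OPT_HDR_SIZE + len;
}

int main(void)
{
	uint8_t buf[8];
	void *ptr = buf, *endptr = buf + sizeof(buf);
	uint16_t mtu = 672;

	add_opt(&ptr, 0x01, sizeof(mtu), &mtu, endptr - ptr);
	/* This option no longer fits in the 4 remaining bytes: dropped. */
	add_opt(&ptr, 0x04, 6, "AAAAAA", endptr - ptr);

	printf("bytes written: %td of %zu\n",
	       (uint8_t *)ptr - buf, sizeof(buf));
	return 0;
}

Note one consequence of the approach: like the kernel fix, add_opt() drops an oversized option silently rather than reporting an error, which keeps the many call sites simple at the cost of a shorter-than-expected config block.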
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-09-13 22:28 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-09-13 22:28 UTC (permalink / raw
To: gentoo-commits
commit: 74695b4b8b53d91a791c3227c4d6db6b45b0371a
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 22:28:51 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 22:28:51 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=74695b4b
Linux patch 4.12.13
0000_README | 4 +
1012_linux-4.12.13.patch | 1076 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1080 insertions(+)
diff --git a/0000_README b/0000_README
index bd9f666..5320ea5 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-4.12.12.patch
From: http://www.kernel.org
Desc: Linux 4.12.12
+Patch: 1012_linux-4.12.13.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.13
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1012_linux-4.12.13.patch b/1012_linux-4.12.13.patch
new file mode 100644
index 0000000..763a970
--- /dev/null
+++ b/1012_linux-4.12.13.patch
@@ -0,0 +1,1076 @@
+diff --git a/Makefile b/Makefile
+index e96306381ee8..983224467a4d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index ff8b0aa2dfde..42f585379e19 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -315,8 +315,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ * signal first. We do not need to release the mmap_sem because
+ * it would already be released in __lock_page_or_retry in
+ * mm/filemap.c. */
+- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++ if (!user_mode(regs))
++ goto no_context;
+ return 0;
++ }
+
+ /*
+ * Major/minor page fault accounting is only done on the
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index b69e4a4ecdd8..1ce5e773dd30 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -312,6 +312,7 @@
+ interrupt-controller;
+ reg = <0x1d00000 0x10000>, /* GICD */
+ <0x1d40000 0x40000>; /* GICR */
++ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index cb8225969255..97fc5f18b0a8 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4759,7 +4759,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
+ * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+ * in PFERR_NEXT_GUEST_PAGE)
+ */
+- if (error_code == PFERR_NESTED_GUEST_PAGE) {
++ if (vcpu->arch.mmu.direct_map &&
++ error_code == PFERR_NESTED_GUEST_PAGE) {
+ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+ return 1;
+ }
+diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
+index 61ca020c5272..d929111b5ebe 100644
+--- a/drivers/mtd/nand/mxc_nand.c
++++ b/drivers/mtd/nand/mxc_nand.c
+@@ -877,6 +877,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+ }
+ }
+
++#define MXC_V1_ECCBYTES 5
++
+ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+ {
+@@ -886,7 +888,7 @@ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+- oobregion->length = nand_chip->ecc.bytes;
++ oobregion->length = MXC_V1_ECCBYTES;
+
+ return 0;
+ }
+@@ -908,8 +910,7 @@ static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+ oobregion->length = 4;
+ }
+ } else {
+- oobregion->offset = ((section - 1) * 16) +
+- nand_chip->ecc.bytes + 6;
++ oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
+ if (section < nand_chip->ecc.steps)
+ oobregion->length = (section * 16) + 6 -
+ oobregion->offset;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 6f9771e82476..2be78d1bc195 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -3972,10 +3972,13 @@ static void nand_manufacturer_detect(struct nand_chip *chip)
+ * nand_decode_ext_id() otherwise.
+ */
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+- chip->manufacturer.desc->ops->detect)
++ chip->manufacturer.desc->ops->detect) {
++ /* The 3rd id byte holds MLC / multichip data */
++ chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ chip->manufacturer.desc->ops->detect(chip);
+- else
++ } else {
+ nand_decode_ext_id(chip);
++ }
+ }
+
+ /*
+diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
+index b12dc7325378..bd9a6e343848 100644
+--- a/drivers/mtd/nand/nand_hynix.c
++++ b/drivers/mtd/nand/nand_hynix.c
+@@ -477,7 +477,7 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
+ * The ECC requirements field meaning depends on the
+ * NAND technology.
+ */
+- u8 nand_tech = chip->id.data[5] & 0x3;
++ u8 nand_tech = chip->id.data[5] & 0x7;
+
+ if (nand_tech < 3) {
+ /* > 26nm, reference: H27UBG8T2A datasheet */
+@@ -533,7 +533,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
+ if (nand_tech > 0)
+ chip->options |= NAND_NEED_SCRAMBLING;
+ } else {
+- nand_tech = chip->id.data[5] & 0x3;
++ nand_tech = chip->id.data[5] & 0x7;
+
+ /* < 32nm */
+ if (nand_tech > 2)
+diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
+index 57d483ac5765..6f0fd1512ad2 100644
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -109,7 +109,11 @@
+ #define READ_ADDR 0
+
+ /* NAND_DEV_CMD_VLD bits */
+-#define READ_START_VLD 0
++#define READ_START_VLD BIT(0)
++#define READ_STOP_VLD BIT(1)
++#define WRITE_START_VLD BIT(2)
++#define ERASE_START_VLD BIT(3)
++#define SEQ_READ_START_VLD BIT(4)
+
+ /* NAND_EBI2_ECC_BUF_CFG bits */
+ #define NUM_STEPS 0
+@@ -148,6 +152,10 @@
+ #define FETCH_ID 0xb
+ #define RESET_DEVICE 0xd
+
++/* Default Value for NAND_DEV_CMD_VLD */
++#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
++ ERASE_START_VLD | SEQ_READ_START_VLD)
++
+ /*
+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+ * the driver calls the chunks 'step' or 'codeword' interchangeably
+@@ -672,8 +680,7 @@ static int nandc_param(struct qcom_nand_host *host)
+
+ /* configure CMD1 and VLD for ONFI param probing */
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+- (nandc->vld & ~(1 << READ_START_VLD))
+- | 0 << READ_START_VLD);
++ (nandc->vld & ~READ_START_VLD));
+ nandc_set_reg(nandc, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+@@ -1893,7 +1900,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
+ | wide_bus << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE;
+
+- host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
++ host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+ | 0 << ECC_SW_RESET
+ | host->cw_data << ECC_NUM_DATA_BYTES
+ | 1 << ECC_FORCE_CLK_OPEN
+@@ -1972,13 +1979,14 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
+ /* kill onenand */
+ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
++ nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);
+
+ /* enable ADM DMA */
+ nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+
+ /* save the original values of these registers */
+ nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
+- nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
++ nandc->vld = NAND_DEV_CMD_VLD_VAL;
+
+ return 0;
+ }
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index d11c7b210e81..5672aec48572 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -3699,7 +3699,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ if (rt2x00_rt(rt2x00dev, RT3572))
+ rt2800_rfcsr_write(rt2x00dev, 8, 0);
+
+- rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
++ if (rt2x00_rt(rt2x00dev, RT6352))
++ rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
++ else
++ tx_pin = 0;
+
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+index 2f3946be4ce2..34cb46a0c904 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+@@ -1153,7 +1153,10 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ }
+
+ /* fixed internal switch S1->WiFi, S0->BT */
+- btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
++ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
++ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
++ else
++ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+
+ switch (antpos_type) {
+ case BTC_ANT_WIFI_AT_MAIN:
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 990e6fb32a63..e353e151ffa9 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -77,7 +77,7 @@ static struct nvmf_host *nvmf_host_default(void)
+ kref_init(&host->ref);
+ uuid_be_gen(&host->id);
+ snprintf(host->nqn, NVMF_NQN_SIZE,
+- "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
++ "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_add_tail(&host->list, &nvmf_hosts);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 4f1cdd5058f1..76209e7fb6e1 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1828,6 +1828,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ goto restore;
+ }
+
++ btrfs_qgroup_rescan_resume(fs_info);
++
+ if (!fs_info->uuid_root) {
+ btrfs_info(fs_info, "creating UUID tree");
+ ret = btrfs_create_uuid_tree(fs_info);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index d264363559db..426e4e06b333 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -629,11 +629,11 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ if (result <= 0)
+ goto out;
+
+- result = generic_write_sync(iocb, result);
+- if (result < 0)
+- goto out;
+ written = result;
+ iocb->ki_pos += written;
++ result = generic_write_sync(iocb, written);
++ if (result < 0)
++ goto out;
+
+ /* Return error values */
+ if (nfs_need_check_write(file, inode)) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 3e24392f2caa..4651bf48aa86 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -248,7 +248,6 @@ int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
+ extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
+ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
+ void nfs_pgio_header_free(struct nfs_pgio_header *);
+-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
+ int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
+ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+ struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 7ddba5022948..0e1d3f263f8c 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -515,16 +515,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
+ }
+ EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
+
+-/*
+- * nfs_pgio_header_free - Free a read or write header
+- * @hdr: The header to free
+- */
+-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+-{
+- hdr->rw_ops->rw_free_header(hdr);
+-}
+-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+-
+ /**
+ * nfs_pgio_data_destroy - make @hdr suitable for reuse
+ *
+@@ -533,14 +523,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+ *
+ * @hdr: A header that has had nfs_generic_pgio called
+ */
+-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
++static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+ {
+ if (hdr->args.context)
+ put_nfs_open_context(hdr->args.context);
+ if (hdr->page_array.pagevec != hdr->page_array.page_array)
+ kfree(hdr->page_array.pagevec);
+ }
+-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
++
++/*
++ * nfs_pgio_header_free - Free a read or write header
++ * @hdr: The header to free
++ */
++void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
++{
++ nfs_pgio_data_destroy(hdr);
++ hdr->rw_ops->rw_free_header(hdr);
++}
++EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+
+ /**
+ * nfs_pgio_rpcsetup - Set up arguments for a pageio call
+@@ -654,7 +654,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
+ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
+ {
+ set_bit(NFS_IOHDR_REDO, &hdr->flags);
+- nfs_pgio_data_destroy(hdr);
+ hdr->completion_ops->completion(hdr);
+ }
+
+@@ -665,7 +664,6 @@ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
+ static void nfs_pgio_release(void *calldata)
+ {
+ struct nfs_pgio_header *hdr = calldata;
+- nfs_pgio_data_destroy(hdr);
+ hdr->completion_ops->completion(hdr);
+ }
+
+@@ -699,9 +697,6 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ int io_flags,
+ gfp_t gfp_flags)
+ {
+- struct nfs_pgio_mirror *new;
+- int i;
+-
+ desc->pg_moreio = 0;
+ desc->pg_inode = inode;
+ desc->pg_ops = pg_ops;
+@@ -717,21 +712,9 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ desc->pg_mirror_count = 1;
+ desc->pg_mirror_idx = 0;
+
+- if (pg_ops->pg_get_mirror_count) {
+- /* until we have a request, we don't have an lseg and no
+- * idea how many mirrors there will be */
+- new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
+- sizeof(struct nfs_pgio_mirror), gfp_flags);
+- desc->pg_mirrors_dynamic = new;
+- desc->pg_mirrors = new;
+-
+- for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
+- nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
+- } else {
+- desc->pg_mirrors_dynamic = NULL;
+- desc->pg_mirrors = desc->pg_mirrors_static;
+- nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+- }
++ desc->pg_mirrors_dynamic = NULL;
++ desc->pg_mirrors = desc->pg_mirrors_static;
++ nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+ }
+ EXPORT_SYMBOL_GPL(nfs_pageio_init);
+
+@@ -850,32 +833,52 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
+ return ret;
+ }
+
++static struct nfs_pgio_mirror *
++nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
++ unsigned int mirror_count)
++{
++ struct nfs_pgio_mirror *ret;
++ unsigned int i;
++
++ kfree(desc->pg_mirrors_dynamic);
++ desc->pg_mirrors_dynamic = NULL;
++ if (mirror_count == 1)
++ return desc->pg_mirrors_static;
++ ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
++ if (ret != NULL) {
++ for (i = 0; i < mirror_count; i++)
++ nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
++ desc->pg_mirrors_dynamic = ret;
++ }
++ return ret;
++}
++
+ /*
+ * nfs_pageio_setup_mirroring - determine if mirroring is to be used
+ * by calling the pg_get_mirror_count op
+ */
+-static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
++static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+ {
+- int mirror_count = 1;
++ unsigned int mirror_count = 1;
+
+- if (!pgio->pg_ops->pg_get_mirror_count)
+- return 0;
+-
+- mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+-
+- if (pgio->pg_error < 0)
+- return pgio->pg_error;
+-
+- if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
+- return -EINVAL;
++ if (pgio->pg_ops->pg_get_mirror_count)
++ mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
++ if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
++ return;
+
+- if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
+- return -EINVAL;
++ if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
++ pgio->pg_error = -EINVAL;
++ return;
++ }
+
++ pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
++ if (pgio->pg_mirrors == NULL) {
++ pgio->pg_error = -ENOMEM;
++ pgio->pg_mirrors = pgio->pg_mirrors_static;
++ mirror_count = 1;
++ }
+ pgio->pg_mirror_count = mirror_count;
+-
+- return 0;
+ }
+
+ /*
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index c383d0913b54..64bb20130edf 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2274,7 +2274,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
+ nfs_pageio_reset_write_mds(desc);
+ mirror->pg_recoalesce = 1;
+ }
+- nfs_pgio_data_destroy(hdr);
+ hdr->release(hdr);
+ }
+
+@@ -2398,7 +2397,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
+ nfs_pageio_reset_read_mds(desc);
+ mirror->pg_recoalesce = 1;
+ }
+- nfs_pgio_data_destroy(hdr);
+ hdr->release(hdr);
+ }
+
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index 044fb0e15390..f6586691d989 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -279,7 +279,14 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+ #endif /* DEBUG */
+
+ #ifdef CONFIG_XFS_RT
+-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
++
++/*
++ * make sure we ignore the inode flag if the filesystem doesn't have a
++ * configured realtime device.
++ */
++#define XFS_IS_REALTIME_INODE(ip) \
++ (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
++ (ip)->i_mount->m_rtdev_targp)
+ #else
+ #define XFS_IS_REALTIME_INODE(ip) (0)
+ #endif
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 898e87998417..79a804f1aab9 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -463,7 +463,7 @@ radix_tree_node_free(struct radix_tree_node *node)
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
+ */
+-static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
++static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
+ {
+ struct radix_tree_preload *rtp;
+ struct radix_tree_node *node;
+@@ -2103,7 +2103,8 @@ EXPORT_SYMBOL(radix_tree_tagged);
+ */
+ void idr_preload(gfp_t gfp_mask)
+ {
+- __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
++ if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
++ preempt_disable();
+ }
+ EXPORT_SYMBOL(idr_preload);
+
+@@ -2117,13 +2118,13 @@ EXPORT_SYMBOL(idr_preload);
+ */
+ int ida_pre_get(struct ida *ida, gfp_t gfp)
+ {
+- __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
+ /*
+ * The IDA API has no preload_end() equivalent. Instead,
+ * ida_get_new() can return -EAGAIN, prompting the caller
+ * to return to the ida_pre_get() step.
+ */
+- preempt_enable();
++ if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
++ preempt_enable();
+
+ if (!this_cpu_read(ida_bitmap)) {
+ struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+diff --git a/mm/memory.c b/mm/memory.c
+index 9e50ffcf9639..0a98a1a55dfa 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3843,6 +3843,11 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ /* do counter updates before entering really critical section. */
+ check_sync_rss_stat(current);
+
++ if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
++ flags & FAULT_FLAG_INSTRUCTION,
++ flags & FAULT_FLAG_REMOTE))
++ return VM_FAULT_SIGSEGV;
++
+ /*
+ * Enable the memcg OOM handling for faults triggered in user
+ * space. Kernel faults are handled more gracefully.
+@@ -3850,11 +3855,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ if (flags & FAULT_FLAG_USER)
+ mem_cgroup_oom_enable();
+
+- if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+- flags & FAULT_FLAG_INSTRUCTION,
+- flags & FAULT_FLAG_REMOTE))
+- return VM_FAULT_SIGSEGV;
+-
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
+ else
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 4f6cba1b6632..2e09f67bc99b 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2903,7 +2903,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ p->flags = 0;
+ spin_unlock(&swap_lock);
+ vfree(swap_map);
+- vfree(cluster_info);
++ kvfree(cluster_info);
++ kvfree(frontswap_map);
+ if (swap_file) {
+ if (inode && S_ISREG(inode->i_mode)) {
+ inode_unlock(inode);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index f88ac99528ce..6754e93d2096 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ u8 code, u8 ident, u16 dlen, void *data);
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ void *data);
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
+ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+
+ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ return len;
+ }
+
+-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
++static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
+ {
+ struct l2cap_conf_opt *opt = *ptr;
+
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+
++ if (size < L2CAP_CONF_OPT_SIZE + len)
++ return;
++
+ opt->type = type;
+ opt->len = len;
+
+@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+ *ptr += L2CAP_CONF_OPT_SIZE + len;
+ }
+
+-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
++static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
+ {
+ struct l2cap_conf_efs efs;
+
+@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+ }
+
+ l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, size);
+ }
+
+ static void l2cap_ack_timeout(struct work_struct *work)
+@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+ chan->ack_win = chan->tx_win;
+ }
+
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ void *ptr = req->data;
++ void *endptr = data + data_size;
+ u16 size;
+
+ BT_DBG("chan %p", chan);
+@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+
+ done:
+ if (chan->imtu != L2CAP_DEFAULT_MTU)
+- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+@@ -3239,7 +3243,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ rfc.max_pdu_size = 0;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+ break;
+
+ case L2CAP_MODE_ERTM:
+@@ -3259,21 +3263,21 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ L2CAP_DEFAULT_TX_WINDOW);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+- l2cap_add_opt_efs(&ptr, chan);
++ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+- chan->tx_win);
++ chan->tx_win, endptr - ptr);
+
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+- chan->fcs);
++ chan->fcs, endptr - ptr);
+ }
+ break;
+
+@@ -3291,17 +3295,17 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+- l2cap_add_opt_efs(&ptr, chan);
++ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+- chan->fcs);
++ chan->fcs, endptr - ptr);
+ }
+ break;
+ }
+@@ -3312,10 +3316,11 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ return ptr - data;
+ }
+
+-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ struct l2cap_conf_rsp *rsp = data;
+ void *ptr = rsp->data;
++ void *endptr = data + data_size;
+ void *req = chan->conf_req;
+ int len = chan->conf_len;
+ int type, hint, olen;
+@@ -3417,7 +3422,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+ }
+
+ if (result == L2CAP_CONF_SUCCESS) {
+@@ -3430,7 +3435,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ chan->omtu = mtu;
+ set_bit(CONF_MTU_DONE, &chan->conf_state);
+ }
+- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
+
+ if (remote_efs) {
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+@@ -3444,7 +3449,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, endptr - ptr);
+ } else {
+ /* Send PENDING Conf Rsp */
+ result = L2CAP_CONF_PENDING;
+@@ -3477,7 +3482,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+- sizeof(rfc), (unsigned long) &rfc);
++ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+@@ -3491,7 +3496,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ le32_to_cpu(efs.sdu_itime);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, endptr - ptr);
+ }
+ break;
+
+@@ -3505,7 +3510,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+- (unsigned long) &rfc);
++ (unsigned long) &rfc, endptr - ptr);
+
+ break;
+
+@@ -3527,10 +3532,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ }
+
+ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+- void *data, u16 *result)
++ void *data, size_t size, u16 *result)
+ {
+ struct l2cap_conf_req *req = data;
+ void *ptr = req->data;
++ void *endptr = data + size;
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ } else
+ chan->imtu = val;
+- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_FLUSH_TO:
+ chan->flush_to = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+- 2, chan->flush_to);
++ 2, chan->flush_to, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_RFC:
+@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ chan->fcs = 0;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+- sizeof(rfc), (unsigned long) &rfc);
++ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_EWS:
+ chan->ack_win = min_t(u16, val, chan->ack_win);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+- chan->tx_win);
++ chan->tx_win, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_EFS:
+@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+- (unsigned long) &efs);
++ (unsigned long) &efs, endptr - ptr);
+ break;
+
+ case L2CAP_CONF_FCS:
+@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ return;
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -3900,7 +3906,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ u8 buf[128];
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ break;
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, req), req);
++ l2cap_build_conf_req(chan, req, sizeof(req)), req);
+ chan->num_conf_req++;
+ break;
+
+@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ }
+
+ /* Complete config. */
+- len = l2cap_parse_conf_req(chan, rsp);
++ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto unlock;
+@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ u8 buf[64];
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+
+@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ char buf[64];
+
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+- buf, &result);
++ buf, sizeof(buf), &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ /* throw out any old stored conf requests */
+ result = L2CAP_CONF_SUCCESS;
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+- req, &result);
++ req, sizeof(req), &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf), buf);
++ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ chan->num_conf_req++;
+ }
+ }
+@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf),
++ l2cap_build_conf_req(chan, buf, sizeof(buf)),
+ buf);
+ chan->num_conf_req++;
+ }
+diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
+index 912b5a9ccbab..013d8d1170fe 100644
+--- a/sound/isa/msnd/msnd_midi.c
++++ b/sound/isa/msnd/msnd_midi.c
+@@ -120,24 +120,24 @@ void snd_msndmidi_input_read(void *mpuv)
+ unsigned long flags;
+ struct snd_msndmidi *mpu = mpuv;
+ void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
++ u16 head, tail, size;
+
+ spin_lock_irqsave(&mpu->input_lock, flags);
+- while (readw(mpu->dev->MIDQ + JQS_wTail) !=
+- readw(mpu->dev->MIDQ + JQS_wHead)) {
+- u16 wTmp, val;
+- val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
+-
+- if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
+- &mpu->mode))
+- snd_rawmidi_receive(mpu->substream_input,
+- (unsigned char *)&val, 1);
+-
+- wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
+- if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
+- writew(0, mpu->dev->MIDQ + JQS_wHead);
+- else
+- writew(wTmp, mpu->dev->MIDQ + JQS_wHead);
++ head = readw(mpu->dev->MIDQ + JQS_wHead);
++ tail = readw(mpu->dev->MIDQ + JQS_wTail);
++ size = readw(mpu->dev->MIDQ + JQS_wSize);
++ if (head > size || tail > size)
++ goto out;
++ while (head != tail) {
++ unsigned char val = readw(pwMIDQData + 2 * head);
++
++ if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
++ snd_rawmidi_receive(mpu->substream_input, &val, 1);
++ if (++head > size)
++ head = 0;
++ writew(head, mpu->dev->MIDQ + JQS_wHead);
+ }
++ out:
+ spin_unlock_irqrestore(&mpu->input_lock, flags);
+ }
+ EXPORT_SYMBOL(snd_msndmidi_input_read);
+diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
+index ad4897337df5..fc4fb1904aef 100644
+--- a/sound/isa/msnd/msnd_pinnacle.c
++++ b/sound/isa/msnd/msnd_pinnacle.c
+@@ -170,23 +170,24 @@ static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
+ {
+ struct snd_msnd *chip = dev_id;
+ void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
++ u16 head, tail, size;
+
+ /* Send ack to DSP */
+ /* inb(chip->io + HP_RXL); */
+
+ /* Evaluate queued DSP messages */
+- while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
+- u16 wTmp;
+-
+- snd_msnd_eval_dsp_msg(chip,
+- readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
+-
+- wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
+- if (wTmp > readw(chip->DSPQ + JQS_wSize))
+- writew(0, chip->DSPQ + JQS_wHead);
+- else
+- writew(wTmp, chip->DSPQ + JQS_wHead);
++ head = readw(chip->DSPQ + JQS_wHead);
++ tail = readw(chip->DSPQ + JQS_wTail);
++ size = readw(chip->DSPQ + JQS_wSize);
++ if (head > size || tail > size)
++ goto out;
++ while (head != tail) {
++ snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
++ if (++head > size)
++ head = 0;
++ writew(head, chip->DSPQ + JQS_wHead);
+ }
++ out:
+ /* Send ack to DSP */
+ inb(chip->io + HP_RXL);
+ return IRQ_HANDLED;
+diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
+index b4967d875236..f249e042b3b5 100644
+--- a/tools/testing/selftests/x86/fsgsbase.c
++++ b/tools/testing/selftests/x86/fsgsbase.c
+@@ -285,9 +285,12 @@ static void *threadproc(void *ctx)
+ }
+ }
+
+-static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
++static void set_gs_and_switch_to(unsigned long local,
++ unsigned short force_sel,
++ unsigned long remote)
+ {
+ unsigned long base;
++ unsigned short sel_pre_sched, sel_post_sched;
+
+ bool hard_zero = false;
+ if (local == HARD_ZERO) {
+@@ -297,6 +300,8 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+
+ printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
+ local, hard_zero ? " and clear gs" : "", remote);
++ if (force_sel)
++ printf("\tBefore schedule, set selector to 0x%hx\n", force_sel);
+ if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
+ err(1, "ARCH_SET_GS");
+ if (hard_zero)
+@@ -307,18 +312,35 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+ printf("[FAIL]\tGSBASE wasn't set as expected\n");
+ }
+
++ if (force_sel) {
++ asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
++ sel_pre_sched = force_sel;
++ local = read_base(GS);
++
++ /*
++ * Signal delivery seems to mess up weird selectors. Put it
++ * back.
++ */
++ asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
++ } else {
++ asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched));
++ }
++
+ remote_base = remote;
+ ftx = 1;
+ syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+ while (ftx != 0)
+ syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
+
++ asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched));
+ base = read_base(GS);
+- if (base == local) {
+- printf("[OK]\tGSBASE remained 0x%lx\n", local);
++ if (base == local && sel_pre_sched == sel_post_sched) {
++ printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n",
++ sel_pre_sched, local);
+ } else {
+ nerrs++;
+- printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
++ printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n",
++ sel_pre_sched, local, sel_post_sched, base);
+ }
+ }
+
+@@ -381,8 +403,15 @@ int main()
+
+ for (int local = 0; local < 4; local++) {
+ for (int remote = 0; remote < 4; remote++) {
+- set_gs_and_switch_to(bases_with_hard_zero[local],
+- bases_with_hard_zero[remote]);
++ for (unsigned short s = 0; s < 5; s++) {
++ unsigned short sel = s;
++ if (s == 4)
++ asm ("mov %%ss, %0" : "=rm" (sel));
++ set_gs_and_switch_to(
++ bases_with_hard_zero[local],
++ sel,
++ bases_with_hard_zero[remote]);
++ }
+ }
+ }
+
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-09-13 23:09 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-09-13 23:09 UTC (permalink / raw
To: gentoo-commits
commit: 96f06d593059fe935778616edb37a74fd0b42536
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 23:09:47 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 23:09:47 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=96f06d59
Removal of redundant patch
0000_README | 4 -
2400_BT-check-L2CAP-buffer-length.patch | 357 --------------------------------
2 files changed, 361 deletions(-)
diff --git a/0000_README b/0000_README
index 5320ea5..e3c1b9e 100644
--- a/0000_README
+++ b/0000_README
@@ -111,10 +111,6 @@ Patch: 2300_enable-poweroff-on-Mac-Pro-11.patch
From: http://kernel.ubuntu.com/git/ubuntu/ubuntu-xenial.git/patch/drivers/pci/quirks.c?id=5080ff61a438f3dd80b88b423e1a20791d8a774c
Desc: Workaround to enable poweroff on Mac Pro 11. See bug #601964.
-Patch: 2400_BT-check-L2CAP-buffer-length.patch
-From: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
-Desc: Validate the output buffer length for L2CAP config reqs and resps to avoid overflowing the stack buffer. CVE-2017-1000251. See bug #630840
-
Patch: 2600_enable-key-swapping-for-apple-mac.patch
From: https://github.com/free5lot/hid-apple-patched
Desc: This hid-apple patch enables swapping of the FN and left Control keys and some additional keys on some Apple keyboards. See bug #622902
diff --git a/2400_BT-check-L2CAP-buffer-length.patch b/2400_BT-check-L2CAP-buffer-length.patch
deleted file mode 100644
index c6bfdf7..0000000
--- a/2400_BT-check-L2CAP-buffer-length.patch
+++ /dev/null
@@ -1,357 +0,0 @@
-From e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 Mon Sep 17 00:00:00 2001
-From: Ben Seri <ben@armis.com>
-Date: Sat, 9 Sep 2017 23:15:59 +0200
-Subject: Bluetooth: Properly check L2CAP config option output buffer length
-
-Validate the output buffer length for L2CAP config requests and responses
-to avoid overflowing the stack buffer used for building the option blocks.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Ben Seri <ben@armis.com>
-Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
- net/bluetooth/l2cap_core.c | 80 +++++++++++++++++++++++++---------------------
- 1 file changed, 43 insertions(+), 37 deletions(-)
-
-diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
-index 303c779..43ba91c 100644
---- a/net/bluetooth/l2cap_core.c
-+++ b/net/bluetooth/l2cap_core.c
-@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- u8 code, u8 ident, u16 dlen, void *data);
- static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
- void *data);
--static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
-+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
- static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
-
- static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
-@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
-
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-- l2cap_build_conf_req(chan, buf), buf);
-+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
-
-@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
- return len;
- }
-
--static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
-+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
- {
- struct l2cap_conf_opt *opt = *ptr;
-
- BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
-
-+ if (size < L2CAP_CONF_OPT_SIZE + len)
-+ return;
-+
- opt->type = type;
- opt->len = len;
-
-@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
- *ptr += L2CAP_CONF_OPT_SIZE + len;
- }
-
--static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
-+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
- {
- struct l2cap_conf_efs efs;
-
-@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
- }
-
- l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
-- (unsigned long) &efs);
-+ (unsigned long) &efs, size);
- }
-
- static void l2cap_ack_timeout(struct work_struct *work)
-@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
- chan->ack_win = chan->tx_win;
- }
-
--static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
-+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
- {
- struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = chan->mode };
- void *ptr = req->data;
-+ void *endptr = data + data_size;
- u16 size;
-
- BT_DBG("chan %p", chan);
-@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
-
- done:
- if (chan->imtu != L2CAP_DEFAULT_MTU)
-- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
-+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
-
- switch (chan->mode) {
- case L2CAP_MODE_BASIC:
-@@ -3239,7 +3243,7 @@ done:
- rfc.max_pdu_size = 0;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-- (unsigned long) &rfc);
-+ (unsigned long) &rfc, endptr - ptr);
- break;
-
- case L2CAP_MODE_ERTM:
-@@ -3259,21 +3263,21 @@ done:
- L2CAP_DEFAULT_TX_WINDOW);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-- (unsigned long) &rfc);
-+ (unsigned long) &rfc, endptr - ptr);
-
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
-- l2cap_add_opt_efs(&ptr, chan);
-+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
-
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
-- chan->tx_win);
-+ chan->tx_win, endptr - ptr);
-
- if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
- if (chan->fcs == L2CAP_FCS_NONE ||
- test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
- chan->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
-- chan->fcs);
-+ chan->fcs, endptr - ptr);
- }
- break;
-
-@@ -3291,17 +3295,17 @@ done:
- rfc.max_pdu_size = cpu_to_le16(size);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-- (unsigned long) &rfc);
-+ (unsigned long) &rfc, endptr - ptr);
-
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
-- l2cap_add_opt_efs(&ptr, chan);
-+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
-
- if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
- if (chan->fcs == L2CAP_FCS_NONE ||
- test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
- chan->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
-- chan->fcs);
-+ chan->fcs, endptr - ptr);
- }
- break;
- }
-@@ -3312,10 +3316,11 @@ done:
- return ptr - data;
- }
-
--static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
-+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
- {
- struct l2cap_conf_rsp *rsp = data;
- void *ptr = rsp->data;
-+ void *endptr = data + data_size;
- void *req = chan->conf_req;
- int len = chan->conf_len;
- int type, hint, olen;
-@@ -3417,7 +3422,7 @@ done:
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-- (unsigned long) &rfc);
-+ (unsigned long) &rfc, endptr - ptr);
- }
-
- if (result == L2CAP_CONF_SUCCESS) {
-@@ -3430,7 +3435,7 @@ done:
- chan->omtu = mtu;
- set_bit(CONF_MTU_DONE, &chan->conf_state);
- }
-- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
-+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
-
- if (remote_efs) {
- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-@@ -3444,7 +3449,7 @@ done:
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- sizeof(efs),
-- (unsigned long) &efs);
-+ (unsigned long) &efs, endptr - ptr);
- } else {
- /* Send PENDING Conf Rsp */
- result = L2CAP_CONF_PENDING;
-@@ -3477,7 +3482,7 @@ done:
- set_bit(CONF_MODE_DONE, &chan->conf_state);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-- sizeof(rfc), (unsigned long) &rfc);
-+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
-
- if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
- chan->remote_id = efs.id;
-@@ -3491,7 +3496,7 @@ done:
- le32_to_cpu(efs.sdu_itime);
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- sizeof(efs),
-- (unsigned long) &efs);
-+ (unsigned long) &efs, endptr - ptr);
- }
- break;
-
-@@ -3505,7 +3510,7 @@ done:
- set_bit(CONF_MODE_DONE, &chan->conf_state);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-- (unsigned long) &rfc);
-+ (unsigned long) &rfc, endptr - ptr);
-
- break;
-
-@@ -3527,10 +3532,11 @@ done:
- }
-
- static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
-- void *data, u16 *result)
-+ void *data, size_t size, u16 *result)
- {
- struct l2cap_conf_req *req = data;
- void *ptr = req->data;
-+ void *endptr = data + size;
- int type, olen;
- unsigned long val;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
-@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- chan->imtu = L2CAP_DEFAULT_MIN_MTU;
- } else
- chan->imtu = val;
-- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
-+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
- break;
-
- case L2CAP_CONF_FLUSH_TO:
- chan->flush_to = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
-- 2, chan->flush_to);
-+ 2, chan->flush_to, endptr - ptr);
- break;
-
- case L2CAP_CONF_RFC:
-@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- chan->fcs = 0;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-- sizeof(rfc), (unsigned long) &rfc);
-+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
- break;
-
- case L2CAP_CONF_EWS:
- chan->ack_win = min_t(u16, val, chan->ack_win);
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
-- chan->tx_win);
-+ chan->tx_win, endptr - ptr);
- break;
-
- case L2CAP_CONF_EFS:
-@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
-- (unsigned long) &efs);
-+ (unsigned long) &efs, endptr - ptr);
- break;
-
- case L2CAP_CONF_FCS:
-@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
- return;
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-- l2cap_build_conf_req(chan, buf), buf);
-+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
-
-@@ -3900,7 +3906,7 @@ sendresp:
- u8 buf[128];
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-- l2cap_build_conf_req(chan, buf), buf);
-+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
-
-@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
- break;
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-- l2cap_build_conf_req(chan, req), req);
-+ l2cap_build_conf_req(chan, req, sizeof(req)), req);
- chan->num_conf_req++;
- break;
-
-@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
- }
-
- /* Complete config. */
-- len = l2cap_parse_conf_req(chan, rsp);
-+ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
- if (len < 0) {
- l2cap_send_disconn_req(chan, ECONNRESET);
- goto unlock;
-@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
- if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
- u8 buf[64];
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-- l2cap_build_conf_req(chan, buf), buf);
-+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
-
-@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
- char buf[64];
-
- len = l2cap_parse_conf_rsp(chan, rsp->data, len,
-- buf, &result);
-+ buf, sizeof(buf), &result);
- if (len < 0) {
- l2cap_send_disconn_req(chan, ECONNRESET);
- goto done;
-@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
- /* throw out any old stored conf requests */
- result = L2CAP_CONF_SUCCESS;
- len = l2cap_parse_conf_rsp(chan, rsp->data, len,
-- req, &result);
-+ req, sizeof(req), &result);
- if (len < 0) {
- l2cap_send_disconn_req(chan, ECONNRESET);
- goto done;
-@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
- L2CAP_CONF_REQ,
-- l2cap_build_conf_req(chan, buf), buf);
-+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
- }
-@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(conn, l2cap_get_ident(conn),
- L2CAP_CONF_REQ,
-- l2cap_build_conf_req(chan, buf),
-+ l2cap_build_conf_req(chan, buf, sizeof(buf)),
- buf);
- chan->num_conf_req++;
- }
---
-cgit v1.1
-
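[Editor's note] The standalone backport removed above became redundant once the fix reached the 4.12 stable series. It works by threading the remaining buffer size into every option writer and bailing out when an option would not fit. A minimal sketch of that bounded-TLV-writer pattern, with hypothetical names (add_opt is not the kernel function):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Refuse to append an option that would overrun the caller's buffer,
     * mirroring the size check added to l2cap_add_conf_opt(). */
    static void add_opt(uint8_t **ptr, uint8_t type, uint8_t len,
                        const void *val, size_t room)
    {
        if (room < (size_t)2 + len)   /* 2 == type byte + length byte */
            return;                   /* drop the option, don't overflow */
        (*ptr)[0] = type;
        (*ptr)[1] = len;
        memcpy(*ptr + 2, val, len);
        *ptr += 2 + len;
    }

    int main(void)
    {
        uint8_t buf[8], *p = buf, *end = buf + sizeof(buf);
        uint16_t mtu = 672;

        add_opt(&p, 0x01, sizeof(mtu), &mtu, end - p);      /* fits */
        add_opt(&p, 0x04, 16, "0123456789abcdef", end - p); /* dropped */
        printf("wrote %ld bytes\n", (long)(p - buf));
        return 0;
    }

Before the fix, a remote peer could drive the writers past the 64-byte stack buffers (CVE-2017-1000251); with the check, oversized options are simply not emitted.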
* [gentoo-commits] proj/linux-patches:4.12 commit in: /
@ 2017-09-20 10:10 Mike Pagano
0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2017-09-20 10:10 UTC (permalink / raw
To: gentoo-commits
commit: af6dfb8341d6ec2f07e8be8c04cc67db993f7d5c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 20 10:10:06 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 20 10:10:06 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=af6dfb83
Linux patch 4.12.14
0000_README | 4 +
1013_linux-4.12.14.patch | 1928 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1932 insertions(+)
diff --git a/0000_README b/0000_README
index e3c1b9e..3fe24e4 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch: 1012_linux-4.12.13.patch
From: http://www.kernel.org
Desc: Linux 4.12.13
+Patch: 1013_linux-4.12.14.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.14
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1013_linux-4.12.14.patch b/1013_linux-4.12.14.patch
new file mode 100644
index 0000000..7fe464a
--- /dev/null
+++ b/1013_linux-4.12.14.patch
@@ -0,0 +1,1928 @@
+diff --git a/Makefile b/Makefile
+index 983224467a4d..9ad227ddbfcd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 9aeb91935ce0..e2c4dd051ef8 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -204,6 +204,7 @@ void set_personality_ia32(bool);
+
+ #define ELF_CORE_COPY_REGS(pr_reg, regs) \
+ do { \
++ unsigned long base; \
+ unsigned v; \
+ (pr_reg)[0] = (regs)->r15; \
+ (pr_reg)[1] = (regs)->r14; \
+@@ -226,8 +227,8 @@ do { \
+ (pr_reg)[18] = (regs)->flags; \
+ (pr_reg)[19] = (regs)->sp; \
+ (pr_reg)[20] = (regs)->ss; \
+- (pr_reg)[21] = current->thread.fsbase; \
+- (pr_reg)[22] = current->thread.gsbase; \
++ rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
++ rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
+ asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
+ asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
+ asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index b6840bf3940b..d0fdce3d1d83 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -149,6 +149,123 @@ void release_thread(struct task_struct *dead_task)
+ }
+ }
+
++enum which_selector {
++ FS,
++ GS
++};
++
++/*
++ * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
++ * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
++ * It's forcibly inlined because it'll generate better code and this function
++ * is hot.
++ */
++static __always_inline void save_base_legacy(struct task_struct *prev_p,
++ unsigned short selector,
++ enum which_selector which)
++{
++ if (likely(selector == 0)) {
++ /*
++ * On Intel (without X86_BUG_NULL_SEG), the segment base could
++ * be the pre-existing saved base or it could be zero. On AMD
++ * (with X86_BUG_NULL_SEG), the segment base could be almost
++ * anything.
++ *
++ * This branch is very hot (it's hit twice on almost every
++ * context switch between 64-bit programs), and avoiding
++ * the RDMSR helps a lot, so we just assume that whatever
++ * value is already saved is correct. This matches historical
++ * Linux behavior, so it won't break existing applications.
++ *
++ * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
++ * report that the base is zero, it needs to actually be zero:
++ * see the corresponding logic in load_seg_legacy.
++ */
++ } else {
++ /*
++ * If the selector is 1, 2, or 3, then the base is zero on
++ * !X86_BUG_NULL_SEG CPUs and could be anything on
++ * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
++ * has never attempted to preserve the base across context
++ * switches.
++ *
++ * If selector > 3, then it refers to a real segment, and
++ * saving the base isn't necessary.
++ */
++ if (which == FS)
++ prev_p->thread.fsbase = 0;
++ else
++ prev_p->thread.gsbase = 0;
++ }
++}
++
++static __always_inline void save_fsgs(struct task_struct *task)
++{
++ savesegment(fs, task->thread.fsindex);
++ savesegment(gs, task->thread.gsindex);
++ save_base_legacy(task, task->thread.fsindex, FS);
++ save_base_legacy(task, task->thread.gsindex, GS);
++}
++
++static __always_inline void loadseg(enum which_selector which,
++ unsigned short sel)
++{
++ if (which == FS)
++ loadsegment(fs, sel);
++ else
++ load_gs_index(sel);
++}
++
++static __always_inline void load_seg_legacy(unsigned short prev_index,
++ unsigned long prev_base,
++ unsigned short next_index,
++ unsigned long next_base,
++ enum which_selector which)
++{
++ if (likely(next_index <= 3)) {
++ /*
++ * The next task is using 64-bit TLS, is not using this
++ * segment at all, or is having fun with arcane CPU features.
++ */
++ if (next_base == 0) {
++ /*
++ * Nasty case: on AMD CPUs, we need to forcibly zero
++ * the base.
++ */
++ if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
++ loadseg(which, __USER_DS);
++ loadseg(which, next_index);
++ } else {
++ /*
++ * We could try to exhaustively detect cases
++ * under which we can skip the segment load,
++ * but there's really only one case that matters
++ * for performance: if both the previous and
++ * next states are fully zeroed, we can skip
++ * the load.
++ *
++ * (This assumes that prev_base == 0 has no
++ * false positives. This is the case on
++ * Intel-style CPUs.)
++ */
++ if (likely(prev_index | next_index | prev_base))
++ loadseg(which, next_index);
++ }
++ } else {
++ if (prev_index != next_index)
++ loadseg(which, next_index);
++ wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
++ next_base);
++ }
++ } else {
++ /*
++ * The next task is using a real segment. Loading the selector
++ * is sufficient.
++ */
++ loadseg(which, next_index);
++ }
++}
++
+ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
+ {
+@@ -229,10 +346,19 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
+ unsigned long new_sp,
+ unsigned int _cs, unsigned int _ss, unsigned int _ds)
+ {
++ WARN_ON_ONCE(regs != current_pt_regs());
++
++ if (static_cpu_has(X86_BUG_NULL_SEG)) {
++ /* Loading zero below won't clear the base. */
++ loadsegment(fs, __USER_DS);
++ load_gs_index(__USER_DS);
++ }
++
+ loadsegment(fs, 0);
+ loadsegment(es, _ds);
+ loadsegment(ds, _ds);
+ load_gs_index(0);
++
+ regs->ip = new_ip;
+ regs->sp = new_sp;
+ regs->cs = _cs;
+@@ -277,7 +403,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct fpu *next_fpu = &next->fpu;
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+- unsigned prev_fsindex, prev_gsindex;
+
+ switch_fpu_prepare(prev_fpu, cpu);
+
+@@ -286,8 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ *
+ * (e.g. xen_load_tls())
+ */
+- savesegment(fs, prev_fsindex);
+- savesegment(gs, prev_gsindex);
++ save_fsgs(prev_p);
+
+ /*
+ * Load TLS before restoring any segments so that segment loads
+@@ -326,108 +450,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ if (unlikely(next->ds | prev->ds))
+ loadsegment(ds, next->ds);
+
+- /*
+- * Switch FS and GS.
+- *
+- * These are even more complicated than DS and ES: they have
+- * 64-bit bases are that controlled by arch_prctl. The bases
+- * don't necessarily match the selectors, as user code can do
+- * any number of things to cause them to be inconsistent.
+- *
+- * We don't promise to preserve the bases if the selectors are
+- * nonzero. We also don't promise to preserve the base if the
+- * selector is zero and the base doesn't match whatever was
+- * most recently passed to ARCH_SET_FS/GS. (If/when the
+- * FSGSBASE instructions are enabled, we'll need to offer
+- * stronger guarantees.)
+- *
+- * As an invariant,
+- * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
+- * impossible.
+- */
+- if (next->fsindex) {
+- /* Loading a nonzero value into FS sets the index and base. */
+- loadsegment(fs, next->fsindex);
+- } else {
+- if (next->fsbase) {
+- /* Next index is zero but next base is nonzero. */
+- if (prev_fsindex)
+- loadsegment(fs, 0);
+- wrmsrl(MSR_FS_BASE, next->fsbase);
+- } else {
+- /* Next base and index are both zero. */
+- if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+- /*
+- * We don't know the previous base and can't
+- * find out without RDMSR. Forcibly clear it.
+- */
+- loadsegment(fs, __USER_DS);
+- loadsegment(fs, 0);
+- } else {
+- /*
+- * If the previous index is zero and ARCH_SET_FS
+- * didn't change the base, then the base is
+- * also zero and we don't need to do anything.
+- */
+- if (prev->fsbase || prev_fsindex)
+- loadsegment(fs, 0);
+- }
+- }
+- }
+- /*
+- * Save the old state and preserve the invariant.
+- * NB: if prev_fsindex == 0, then we can't reliably learn the base
+- * without RDMSR because Intel user code can zero it without telling
+- * us and AMD user code can program any 32-bit value without telling
+- * us.
+- */
+- if (prev_fsindex)
+- prev->fsbase = 0;
+- prev->fsindex = prev_fsindex;
+-
+- if (next->gsindex) {
+- /* Loading a nonzero value into GS sets the index and base. */
+- load_gs_index(next->gsindex);
+- } else {
+- if (next->gsbase) {
+- /* Next index is zero but next base is nonzero. */
+- if (prev_gsindex)
+- load_gs_index(0);
+- wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
+- } else {
+- /* Next base and index are both zero. */
+- if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+- /*
+- * We don't know the previous base and can't
+- * find out without RDMSR. Forcibly clear it.
+- *
+- * This contains a pointless SWAPGS pair.
+- * Fixing it would involve an explicit check
+- * for Xen or a new pvop.
+- */
+- load_gs_index(__USER_DS);
+- load_gs_index(0);
+- } else {
+- /*
+- * If the previous index is zero and ARCH_SET_GS
+- * didn't change the base, then the base is
+- * also zero and we don't need to do anything.
+- */
+- if (prev->gsbase || prev_gsindex)
+- load_gs_index(0);
+- }
+- }
+- }
+- /*
+- * Save the old state and preserve the invariant.
+- * NB: if prev_gsindex == 0, then we can't reliably learn the base
+- * without RDMSR because Intel user code can zero it without telling
+- * us and AMD user code can program any 32-bit value without telling
+- * us.
+- */
+- if (prev_gsindex)
+- prev->gsbase = 0;
+- prev->gsindex = prev_gsindex;
++ load_seg_legacy(prev->fsindex, prev->fsbase,
++ next->fsindex, next->fsbase, FS);
++ load_seg_legacy(prev->gsindex, prev->gsbase,
++ next->gsindex, next->gsbase, GS);
+
+ switch_fpu_finish(next_fpu, cpu);
+
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 5de4b3d04eb5..aa5d5f1a7d72 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2586,6 +2586,23 @@ static int init_resync(struct r1conf *conf)
+ return 0;
+ }
+
++static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
++{
++ struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
++ struct resync_pages *rps;
++ struct bio *bio;
++ int i;
++
++ for (i = conf->poolinfo->raid_disks; i--; ) {
++ bio = r1bio->bios[i];
++ rps = bio->bi_private;
++ bio_reset(bio);
++ bio->bi_private = rps;
++ }
++ r1bio->master_bio = NULL;
++ return r1bio;
++}
++
+ /*
+ * perform a "sync" on one "block"
+ *
+@@ -2671,7 +2688,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
+- r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
++ r1_bio = raid1_alloc_init_r1buf(conf);
+
+ raise_barrier(conf, sector_nr);
+
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index bfc6db236348..422bf26f37c6 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2807,6 +2807,35 @@ static int init_resync(struct r10conf *conf)
+ return 0;
+ }
+
++static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
++{
++ struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++ struct rsync_pages *rp;
++ struct bio *bio;
++ int nalloc;
++ int i;
++
++ if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
++ test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
++ nalloc = conf->copies; /* resync */
++ else
++ nalloc = 2; /* recovery */
++
++ for (i = 0; i < nalloc; i++) {
++ bio = r10bio->devs[i].bio;
++ rp = bio->bi_private;
++ bio_reset(bio);
++ bio->bi_private = rp;
++ bio = r10bio->devs[i].repl_bio;
++ if (bio) {
++ rp = bio->bi_private;
++ bio_reset(bio);
++ bio->bi_private = rp;
++ }
++ }
++ return r10bio;
++}
++
+ /*
+ * perform a "sync" on one "block"
+ *
+@@ -3036,7 +3065,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ atomic_inc(&mreplace->nr_pending);
+ rcu_read_unlock();
+
+- r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++ r10_bio = raid10_alloc_init_r10buf(conf);
+ r10_bio->state = 0;
+ raise_barrier(conf, rb2 != NULL);
+ atomic_set(&r10_bio->remaining, 0);
+@@ -3245,7 +3274,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ }
+ if (sync_blocks < max_sync)
+ max_sync = sync_blocks;
+- r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++ r10_bio = raid10_alloc_init_r10buf(conf);
+ r10_bio->state = 0;
+
+ r10_bio->mddev = mddev;
+@@ -4369,7 +4398,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
+
+ read_more:
+ /* Now schedule reads for blocks from sector_nr to last */
+- r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++ r10_bio = raid10_alloc_init_r10buf(conf);
+ r10_bio->state = 0;
+ raise_barrier(conf, sectors_done != 0);
+ atomic_set(&r10_bio->remaining, 0);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index e92dd2dc4b5a..2280bae40189 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6238,6 +6238,10 @@ static void raid5_do_work(struct work_struct *work)
+
+ spin_unlock_irq(&conf->device_lock);
+
++ flush_deferred_bios(conf);
++
++ r5l_flush_stripe_to_raid(conf->log);
++
+ async_tx_issue_pending_all();
+ blk_finish_plug(&plug);
+
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 2be963252ca5..e0e14f7cd208 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1055,6 +1055,7 @@ struct bcm_sf2_of_data {
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
++ unsigned int num_cfp_rules;
+ };
+
+ /* Register offsets for the SWITCH_REG_* block */
+@@ -1078,6 +1079,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
+ .type = BCM7445_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_7445_reg_offsets,
++ .num_cfp_rules = 256,
+ };
+
+ static const u16 bcm_sf2_7278_reg_offsets[] = {
+@@ -1100,6 +1102,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
+ .type = BCM7278_DEVICE_ID,
+ .core_reg_align = 1,
+ .reg_offsets = bcm_sf2_7278_reg_offsets,
++ .num_cfp_rules = 128,
+ };
+
+ static const struct of_device_id bcm_sf2_of_match[] = {
+@@ -1156,6 +1159,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+ priv->type = data->type;
+ priv->reg_offsets = data->reg_offsets;
+ priv->core_reg_align = data->core_reg_align;
++ priv->num_cfp_rules = data->num_cfp_rules;
+
+ /* Auto-detection using standard registers will not work, so
+ * provide an indication of what kind of device we are for
+diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
+index 7d3030e04f11..7f9125eef3df 100644
+--- a/drivers/net/dsa/bcm_sf2.h
++++ b/drivers/net/dsa/bcm_sf2.h
+@@ -72,6 +72,7 @@ struct bcm_sf2_priv {
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
++ unsigned int num_cfp_rules;
+
+ /* spinlock protecting access to the indirect registers */
+ spinlock_t indir_lock;
+diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
+index 2fb32d67065f..8a1da7e67707 100644
+--- a/drivers/net/dsa/bcm_sf2_cfp.c
++++ b/drivers/net/dsa/bcm_sf2_cfp.c
+@@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
+ {
+ u32 reg;
+
+- WARN_ON(addr >= CFP_NUM_RULES);
++ WARN_ON(addr >= priv->num_cfp_rules);
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+@@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
+ static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
+ {
+ /* Entry #0 is reserved */
+- return CFP_NUM_RULES - 1;
++ return priv->num_cfp_rules - 1;
+ }
+
+ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+@@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
+ if (!(reg & OP_STR_DONE))
+ break;
+
+- } while (index < CFP_NUM_RULES);
++ } while (index < priv->num_cfp_rules);
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+@@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ case ETHTOOL_GRXCLSRLCNT:
+ /* Subtract the default, unusable rule */
+ nfc->rule_cnt = bitmap_weight(priv->cfp.used,
+- CFP_NUM_RULES) - 1;
++ priv->num_cfp_rules) - 1;
+ /* We support specifying rule locations */
+ nfc->data |= RX_CLS_LOC_SPECIAL;
+ break;
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 5274501428e4..f328b3d86c13 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -593,7 +593,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
+
+ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+ {
+- dev_kfree_skb_any(cb->skb);
++ dev_consume_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+@@ -1342,6 +1342,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+
+ ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+ if (!ring->cbs) {
++ dma_free_coherent(kdev, sizeof(struct dma_desc),
++ ring->desc_cpu, ring->desc_dma);
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index a205a9ff9e17..ccb325cf03b5 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1203,7 +1203,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
+ /* Simple helper to free a control block's resources */
+ static void bcmgenet_free_cb(struct enet_cb *cb)
+ {
+- dev_kfree_skb_any(cb->skb);
++ dev_consume_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+@@ -1868,7 +1868,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
+ cb = ring->cbs + i;
+ skb = bcmgenet_rx_refill(priv, cb);
+ if (skb)
+- dev_kfree_skb_any(skb);
++ dev_consume_skb_any(skb);
+ if (!cb->skb)
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 3a34aa629f7d..f5d7eee6d420 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ list_del(&entry.list);
+ spin_unlock(&adap->mbox_lock);
+ ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
++ t4_record_mbox(adap, cmd, size, access, ret);
+ return ret;
+ }
+
+ /* Copy in the new mailbox command and send it on its way ... */
+- t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
++ t4_record_mbox(adap, cmd, size, access, 0);
+ for (i = 0; i < size; i += 8)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
+
+@@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ }
+
+ ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
+- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
++ t4_record_mbox(adap, cmd, size, access, ret);
+ dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ t4_report_fw_error(adap);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index f7c8649fd28f..01084cd4a5c1 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -173,10 +173,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ #endif /* CONFIG_M5272 */
+
+ /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
++ *
++ * 2048 byte skbufs are allocated. However, alignment requirements
++ * varies between FEC variants. Worst case is 64, so round down by 64.
+ */
+-#define PKT_MAXBUF_SIZE 1522
++#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
+ #define PKT_MINBUF_SIZE 64
+-#define PKT_MAXBLR_SIZE 1536
+
+ /* FEC receive acceleration */
+ #define FEC_RACC_IPDIS (1 << 1)
+@@ -848,7 +850,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
+- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
++ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+
+ /* enable DMA1/2 */
+ if (i)
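[Editor's note] The new receive-buffer cap is straightforward arithmetic: the skbufs are 2048 bytes, the worst-case alignment slack across FEC variants is 64, and 2048 - 64 = 1984, which is already a multiple of 64 (31 * 64), so round_down(2048 - 64, 64) = 1984. That single 1984-byte value replaces both the old 1522-byte maximum-packet constant and the 1536-byte value written to the ring's buffer-size register.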
+diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
+index 6e67d22fd0d5..1c7da16ad0ff 100644
+--- a/drivers/net/ethernet/freescale/fman/mac.c
++++ b/drivers/net/ethernet/freescale/fman/mac.c
+@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
+ goto no_mem;
+ }
+
++ pdev->dev.of_node = node;
++ pdev->dev.parent = priv->dev;
+ set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
+
+ ret = platform_device_add_data(pdev, &data, sizeof(data));
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 0ff166ec3e7e..aac8490c910a 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -3687,7 +3687,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
+ u32 tempval1 = gfar_read(&regs->maccfg1);
+ u32 tempval = gfar_read(&regs->maccfg2);
+ u32 ecntrl = gfar_read(&regs->ecntrl);
+- u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
++ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
+
+ if (phydev->duplex != priv->oldduplex) {
+ if (!(phydev->duplex))
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 33c901622ed5..2ad48150b826 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -6465,7 +6465,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
+ struct resource *res;
+ const char *dt_mac_addr;
+ const char *mac_from;
+- char hw_mac_addr[ETH_ALEN];
++ char hw_mac_addr[ETH_ALEN] = {0};
+ u32 id;
+ int features;
+ int phy_mode;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 3b39dbd97e57..5a1b85c18e60 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -258,6 +258,7 @@ struct mlx5e_dcbx {
+
+ /* The only setting that cannot be read from FW */
+ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
++ u8 cap;
+ };
+ #endif
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 2eb54d36e16e..c1d384fca4dc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+ static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+- struct mlx5e_dcbx *dcbx = &priv->dcbx;
+- u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
+-
+- if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+- mode |= DCB_CAP_DCBX_HOST;
+
+- return mode;
++ return priv->dcbx.cap;
+ }
+
+ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+@@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+ /* set dcbx to fw controlled */
+ if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
+ dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
++ dcbx->cap &= ~DCB_CAP_DCBX_HOST;
+ return 0;
+ }
+
+@@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+ if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+ return 1;
+
++ dcbx->cap = mode;
++
+ return 0;
+ }
+
+@@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+- *cap = (DCB_CAP_DCBX_LLD_MANAGED |
+- DCB_CAP_DCBX_VER_CEE |
+- DCB_CAP_DCBX_STATIC);
++ *cap = priv->dcbx.cap |
++ DCB_CAP_DCBX_VER_CEE |
++ DCB_CAP_DCBX_VER_IEEE;
+ break;
+ default:
+ *cap = 0;
+@@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
+ {
+ struct mlx5e_dcbx *dcbx = &priv->dcbx;
+
++ if (!MLX5_CAP_GEN(priv->mdev, qos))
++ return;
++
+ if (MLX5_CAP_GEN(priv->mdev, dcbx))
+ mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
+
++ priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
++ DCB_CAP_DCBX_VER_IEEE;
++ if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
++ priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
++
+ mlx5e_ets_init(priv);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 16486dff1493..a60f6f2fa4e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -580,8 +580,10 @@ static int mlx5e_set_channels(struct net_device *dev,
+
+ new_channels.params = priv->channels.params;
+ new_channels.params.num_channels = count;
+- mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
+- MLX5E_INDIR_RQT_SIZE, count);
++ if (!netif_is_rxfh_configured(priv->netdev))
++ mlx5e_build_default_indir_rqt(priv->mdev,
++ new_channels.params.indirection_rqt,
++ MLX5E_INDIR_RQT_SIZE, count);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 072aa8a13a0a..00b51252b803 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1936,6 +1936,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ }
+
+ mlx5e_build_common_cq_param(priv, param);
++ param->cq_period_mode = params->rx_cq_period_mode;
+ }
+
+ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 66b5fec15313..f70029d5eea1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -216,13 +216,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+ if (unlikely(!page))
+ return -ENOMEM;
+
+- dma_info->page = page;
+ dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+ put_page(page);
+ return -ENOMEM;
+ }
++ dma_info->page = page;
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 9df9fc0d26f5..558a8841c9a5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1262,12 +1262,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int ret;
+
+- dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
+- ret = dst->error;
+- if (ret) {
+- dst_release(dst);
++ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
++ fl6);
++ if (ret < 0)
+ return ret;
+- }
+
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index ab3bb026ff9e..091f03f0d8f0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -127,10 +127,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
+ return mlx5e_skb_l2_header_offset(skb);
+ }
+
+-static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+- struct sk_buff *skb)
++static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
++ struct sk_buff *skb)
+ {
+- int hlen;
++ u16 hlen;
+
+ switch (mode) {
+ case MLX5_INLINE_MODE_NONE:
+@@ -139,19 +139,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+ hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
+ hlen += VLAN_HLEN;
+- return hlen;
++ break;
+ case MLX5_INLINE_MODE_IP:
+ /* When transport header is set to zero, it means no transport
+ * header. When transport header is set to 0xff's, it means
+ * transport header wasn't set.
+ */
+- if (skb_transport_offset(skb))
+- return mlx5e_skb_l3_header_offset(skb);
++ if (skb_transport_offset(skb)) {
++ hlen = mlx5e_skb_l3_header_offset(skb);
++ break;
++ }
+ /* fall through */
+ case MLX5_INLINE_MODE_L2:
+ default:
+- return mlx5e_skb_l2_header_offset(skb);
++ hlen = mlx5e_skb_l2_header_offset(skb);
+ }
++ return min_t(u16, hlen, skb->len);
+ }
+
+ static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
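[Editor's note] The mlx5 change above both narrows the header-length type to u16 and clamps the computed inline length to the real packet length, so a runt frame can no longer claim more inline bytes than it contains. The clamp itself is just min_t; a trivial sketch with stand-in values:

    #include <stdint.h>
    #include <stdio.h>

    /* Equivalent of the final min_t(u16, hlen, skb->len) guard. */
    static uint16_t clamp_inline(uint16_t hlen, uint16_t skb_len)
    {
        return hlen < skb_len ? hlen : skb_len;
    }

    int main(void)
    {
        /* e.g. a computed header offset of 18 on a 14-byte runt frame */
        printf("inline %u bytes\n", clamp_inline(18, 14));
        return 0;
    }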
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index a53e982a6863..f28750bb56d6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -818,7 +818,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+- for (vport = 0; vport < nvports; vport++) {
++ for (vport = nvports - 1; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+index 3099630015d7..75a14547ee39 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+@@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+ {
+- /* arm_srq structs missing using identical xrc ones */
+- u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+- u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
++ u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
++ u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
+
+- MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+- MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
+- MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
++ MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
++ MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
++ MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
++ MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
+
+ return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 88357cee7679..940d61159b56 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -4110,6 +4110,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
+ return -EINVAL;
+ if (!info->linking)
+ break;
++ if (netdev_has_any_upper_dev(upper_dev))
++ return -EINVAL;
+ /* HW limitation forbids to put ports to multiple bridges. */
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
+@@ -4274,6 +4276,10 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ if (is_vlan_dev(upper_dev) &&
+ br_dev != mlxsw_sp->master_bridge.dev)
+ return -EINVAL;
++ if (!info->linking)
++ break;
++ if (netdev_has_any_upper_dev(upper_dev))
++ return -EINVAL;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+index 28ea0af89aef..e3223f2fe2ff 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
+ seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ seg_hdr->segNum = seg_number;
+ seg_hdr->segSize = seg_size;
+- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
++ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ }
+
+ /*
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 643c539a08ba..39293638d18e 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -1270,7 +1270,12 @@ static void netvsc_link_change(struct work_struct *w)
+ bool notify = false, reschedule = false;
+ unsigned long flags, next_reconfig, delay;
+
+- rtnl_lock();
++ /* if changes are happening, comeback later */
++ if (!rtnl_trylock()) {
++ schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
++ return;
++ }
++
+ net_device = rtnl_dereference(ndev_ctx->nvdev);
+ if (!net_device)
+ goto out_unlock;
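[Editor's note] The netvsc change swaps an unconditional rtnl_lock() for rtnl_trylock() plus a reschedule, so the workqueue thread never blocks waiting on a contended RTNL. A sketch of that trylock-or-defer shape using a pthread mutex as the stand-in lock (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Like netvsc_link_change(): if the lock is busy, requeue the work
     * instead of blocking (schedule_delayed_work() in the real driver). */
    static void link_change_work(void)
    {
        if (pthread_mutex_trylock(&big_lock) != 0) {
            printf("lock busy: rescheduling\n");
            return;
        }
        printf("processing link change\n");
        pthread_mutex_unlock(&big_lock);
    }

    int main(void)
    {
        link_change_work();
        return 0;
    }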
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 79411675f0e6..d16ce61b3696 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3518,6 +3518,7 @@ module_init(macsec_init);
+ module_exit(macsec_exit);
+
+ MODULE_ALIAS_RTNL_LINK("macsec");
++MODULE_ALIAS_GENL_FAMILY("macsec");
+
+ MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index b30d9ceee8bc..eebb0e1c70ff 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -749,9 +749,6 @@ void phy_stop_machine(struct phy_device *phydev)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ phydev->state = PHY_UP;
+ mutex_unlock(&phydev->lock);
+-
+- /* Now we can run the state machine synchronously */
+- phy_state_machine(&phydev->state_queue.work);
+ }
+
+ /**
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index f61f852d6cfd..83ad2ac0cbea 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -557,8 +557,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+
+ preempt_enable();
+
+- if (vhost_enable_notify(&net->dev, vq))
++ if (!vhost_vq_avail_empty(&net->dev, vq))
+ vhost_poll_queue(&vq->poll);
++ else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
++ vhost_disable_notify(&net->dev, vq);
++ vhost_poll_queue(&vq->poll);
++ }
++
+ mutex_unlock(&vq->mutex);
+
+ len = peek_head_len(sk);
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 907d6b7dde6a..86d813a3f5d1 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -291,7 +291,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+ return 0;
+
+ /* Get the previous summary */
+- for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
++ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ struct curseg_info *curseg = CURSEG_I(sbi, i);
+ if (curseg->segno == segno) {
+ sum = curseg->sum_blk->entries[blkoff];
+@@ -599,8 +599,6 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ }
+
+ clear_sbi_flag(sbi, SBI_POR_DOING);
+- if (err)
+- set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ mutex_unlock(&sbi->cp_mutex);
+
+ /* let's drop all the directory inodes for clean checkpoint */
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index c16d00e53264..13c65dd2d37d 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1222,9 +1222,6 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ struct fuse_in *in;
+ unsigned reqsize;
+
+- if (task_active_pid_ns(current) != fc->pid_ns)
+- return -EIO;
+-
+ restart:
+ spin_lock(&fiq->waitq.lock);
+ err = -EAGAIN;
+@@ -1262,6 +1259,13 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+
+ in = &req->in;
+ reqsize = in->h.len;
++
++ if (task_active_pid_ns(current) != fc->pid_ns) {
++ rcu_read_lock();
++ in->h.pid = pid_vnr(find_pid_ns(in->h.pid, fc->pid_ns));
++ rcu_read_unlock();
++ }
++
+ /* If request is too large, reply with an error and restart the read */
+ if (nbytes < reqsize) {
+ req->out.h.error = -EIO;
+@@ -1823,9 +1827,6 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
+ struct fuse_req *req;
+ struct fuse_out_header oh;
+
+- if (task_active_pid_ns(current) != fc->pid_ns)
+- return -EIO;
+-
+ if (nbytes < sizeof(struct fuse_out_header))
+ return -EINVAL;
+
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 76eac2a554c4..e6d40e4f5e83 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2180,9 +2180,6 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
+ if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
+ return 0;
+
+- if (pid && pid_nr == 0)
+- return -EOVERFLOW;
+-
+ fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
+ err = fuse_simple_request(fc, &args);
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 4ed952c17fc7..663c46ee0658 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3868,6 +3868,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
+ bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
+ struct net_device *upper_dev);
+
++bool netdev_has_any_upper_dev(struct net_device *dev);
++
+ void *netdev_lower_get_next_private(struct net_device *dev,
+ struct list_head **iter);
+ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
+index 5932e6de8fc0..634d19203e7d 100644
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -1,14 +1,9 @@
+ #ifndef __NET_FRAG_H__
+ #define __NET_FRAG_H__
+
+-#include <linux/percpu_counter.h>
+-
+ struct netns_frags {
+- /* The percpu_counter "mem" need to be cacheline aligned.
+- * mem.count must not share cacheline with other writers
+- */
+- struct percpu_counter mem ____cacheline_aligned_in_smp;
+-
++ /* Keep atomic mem on separate cachelines in structs that include it */
++ atomic_t mem ____cacheline_aligned_in_smp;
+ /* sysctls */
+ int timeout;
+ int high_thresh;
+@@ -108,15 +103,10 @@ struct inet_frags {
+ int inet_frags_init(struct inet_frags *);
+ void inet_frags_fini(struct inet_frags *);
+
+-static inline int inet_frags_init_net(struct netns_frags *nf)
+-{
+- return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
+-}
+-static inline void inet_frags_uninit_net(struct netns_frags *nf)
++static inline void inet_frags_init_net(struct netns_frags *nf)
+ {
+- percpu_counter_destroy(&nf->mem);
++ atomic_set(&nf->mem, 0);
+ }
+-
+ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+
+ void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
+@@ -140,31 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+
+ /* Memory Tracking Functions. */
+
+-/* The default percpu_counter batch size is not big enough to scale to
+- * fragmentation mem acct sizes.
+- * The mem size of a 64K fragment is approx:
+- * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
+- */
+-static unsigned int frag_percpu_counter_batch = 130000;
+-
+ static inline int frag_mem_limit(struct netns_frags *nf)
+ {
+- return percpu_counter_read(&nf->mem);
++ return atomic_read(&nf->mem);
+ }
+
+ static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+- percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
++ atomic_sub(i, &nf->mem);
+ }
+
+ static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+- percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
++ atomic_add(i, &nf->mem);
+ }
+
+-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
++static inline int sum_frag_mem_limit(struct netns_frags *nf)
+ {
+- return percpu_counter_sum_positive(&nf->mem);
++ return atomic_read(&nf->mem);
+ }
+
+ /* RFC 3168 support :
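[Editor's note] The deleted comment records why the percpu counter needed a custom batch: one worst-case 64K IPv4 datagram accounts for roughly 44 fragments * 2944 bytes of truesize + 200 bytes of frag_queue struct = 129,736 bytes, far beyond the default batch size. This release opts for simplicity instead: a plain atomic_t gives exact readings and an init that cannot fail, at the price of every CPU touching the one (cacheline-aligned) counter.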
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index c979c878df1c..0f29ea1bc7bf 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -70,6 +70,7 @@ struct fib6_node {
+ __u16 fn_flags;
+ int fn_sernum;
+ struct rt6_info *rr_ptr;
++ struct rcu_head rcu;
+ };
+
+ #ifndef CONFIG_IPV6_SUBTREES
+@@ -104,7 +105,7 @@ struct rt6_info {
+ * the same cache line.
+ */
+ struct fib6_table *rt6i_table;
+- struct fib6_node *rt6i_node;
++ struct fib6_node __rcu *rt6i_node;
+
+ struct in6_addr rt6i_gateway;
+
+@@ -167,13 +168,39 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
+ rt0->rt6i_flags |= RTF_EXPIRES;
+ }
+
++/* Function to safely get fn->sernum for passed in rt
++ * and store result in passed in cookie.
++ * Return true if we can get cookie safely
++ * Return false if not
++ */
++static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
++ u32 *cookie)
++{
++ struct fib6_node *fn;
++ bool status = false;
++
++ rcu_read_lock();
++ fn = rcu_dereference(rt->rt6i_node);
++
++ if (fn) {
++ *cookie = fn->fn_sernum;
++ status = true;
++ }
++
++ rcu_read_unlock();
++ return status;
++}
++
+ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
+ {
++ u32 cookie = 0;
++
+ if (rt->rt6i_flags & RTF_PCPU ||
+ (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
+ rt = (struct rt6_info *)(rt->dst.from);
++ rt6_get_cookie_safe(rt, &cookie);
+
+- return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
++ return cookie;
+ }
+
+ static inline void ip6_rt_put(struct rt6_info *rt)
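/*
 * Sketch, not part of the patch above: rt6_get_cookie_safe() only reads
 * fn_sernum after rcu_dereference() confirms, inside an RCU read-side
 * section, that the node pointer is still non-NULL. A userspace analogue
 * of that publish/read pattern with C11 acquire/release atomics; the
 * struct and names are illustrative only, and this models the pointer
 * handshake, not RCU grace periods.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct node {
	uint32_t sernum;
};

static _Atomic(struct node *) published_node;	/* stands in for rt6i_node */

static void publish_node(struct node *n)
{
	/* release pairs with the acquire load below, much as
	 * rcu_assign_pointer() pairs with rcu_dereference() */
	atomic_store_explicit(&published_node, n, memory_order_release);
}

static bool get_cookie_safe(uint32_t *cookie)
{
	struct node *n = atomic_load_explicit(&published_node,
					      memory_order_acquire);

	if (!n)
		return false;		/* node already unlinked from the tree */
	*cookie = n->sernum;
	return true;
}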
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 1933442cf1a6..a1bc3e7934d6 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -265,7 +265,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
+ }
+
+ void udp_v4_early_demux(struct sk_buff *skb);
+-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
++bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+ int udp_get_port(struct sock *sk, unsigned short snum,
+ int (*saddr_cmp)(const struct sock *,
+ const struct sock *));
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 004334ea13ba..06c55ac15b07 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -652,12 +652,27 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
+ }
+ }
+
++static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
++{
++ return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
++ BITS_PER_LONG == 64;
++}
++
++static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
++{
++ u32 size = htab->map.value_size;
++
++ if (percpu || fd_htab_map_needs_adjust(htab))
++ size = round_up(size, 8);
++ return size;
++}
++
+ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ void *value, u32 key_size, u32 hash,
+ bool percpu, bool onallcpus,
+ struct htab_elem *old_elem)
+ {
+- u32 size = htab->map.value_size;
++ u32 size = htab_size_value(htab, percpu);
+ bool prealloc = htab_is_prealloc(htab);
+ struct htab_elem *l_new, **pl_new;
+ void __percpu *pptr;
+@@ -696,9 +711,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+
+ memcpy(l_new->key, key, key_size);
+ if (percpu) {
+- /* round up value_size to 8 bytes */
+- size = round_up(size, 8);
+-
+ if (prealloc) {
+ pptr = htab_elem_get_ptr(l_new, key_size);
+ } else {
+@@ -1209,17 +1221,9 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
+
+ static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+ {
+- struct bpf_map *map;
+-
+ if (attr->value_size != sizeof(u32))
+ return ERR_PTR(-EINVAL);
+-
+- /* pointer is stored internally */
+- attr->value_size = sizeof(void *);
+- map = htab_map_alloc(attr);
+- attr->value_size = sizeof(u32);
+-
+- return map;
++ return htab_map_alloc(attr);
+ }
+
+ static void fd_htab_map_free(struct bpf_map *map)
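/*
 * Sketch, not part of the patch above: htab_size_value() rounds the stored
 * value size up to 8 bytes whenever the slot may hold a pointer (per-cpu
 * maps, and now hash-of-maps on 64-bit), so fd_htab_map_alloc() no longer
 * has to patch attr->value_size in place around the allocation. The same
 * rounding, spelled out with a local macro (not the kernel's round_up()):
 */
#include <assert.h>
#include <stdint.h>

#define ROUND_UP_POW2(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	assert(ROUND_UP_POW2(4, 8) == 8);	/* a u32 value gets an 8-byte slot */
	assert(ROUND_UP_POW2(8, 8) == 8);	/* aligned sizes are unchanged */
	assert(ROUND_UP_POW2(13, 8) == 16);
	return 0;
}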
+diff --git a/lib/idr.c b/lib/idr.c
+index b13682bb0a1c..20c2779e8d12 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -154,7 +154,7 @@ void *idr_replace(struct idr *idr, void *ptr, int id)
+ void __rcu **slot = NULL;
+ void *entry;
+
+- if (WARN_ON_ONCE(id < 0))
++ if (id < 0)
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
+ return ERR_PTR(-EINVAL);
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index f0f3447e8aa4..b5d76bcb2d43 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -53,6 +53,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ brstats->tx_bytes += skb->len;
+ u64_stats_update_end(&brstats->syncp);
+
++#ifdef CONFIG_NET_SWITCHDEV
++ skb->offload_fwd_mark = 0;
++#endif
+ BR_INPUT_SKB_CB(skb)->brdev = dev;
+
+ skb_reset_mac_header(skb);
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index db1866f2ffcf..25c803e520da 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -345,7 +345,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
+ if (flags & MSG_PEEK) {
+ err = -ENOENT;
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+- if (skb == skb_peek(&sk->sk_receive_queue)) {
++ if (skb->next) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ atomic_dec(&skb->users);
+ if (destructor)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 528edc68a64a..3a40e30c8388 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5590,12 +5590,13 @@ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
+ * Find out if a device is linked to an upper device and return true in case
+ * it is. The caller must hold the RTNL lock.
+ */
+-static bool netdev_has_any_upper_dev(struct net_device *dev)
++bool netdev_has_any_upper_dev(struct net_device *dev)
+ {
+ ASSERT_RTNL();
+
+ return !list_empty(&dev->adj_list.upper);
+ }
++EXPORT_SYMBOL(netdev_has_any_upper_dev);
+
+ /**
+ * netdev_master_upper_dev_get - Get master upper device
+diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
+index 30d875dff6b5..f85b08baff16 100644
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -580,19 +580,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
+ {
+ struct netns_ieee802154_lowpan *ieee802154_lowpan =
+ net_ieee802154_lowpan(net);
+- int res;
+
+ ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+
+- res = inet_frags_init_net(&ieee802154_lowpan->frags);
+- if (res)
+- return res;
+- res = lowpan_frags_ns_sysctl_register(net);
+- if (res)
+- inet_frags_uninit_net(&ieee802154_lowpan->frags);
+- return res;
++ inet_frags_init_net(&ieee802154_lowpan->frags);
++
++ return lowpan_frags_ns_sysctl_register(net);
+ }
+
+ static void __net_exit lowpan_frags_exit_net(struct net *net)
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index b5e9317eaf9e..631c0d0d7cf8 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -234,10 +234,8 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+ cond_resched();
+
+ if (read_seqretry(&f->rnd_seqlock, seq) ||
+- percpu_counter_sum(&nf->mem))
++ sum_frag_mem_limit(nf))
+ goto evict_again;
+-
+- percpu_counter_destroy(&nf->mem);
+ }
+ EXPORT_SYMBOL(inet_frags_exit_net);
+
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index b3cdeec85f1f..4bf3b8af0257 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -844,8 +844,6 @@ static void __init ip4_frags_ctl_register(void)
+
+ static int __net_init ipv4_frags_init_net(struct net *net)
+ {
+- int res;
+-
+ /* Fragment cache limits.
+ *
+ * The fragment memory accounting code, (tries to) account for
+@@ -871,13 +869,9 @@ static int __net_init ipv4_frags_init_net(struct net *net)
+
+ net->ipv4.frags.max_dist = 64;
+
+- res = inet_frags_init_net(&net->ipv4.frags);
+- if (res)
+- return res;
+- res = ip4_frags_ns_ctl_register(net);
+- if (res)
+- inet_frags_uninit_net(&net->ipv4.frags);
+- return res;
++ inet_frags_init_net(&net->ipv4.frags);
++
++ return ip4_frags_ns_ctl_register(net);
+ }
+
+ static void __net_exit ipv4_frags_exit_net(struct net *net)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index c991b97cbb28..2a7bff749764 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1762,13 +1762,14 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ /* For TCP sockets, sk_rx_dst is protected by socket lock
+ * For UDP, we use xchg() to guard against concurrent changes.
+ */
+-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
++bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+ struct dst_entry *old;
+
+ dst_hold(dst);
+ old = xchg(&sk->sk_rx_dst, dst);
+ dst_release(old);
++ return old != dst;
+ }
+
+ /*
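/*
 * Sketch, not part of the patch above: udp_sk_rx_dst_set() now reports
 * whether the cached dst actually changed, which lets the IPv6 side
 * (udp6_sk_rx_dst_set(), later in this patch) refresh its route cookie
 * only when it must. A userspace analogue of the lock-free swap with C11
 * atomic_exchange; the refcounting is a stand-in, not the dst API.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct dst {
	atomic_int refcnt;
};

static _Atomic(struct dst *) sk_rx_dst;

static bool rx_dst_set(struct dst *dst)
{
	struct dst *old;

	atomic_fetch_add(&dst->refcnt, 1);		/* dst_hold() */
	old = atomic_exchange(&sk_rx_dst, dst);		/* xchg() */
	if (old)
		atomic_fetch_sub(&old->refcnt, 1);	/* dst_release() */
	return old != dst;				/* true if it changed */
}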
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 39a44c0598f7..d16d642ea322 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5541,7 +5541,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+ * our DAD process, so we don't need
+ * to do it again
+ */
+- if (!(ifp->rt->rt6i_node))
++ if (!rcu_access_pointer(ifp->rt->rt6i_node))
+ ip6_ins_rt(ifp->rt);
+ if (ifp->idev->cnf.forwarding)
+ addrconf_join_anycast(ifp);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index cd8dd8c4e819..fa03fa469f92 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -148,11 +148,23 @@ static struct fib6_node *node_alloc(void)
+ return fn;
+ }
+
+-static void node_free(struct fib6_node *fn)
++static void node_free_immediate(struct fib6_node *fn)
++{
++ kmem_cache_free(fib6_node_kmem, fn);
++}
++
++static void node_free_rcu(struct rcu_head *head)
+ {
++ struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
++
+ kmem_cache_free(fib6_node_kmem, fn);
+ }
+
++static void node_free(struct fib6_node *fn)
++{
++ call_rcu(&fn->rcu, node_free_rcu);
++}
++
+ static void rt6_rcu_free(struct rt6_info *rt)
+ {
+ call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+@@ -189,6 +201,12 @@ static void rt6_release(struct rt6_info *rt)
+ }
+ }
+
++static void fib6_free_table(struct fib6_table *table)
++{
++ inetpeer_invalidate_tree(&table->tb6_peers);
++ kfree(table);
++}
++
+ static void fib6_link_table(struct net *net, struct fib6_table *tb)
+ {
+ unsigned int h;
+@@ -599,9 +617,9 @@ static struct fib6_node *fib6_add_1(struct fib6_node *root,
+
+ if (!in || !ln) {
+ if (in)
+- node_free(in);
++ node_free_immediate(in);
+ if (ln)
+- node_free(ln);
++ node_free_immediate(ln);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -875,7 +893,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+
+ rt->dst.rt6_next = iter;
+ *ins = rt;
+- rt->rt6i_node = fn;
++ rcu_assign_pointer(rt->rt6i_node, fn);
+ atomic_inc(&rt->rt6i_ref);
+ if (!info->skip_notify)
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
+@@ -901,7 +919,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+ return err;
+
+ *ins = rt;
+- rt->rt6i_node = fn;
++ rcu_assign_pointer(rt->rt6i_node, fn);
+ rt->dst.rt6_next = iter->dst.rt6_next;
+ atomic_inc(&rt->rt6i_ref);
+ if (!info->skip_notify)
+@@ -1035,7 +1053,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ root, and then (in failure) stale node
+ in main tree.
+ */
+- node_free(sfn);
++ node_free_immediate(sfn);
+ err = PTR_ERR(sn);
+ goto failure;
+ }
+@@ -1463,8 +1481,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
+
+ int fib6_del(struct rt6_info *rt, struct nl_info *info)
+ {
++ struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
++ lockdep_is_held(&rt->rt6i_table->tb6_lock));
+ struct net *net = info->nl_net;
+- struct fib6_node *fn = rt->rt6i_node;
+ struct rt6_info **rtp;
+
+ #if RT6_DEBUG >= 2
+@@ -1653,7 +1672,9 @@ static int fib6_clean_node(struct fib6_walker *w)
+ if (res) {
+ #if RT6_DEBUG >= 2
+ pr_debug("%s: del failed: rt=%p@%p err=%d\n",
+- __func__, rt, rt->rt6i_node, res);
++ __func__, rt,
++ rcu_access_pointer(rt->rt6i_node),
++ res);
+ #endif
+ continue;
+ }
+@@ -1775,8 +1796,10 @@ static int fib6_age(struct rt6_info *rt, void *arg)
+ }
+ gc_args->more++;
+ } else if (rt->rt6i_flags & RTF_CACHE) {
++ if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout))
++ rt->dst.obsolete = DST_OBSOLETE_KILL;
+ if (atomic_read(&rt->dst.__refcnt) == 0 &&
+- time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
++ rt->dst.obsolete == DST_OBSOLETE_KILL) {
+ RT6_TRACE("aging clone %p\n", rt);
+ return -1;
+ } else if (rt->rt6i_flags & RTF_GATEWAY) {
+@@ -1894,15 +1917,22 @@ static int __net_init fib6_net_init(struct net *net)
+
+ static void fib6_net_exit(struct net *net)
+ {
++ unsigned int i;
++
+ rt6_ifdown(net, NULL);
+ del_timer_sync(&net->ipv6.ip6_fib_timer);
+
+-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+- inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
+- kfree(net->ipv6.fib6_local_tbl);
+-#endif
+- inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
+- kfree(net->ipv6.fib6_main_tbl);
++ for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
++ struct hlist_head *head = &net->ipv6.fib_table_hash[i];
++ struct hlist_node *tmp;
++ struct fib6_table *tb;
++
++ hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
++ hlist_del(&tb->tb6_hlist);
++ fib6_free_table(tb);
++ }
++ }
++
+ kfree(net->ipv6.fib_table_hash);
+ kfree(net->ipv6.rt6_stats);
+ }
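/*
 * Sketch, not part of the patch above: the ip6_fib.c hunks split
 * node_free() into an immediate kmem_cache_free() for nodes that were
 * never visible to RCU readers, and a call_rcu()-deferred free for nodes
 * that were. A toy single-threaded model of that split, where the grace
 * period is simulated by draining a pending list; all names are made up.
 */
#include <stdlib.h>

struct mock_node {
	struct mock_node *next_pending;
};

static struct mock_node *pending;	/* nodes awaiting the "grace period" */

static void node_free_immediate(struct mock_node *fn)
{
	free(fn);			/* never published, safe to free now */
}

static void node_free_deferred(struct mock_node *fn)
{
	fn->next_pending = pending;	/* plays the role of call_rcu() */
	pending = fn;
}

static void fake_grace_period(void)
{
	while (pending) {
		struct mock_node *fn = pending;

		pending = fn->next_pending;
		free(fn);		/* all readers are guaranteed gone */
	}
}

int main(void)
{
	struct mock_node *a = malloc(sizeof(*a));
	struct mock_node *b = malloc(sizeof(*b));

	node_free_immediate(a);		/* free(NULL) is harmless */
	if (b)
		node_free_deferred(b);
	fake_grace_period();
	return 0;
}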
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 64eea3962733..ca2a45134c6b 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -432,7 +432,9 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ }
+ break;
+ case ICMPV6_PKT_TOOBIG:
+- mtu = be32_to_cpu(info) - offset;
++ mtu = be32_to_cpu(info) - offset - t->tun_hlen;
++ if (t->dev->type == ARPHRD_ETHER)
++ mtu -= ETH_HLEN;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ t->dev->mtu = mtu;
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index a531ba032b85..f78478fdbfb9 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -242,7 +242,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ pktopt = xchg(&np->pktoptions, NULL);
+ kfree_skb(pktopt);
+
+- sk->sk_destruct = inet_sock_destruct;
+ /*
+ * ... and add it to the refcnt debug socks count
+ * in the new family. -acme
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 986d4ca38832..b263bf3a19f7 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -622,18 +622,12 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
+
+ static int nf_ct_net_init(struct net *net)
+ {
+- int res;
+-
+ net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
+- res = inet_frags_init_net(&net->nf_frag.frags);
+- if (res)
+- return res;
+- res = nf_ct_frag6_sysctl_register(net);
+- if (res)
+- inet_frags_uninit_net(&net->nf_frag.frags);
+- return res;
++ inet_frags_init_net(&net->nf_frag.frags);
++
++ return nf_ct_frag6_sysctl_register(net);
+ }
+
+ static void nf_ct_net_exit(struct net *net)
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index abb2c307fbe8..a338bbc33cf3 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
+- unsigned int len;
+
+ switch (**nexthdr) {
+
+@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+
+ exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+ offset);
+- len = ipv6_optlen(exthdr);
+- if (len + offset >= IPV6_MAXPLEN)
++ offset += ipv6_optlen(exthdr);
++ if (offset > IPV6_MAXPLEN)
+ return -EINVAL;
+- offset += len;
+ *nexthdr = &exthdr->nexthdr;
+ }
+
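/*
 * Sketch, not part of the patch above: the ip6_find_1stfragopt hunk
 * advances the offset first and bounds-checks afterwards, so an option
 * chain ending exactly at IPV6_MAXPLEN is accepted while anything past
 * it is still rejected. The boundary behaviour, checked directly with an
 * illustrative helper:
 */
#include <assert.h>

#define IPV6_MAXPLEN	65535

/* returns -1 on overflow, else the advanced offset */
static int advance_offset(int offset, int optlen)
{
	offset += optlen;
	return offset > IPV6_MAXPLEN ? -1 : offset;
}

int main(void)
{
	assert(advance_offset(65527, 8) == 65535);	/* at the limit: accepted */
	assert(advance_offset(65528, 8) == -1);		/* past it: rejected */
	return 0;
}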
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index e1da5b888cc4..846012eae526 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -714,19 +714,13 @@ static void ip6_frags_sysctl_unregister(void)
+
+ static int __net_init ipv6_frags_init_net(struct net *net)
+ {
+- int res;
+-
+ net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
+
+- res = inet_frags_init_net(&net->ipv6.frags);
+- if (res)
+- return res;
+- res = ip6_frags_ns_sysctl_register(net);
+- if (res)
+- inet_frags_uninit_net(&net->ipv6.frags);
+- return res;
++ inet_frags_init_net(&net->ipv6.frags);
++
++ return ip6_frags_ns_sysctl_register(net);
+ }
+
+ static void __net_exit ipv6_frags_exit_net(struct net *net)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index aeb7097acc0a..9b93b4a1f48e 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -444,7 +444,8 @@ static bool rt6_check_expired(const struct rt6_info *rt)
+ if (time_after(jiffies, rt->dst.expires))
+ return true;
+ } else if (rt->dst.from) {
+- return rt6_check_expired((struct rt6_info *) rt->dst.from);
++ return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
++ rt6_check_expired((struct rt6_info *)rt->dst.from);
+ }
+ return false;
+ }
+@@ -1289,7 +1290,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
+
+ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
+ {
+- if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
++ u32 rt_cookie = 0;
++
++ if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
+ return NULL;
+
+ if (rt6_check_expired(rt))
+@@ -1357,8 +1360,14 @@ static void ip6_link_failure(struct sk_buff *skb)
+ if (rt->rt6i_flags & RTF_CACHE) {
+ dst_hold(&rt->dst);
+ ip6_del_rt(rt);
+- } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
+- rt->rt6i_node->fn_sernum = -1;
++ } else {
++ struct fib6_node *fn;
++
++ rcu_read_lock();
++ fn = rcu_dereference(rt->rt6i_node);
++ if (fn && (rt->rt6i_flags & RTF_DEFAULT))
++ fn->fn_sernum = -1;
++ rcu_read_unlock();
+ }
+ }
+ }
+@@ -1375,7 +1384,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
+ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
+ {
+ return !(rt->rt6i_flags & RTF_CACHE) &&
+- (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
++ (rt->rt6i_flags & RTF_PCPU ||
++ rcu_access_pointer(rt->rt6i_node));
+ }
+
+ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 592270c310f4..5c7b2a94e358 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -752,6 +752,15 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
+ return 0;
+ }
+
++static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
++{
++ if (udp_sk_rx_dst_set(sk, dst)) {
++ const struct rt6_info *rt = (const struct rt6_info *)dst;
++
++ inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
++ }
++}
++
+ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ int proto)
+ {
+@@ -801,7 +810,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ int ret;
+
+ if (unlikely(sk->sk_rx_dst != dst))
+- udp_sk_rx_dst_set(sk, dst);
++ udp6_sk_rx_dst_set(sk, dst);
+
+ ret = udpv6_queue_rcv_skb(sk, skb);
+ sock_put(sk);
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index deca20fb2ce2..0ddcb209bea6 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1383,6 +1383,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ if (!csk)
+ return -EINVAL;
+
++ /* We must prevent loops or risk deadlock ! */
++ if (csk->sk_family == PF_KCM)
++ return -EOPNOTSUPP;
++
+ psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
+ if (!psock)
+ return -ENOMEM;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index aa2d4000bafc..2b31a69d42a5 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2192,6 +2192,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct timespec ts;
+ __u32 ts_status;
+ bool is_drop_n_account = false;
++ bool do_vnet = false;
+
+ /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
+ * We may add members to them until current aligned size without forcing
+@@ -2242,8 +2243,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ netoff = TPACKET_ALIGN(po->tp_hdrlen +
+ (maclen < 16 ? 16 : maclen)) +
+ po->tp_reserve;
+- if (po->has_vnet_hdr)
++ if (po->has_vnet_hdr) {
+ netoff += sizeof(struct virtio_net_hdr);
++ do_vnet = true;
++ }
+ macoff = netoff - maclen;
+ }
+ if (po->tp_version <= TPACKET_V2) {
+@@ -2260,8 +2263,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ skb_set_owner_r(copy_skb, sk);
+ }
+ snaplen = po->rx_ring.frame_size - macoff;
+- if ((int)snaplen < 0)
++ if ((int)snaplen < 0) {
+ snaplen = 0;
++ do_vnet = false;
++ }
+ }
+ } else if (unlikely(macoff + snaplen >
+ GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
+@@ -2274,6 +2279,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ if (unlikely((int)snaplen < 0)) {
+ snaplen = 0;
+ macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
++ do_vnet = false;
+ }
+ }
+ spin_lock(&sk->sk_receive_queue.lock);
+@@ -2299,7 +2305,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+- if (po->has_vnet_hdr) {
++ if (do_vnet) {
+ if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ sizeof(struct virtio_net_hdr),
+ vio_le(), true)) {
+diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
+index 9a647214a91e..e99518e79b52 100644
+--- a/net/sctp/sctp_diag.c
++++ b/net/sctp/sctp_diag.c
+@@ -70,7 +70,8 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
+
+ info = nla_data(attr);
+ list_for_each_entry_rcu(laddr, address_list, list) {
+- memcpy(info, &laddr->a, addrlen);
++ memcpy(info, &laddr->a, sizeof(laddr->a));
++ memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
+ info += addrlen;
+ }
+
+@@ -93,7 +94,9 @@ static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
+ info = nla_data(attr);
+ list_for_each_entry(from, &asoc->peer.transport_addr_list,
+ transports) {
+- memcpy(info, &from->ipaddr, addrlen);
++ memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
++ memset(info + sizeof(from->ipaddr), 0,
++ addrlen - sizeof(from->ipaddr));
+ info += addrlen;
+ }
+
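/*
 * Sketch, not part of the patch above: the sctp_diag hunks copy only the
 * address union itself and then zero the rest of the netlink slot, so no
 * uninitialized kernel bytes can leak to userspace when addrlen is larger
 * than the address. The copy-then-pad idiom in plain C; the struct and
 * helper are illustrative, not kernel API, and the caller guarantees
 * addrlen >= sizeof(*a), as the kernel code does.
 */
#include <string.h>

struct addr_union {
	unsigned char bytes[16];	/* stands in for union sctp_addr */
};

static void fill_addr_slot(unsigned char *info, size_t addrlen,
			   const struct addr_union *a)
{
	memcpy(info, a, sizeof(*a));	/* the real payload */
	memset(info + sizeof(*a), 0,	/* zero the padding tail */
	       addrlen - sizeof(*a));
}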
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 3a8318e518f1..51532a1da8c6 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4538,8 +4538,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+ info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
+
+ prim = asoc->peer.primary_path;
+- memcpy(&info->sctpi_p_address, &prim->ipaddr,
+- sizeof(struct sockaddr_storage));
++ memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
+ info->sctpi_p_state = prim->state;
+ info->sctpi_p_cwnd = prim->cwnd;
+ info->sctpi_p_srtt = prim->srtt;
+diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
+index aa3624d50278..8354479178b9 100644
+--- a/net/sctp/ulpqueue.c
++++ b/net/sctp/ulpqueue.c
+@@ -265,7 +265,8 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+ sctp_ulpq_clear_pd(ulpq);
+
+ if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+- sp->data_ready_signalled = 1;
++ if (!sock_owned_by_user(sk))
++ sp->data_ready_signalled = 1;
+ sk->sk_data_ready(sk);
+ }
+ return 1;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 1b92b72e812f..a0f50278901b 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2255,8 +2255,8 @@ void tipc_sk_reinit(struct net *net)
+
+ do {
+ tsk = ERR_PTR(rhashtable_walk_start(&iter));
+- if (tsk)
+- continue;
++ if (IS_ERR(tsk))
++ goto walk_stop;
+
+ while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
+ spin_lock_bh(&tsk->sk.sk_lock.slock);
+@@ -2265,7 +2265,7 @@ void tipc_sk_reinit(struct net *net)
+ msg_set_orignode(msg, tn->own_addr);
+ spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ }
+-
++walk_stop:
+ rhashtable_walk_stop(&iter);
+ } while (tsk == ERR_PTR(-EAGAIN));
+ }
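/*
 * Sketch, not part of the patch above: the tipc hunk fixes the error
 * handling around rhashtable_walk_start(), whose result is ERR_PTR-
 * encoded -- the old "if (tsk) continue" caught the error only by
 * accident of that encoding and bailed out without ever calling
 * rhashtable_walk_stop(), while the fix tests IS_ERR() explicitly and
 * jumps past the walk to the stop call. A minimal model of the ERR_PTR
 * convention; these are local re-implementations, not the kernel's.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error)
{
	return (void *)(intptr_t)error;
}

static bool IS_ERR(const void *ptr)
{
	/* the top MAX_ERRNO addresses are reserved for encoded errnos */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *ok = ERR_PTR(0);		/* NULL: success, not an error */
	void *bad = ERR_PTR(-11);	/* encoded -EAGAIN */

	return IS_ERR(bad) && !IS_ERR(ok) ? 0 : 1;
}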