From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sat, 26 Jan 2019 14:59:36 +0000 (UTC)
Message-ID: <1548514744.7925b86f90f144813faafb0726a435199798af82.mpagano@gentoo>

commit:     7925b86f90f144813faafb0726a435199798af82
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 26 14:59:04 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 26 14:59:04 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7925b86f

proj/linux-patches: Linux patch 4.4.172

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1171_linux-4.4.172.patch | 4333 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4337 insertions(+)

diff --git a/0000_README b/0000_README
index fb7be63..02e6688 100644
--- a/0000_README
+++ b/0000_README
@@ -727,6 +727,10 @@ Patch:  1170_linux-4.4.171.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.171
 
+Patch:  1171_linux-4.4.172.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.172
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1171_linux-4.4.172.patch b/1171_linux-4.4.172.patch
new file mode 100644
index 0000000..ca36344
--- /dev/null
+++ b/1171_linux-4.4.172.patch
@@ -0,0 +1,4333 @@
+diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
+index 6d2689ebf824..5b87946a53a3 100644
+--- a/Documentation/filesystems/proc.txt
++++ b/Documentation/filesystems/proc.txt
+@@ -466,7 +466,9 @@ manner. The codes are the following:
+ 
+ Note that there is no guarantee that every flag and associated mnemonic will
+ be present in all further kernel releases. Things get changed, the flags may
+-be vanished or the reverse -- new added.
++be vanished or the reverse -- new added. Interpretation of their meaning
++might change in future as well. So each consumer of these flags has to
++follow each specific kernel version for the exact semantic.
+ 
+ This file is only present if the CONFIG_MMU kernel configuration option is
+ enabled.
+diff --git a/Makefile b/Makefile
+index c6b680faedd8..2aa8db459a74 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 171
++SUBLEVEL = 172
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index ef8e13d379cb..d7e7cf56e8d6 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -23,6 +23,8 @@
+ #include <asm/types.h>
+ 
+ /* Hyp Configuration Register (HCR) bits */
++#define HCR_API		(UL(1) << 41)
++#define HCR_APK		(UL(1) << 40)
+ #define HCR_ID		(UL(1) << 33)
+ #define HCR_CD		(UL(1) << 32)
+ #define HCR_RW_SHIFT	31
+@@ -81,6 +83,7 @@
+ 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
+ #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+ #define HCR_INT_OVERRIDE   (HCR_FMO | HCR_IMO)
++#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+ 
+ 
+ /* Hyp System Control Register (SCTLR_EL2) bits */
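
Setting HCR_API (bit 41) and HCR_APK (bit 40) alongside HCR_RW stops EL1 accesses to the ARMv8.3 pointer-authentication key registers from trapping to EL2. A side effect is that the combined constant is no longer encodable as a single MOV immediate, which is why the head.S and hyp.S hunks below switch from "mov" to the movz/movk-emitting mov_q macro. A minimal userspace sketch of the value (UL() restated locally; only the three bits in HCR_HOST_NVHE_FLAGS are shown):

#include <stdio.h>

#define UL(x)	((unsigned long)(x))
#define HCR_RW	(UL(1) << 31)
#define HCR_APK	(UL(1) << 40)
#define HCR_API	(UL(1) << 41)
#define HCR_HOST_NVHE_FLAGS	(HCR_RW | HCR_API | HCR_APK)

int main(void)
{
	/* prints 0x30080000000 on LP64: bits 31, 40 and 41 */
	printf("HCR_EL2 host flags = %#lx\n", HCR_HOST_NVHE_FLAGS);
	return 0;
}
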
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index d019c3a58cc2..0382eba4bf7b 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -30,6 +30,7 @@
+ #include <asm/cache.h>
+ #include <asm/cputype.h>
+ #include <asm/kernel-pgtable.h>
++#include <asm/kvm_arm.h>
+ #include <asm/memory.h>
+ #include <asm/pgtable-hwdef.h>
+ #include <asm/pgtable.h>
+@@ -464,7 +465,7 @@ CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
+ 	ret
+ 
+ 	/* Hyp configuration. */
+-2:	mov	x0, #(1 << 31)			// 64-bit EL1
++2:	mov_q	x0, HCR_HOST_NVHE_FLAGS
+ 	msr	hcr_el2, x0
+ 
+ 	/* Generic timers. */
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 62d3dc60ca09..e99a0ed7e66b 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -670,6 +670,7 @@ static struct platform_driver armv8_pmu_driver = {
+ 	.driver		= {
+ 		.name	= "armv8-pmu",
+ 		.of_match_table = armv8_pmu_of_device_ids,
++		.suppress_bind_attrs = true,
+ 	},
+ 	.probe		= armv8_pmu_device_probe,
+ };
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index 86c289832272..8d3da858c257 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -494,7 +494,7 @@
+ .endm
+ 
+ .macro deactivate_traps
+-	mov	x2, #HCR_RW
++	mov_q	x2, HCR_HOST_NVHE_FLAGS
+ 	msr	hcr_el2, x2
+ 	msr	hstr_el2, xzr
+ 
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 8b0424abc84c..333ea0389adb 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -760,6 +760,7 @@ config SIBYTE_SWARM
+ 	select SYS_SUPPORTS_HIGHMEM
+ 	select SYS_SUPPORTS_LITTLE_ENDIAN
+ 	select ZONE_DMA32 if 64BIT
++	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
+ 
+ config SIBYTE_LITTLESUR
+ 	bool "Sibyte BCM91250C2-LittleSur"
+@@ -782,6 +783,7 @@ config SIBYTE_SENTOSA
+ 	select SYS_HAS_CPU_SB1
+ 	select SYS_SUPPORTS_BIG_ENDIAN
+ 	select SYS_SUPPORTS_LITTLE_ENDIAN
++	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
+ 
+ config SIBYTE_BIGSUR
+ 	bool "Sibyte BCM91480B-BigSur"
+@@ -795,6 +797,7 @@ config SIBYTE_BIGSUR
+ 	select SYS_SUPPORTS_HIGHMEM
+ 	select SYS_SUPPORTS_LITTLE_ENDIAN
+ 	select ZONE_DMA32 if 64BIT
++	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
+ 
+ config SNI_RM
+ 	bool "SNI RM200/300/400"
+@@ -2972,6 +2975,7 @@ config MIPS32_O32
+ config MIPS32_N32
+ 	bool "Kernel support for n32 binaries"
+ 	depends on 64BIT
++	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ 	select COMPAT
+ 	select MIPS32_COMPAT
+ 	select SYSVIPC_COMPAT if SYSVIPC
+diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
+index 2a5bb849b10e..288b58b00dc8 100644
+--- a/arch/mips/pci/msi-octeon.c
++++ b/arch/mips/pci/msi-octeon.c
+@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
+ 	int irq;
+ 	struct irq_chip *msi;
+ 
+-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
++	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
++		return 0;
++	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ 		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
+ 		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
+ 		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
+diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
+index b3d6bf23a662..3ef3fb658136 100644
+--- a/arch/mips/sibyte/common/Makefile
++++ b/arch/mips/sibyte/common/Makefile
+@@ -1,4 +1,5 @@
+ obj-y := cfe.o
++obj-$(CONFIG_SWIOTLB)			+= dma.o
+ obj-$(CONFIG_SIBYTE_BUS_WATCHER)	+= bus_watcher.o
+ obj-$(CONFIG_SIBYTE_CFE_CONSOLE)	+= cfe_console.o
+ obj-$(CONFIG_SIBYTE_TBPROF)		+= sb_tbprof.o
+diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
+new file mode 100644
+index 000000000000..eb47a94f3583
+--- /dev/null
++++ b/arch/mips/sibyte/common/dma.c
+@@ -0,0 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ *	DMA support for Broadcom SiByte platforms.
++ *
++ *	Copyright (c) 2018  Maciej W. Rozycki
++ */
++
++#include <linux/swiotlb.h>
++#include <asm/bootinfo.h>
++
++void __init plat_swiotlb_setup(void)
++{
++	swiotlb_init(1);
++}
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index b7290c5b1eaa..5c25005ff398 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ 		return -EINVAL;
+ 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ 		return -EINVAL;
+-	if (RTA_PAYLOAD(rta) < sizeof(*param))
++
++	/*
++	 * RTA_OK() didn't align the rtattr's payload when validating that it
++	 * fits in the buffer.  Yet, the keys should start on the next 4-byte
++	 * aligned boundary.  To avoid confusion, require that the rtattr
++	 * payload be exactly the param struct, which has a 4-byte aligned size.
++	 */
++	if (RTA_PAYLOAD(rta) != sizeof(*param))
+ 		return -EINVAL;
++	BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
+ 
+ 	param = RTA_DATA(rta);
+ 	keys->enckeylen = be32_to_cpu(param->enckeylen);
+ 
+-	key += RTA_ALIGN(rta->rta_len);
+-	keylen -= RTA_ALIGN(rta->rta_len);
++	key += rta->rta_len;
++	keylen -= rta->rta_len;
+ 
+ 	if (keylen < keys->enckeylen)
+ 		return -EINVAL;
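
For context: the blob passed to authenc setkey is an rtattr header, a 4-byte big-endian enckeylen parameter, then the raw authentication and encryption keys back to back. Requiring the payload to be exactly sizeof(*param) means rta->rta_len is already 4-byte aligned, so the RTA_ALIGN() the old code applied (which could step past the real start of the key material) becomes a no-op by construction. A rough sketch of the stricter check, using simplified stand-in types rather than the kernel's netlink definitions:

#include <stdint.h>
#include <stddef.h>

struct rtattr_hdr { uint16_t rta_len, rta_type; };
struct authenc_param { uint32_t enckeylen; };	/* size is a multiple of 4 */

/* layout: [ rtattr_hdr | authenc_param | authkey... | enckey... ] */
static int keyblob_ok(const struct rtattr_hdr *rta, size_t keylen)
{
	if (keylen < sizeof(*rta) || rta->rta_len > keylen)
		return 0;	/* header claims more bytes than provided */
	/* payload must be exactly the parameter struct */
	return rta->rta_len - sizeof(*rta) == sizeof(struct authenc_param);
}
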
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index fa0c4567f697..5fdf3e532310 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -276,7 +276,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+ 	struct aead_request *req = areq->data;
+ 
+ 	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
+-	aead_request_complete(req, err);
++	authenc_esn_request_complete(req, err);
+ }
+ 
+ static int crypto_authenc_esn_decrypt(struct aead_request *req)
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 0346e46e2871..ecca4ae248e0 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -33,6 +33,9 @@ static struct kset *system_kset;
+ 
+ #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
+ 
++#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
++	struct driver_attribute driver_attr_##_name =		\
++		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
+ 
+ static int __must_check bus_rescan_devices_helper(struct device *dev,
+ 						void *data);
+@@ -198,7 +201,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
+ 	bus_put(bus);
+ 	return err;
+ }
+-static DRIVER_ATTR_WO(unbind);
++static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
+ 
+ /*
+  * Manually attach a device to a driver.
+@@ -234,7 +237,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
+ 	bus_put(bus);
+ 	return err;
+ }
+-static DRIVER_ATTR_WO(bind);
++static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
+ 
+ static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
+ {
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index da3902ac16c8..b1cf891cb3d9 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -81,7 +81,7 @@
+ #include <asm/uaccess.h>
+ 
+ static DEFINE_IDR(loop_index_idr);
+-static DEFINE_MUTEX(loop_index_mutex);
++static DEFINE_MUTEX(loop_ctl_mutex);
+ 
+ static int max_part;
+ static int part_shift;
+@@ -1044,7 +1044,7 @@ static int loop_clr_fd(struct loop_device *lo)
+ 	 */
+ 	if (atomic_read(&lo->lo_refcnt) > 1) {
+ 		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+-		mutex_unlock(&lo->lo_ctl_mutex);
++		mutex_unlock(&loop_ctl_mutex);
+ 		return 0;
+ 	}
+ 
+@@ -1093,12 +1093,12 @@ static int loop_clr_fd(struct loop_device *lo)
+ 	if (!part_shift)
+ 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+ 	loop_unprepare_queue(lo);
+-	mutex_unlock(&lo->lo_ctl_mutex);
++	mutex_unlock(&loop_ctl_mutex);
+ 	/*
+-	 * Need not hold lo_ctl_mutex to fput backing file.
+-	 * Calling fput holding lo_ctl_mutex triggers a circular
++	 * Need not hold loop_ctl_mutex to fput backing file.
++	 * Calling fput holding loop_ctl_mutex triggers a circular
+ 	 * lock dependency possibility warning as fput can take
+-	 * bd_mutex which is usually taken before lo_ctl_mutex.
++	 * bd_mutex which is usually taken before loop_ctl_mutex.
+ 	 */
+ 	fput(filp);
+ 	return 0;
+@@ -1361,7 +1361,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ 	struct loop_device *lo = bdev->bd_disk->private_data;
+ 	int err;
+ 
+-	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
++	mutex_lock_nested(&loop_ctl_mutex, 1);
+ 	switch (cmd) {
+ 	case LOOP_SET_FD:
+ 		err = loop_set_fd(lo, mode, bdev, arg);
+@@ -1370,7 +1370,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ 		err = loop_change_fd(lo, bdev, arg);
+ 		break;
+ 	case LOOP_CLR_FD:
+-		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
++		/* loop_clr_fd would have unlocked loop_ctl_mutex on success */
+ 		err = loop_clr_fd(lo);
+ 		if (!err)
+ 			goto out_unlocked;
+@@ -1406,7 +1406,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ 	default:
+ 		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ 	}
+-	mutex_unlock(&lo->lo_ctl_mutex);
++	mutex_unlock(&loop_ctl_mutex);
+ 
+ out_unlocked:
+ 	return err;
+@@ -1539,16 +1539,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ 
+ 	switch(cmd) {
+ 	case LOOP_SET_STATUS:
+-		mutex_lock(&lo->lo_ctl_mutex);
++		mutex_lock(&loop_ctl_mutex);
+ 		err = loop_set_status_compat(
+ 			lo, (const struct compat_loop_info __user *) arg);
+-		mutex_unlock(&lo->lo_ctl_mutex);
++		mutex_unlock(&loop_ctl_mutex);
+ 		break;
+ 	case LOOP_GET_STATUS:
+-		mutex_lock(&lo->lo_ctl_mutex);
++		mutex_lock(&loop_ctl_mutex);
+ 		err = loop_get_status_compat(
+ 			lo, (struct compat_loop_info __user *) arg);
+-		mutex_unlock(&lo->lo_ctl_mutex);
++		mutex_unlock(&loop_ctl_mutex);
+ 		break;
+ 	case LOOP_SET_CAPACITY:
+ 	case LOOP_CLR_FD:
+@@ -1570,9 +1570,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ static int lo_open(struct block_device *bdev, fmode_t mode)
+ {
+ 	struct loop_device *lo;
+-	int err = 0;
++	int err;
+ 
+-	mutex_lock(&loop_index_mutex);
++	err = mutex_lock_killable(&loop_ctl_mutex);
++	if (err)
++		return err;
+ 	lo = bdev->bd_disk->private_data;
+ 	if (!lo) {
+ 		err = -ENXIO;
+@@ -1581,18 +1583,20 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
+ 
+ 	atomic_inc(&lo->lo_refcnt);
+ out:
+-	mutex_unlock(&loop_index_mutex);
++	mutex_unlock(&loop_ctl_mutex);
+ 	return err;
+ }
+ 
+-static void __lo_release(struct loop_device *lo)
++static void lo_release(struct gendisk *disk, fmode_t mode)
+ {
++	struct loop_device *lo;
+ 	int err;
+ 
++	mutex_lock(&loop_ctl_mutex);
++	lo = disk->private_data;
+ 	if (atomic_dec_return(&lo->lo_refcnt))
+-		return;
++		goto out_unlock;
+ 
+-	mutex_lock(&lo->lo_ctl_mutex);
+ 	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ 		/*
+ 		 * In autoclear mode, stop the loop thread
+@@ -1609,14 +1613,8 @@ static void __lo_release(struct loop_device *lo)
+ 		loop_flush(lo);
+ 	}
+ 
+-	mutex_unlock(&lo->lo_ctl_mutex);
+-}
+-
+-static void lo_release(struct gendisk *disk, fmode_t mode)
+-{
+-	mutex_lock(&loop_index_mutex);
+-	__lo_release(disk->private_data);
+-	mutex_unlock(&loop_index_mutex);
++out_unlock:
++	mutex_unlock(&loop_ctl_mutex);
+ }
+ 
+ static const struct block_device_operations lo_fops = {
+@@ -1655,10 +1653,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
+ 	struct loop_device *lo = ptr;
+ 	struct loop_func_table *xfer = data;
+ 
+-	mutex_lock(&lo->lo_ctl_mutex);
++	mutex_lock(&loop_ctl_mutex);
+ 	if (lo->lo_encryption == xfer)
+ 		loop_release_xfer(lo);
+-	mutex_unlock(&lo->lo_ctl_mutex);
++	mutex_unlock(&loop_ctl_mutex);
+ 	return 0;
+ }
+ 
+@@ -1820,7 +1818,6 @@ static int loop_add(struct loop_device **l, int i)
+ 	if (!part_shift)
+ 		disk->flags |= GENHD_FL_NO_PART_SCAN;
+ 	disk->flags |= GENHD_FL_EXT_DEVT;
+-	mutex_init(&lo->lo_ctl_mutex);
+ 	atomic_set(&lo->lo_refcnt, 0);
+ 	lo->lo_number		= i;
+ 	spin_lock_init(&lo->lo_lock);
+@@ -1899,7 +1896,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ 	struct kobject *kobj;
+ 	int err;
+ 
+-	mutex_lock(&loop_index_mutex);
++	mutex_lock(&loop_ctl_mutex);
+ 	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
+ 	if (err < 0)
+ 		err = loop_add(&lo, MINOR(dev) >> part_shift);
+@@ -1907,7 +1904,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ 		kobj = NULL;
+ 	else
+ 		kobj = get_disk(lo->lo_disk);
+-	mutex_unlock(&loop_index_mutex);
++	mutex_unlock(&loop_ctl_mutex);
+ 
+ 	*part = 0;
+ 	return kobj;
+@@ -1917,9 +1914,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ 			       unsigned long parm)
+ {
+ 	struct loop_device *lo;
+-	int ret = -ENOSYS;
++	int ret;
++
++	ret = mutex_lock_killable(&loop_ctl_mutex);
++	if (ret)
++		return ret;
+ 
+-	mutex_lock(&loop_index_mutex);
++	ret = -ENOSYS;
+ 	switch (cmd) {
+ 	case LOOP_CTL_ADD:
+ 		ret = loop_lookup(&lo, parm);
+@@ -1933,19 +1934,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ 		ret = loop_lookup(&lo, parm);
+ 		if (ret < 0)
+ 			break;
+-		mutex_lock(&lo->lo_ctl_mutex);
+ 		if (lo->lo_state != Lo_unbound) {
+ 			ret = -EBUSY;
+-			mutex_unlock(&lo->lo_ctl_mutex);
+ 			break;
+ 		}
+ 		if (atomic_read(&lo->lo_refcnt) > 0) {
+ 			ret = -EBUSY;
+-			mutex_unlock(&lo->lo_ctl_mutex);
+ 			break;
+ 		}
+ 		lo->lo_disk->private_data = NULL;
+-		mutex_unlock(&lo->lo_ctl_mutex);
+ 		idr_remove(&loop_index_idr, lo->lo_number);
+ 		loop_remove(lo);
+ 		break;
+@@ -1955,7 +1952,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ 			break;
+ 		ret = loop_add(&lo, -1);
+ 	}
+-	mutex_unlock(&loop_index_mutex);
++	mutex_unlock(&loop_ctl_mutex);
+ 
+ 	return ret;
+ }
+@@ -2038,10 +2035,10 @@ static int __init loop_init(void)
+ 				  THIS_MODULE, loop_probe, NULL, NULL);
+ 
+ 	/* pre-create number of devices given by config or max_loop */
+-	mutex_lock(&loop_index_mutex);
++	mutex_lock(&loop_ctl_mutex);
+ 	for (i = 0; i < nr; i++)
+ 		loop_add(&lo, i);
+-	mutex_unlock(&loop_index_mutex);
++	mutex_unlock(&loop_ctl_mutex);
+ 
+ 	printk(KERN_INFO "loop: module loaded\n");
+ 	return 0;
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h
+index 60f0fd2c0c65..a923e74495ce 100644
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -55,7 +55,6 @@ struct loop_device {
+ 
+ 	spinlock_t		lo_lock;
+ 	int			lo_state;
+-	struct mutex		lo_ctl_mutex;
+ 	struct kthread_worker	worker;
+ 	struct task_struct	*worker_task;
+ 	bool			use_dio;
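
With lo_ctl_mutex removed from struct loop_device, one global loop_ctl_mutex now serializes open, release, the ioctl paths and device lookup, and the paths that may queue behind a wedged device take it with mutex_lock_killable() so the waiting task can still be killed. The shape of that pattern, sketched with a hypothetical resource rather than the loop driver's real state:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(ctl_mutex);

static int ctl_op(void)
{
	int err = mutex_lock_killable(&ctl_mutex);

	if (err)
		return err;	/* -EINTR: fatal signal while waiting */

	/* ... manipulate the shared state ... */

	mutex_unlock(&ctl_mutex);
	return 0;
}
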
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 7a2e23d6bfdd..b2da2382d544 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -637,8 +637,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 
+ 		/* Remove the multi-part read marker. */
+ 		len -= 2;
++		data += 2;
+ 		for (i = 0; i < len; i++)
+-			ssif_info->data[i] = data[i+2];
++			ssif_info->data[i] = data[i];
+ 		ssif_info->multi_len = len;
+ 		ssif_info->multi_pos = 1;
+ 
+@@ -666,8 +667,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 		}
+ 
+ 		blocknum = data[0];
++		len--;
++		data++;
++
++		if (blocknum != 0xff && len != 31) {
++		    /* All blocks but the last must have 31 data bytes. */
++			result = -EIO;
++			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
++				pr_info("Received middle message <31\n");
+ 
+-		if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
++			goto continue_op;
++		}
++
++		if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
+ 			/* Received message too big, abort the operation. */
+ 			result = -E2BIG;
+ 			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+@@ -676,16 +688,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			goto continue_op;
+ 		}
+ 
+-		/* Remove the blocknum from the data. */
+-		len--;
+ 		for (i = 0; i < len; i++)
+-			ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
++			ssif_info->data[i + ssif_info->multi_len] = data[i];
+ 		ssif_info->multi_len += len;
+ 		if (blocknum == 0xff) {
+ 			/* End of read */
+ 			len = ssif_info->multi_len;
+ 			data = ssif_info->data;
+-		} else if (blocknum + 1 != ssif_info->multi_pos) {
++		} else if (blocknum != ssif_info->multi_pos) {
+ 			/*
+ 			 * Out of sequence block, just abort.  Block
+ 			 * numbers start at zero for the second block,
+@@ -713,6 +723,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 		}
+ 	}
+ 
++ continue_op:
+ 	if (result < 0) {
+ 		ssif_inc_stat(ssif_info, receive_errors);
+ 	} else {
+@@ -720,8 +731,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 		ssif_inc_stat(ssif_info, received_message_parts);
+ 	}
+ 
+-
+- continue_op:
+ 	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ 		pr_info(PFX "DONE 1: state = %d, result=%d.\n",
+ 			ssif_info->ssif_state, result);
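
In SSIF multi-part reads the first chunk carries the 00 01 start marker, each later chunk begins with a block number, 0xff marks the final chunk, and every middle chunk must carry exactly 31 data bytes; the fix strips the block byte up front and rejects short middle blocks instead of silently reassembling garbage. A hypothetical validator restating those rules for one received chunk:

#include <stdint.h>
#include <stddef.h>

/* returns nonzero if this middle/final block is acceptable */
static int ssif_block_ok(uint8_t blocknum, size_t datalen, uint8_t expected)
{
	if (blocknum == 0xff)		/* final block: any residual length */
		return 1;
	/* middle blocks must be in sequence and carry 31 payload bytes */
	return blocknum == expected && datalen == 31;
}
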
+diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
+index a0df83e6b84b..46c05c9a9354 100644
+--- a/drivers/clk/imx/clk-imx6q.c
++++ b/drivers/clk/imx/clk-imx6q.c
+@@ -239,8 +239,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 	 * lvds1_gate and lvds2_gate are pseudo-gates.  Both can be
+ 	 * independently configured as clock inputs or outputs.  We treat
+ 	 * the "output_enable" bit as a gate, even though it's really just
+-	 * enabling clock output.
++	 * enabling clock output. Initially the gate bits are cleared, as
++	 * otherwise the exclusive configuration gets locked in the setup done
++	 * by software running before the clock driver, with no way to change
++	 * it.
+ 	 */
++	writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
+ 	clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
+ 	clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
+ 
+diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
+index 07135e009d8b..601a6c3acc7f 100644
+--- a/drivers/cpuidle/cpuidle-pseries.c
++++ b/drivers/cpuidle/cpuidle-pseries.c
+@@ -240,7 +240,13 @@ static int pseries_idle_probe(void)
+ 		return -ENODEV;
+ 
+ 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+-		if (lppaca_shared_proc(get_lppaca())) {
++		/*
++		 * Use local_paca instead of get_lppaca() since
++		 * preemption is not disabled, and it is not required in
++		 * fact, since lppaca_ptr does not need to be the value
++		 * associated to the current CPU, it can be from any CPU.
++		 */
++		if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
+ 			cpuidle_state_table = shared_states;
+ 			max_idle_state = ARRAY_SIZE(shared_states);
+ 		} else {
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 5ad036741b99..e449f22c8f29 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1109,9 +1109,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ 	struct drm_framebuffer *fb = fb_helper->fb;
+ 	int depth;
+ 
+-	if (var->pixclock != 0 || in_dbg_master())
++	if (in_dbg_master())
+ 		return -EINVAL;
+ 
++	if (var->pixclock != 0) {
++		DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
++		var->pixclock = 0;
++	}
++
+ 	/* Need to resize the fb object !!! */
+ 	if (var->bits_per_pixel > fb->bits_per_pixel ||
+ 	    var->xres > fb->width || var->yres > fb->height ||
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index 54c308e6704f..04248394843e 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -55,15 +55,17 @@ struct dm_kcopyd_client {
+ 	struct dm_kcopyd_throttle *throttle;
+ 
+ /*
+- * We maintain three lists of jobs:
++ * We maintain four lists of jobs:
+  *
+  * i)   jobs waiting for pages
+  * ii)  jobs that have pages, and are waiting for the io to be issued.
+- * iii) jobs that have completed.
++ * iii) jobs that don't need to do any IO and just run a callback
++ * iv) jobs that have completed.
+  *
+- * All three of these are protected by job_lock.
++ * All four of these are protected by job_lock.
+  */
+ 	spinlock_t job_lock;
++	struct list_head callback_jobs;
+ 	struct list_head complete_jobs;
+ 	struct list_head io_jobs;
+ 	struct list_head pages_jobs;
+@@ -583,6 +585,7 @@ static void do_work(struct work_struct *work)
+ 	struct dm_kcopyd_client *kc = container_of(work,
+ 					struct dm_kcopyd_client, kcopyd_work);
+ 	struct blk_plug plug;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * The order that these are called is *very* important.
+@@ -591,6 +594,10 @@ static void do_work(struct work_struct *work)
+ 	 * list.  io jobs call wake when they complete and it all
+ 	 * starts again.
+ 	 */
++	spin_lock_irqsave(&kc->job_lock, flags);
++	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
++	spin_unlock_irqrestore(&kc->job_lock, flags);
++
+ 	blk_start_plug(&plug);
+ 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
+ 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
+@@ -608,7 +615,7 @@ static void dispatch_job(struct kcopyd_job *job)
+ 	struct dm_kcopyd_client *kc = job->kc;
+ 	atomic_inc(&kc->nr_jobs);
+ 	if (unlikely(!job->source.count))
+-		push(&kc->complete_jobs, job);
++		push(&kc->callback_jobs, job);
+ 	else if (job->pages == &zero_page_list)
+ 		push(&kc->io_jobs, job);
+ 	else
+@@ -795,7 +802,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
+ 	job->read_err = read_err;
+ 	job->write_err = write_err;
+ 
+-	push(&kc->complete_jobs, job);
++	push(&kc->callback_jobs, job);
+ 	wake(kc);
+ }
+ EXPORT_SYMBOL(dm_kcopyd_do_callback);
+@@ -825,6 +832,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	spin_lock_init(&kc->job_lock);
++	INIT_LIST_HEAD(&kc->callback_jobs);
+ 	INIT_LIST_HEAD(&kc->complete_jobs);
+ 	INIT_LIST_HEAD(&kc->io_jobs);
+ 	INIT_LIST_HEAD(&kc->pages_jobs);
+@@ -874,6 +882,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
+ 	/* Wait for completion of all jobs submitted by this client. */
+ 	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
+ 
++	BUG_ON(!list_empty(&kc->callback_jobs));
+ 	BUG_ON(!list_empty(&kc->complete_jobs));
+ 	BUG_ON(!list_empty(&kc->io_jobs));
+ 	BUG_ON(!list_empty(&kc->pages_jobs));
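
The separate callback_jobs list keeps submitters (zero-length jobs and dm_kcopyd_do_callback() users) from appending to the very list the worker is draining, which could otherwise keep do_work() looping indefinitely on one CPU; the worker now takes one snapshot per pass using the usual splice-under-spinlock idiom. Sketched with hypothetical list heads:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(job_lock);
static LIST_HEAD(callback_jobs);	/* producers append here */
static LIST_HEAD(complete_jobs);	/* only the worker consumes this */

static void drain_one_pass(void)
{
	unsigned long flags;

	spin_lock_irqsave(&job_lock, flags);
	/* move every queued entry over and leave callback_jobs empty */
	list_splice_tail_init(&callback_jobs, &complete_jobs);
	spin_unlock_irqrestore(&job_lock, flags);
}
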
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index e108deebbaaa..5d3797728b9c 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -19,6 +19,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/log2.h>
+ #include <linux/dm-kcopyd.h>
++#include <linux/semaphore.h>
+ 
+ #include "dm.h"
+ 
+@@ -105,6 +106,9 @@ struct dm_snapshot {
+ 	/* The on disk metadata handler */
+ 	struct dm_exception_store *store;
+ 
++	/* Maximum number of in-flight COW jobs. */
++	struct semaphore cow_count;
++
+ 	struct dm_kcopyd_client *kcopyd_client;
+ 
+ 	/* Wait for events based on state_bits */
+@@ -145,6 +149,19 @@ struct dm_snapshot {
+ #define RUNNING_MERGE          0
+ #define SHUTDOWN_MERGE         1
+ 
++/*
++ * Maximum number of chunks being copied on write.
++ *
++ * The value was decided experimentally as a trade-off between memory
++ * consumption, stalling the kernel's workqueues and maintaining a high enough
++ * throughput.
++ */
++#define DEFAULT_COW_THRESHOLD 2048
++
++static int cow_threshold = DEFAULT_COW_THRESHOLD;
++module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
++MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
++
+ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ 		"A percentage of time allocated for copy on write");
+ 
+@@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		goto bad_hash_tables;
+ 	}
+ 
++	sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
++
+ 	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
+ 	if (IS_ERR(s->kcopyd_client)) {
+ 		r = PTR_ERR(s->kcopyd_client);
+@@ -1563,6 +1582,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
+ 		}
+ 		list_add(&pe->out_of_order_entry, lh);
+ 	}
++	up(&s->cow_count);
+ }
+ 
+ /*
+@@ -1586,6 +1606,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
+ 	dest.count = src.count;
+ 
+ 	/* Hand over to kcopyd */
++	down(&s->cow_count);
+ 	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
+ }
+ 
+@@ -1606,6 +1627,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
+ 	pe->full_bio_end_io = bio->bi_end_io;
+ 	pe->full_bio_private = bio->bi_private;
+ 
++	down(&s->cow_count);
+ 	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
+ 						   copy_callback, pe);
+ 
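
The COW throttle is a plain counting semaphore: each chunk handed to kcopyd takes a token and the completion callback returns it, capping in-flight copies at snapshot_cow_threshold (2048 by default). A minimal sketch of the pattern, with the actual dm_kcopyd_copy() call elided:

#include <linux/semaphore.h>

static struct semaphore cow_count;	/* sema_init(&cow_count, 2048) */

static void copy_done(int read_err, unsigned long write_err, void *context)
{
	/* ... record the completed exception ... */
	up(&cow_count);		/* return the slot taken in submit_copy() */
}

static void submit_copy(void *pe)
{
	down(&cow_count);	/* sleeps once 2048 copies are in flight */
	/* ... dm_kcopyd_copy(..., copy_done, pe); ... */
}
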
+diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
+index 251a556112a9..280b5ffea592 100644
+--- a/drivers/media/firewire/firedtv-avc.c
++++ b/drivers/media/firewire/firedtv-avc.c
+@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
+ 	return r->operand[7];
+ }
+ 
+-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
++		    unsigned int *len)
+ {
+ 	struct avc_command_frame *c = (void *)fdtv->avc_data;
+ 	struct avc_response_frame *r = (void *)fdtv->avc_data;
+@@ -1009,7 +1010,8 @@ out:
+ 	return ret;
+ }
+ 
+-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
++		unsigned int *len)
+ {
+ 	struct avc_command_frame *c = (void *)fdtv->avc_data;
+ 	struct avc_response_frame *r = (void *)fdtv->avc_data;
+diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
+index 345d1eda8c05..5b18a08c6285 100644
+--- a/drivers/media/firewire/firedtv.h
++++ b/drivers/media/firewire/firedtv.h
+@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
+ 		    struct dvb_diseqc_master_cmd *diseqcmd);
+ void avc_remote_ctrl_work(struct work_struct *work);
+ int avc_register_remote_control(struct firedtv *fdtv);
+-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
+-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
++		    unsigned int *len);
++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
++		unsigned int *len);
+ int avc_ca_reset(struct firedtv *fdtv);
+ int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
+ int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
+diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
+index 83cc6d3b4784..81ba454a6d95 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
+@@ -863,8 +863,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+ 			"%s-vid-cap", dev->v4l2_dev.name);
+ 
+ 	if (IS_ERR(dev->kthread_vid_cap)) {
++		int err = PTR_ERR(dev->kthread_vid_cap);
++
++		dev->kthread_vid_cap = NULL;
+ 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+-		return PTR_ERR(dev->kthread_vid_cap);
++		return err;
+ 	}
+ 	*pstreaming = true;
+ 	vivid_grab_controls(dev, true);
+diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
+index c2c46dcdbe95..2c5dbdcb576a 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-out.c
++++ b/drivers/media/platform/vivid/vivid-kthread-out.c
+@@ -248,8 +248,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+ 			"%s-vid-out", dev->v4l2_dev.name);
+ 
+ 	if (IS_ERR(dev->kthread_vid_out)) {
++		int err = PTR_ERR(dev->kthread_vid_out);
++
++		dev->kthread_vid_out = NULL;
+ 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+-		return PTR_ERR(dev->kthread_vid_out);
++		return err;
+ 	}
+ 	*pstreaming = true;
+ 	vivid_grab_controls(dev, true);
+diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
+index 1678b730dba2..2e82f520a869 100644
+--- a/drivers/media/platform/vivid/vivid-vid-common.c
++++ b/drivers/media/platform/vivid/vivid-vid-common.c
+@@ -33,7 +33,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
+ 	.type = V4L2_DV_BT_656_1120,
+ 	/* keep this initialization for compatibility with GCC < 4.4.6 */
+ 	.reserved = { 0 },
+-	V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
++	V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
+ 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ 		V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index 6cfcdcea27e0..873948e429e8 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -930,6 +930,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
+ 
+ 	em28xx_videodbg("%s\n", __func__);
+ 
++	dev->v4l2->field_count = 0;
++
+ 	/* Make sure streaming is not already in progress for this type
+ 	   of filehandle (e.g. video, vbi) */
+ 	rc = res_get(dev, vq->type);
+@@ -1149,8 +1151,6 @@ static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
+ {
+ 	struct em28xx *dev = priv;
+ 
+-	dev->v4l2->field_count = 0;
+-
+ 	/*
+ 	 * In the case of non-AC97 volume controls, we still need
+ 	 * to do some setups at em28xx, in order to mute/unmute
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index 8ce9c63dfc59..e0041fcfa783 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1976,9 +1976,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ 			return -EINVAL;
+ 		}
+ 	}
++
++	mutex_lock(&q->mmap_lock);
++
+ 	if (vb2_fileio_is_active(q)) {
+ 		dprintk(1, "mmap: file io in progress\n");
+-		return -EBUSY;
++		ret = -EBUSY;
++		goto unlock;
+ 	}
+ 
+ 	/*
+@@ -1986,7 +1990,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ 	 */
+ 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ 	if (ret)
+-		return ret;
++		goto unlock;
+ 
+ 	vb = q->bufs[buffer];
+ 
+@@ -1999,11 +2003,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ 	if (length < (vma->vm_end - vma->vm_start)) {
+ 		dprintk(1,
+ 			"MMAP invalid, as it would overflow buffer length\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto unlock;
+ 	}
+ 
+-	mutex_lock(&q->mmap_lock);
+ 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
++
++unlock:
+ 	mutex_unlock(&q->mmap_lock);
+ 	if (ret)
+ 		return ret;
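
Taking mmap_lock before the fileio check means the queue state can no longer change between validation and the actual mapping; the cost is that every early return in between must become an error path that drops the lock. The generic shape, with a hypothetical busy() predicate standing in for the vb2 checks:

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/types.h>

static int locked_op(struct mutex *lock, bool (*busy)(void))
{
	int ret = 0;

	mutex_lock(lock);
	if (busy()) {
		ret = -EBUSY;
		goto unlock;	/* never return while holding the lock */
	}
	/* ... validate and perform the mapping ... */
unlock:
	mutex_unlock(lock);
	return ret;
}
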
+diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
+index 5628a6b5b19b..c5c320efc7b4 100644
+--- a/drivers/mfd/tps6586x.c
++++ b/drivers/mfd/tps6586x.c
+@@ -594,6 +594,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
+ 	return 0;
+ }
+ 
++static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
++{
++	struct tps6586x *tps6586x = dev_get_drvdata(dev);
++
++	if (tps6586x->client->irq)
++		disable_irq(tps6586x->client->irq);
++
++	return 0;
++}
++
++static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
++{
++	struct tps6586x *tps6586x = dev_get_drvdata(dev);
++
++	if (tps6586x->client->irq)
++		enable_irq(tps6586x->client->irq);
++
++	return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
++			 tps6586x_i2c_resume);
++
+ static const struct i2c_device_id tps6586x_id_table[] = {
+ 	{ "tps6586x", 0 },
+ 	{ },
+@@ -604,6 +627,7 @@ static struct i2c_driver tps6586x_driver = {
+ 	.driver	= {
+ 		.name	= "tps6586x",
+ 		.of_match_table = of_match_ptr(tps6586x_of_match),
++		.pm	= &tps6586x_pm_ops,
+ 	},
+ 	.probe		= tps6586x_i2c_probe,
+ 	.remove		= tps6586x_i2c_remove,
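
disable_irq() in the suspend hook waits for any running handler and keeps the PMIC interrupt from firing while the I2C controller may itself be suspended; resume re-enables it. SIMPLE_DEV_PM_OPS() generates the dev_pm_ops for the system-sleep callbacks, and __maybe_unused avoids warnings when CONFIG_PM_SLEEP is off. The same wiring in a hypothetical I2C driver:

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq)
		disable_irq(client->irq);	/* waits for handlers to finish */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq)
		enable_irq(client->irq);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
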
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index bf62e429f7fc..98be9eb3184b 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1840,13 +1840,14 @@ static void atmci_tasklet_func(unsigned long priv)
+ 			}
+ 
+ 			atmci_request_end(host, host->mrq);
+-			state = STATE_IDLE;
++			goto unlock; /* atmci_request_end() sets host->state */
+ 			break;
+ 		}
+ 	} while (state != prev_state);
+ 
+ 	host->state = state;
+ 
++unlock:
+ 	spin_unlock(&host->lock);
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
+index 25a0ad5102d6..855cf8c15c8a 100644
+--- a/drivers/net/ethernet/intel/e1000e/ptp.c
++++ b/drivers/net/ethernet/intel/e1000e/ptp.c
+@@ -111,10 +111,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+ 	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ 						     ptp_clock_info);
+ 	unsigned long flags;
+-	u64 ns;
++	u64 cycles, ns;
+ 
+ 	spin_lock_irqsave(&adapter->systim_lock, flags);
+-	ns = timecounter_read(&adapter->tc);
++
++	/* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
++	cycles = adapter->cc.read(&adapter->cc);
++	ns = timecounter_cyc2time(&adapter->tc, cycles);
++
+ 	spin_unlock_irqrestore(&adapter->systim_lock, flags);
+ 
+ 	*ts = ns_to_timespec64(ns);
+@@ -170,9 +174,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
+ 						     systim_overflow_work.work);
+ 	struct e1000_hw *hw = &adapter->hw;
+ 	struct timespec64 ts;
++	u64 ns;
+ 
+-	adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
++	/* Update the timecounter */
++	ns = timecounter_read(&adapter->tc);
+ 
++	ts = ns_to_timespec64(ns);
+ 	e_dbg("SYSTIM overflow check at %lld.%09lu\n",
+ 	      (long long) ts.tv_sec, ts.tv_nsec);
+ 
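
timecounter_read() advances the timecounter's cycle_last state and therefore assumes every counter sample is monotonic, while timecounter_cyc2time() converts a raw sample without touching that state, so an occasional backwards SYSTIM reading yields one odd timestamp instead of corrupting the counter; the overflow worker keeps using timecounter_read() because updating that state is exactly its job. A sketch of the stateless read, against the 4.4-era API where the cyclecounter read hook takes the cyclecounter itself:

#include <linux/timecounter.h>

static u64 sample_ns(struct timecounter *tc)
{
	cycle_t cycles = tc->cc->read(tc->cc);	/* raw hardware sample */

	/* stateless conversion: tc->cycle_last is left untouched */
	return timecounter_cyc2time(tc, cycles);
}
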
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 93543e176829..8f40e121f7d4 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -324,6 +324,8 @@ enum cfg_version {
+ };
+ 
+ static const struct pci_device_id rtl8169_pci_tbl[] = {
++	{ PCI_VDEVICE(REALTEK,	0x2502), RTL_CFG_1 },
++	{ PCI_VDEVICE(REALTEK,	0x2600), RTL_CFG_1 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index f96f7b865267..7c1defaef3f5 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -2084,7 +2084,8 @@ static int asus_wmi_add(struct platform_device *pdev)
+ 		err = asus_wmi_backlight_init(asus);
+ 		if (err && err != -ENODEV)
+ 			goto fail_backlight;
+-	}
++	} else
++		err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
+ 
+ 	status = wmi_install_notify_handler(asus->driver->event_guid,
+ 					    asus_wmi_notify, asus);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
+index 741509b35617..14f32c114c55 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
+@@ -1273,7 +1273,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
+ 
+ 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ 		ld = MR_TargetIdToLdGet(ldCount, drv_map);
+-		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
++		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
+ 			lbInfo[ldCount].loadBalanceFlag = 0;
+ 			continue;
+ 		}
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 213944ed64d9..3d3bfa814093 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -1758,7 +1758,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
+ 		device_id < instance->fw_supported_vd_count)) {
+ 
+ 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+-		if (ld >= instance->fw_supported_vd_count)
++		if (ld >= instance->fw_supported_vd_count - 1)
+ 			fp_possible = 0;
+ 
+ 		raid = MR_LdRaidGet(ld, local_map_ptr);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6fffb73766de..ec80a0077ace 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -207,6 +207,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ 	sp = buffer_data[0] & 0x80 ? 1 : 0;
+ 	buffer_data[0] &= ~0x80;
+ 
++	/*
++	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
++	 * received mode parameter buffer before doing MODE SELECT.
++	 */
++	data.device_specific = 0;
++
+ 	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
+ 			     SD_MAX_RETRIES, &data, &sshdr)) {
+ 		if (scsi_sense_valid(&sshdr))
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index 9413e1a949e5..5af4d6a03d6e 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
+ 
+ 	buf[7] = 0x2; /* CmdQue=1 */
+ 
+-	memcpy(&buf[8], "LIO-ORG ", 8);
+-	memset(&buf[16], 0x20, 16);
++	/*
++	 * ASCII data fields described as being left-aligned shall have any
++	 * unused bytes at the end of the field (i.e., highest offset) and the
++	 * unused bytes shall be filled with ASCII space characters (20h).
++	 */
++	memset(&buf[8], 0x20, 8 + 16 + 4);
++	memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
+ 	memcpy(&buf[16], dev->t10_wwn.model,
+-	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
++	       strnlen(dev->t10_wwn.model, 16));
+ 	memcpy(&buf[32], dev->t10_wwn.revision,
+-	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
++	       strnlen(dev->t10_wwn.revision, 4));
+ 	buf[4] = 31; /* Set additional length to 31 */
+ 
+ 	return 0;
+@@ -251,7 +256,9 @@ check_t10_vend_desc:
+ 	buf[off] = 0x2; /* ASCII */
+ 	buf[off+1] = 0x1; /* T10 Vendor ID */
+ 	buf[off+2] = 0x0;
+-	memcpy(&buf[off+4], "LIO-ORG", 8);
++	/* left align Vendor ID and pad with spaces */
++	memset(&buf[off+4], 0x20, 8);
++	memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
+ 	/* Extra Byte for NULL Terminator */
+ 	id_len++;
+ 	/* Identifier Length */
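
SPC requires left-aligned ASCII fields to be padded with spaces (20h) rather than NULs, hence the memset-then-memcpy with strnlen() so the terminator is never copied and short strings end up space padded. The helper shape as a plain userspace sketch:

#include <string.h>

/* left-align s in a fixed-width field, space padding the remainder */
static void spc_pad_field(char *field, size_t width, const char *s)
{
	memset(field, ' ', width);
	memcpy(field, s, strnlen(s, width));
}
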
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index ad7eba5ca380..34234c233851 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -307,6 +307,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
+ 	if (!locked)
+ 		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+ 	list_del(&waiter.list);
++
++	/*
++	 * In case of timeout, wake up every reader who gave the right of way
++	 * to writer. Prevent separation readers into two groups:
++	 * one that helds semaphore and another that sleeps.
++	 * (in case of no contention with a writer)
++	 */
++	if (!locked && list_empty(&sem->write_wait))
++		__ldsem_wake_readers(sem);
++
+ 	raw_spin_unlock_irq(&sem->wait_lock);
+ 
+ 	__set_task_state(tsk, TASK_RUNNING);
+diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+index 34ab4f950f0a..0c1c34ff40a9 100644
+--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
+ 
+ 	int r = 0;
+ 
++	memset(&p, 0, sizeof(p));
++
+ 	switch (cmd) {
+ 	case OMAPFB_SYNC_GFX:
+ 		DBG("ioctl SYNC_GFX\n");
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index f80a0af68736..78722aaffecd 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4111,6 +4111,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
+ 		spin_lock(&fs_info->ordered_root_lock);
+ 	}
+ 	spin_unlock(&fs_info->ordered_root_lock);
++
++	/*
++	 * We need this here because if we've been flipped read-only we won't
++	 * get sync() from the umount, so we need to make sure any ordered
++	 * extents that haven't had their dirty pages IO start writeout yet
++	 * actually get run and error out properly.
++	 */
++	btrfs_wait_ordered_roots(fs_info, -1);
+ }
+ 
+ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index f661d80474be..4b2f609f376d 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -58,6 +58,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
+ 		.rw = READ_SYNC | REQ_META | REQ_PRIO,
+ 		.blk_addr = index,
+ 		.encrypted_page = NULL,
++		.is_meta = is_meta,
+ 	};
+ 
+ 	if (unlikely(!is_meta))
+@@ -74,8 +75,10 @@ repeat:
+ 	fio.page = page;
+ 
+ 	if (f2fs_submit_page_bio(&fio)) {
+-		f2fs_put_page(page, 1);
+-		goto repeat;
++		memset(page_address(page), 0, PAGE_SIZE);
++		f2fs_stop_checkpoint(sbi);
++		f2fs_bug_on(sbi, 1);
++		return page;
+ 	}
+ 
+ 	lock_page(page);
+@@ -106,7 +109,8 @@ struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
+ 	return __get_meta_page(sbi, index, false);
+ }
+ 
+-bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
++bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
++					block_t blkaddr, int type)
+ {
+ 	switch (type) {
+ 	case META_NAT:
+@@ -126,8 +130,20 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
+ 			return false;
+ 		break;
+ 	case META_POR:
++	case DATA_GENERIC:
+ 		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
+-			blkaddr < MAIN_BLKADDR(sbi)))
++			blkaddr < MAIN_BLKADDR(sbi))) {
++			if (type == DATA_GENERIC) {
++				f2fs_msg(sbi->sb, KERN_WARNING,
++					"access invalid blkaddr:%u", blkaddr);
++				WARN_ON(1);
++			}
++			return false;
++		}
++		break;
++	case META_GENERIC:
++		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
++			blkaddr >= MAIN_BLKADDR(sbi)))
+ 			return false;
+ 		break;
+ 	default:
+@@ -151,6 +167,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ 		.type = META,
+ 		.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
+ 		.encrypted_page = NULL,
++		.is_meta = (type != META_POR),
+ 	};
+ 
+ 	if (unlikely(type == META_POR))
+@@ -158,7 +175,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ 
+ 	for (; nrpages-- > 0; blkno++) {
+ 
+-		if (!is_valid_blkaddr(sbi, blkno, type))
++		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
+ 			goto out;
+ 
+ 		switch (type) {
+@@ -601,54 +618,73 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
+ 	}
+ }
+ 
+-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+-				block_t cp_addr, unsigned long long *version)
++static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
++		struct f2fs_checkpoint **cp_block, struct page **cp_page,
++		unsigned long long *version)
+ {
+-	struct page *cp_page_1, *cp_page_2 = NULL;
+ 	unsigned long blk_size = sbi->blocksize;
+-	struct f2fs_checkpoint *cp_block;
+-	unsigned long long cur_version = 0, pre_version = 0;
+-	size_t crc_offset;
++	size_t crc_offset = 0;
+ 	__u32 crc = 0;
+ 
+-	/* Read the 1st cp block in this CP pack */
+-	cp_page_1 = get_meta_page(sbi, cp_addr);
++	*cp_page = get_meta_page(sbi, cp_addr);
++	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
+ 
+-	/* get the version number */
+-	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
+-	crc_offset = le32_to_cpu(cp_block->checksum_offset);
+-	if (crc_offset >= blk_size)
+-		goto invalid_cp1;
+-
+-	crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
+-	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+-		goto invalid_cp1;
++	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
++	if (crc_offset >= blk_size) {
++		f2fs_put_page(*cp_page, 1);
++		f2fs_msg(sbi->sb, KERN_WARNING,
++			"invalid crc_offset: %zu", crc_offset);
++		return -EINVAL;
++	}
+ 
+-	pre_version = cur_cp_version(cp_block);
++	crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
++							+ crc_offset)));
++	if (!f2fs_crc_valid(crc, *cp_block, crc_offset)) {
++		f2fs_put_page(*cp_page, 1);
++		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
++		return -EINVAL;
++	}
+ 
+-	/* Read the 2nd cp block in this CP pack */
+-	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+-	cp_page_2 = get_meta_page(sbi, cp_addr);
++	*version = cur_cp_version(*cp_block);
++	return 0;
++}
+ 
+-	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
+-	crc_offset = le32_to_cpu(cp_block->checksum_offset);
+-	if (crc_offset >= blk_size)
+-		goto invalid_cp2;
++static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
++				block_t cp_addr, unsigned long long *version)
++{
++	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
++	struct f2fs_checkpoint *cp_block = NULL;
++	unsigned long long cur_version = 0, pre_version = 0;
++	int err;
+ 
+-	crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
+-	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+-		goto invalid_cp2;
++	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
++					&cp_page_1, version);
++	if (err)
++		return NULL;
++
++	if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
++					sbi->blocks_per_seg) {
++		f2fs_msg(sbi->sb, KERN_WARNING,
++			"invalid cp_pack_total_block_count:%u",
++			le32_to_cpu(cp_block->cp_pack_total_block_count));
++		goto invalid_cp;
++	}
++	pre_version = *version;
+ 
+-	cur_version = cur_cp_version(cp_block);
++	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
++	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
++					&cp_page_2, version);
++	if (err)
++		goto invalid_cp;
++	cur_version = *version;
+ 
+ 	if (cur_version == pre_version) {
+ 		*version = cur_version;
+ 		f2fs_put_page(cp_page_2, 1);
+ 		return cp_page_1;
+ 	}
+-invalid_cp2:
+ 	f2fs_put_page(cp_page_2, 1);
+-invalid_cp1:
++invalid_cp:
+ 	f2fs_put_page(cp_page_1, 1);
+ 	return NULL;
+ }
+@@ -696,6 +732,15 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
+ 	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
+ 	memcpy(sbi->ckpt, cp_block, blk_size);
+ 
++	if (cur_page == cp1)
++		sbi->cur_cp_pack = 1;
++	else
++		sbi->cur_cp_pack = 2;
++
++	/* Sanity checking of checkpoint */
++	if (sanity_check_ckpt(sbi))
++		goto free_fail_no_cp;
++
+ 	if (cp_blks <= 1)
+ 		goto done;
+ 
+@@ -717,6 +762,9 @@ done:
+ 	f2fs_put_page(cp2, 1);
+ 	return 0;
+ 
++free_fail_no_cp:
++	f2fs_put_page(cp1, 1);
++	f2fs_put_page(cp2, 1);
+ fail_no_cp:
+ 	kfree(sbi->ckpt);
+ 	return -EINVAL;
+@@ -767,24 +815,6 @@ out:
+ 	f2fs_trace_pid(page);
+ }
+ 
+-void add_dirty_dir_inode(struct inode *inode)
+-{
+-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct inode_entry *new =
+-			f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+-	int ret = 0;
+-
+-	new->inode = inode;
+-	INIT_LIST_HEAD(&new->list);
+-
+-	spin_lock(&sbi->dir_inode_lock);
+-	ret = __add_dirty_inode(inode, new);
+-	spin_unlock(&sbi->dir_inode_lock);
+-
+-	if (ret)
+-		kmem_cache_free(inode_entry_slab, new);
+-}
+-
+ void remove_dirty_dir_inode(struct inode *inode)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+@@ -807,12 +837,6 @@ void remove_dirty_dir_inode(struct inode *inode)
+ 	stat_dec_dirty_dir(sbi);
+ 	spin_unlock(&sbi->dir_inode_lock);
+ 	kmem_cache_free(inode_entry_slab, entry);
+-
+-	/* Only from the recovery routine */
+-	if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
+-		clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
+-		iput(inode);
+-	}
+ }
+ 
+ void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
+@@ -922,7 +946,6 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ {
+ 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+-	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
+ 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+ 	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
+ 	nid_t last_nid = nm_i->next_scan_nid;
+@@ -931,15 +954,6 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	__u32 crc32 = 0;
+ 	int i;
+ 	int cp_payload_blks = __cp_payload(sbi);
+-	block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
+-	bool invalidate = false;
+-
+-	/*
+-	 * This avoids to conduct wrong roll-forward operations and uses
+-	 * metapages, so should be called prior to sync_meta_pages below.
+-	 */
+-	if (discard_next_dnode(sbi, discard_blk))
+-		invalidate = true;
+ 
+ 	/* Flush all the NAT/SIT pages */
+ 	while (get_pages(sbi, F2FS_DIRTY_META)) {
+@@ -1016,6 +1030,9 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ 		set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ 
++	/* set this flag to activate crc|cp_ver for recovery */
++	set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
++
+ 	/* update SIT/NAT bitmap */
+ 	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
+ 	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
+@@ -1025,7 +1042,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 				le32_to_cpu(ckpt->checksum_offset)))
+ 				= cpu_to_le32(crc32);
+ 
+-	start_blk = __start_cp_addr(sbi);
++	start_blk = __start_cp_next_addr(sbi);
+ 
+ 	/* need to wait for end_io results */
+ 	wait_on_all_pages_writeback(sbi);
+@@ -1073,14 +1090,6 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	/* wait for previous submitted meta pages writeback */
+ 	wait_on_all_pages_writeback(sbi);
+ 
+-	/*
+-	 * invalidate meta page which is used temporarily for zeroing out
+-	 * block at the end of warm node chain.
+-	 */
+-	if (invalidate)
+-		invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
+-								discard_blk);
+-
+ 	release_dirty_inode(sbi);
+ 
+ 	if (unlikely(f2fs_cp_error(sbi)))
+@@ -1088,6 +1097,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 
+ 	clear_prefree_segments(sbi, cpc);
+ 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
++	__set_cp_next_pack(sbi);
+ }
+ 
+ /*
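
The reworked f2fs_is_valid_blkaddr() reduces the new generic classes to range checks against the on-disk layout: META_GENERIC addresses must fall in [SEG0_BLKADDR, MAIN_BLKADDR) and DATA_GENERIC in [MAIN_BLKADDR, MAX_BLKADDR), and the data.c hunks below apply the data check before any bio is submitted. Restated with the layout boundaries passed in explicitly rather than read from the superblock info:

#include <stdbool.h>

typedef unsigned int block_t;	/* f2fs block addresses are 32-bit */

static bool in_range(block_t addr, block_t lo, block_t hi)
{
	return addr >= lo && addr < hi;
}

/* meta blocks live below the main area, data blocks inside it */
static bool blkaddr_valid(block_t addr, block_t seg0, block_t main_blk,
			  block_t max_blk, bool is_meta)
{
	return is_meta ? in_range(addr, seg0, main_blk)
		       : in_range(addr, main_blk, max_blk);
}
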
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index f6ccb21f286b..2b0b671484bd 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -147,6 +147,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+ 	struct bio *bio;
+ 	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ 
++	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->blk_addr,
++			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
++		return -EFAULT;
++
+ 	trace_f2fs_submit_page_bio(page, fio);
+ 	f2fs_trace_ios(fio, 0);
+ 
+@@ -172,7 +176,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
+ 
+ 	io = is_read ? &sbi->read_io : &sbi->write_io[btype];
+ 
+-	verify_block_addr(sbi, fio->blk_addr);
++	verify_block_addr(fio, fio->blk_addr);
+ 
+ 	down_write(&io->io_rwsem);
+ 
+@@ -603,7 +607,13 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ 		goto unlock_out;
+ 	}
+ 
+-	if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
++	if (__is_valid_data_blkaddr(dn.data_blkaddr) &&
++		!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, DATA_GENERIC)) {
++		err = -EFAULT;
++		goto sync_out;
++	}
++
++	if (!is_valid_data_blkaddr(sbi, dn.data_blkaddr)) {
+ 		if (create) {
+ 			if (unlikely(f2fs_cp_error(sbi))) {
+ 				err = -EIO;
+@@ -866,6 +876,40 @@ out:
+ 	return ret;
+ }
+ 
++struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
++							unsigned nr_pages)
++{
++	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++	struct f2fs_crypto_ctx *ctx = NULL;
++	struct block_device *bdev = sbi->sb->s_bdev;
++	struct bio *bio;
++
++	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
++		return ERR_PTR(-EFAULT);
++
++	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
++		ctx = f2fs_get_crypto_ctx(inode);
++		if (IS_ERR(ctx))
++			return ERR_CAST(ctx);
++
++		/* wait the page to be moved by cleaning */
++		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
++	}
++
++	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
++	if (!bio) {
++		if (ctx)
++			f2fs_release_crypto_ctx(ctx);
++		return ERR_PTR(-ENOMEM);
++	}
++	bio->bi_bdev = bdev;
++	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
++	bio->bi_end_io = f2fs_read_end_io;
++	bio->bi_private = ctx;
++
++	return bio;
++}
++
+ /*
+  * This function was originally taken from fs/mpage.c, and customized for f2fs.
+  * Major change was from block_size == page_size in f2fs by default.
+@@ -884,7 +928,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
+ 	sector_t last_block;
+ 	sector_t last_block_in_file;
+ 	sector_t block_nr;
+-	struct block_device *bdev = inode->i_sb->s_bdev;
+ 	struct f2fs_map_blocks map;
+ 
+ 	map.m_pblk = 0;
+@@ -941,6 +984,10 @@ got_it:
+ 				SetPageUptodate(page);
+ 				goto confused;
+ 			}
++
++			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
++								DATA_GENERIC))
++				goto set_error_page;
+ 		} else {
+ 			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ 			SetPageUptodate(page);
+@@ -958,31 +1005,9 @@ submit_and_realloc:
+ 			bio = NULL;
+ 		}
+ 		if (bio == NULL) {
+-			struct f2fs_crypto_ctx *ctx = NULL;
+-
+-			if (f2fs_encrypted_inode(inode) &&
+-					S_ISREG(inode->i_mode)) {
+-
+-				ctx = f2fs_get_crypto_ctx(inode);
+-				if (IS_ERR(ctx))
+-					goto set_error_page;
+-
+-				/* wait the page to be moved by cleaning */
+-				f2fs_wait_on_encrypted_page_writeback(
+-						F2FS_I_SB(inode), block_nr);
+-			}
+-
+-			bio = bio_alloc(GFP_KERNEL,
+-				min_t(int, nr_pages, BIO_MAX_PAGES));
+-			if (!bio) {
+-				if (ctx)
+-					f2fs_release_crypto_ctx(ctx);
++			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
++			if (IS_ERR(bio))
+ 				goto set_error_page;
+-			}
+-			bio->bi_bdev = bdev;
+-			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
+-			bio->bi_end_io = f2fs_read_end_io;
+-			bio->bi_private = ctx;
+ 		}
+ 
+ 		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+@@ -1077,11 +1102,17 @@ int do_write_data_page(struct f2fs_io_info *fio)
+ 
+ 	set_page_writeback(page);
+ 
++	if (__is_valid_data_blkaddr(fio->blk_addr) &&
++		!f2fs_is_valid_blkaddr(fio->sbi, fio->blk_addr,
++							DATA_GENERIC)) {
++		err = -EFAULT;
++		goto out_writepage;
++	}
+ 	/*
+ 	 * If current allocation needs SSR,
+ 	 * it had better in-place writes for updated data.
+ 	 */
+-	if (unlikely(fio->blk_addr != NEW_ADDR &&
++	if (unlikely(is_valid_data_blkaddr(fio->sbi, fio->blk_addr) &&
+ 			!is_cold_data(page) &&
+ 			need_inplace_update(inode))) {
+ 		rewrite_data_page(fio);
+@@ -1482,17 +1513,21 @@ put_next:
+ 	if (dn.data_blkaddr == NEW_ADDR) {
+ 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ 	} else {
+-		struct f2fs_io_info fio = {
+-			.sbi = sbi,
+-			.type = DATA,
+-			.rw = READ_SYNC,
+-			.blk_addr = dn.data_blkaddr,
+-			.page = page,
+-			.encrypted_page = NULL,
+-		};
+-		err = f2fs_submit_page_bio(&fio);
+-		if (err)
++		struct bio *bio;
++
++		bio = f2fs_grab_bio(inode, dn.data_blkaddr, 1);
++		if (IS_ERR(bio)) {
++			err = PTR_ERR(bio);
+ 			goto fail;
++		}
++
++		if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
++			bio_put(bio);
++			err = -EFAULT;
++			goto fail;
++		}
++
++		submit_bio(READ_SYNC, bio);
+ 
+ 		lock_page(page);
+ 		if (unlikely(!PageUptodate(page))) {
+@@ -1503,13 +1538,6 @@ put_next:
+ 			f2fs_put_page(page, 1);
+ 			goto repeat;
+ 		}
+-
+-		/* avoid symlink page */
+-		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+-			err = f2fs_decrypt_one(inode, page);
+-			if (err)
+-				goto fail;
+-		}
+ 	}
+ out_update:
+ 	SetPageUptodate(page);
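
The data.c changes above consolidate bio construction into f2fs_grab_bio(), which validates the block address before allocating anything and reports failure through the kernel's ERR_PTR convention; both the readahead path and the write_begin path now go through it. A minimal userspace sketch of that validate-then-allocate shape (blkaddr_is_valid() and its range constants are illustrative stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Userspace stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

struct bio { unsigned long first_blk; };

/* Hypothetical range check standing in for f2fs_is_valid_blkaddr(). */
static int blkaddr_is_valid(unsigned long blk)
{
	return blk >= 512 && blk < (1UL << 20);
}

/* Validate first, allocate second -- mirrors the f2fs_grab_bio() shape. */
static struct bio *grab_bio(unsigned long blk)
{
	struct bio *bio;

	if (!blkaddr_is_valid(blk))
		return ERR_PTR(-EFAULT);	/* corrupted address: no bio */

	bio = malloc(sizeof(*bio));
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio->first_blk = blk;
	return bio;
}

int main(void)
{
	struct bio *bio = grab_bio(16);		/* below main area: rejected */

	if (IS_ERR(bio))
		printf("grab_bio failed: %ld\n", PTR_ERR(bio));
	else
		free(bio);
	return 0;
}

The ordering is the point: a corrupted address can never reach the block layer, because every caller sees -EFAULT (or an error page) before a bio exists.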
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 60972a559685..92a240616f52 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -48,7 +48,6 @@ unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
+ 	[F2FS_FT_SYMLINK]	= DT_LNK,
+ };
+ 
+-#define S_SHIFT 12
+ static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
+ 	[S_IFREG >> S_SHIFT]	= F2FS_FT_REG_FILE,
+ 	[S_IFDIR >> S_SHIFT]	= F2FS_FT_DIR,
+@@ -64,6 +63,13 @@ void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
+ 	de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
+ }
+ 
++unsigned char get_de_type(struct f2fs_dir_entry *de)
++{
++	if (de->file_type < F2FS_FT_MAX)
++		return f2fs_filetype_table[de->file_type];
++	return DT_UNKNOWN;
++}
++
+ static unsigned long dir_block_index(unsigned int level,
+ 				int dir_level, unsigned int idx)
+ {
+@@ -519,11 +525,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
+ 		test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
+ }
+ 
+-/*
+- * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+- * f2fs_unlock_op().
+- */
+-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
++int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+ 				struct inode *inode, nid_t ino, umode_t mode)
+ {
+ 	unsigned int bit_pos;
+@@ -536,28 +538,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ 	struct f2fs_dentry_block *dentry_blk = NULL;
+ 	struct f2fs_dentry_ptr d;
+ 	struct page *page = NULL;
+-	struct f2fs_filename fname;
+-	struct qstr new_name;
+-	int slots, err;
+-
+-	err = f2fs_fname_setup_filename(dir, name, 0, &fname);
+-	if (err)
+-		return err;
+-
+-	new_name.name = fname_name(&fname);
+-	new_name.len = fname_len(&fname);
+-
+-	if (f2fs_has_inline_dentry(dir)) {
+-		err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
+-		if (!err || err != -EAGAIN)
+-			goto out;
+-		else
+-			err = 0;
+-	}
++	int slots, err = 0;
+ 
+ 	level = 0;
+-	slots = GET_DENTRY_SLOTS(new_name.len);
+-	dentry_hash = f2fs_dentry_hash(&new_name, NULL);
++	slots = GET_DENTRY_SLOTS(new_name->len);
++	dentry_hash = f2fs_dentry_hash(new_name, NULL);
+ 
+ 	current_depth = F2FS_I(dir)->i_current_depth;
+ 	if (F2FS_I(dir)->chash == dentry_hash) {
+@@ -566,10 +551,8 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ 	}
+ 
+ start:
+-	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
+-		err = -ENOSPC;
+-		goto out;
+-	}
++	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
++		return -ENOSPC;
+ 
+ 	/* Increase the depth, if required */
+ 	if (level == current_depth)
+@@ -583,10 +566,8 @@ start:
+ 
+ 	for (block = bidx; block <= (bidx + nblock - 1); block++) {
+ 		dentry_page = get_new_data_page(dir, NULL, block, true);
+-		if (IS_ERR(dentry_page)) {
+-			err = PTR_ERR(dentry_page);
+-			goto out;
+-		}
++		if (IS_ERR(dentry_page))
++			return PTR_ERR(dentry_page);
+ 
+ 		dentry_blk = kmap(dentry_page);
+ 		bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
+@@ -606,7 +587,7 @@ add_dentry:
+ 
+ 	if (inode) {
+ 		down_write(&F2FS_I(inode)->i_sem);
+-		page = init_inode_metadata(inode, dir, &new_name, NULL);
++		page = init_inode_metadata(inode, dir, new_name, NULL);
+ 		if (IS_ERR(page)) {
+ 			err = PTR_ERR(page);
+ 			goto fail;
+@@ -616,7 +597,7 @@ add_dentry:
+ 	}
+ 
+ 	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+-	f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);
++	f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
+ 
+ 	set_page_dirty(dentry_page);
+ 
+@@ -638,7 +619,34 @@ fail:
+ 	}
+ 	kunmap(dentry_page);
+ 	f2fs_put_page(dentry_page, 1);
+-out:
++
++	return err;
++}
++
++/*
++ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
++ * f2fs_unlock_op().
++ */
++int __f2fs_add_link(struct inode *dir, const struct qstr *name,
++				struct inode *inode, nid_t ino, umode_t mode)
++{
++	struct f2fs_filename fname;
++	struct qstr new_name;
++	int err;
++
++	err = f2fs_fname_setup_filename(dir, name, 0, &fname);
++	if (err)
++		return err;
++
++	new_name.name = fname_name(&fname);
++	new_name.len = fname_len(&fname);
++
++	err = -EAGAIN;
++	if (f2fs_has_inline_dentry(dir))
++		err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
++	if (err == -EAGAIN)
++		err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode);
++
+ 	f2fs_fname_free_filename(&fname);
+ 	return err;
+ }
+@@ -792,10 +800,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ 			break;
+ 
+ 		de = &d->dentry[bit_pos];
+-		if (de->file_type < F2FS_FT_MAX)
+-			d_type = f2fs_filetype_table[de->file_type];
+-		else
+-			d_type = DT_UNKNOWN;
++		d_type = get_de_type(de);
+ 
+ 		de_name.name = d->filename[bit_pos];
+ 		de_name.len = le16_to_cpu(de->name_len);
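
get_de_type() above replaces the open-coded lookup in f2fs_fill_dentries(): an on-disk file_type outside the known table now maps to DT_UNKNOWN instead of indexing past f2fs_filetype_table. The same defensive lookup in miniature (table trimmed to three entries):

#include <stdio.h>

enum { FT_UNKNOWN, FT_REG_FILE, FT_DIR, FT_MAX };	/* trimmed table */
enum { DT_UNKNOWN, DT_REG, DT_DIR };

static const unsigned char filetype_table[FT_MAX] = {
	[FT_UNKNOWN]  = DT_UNKNOWN,
	[FT_REG_FILE] = DT_REG,
	[FT_DIR]      = DT_DIR,
};

/* Never trust the on-disk byte: clamp out-of-range types to DT_UNKNOWN. */
static unsigned char de_type(unsigned char disk_type)
{
	return disk_type < FT_MAX ? filetype_table[disk_type] : DT_UNKNOWN;
}

int main(void)
{
	printf("%u %u\n", de_type(FT_DIR), de_type(0xff));	/* 2 0 */
	return 0;
}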
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 2871576fbca4..2bfce887dce2 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -135,7 +135,7 @@ struct cp_control {
+ };
+ 
+ /*
+- * For CP/NAT/SIT/SSA readahead
++ * indicate meta/data type
+  */
+ enum {
+ 	META_CP,
+@@ -143,6 +143,8 @@ enum {
+ 	META_SIT,
+ 	META_SSA,
+ 	META_POR,
++	DATA_GENERIC,
++	META_GENERIC,
+ };
+ 
+ /* for the list of ino */
+@@ -684,6 +686,7 @@ struct f2fs_io_info {
+ 	block_t blk_addr;	/* block address to be written */
+ 	struct page *page;	/* page to be written */
+ 	struct page *encrypted_page;	/* encrypted page */
++	bool is_meta;		/* whether the meta inode mapping is borrowed */
+ };
+ 
+ #define is_read_io(rw)	(((rw) & 1) == READ)
+@@ -731,6 +734,7 @@ struct f2fs_sb_info {
+ 
+ 	/* for checkpoint */
+ 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
++	int cur_cp_pack;			/* checkpoint pack currently in use (1 or 2) */
+ 	struct inode *meta_inode;		/* cache meta blocks */
+ 	struct mutex cp_mutex;			/* checkpoint procedure lock */
+ 	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
+@@ -1140,22 +1144,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
+ 
+ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
+ {
+-	block_t start_addr;
+-	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+-	unsigned long long ckpt_version = cur_cp_version(ckpt);
+-
+-	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
++	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ 
+-	/*
+-	 * odd numbered checkpoint should at cp segment 0
+-	 * and even segment must be at cp segment 1
+-	 */
+-	if (!(ckpt_version & 1))
++	if (sbi->cur_cp_pack == 2)
+ 		start_addr += sbi->blocks_per_seg;
++	return start_addr;
++}
++
++static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
++{
++	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ 
++	if (sbi->cur_cp_pack == 1)
++		start_addr += sbi->blocks_per_seg;
+ 	return start_addr;
+ }
+ 
++static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
++{
++	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
++}
++
+ static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
+ {
+ 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
+@@ -1402,7 +1411,6 @@ enum {
+ 	FI_NO_ALLOC,		/* should not allocate any blocks */
+ 	FI_FREE_NID,		/* free allocated nide */
+ 	FI_UPDATE_DIR,		/* should update inode block for consistency */
+-	FI_DELAY_IPUT,		/* used for the recovery */
+ 	FI_NO_EXTENT,		/* not to use the extent cache */
+ 	FI_INLINE_XATTR,	/* used for inline xattr */
+ 	FI_INLINE_DATA,		/* used for inline data*/
+@@ -1641,6 +1649,39 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
+ 	(pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) /	\
+ 	ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+ 
++#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&	\
++				(!is_read_io(fio->rw) || fio->is_meta))
++
++bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
++					block_t blkaddr, int type);
++void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
++static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
++					block_t blkaddr, int type)
++{
++	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
++		f2fs_msg(sbi->sb, KERN_ERR,
++			"invalid blkaddr: %u, type: %d, run fsck to fix.",
++			blkaddr, type);
++		f2fs_bug_on(sbi, 1);
++	}
++}
++
++static inline bool __is_valid_data_blkaddr(block_t blkaddr)
++{
++	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
++		return false;
++	return true;
++}
++
++static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
++						block_t blkaddr)
++{
++	if (!__is_valid_data_blkaddr(blkaddr))
++		return false;
++	verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
++	return true;
++}
++
+ /*
+  * file.c
+  */
+@@ -1677,7 +1718,7 @@ struct dentry *f2fs_get_parent(struct dentry *child);
+  */
+ extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
+ void set_de_type(struct f2fs_dir_entry *, umode_t);
+-
++unsigned char get_de_type(struct f2fs_dir_entry *);
+ struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *,
+ 			f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
+ bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
+@@ -1698,6 +1739,8 @@ void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
+ int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
+ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
+ 			const struct qstr *, f2fs_hash_t , unsigned int);
++int f2fs_add_regular_entry(struct inode *, const struct qstr *,
++						struct inode *, nid_t, umode_t);
+ int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
+ 			umode_t);
+ void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
+@@ -1718,6 +1761,7 @@ int f2fs_commit_super(struct f2fs_sb_info *, bool);
+ int f2fs_sync_fs(struct super_block *, int);
+ extern __printf(3, 4)
+ void f2fs_msg(struct super_block *, const char *, const char *, ...);
++int sanity_check_ckpt(struct f2fs_sb_info *sbi);
+ 
+ /*
+  * hash.c
+@@ -1778,7 +1822,6 @@ bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
+ void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
+ void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
+ void release_discard_addrs(struct f2fs_sb_info *);
+-bool discard_next_dnode(struct f2fs_sb_info *, block_t);
+ int npages_for_summary_flush(struct f2fs_sb_info *, bool);
+ void allocate_new_segments(struct f2fs_sb_info *);
+ int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
+@@ -1810,7 +1853,8 @@ void destroy_segment_manager_caches(void);
+ struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
+ struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
+ struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
+-bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
++bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
++					block_t blkaddr, int type);
+ int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
+ void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
+ long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
+@@ -1825,7 +1869,6 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
+ int recover_orphan_inodes(struct f2fs_sb_info *);
+ int get_valid_checkpoint(struct f2fs_sb_info *);
+ void update_dirty_page(struct inode *, struct page *);
+-void add_dirty_dir_inode(struct inode *);
+ void remove_dirty_dir_inode(struct inode *);
+ void sync_dirty_dir_inodes(struct f2fs_sb_info *);
+ void write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
+@@ -1864,7 +1907,7 @@ void build_gc_manager(struct f2fs_sb_info *);
+ /*
+  * recovery.c
+  */
+-int recover_fsync_data(struct f2fs_sb_info *);
++int recover_fsync_data(struct f2fs_sb_info *, bool);
+ bool space_for_roll_forward(struct f2fs_sb_info *);
+ 
+ /*
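
The f2fs.h hunks replace the "derive the checkpoint pack from version parity" logic (the removed odd/even comment) with an explicit cur_cp_pack field that write_checkpoint() flips through __set_cp_next_pack() once a pack commits. A toy model of the two-pack alternation (the base address and segment size are arbitrary):

#include <stdio.h>

struct sb {
	unsigned long cp_blkaddr;	/* start of checkpoint area */
	unsigned long blocks_per_seg;
	int cur_cp_pack;		/* 1 or 2 */
};

static unsigned long start_cp_addr(const struct sb *s)
{
	return s->cp_blkaddr + (s->cur_cp_pack == 2 ? s->blocks_per_seg : 0);
}

static unsigned long start_cp_next_addr(const struct sb *s)
{
	return s->cp_blkaddr + (s->cur_cp_pack == 1 ? s->blocks_per_seg : 0);
}

static void set_cp_next_pack(struct sb *s)
{
	s->cur_cp_pack = (s->cur_cp_pack == 1) ? 2 : 1;
}

int main(void)
{
	struct sb s = { .cp_blkaddr = 1024, .blocks_per_seg = 512,
			.cur_cp_pack = 1 };

	for (int i = 0; i < 3; i++) {
		printf("write pack at %lu, next at %lu\n",
		       start_cp_addr(&s), start_cp_next_addr(&s));
		set_cp_next_pack(&s);	/* commit: the other pack is next */
	}
	return 0;
}

Keeping the pack index in sb_info avoids recomputing it from the raw checkpoint on every access and makes the "next pack" address trivial to derive.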
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 01eed94b01ea..96bfd9f0ea02 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -305,13 +305,13 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
+ 	return pgofs;
+ }
+ 
+-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
+-							int whence)
++static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
++				pgoff_t dirty, pgoff_t pgofs, int whence)
+ {
+ 	switch (whence) {
+ 	case SEEK_DATA:
+ 		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
+-			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
++			is_valid_data_blkaddr(sbi, blkaddr))
+ 			return true;
+ 		break;
+ 	case SEEK_HOLE:
+@@ -374,7 +374,15 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
+ 			block_t blkaddr;
+ 			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ 
+-			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
++			if (__is_valid_data_blkaddr(blkaddr) &&
++				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
++						blkaddr, DATA_GENERIC)) {
++				f2fs_put_dnode(&dn);
++				goto fail;
++			}
++
++			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
++							pgofs, whence)) {
+ 				f2fs_put_dnode(&dn);
+ 				goto found;
+ 			}
+@@ -466,6 +474,11 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ 
+ 		dn->data_blkaddr = NULL_ADDR;
+ 		set_data_blkaddr(dn);
++
++		if (__is_valid_data_blkaddr(blkaddr) &&
++			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
++			continue;
++
+ 		invalidate_blocks(sbi, blkaddr);
+ 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
+ 			clear_inode_flag(F2FS_I(dn->inode),
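
In file.c, the SEEK_DATA test is rebuilt on the new helpers: an offset counts as data when the page is a dirty NEW_ADDR page or the block address is a real, in-range one, and f2fs_seek_block() now fails outright on an out-of-range address instead of trusting it. A small model of the predicate (the sentinels and range bound are placeholders):

#include <stdio.h>

#define NULL_ADDR 0UL
#define NEW_ADDR  ~0UL	/* placeholder sentinel values */

static int blk_is_valid_data(unsigned long blk)
{
	/* stands in for is_valid_data_blkaddr(): not a sentinel, in range */
	return blk != NULL_ADDR && blk != NEW_ADDR && blk < (1UL << 20);
}

/* SEEK_DATA: found when the page is dirty-and-new or really allocated. */
static int found_data(unsigned long blk, unsigned long dirty_idx,
		      unsigned long pg_idx)
{
	return (blk == NEW_ADDR && dirty_idx == pg_idx) ||
		blk_is_valid_data(blk);
}

int main(void)
{
	printf("%d %d %d\n",
	       found_data(4096, 0, 7),		/* allocated block: 1 */
	       found_data(NEW_ADDR, 7, 7),	/* dirty new page: 1 */
	       found_data(NULL_ADDR, 0, 7));	/* hole: 0 */
	return 0;
}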
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index ad80f916b64d..00685a8b1418 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -127,6 +127,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
+ 	if (err)
+ 		return err;
+ 
++	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
++		f2fs_put_dnode(dn);
++		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
++		f2fs_msg(fio.sbi->sb, KERN_WARNING,
++			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
++			"run fsck to fix.",
++			__func__, dn->inode->i_ino, dn->data_blkaddr);
++		return -EINVAL;
++	}
++
+ 	f2fs_wait_on_page_writeback(page, DATA);
+ 
+ 	if (PageUptodate(page))
+@@ -367,7 +377,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
+  * NOTE: ipage is grabbed by caller, but if any error occurs, we should
+  * release ipage in this function.
+  */
+-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
++static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+ 				struct f2fs_inline_dentry *inline_dentry)
+ {
+ 	struct page *page;
+@@ -386,6 +396,17 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+ 	if (err)
+ 		goto out;
+ 
++	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
++		f2fs_put_dnode(&dn);
++		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
++		f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
++			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
++			"run fsck to fix.",
++			__func__, dir->i_ino, dn.data_blkaddr);
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	f2fs_wait_on_page_writeback(page, DATA);
+ 	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ 
+@@ -428,6 +449,98 @@ out:
+ 	return err;
+ }
+ 
++static int f2fs_add_inline_entries(struct inode *dir,
++			struct f2fs_inline_dentry *inline_dentry)
++{
++	struct f2fs_dentry_ptr d;
++	unsigned long bit_pos = 0;
++	int err = 0;
++
++	make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
++
++	while (bit_pos < d.max) {
++		struct f2fs_dir_entry *de;
++		struct qstr new_name;
++		nid_t ino;
++		umode_t fake_mode;
++
++		if (!test_bit_le(bit_pos, d.bitmap)) {
++			bit_pos++;
++			continue;
++		}
++
++		de = &d.dentry[bit_pos];
++		new_name.name = d.filename[bit_pos];
++		new_name.len = de->name_len;
++
++		ino = le32_to_cpu(de->ino);
++		fake_mode = get_de_type(de) << S_SHIFT;
++
++		err = f2fs_add_regular_entry(dir, &new_name, NULL,
++							ino, fake_mode);
++		if (err)
++			goto punch_dentry_pages;
++
++		if (unlikely(!de->name_len))
++			d.max = -1;
++
++		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
++	}
++	return 0;
++punch_dentry_pages:
++	truncate_inode_pages(&dir->i_data, 0);
++	truncate_blocks(dir, 0, false);
++	remove_dirty_dir_inode(dir);
++	return err;
++}
++
++static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
++				struct f2fs_inline_dentry *inline_dentry)
++{
++	struct f2fs_inline_dentry *backup_dentry;
++	int err;
++
++	backup_dentry = kmalloc(sizeof(struct f2fs_inline_dentry),
++							GFP_F2FS_ZERO);
++	if (!backup_dentry)
++		return -ENOMEM;
++
++	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
++	truncate_inline_inode(ipage, 0);
++
++	unlock_page(ipage);
++
++	err = f2fs_add_inline_entries(dir, backup_dentry);
++	if (err)
++		goto recover;
++
++	lock_page(ipage);
++
++	stat_dec_inline_dir(dir);
++	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
++	update_inode(dir, ipage);
++	kfree(backup_dentry);
++	return 0;
++recover:
++	lock_page(ipage);
++	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
++	i_size_write(dir, MAX_INLINE_DATA);
++	update_inode(dir, ipage);
++	f2fs_put_page(ipage, 1);
++
++	kfree(backup_dentry);
++	return err;
++}
++
++static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
++				struct f2fs_inline_dentry *inline_dentry)
++{
++	if (!F2FS_I(dir)->i_dir_level)
++		return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
++	else
++		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
++}
++
+ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
+ 			struct inode *inode, nid_t ino, umode_t mode)
+ {
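
f2fs_move_rehashed_dirents() above is a copy-then-rollback migration: snapshot the inline dentry block, truncate the inline area, replay every entry through f2fs_add_regular_entry(), and restore the snapshot if anything fails; f2fs_convert_inline_dir() picks it over the block-move path only for leveled (i_dir_level != 0) directories. The pattern in miniature, with the replay step reduced to a stub:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INLINE_SIZE 64

static int migrate_entries(const char *src)	/* stub for the replay loop */
{
	return strchr(src, '!') ? -1 : 0;	/* pretend '!' entries fail */
}

static int convert_inline(char *inline_area)
{
	char *backup = malloc(INLINE_SIZE);
	int err;

	if (!backup)
		return -1;
	memcpy(backup, inline_area, INLINE_SIZE);	/* snapshot */
	memset(inline_area, 0, INLINE_SIZE);		/* truncate inline data */

	err = migrate_entries(backup);
	if (err)	/* rollback: put the snapshot back untouched */
		memcpy(inline_area, backup, INLINE_SIZE);

	free(backup);
	return err;
}

int main(void)
{
	char area[INLINE_SIZE] = "a b c";

	printf("ok: %d (%s)\n", convert_inline(area), area);
	strcpy(area, "a ! c");
	printf("fail: %d (%s)\n", convert_inline(area), area);
	return 0;
}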
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 5528801a5baf..89bf8dd7758c 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -50,13 +50,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+ 	}
+ }
+ 
+-static bool __written_first_block(struct f2fs_inode *ri)
++static int __written_first_block(struct f2fs_sb_info *sbi,
++					struct f2fs_inode *ri)
+ {
+ 	block_t addr = le32_to_cpu(ri->i_addr[0]);
+ 
+-	if (addr != NEW_ADDR && addr != NULL_ADDR)
+-		return true;
+-	return false;
++	if (!__is_valid_data_blkaddr(addr))
++		return 1;
++	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
++		return -EFAULT;
++	return 0;
+ }
+ 
+ static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+@@ -94,12 +97,57 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
+ 	return;
+ }
+ 
++static bool sanity_check_inode(struct inode *inode, struct page *node_page)
++{
++	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++	unsigned long long iblocks;
++
++	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
++	if (!iblocks) {
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		f2fs_msg(sbi->sb, KERN_WARNING,
++			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
++			"run fsck to fix.",
++			__func__, inode->i_ino, iblocks);
++		return false;
++	}
++
++	if (ino_of_node(node_page) != nid_of_node(node_page)) {
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		f2fs_msg(sbi->sb, KERN_WARNING,
++			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
++			"[%u, %u] run fsck to fix.",
++			__func__, inode->i_ino,
++			ino_of_node(node_page), nid_of_node(node_page));
++		return false;
++	}
++
++	if (F2FS_I(inode)->extent_tree) {
++		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
++
++		if (ei->len &&
++			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
++			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
++							DATA_GENERIC))) {
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
++			f2fs_msg(sbi->sb, KERN_WARNING,
++				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
++				"is incorrect, run fsck to fix",
++				__func__, inode->i_ino,
++				ei->blk, ei->fofs, ei->len);
++			return false;
++		}
++	}
++	return true;
++}
++
+ static int do_read_inode(struct inode *inode)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct f2fs_inode_info *fi = F2FS_I(inode);
+ 	struct page *node_page;
+ 	struct f2fs_inode *ri;
++	int err;
+ 
+ 	/* Check if ino is within scope */
+ 	if (check_nid_range(sbi, inode->i_ino)) {
+@@ -142,6 +190,11 @@ static int do_read_inode(struct inode *inode)
+ 
+ 	get_inline_info(fi, ri);
+ 
++	if (!sanity_check_inode(inode, node_page)) {
++		f2fs_put_page(node_page, 1);
++		return -EINVAL;
++	}
++
+ 	/* check data exist */
+ 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
+ 		__recover_inline_status(inode, node_page);
+@@ -149,7 +202,12 @@ static int do_read_inode(struct inode *inode)
+ 	/* get rdev by using inline_info */
+ 	__get_inode_rdev(inode, ri);
+ 
+-	if (__written_first_block(ri))
++	err = __written_first_block(sbi, ri);
++	if (err < 0) {
++		f2fs_put_page(node_page, 1);
++		return err;
++	}
++	if (!err)
+ 		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ 
+ 	f2fs_put_page(node_page, 1);
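
__written_first_block() turns from a bool into a tri-state int so corruption is distinguishable from "not yet written": 1 means the first block holds a NEW/NULL sentinel, 0 means a valid written address, and -EFAULT means an out-of-range address that makes do_read_inode() refuse the inode, just as the new sanity_check_inode() does for bad footers and extents. A sketch of producing and consuming such a tri-state (the sentinels and device bound are placeholders):

#include <stdio.h>
#include <errno.h>

/* 1 = not written, 0 = written and valid, -EFAULT = corrupted address */
static int first_block_state(unsigned long addr)
{
	if (addr == 0 || addr == ~0UL)	/* NULL_ADDR / NEW_ADDR stand-ins */
		return 1;
	if (addr >= (1UL << 20))	/* hypothetical device end */
		return -EFAULT;
	return 0;
}

int main(void)
{
	unsigned long addrs[] = { 0, 4096, 1UL << 30 };

	for (int i = 0; i < 3; i++) {
		int err = first_block_state(addrs[i]);

		if (err < 0)
			printf("addr %lu: reject inode (%d)\n", addrs[i], err);
		else if (!err)
			printf("addr %lu: first block written\n", addrs[i]);
		else
			printf("addr %lu: not written yet\n", addrs[i]);
	}
	return 0;
}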
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 7bcbc6e9c40d..3685fea62333 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -261,13 +261,11 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
+ {
+ 	struct nat_entry *e;
+ 
+-	down_write(&nm_i->nat_tree_lock);
+ 	e = __lookup_nat_cache(nm_i, nid);
+ 	if (!e) {
+ 		e = grab_nat_entry(nm_i, nid);
+ 		node_info_from_raw_nat(&e->ni, ne);
+ 	}
+-	up_write(&nm_i->nat_tree_lock);
+ }
+ 
+ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
+@@ -298,8 +296,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
+ 			new_blkaddr == NULL_ADDR);
+ 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
+ 			new_blkaddr == NEW_ADDR);
+-	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
+-			nat_get_blkaddr(e) != NULL_ADDR &&
++	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
+ 			new_blkaddr == NEW_ADDR);
+ 
+ 	/* increment version no as node is removed */
+@@ -314,7 +311,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
+ 
+ 	/* change address */
+ 	nat_set_blkaddr(e, new_blkaddr);
+-	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
++	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
+ 		set_nat_flag(e, IS_CHECKPOINTED, false);
+ 	__set_nat_cache_dirty(nm_i, e);
+ 
+@@ -379,6 +376,8 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
+ 
+ 	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
+ 
++	down_write(&nm_i->nat_tree_lock);
++
+ 	/* Check current segment summary */
+ 	mutex_lock(&curseg->curseg_mutex);
+ 	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
+@@ -399,6 +398,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
+ cache:
+ 	/* cache nat entry */
+ 	cache_nat_entry(NM_I(sbi), nid, &ne);
++	up_write(&nm_i->nat_tree_lock);
+ }
+ 
+ /*
+@@ -1341,6 +1341,12 @@ static int f2fs_write_node_page(struct page *page,
+ 		return 0;
+ 	}
+ 
++	if (__is_valid_data_blkaddr(ni.blk_addr) &&
++		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
++		up_read(&sbi->node_write);
++		goto redirty_out;
++	}
++
+ 	set_page_writeback(page);
+ 	fio.blk_addr = ni.blk_addr;
+ 	write_node_page(nid, &fio);
+@@ -1427,9 +1433,9 @@ static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
+ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+ {
+ 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+-	struct free_nid *i;
++	struct free_nid *i, *e;
+ 	struct nat_entry *ne;
+-	bool allocated = false;
++	int err = -EINVAL;
+ 
+ 	if (!available_free_memory(sbi, FREE_NIDS))
+ 		return -1;
+@@ -1438,40 +1444,58 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+ 	if (unlikely(nid == 0))
+ 		return 0;
+ 
+-	if (build) {
+-		/* do not add allocated nids */
+-		down_read(&nm_i->nat_tree_lock);
+-		ne = __lookup_nat_cache(nm_i, nid);
+-		if (ne &&
+-			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
+-				nat_get_blkaddr(ne) != NULL_ADDR))
+-			allocated = true;
+-		up_read(&nm_i->nat_tree_lock);
+-		if (allocated)
+-			return 0;
+-	}
+-
+ 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
+ 	i->nid = nid;
+ 	i->state = NID_NEW;
+ 
+-	if (radix_tree_preload(GFP_NOFS)) {
+-		kmem_cache_free(free_nid_slab, i);
+-		return 0;
+-	}
++	if (radix_tree_preload(GFP_NOFS))
++		goto err;
+ 
+ 	spin_lock(&nm_i->free_nid_list_lock);
+-	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
+-		spin_unlock(&nm_i->free_nid_list_lock);
+-		radix_tree_preload_end();
+-		kmem_cache_free(free_nid_slab, i);
+-		return 0;
++
++	if (build) {
++		/*
++		 *   Thread A             Thread B
++		 *  - f2fs_create
++		 *   - f2fs_new_inode
++		 *    - alloc_nid
++		 *     - __insert_nid_to_list(ALLOC_NID_LIST)
++		 *                     - f2fs_balance_fs_bg
++		 *                      - build_free_nids
++		 *                       - __build_free_nids
++		 *                        - scan_nat_page
++		 *                         - add_free_nid
++		 *                          - __lookup_nat_cache
++		 *  - f2fs_add_link
++		 *   - init_inode_metadata
++		 *    - new_inode_page
++		 *     - new_node_page
++		 *      - set_node_addr
++		 *  - alloc_nid_done
++		 *   - __remove_nid_from_list(ALLOC_NID_LIST)
++		 *                         - __insert_nid_to_list(FREE_NID_LIST)
++		 */
++		ne = __lookup_nat_cache(nm_i, nid);
++		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
++				nat_get_blkaddr(ne) != NULL_ADDR))
++			goto err_out;
++
++		e = __lookup_free_nid_list(nm_i, nid);
++		if (e)
++			goto err_out;
+ 	}
++	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i))
++		goto err_out;
++	err = 0;
+ 	list_add_tail(&i->list, &nm_i->free_nid_list);
+ 	nm_i->fcnt++;
++err_out:
+ 	spin_unlock(&nm_i->free_nid_list_lock);
+ 	radix_tree_preload_end();
+-	return 1;
++err:
++	if (err)
++		kmem_cache_free(free_nid_slab, i);
++	return !err;
+ }
+ 
+ static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+@@ -1532,6 +1556,8 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
+ 	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+ 							META_NAT, true);
+ 
++	down_read(&nm_i->nat_tree_lock);
++
+ 	while (1) {
+ 		struct page *page = get_current_nat_page(sbi, nid);
+ 
+@@ -1560,6 +1586,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
+ 			remove_free_nid(nm_i, nid);
+ 	}
+ 	mutex_unlock(&curseg->curseg_mutex);
++	up_read(&nm_i->nat_tree_lock);
+ 
+ 	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
+ 					nm_i->ra_nid_pages, META_NAT, false);
+@@ -1842,14 +1869,12 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
+ 
+ 		raw_ne = nat_in_journal(sum, i);
+ 
+-		down_write(&nm_i->nat_tree_lock);
+ 		ne = __lookup_nat_cache(nm_i, nid);
+ 		if (!ne) {
+ 			ne = grab_nat_entry(nm_i, nid);
+ 			node_info_from_raw_nat(&ne->ni, &raw_ne);
+ 		}
+ 		__set_nat_cache_dirty(nm_i, ne);
+-		up_write(&nm_i->nat_tree_lock);
+ 	}
+ 	update_nats_in_cursum(sum, -i);
+ 	mutex_unlock(&curseg->curseg_mutex);
+@@ -1883,7 +1908,6 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+ 	struct f2fs_nat_block *nat_blk;
+ 	struct nat_entry *ne, *cur;
+ 	struct page *page = NULL;
+-	struct f2fs_nm_info *nm_i = NM_I(sbi);
+ 
+ 	/*
+ 	 * there are two steps to flush nat entries:
+@@ -1920,12 +1944,8 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+ 			raw_ne = &nat_blk->entries[nid - start_nid];
+ 		}
+ 		raw_nat_from_node_info(raw_ne, &ne->ni);
+-
+-		down_write(&NM_I(sbi)->nat_tree_lock);
+ 		nat_reset_flag(ne);
+ 		__clear_nat_cache_dirty(NM_I(sbi), ne);
+-		up_write(&NM_I(sbi)->nat_tree_lock);
+-
+ 		if (nat_get_blkaddr(ne) == NULL_ADDR)
+ 			add_free_nid(sbi, nid, false);
+ 	}
+@@ -1937,9 +1957,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+ 
+ 	f2fs_bug_on(sbi, set->entry_cnt);
+ 
+-	down_write(&nm_i->nat_tree_lock);
+ 	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+-	up_write(&nm_i->nat_tree_lock);
+ 	kmem_cache_free(nat_entry_set_slab, set);
+ }
+ 
+@@ -1959,6 +1977,9 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
+ 
+ 	if (!nm_i->dirty_nat_cnt)
+ 		return;
++
++	down_write(&nm_i->nat_tree_lock);
++
+ 	/*
+ 	 * if there are no enough space in journal to store dirty nat
+ 	 * entries, remove all entries from journal and merge them
+@@ -1967,7 +1988,6 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
+ 	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
+ 		remove_nats_in_journal(sbi);
+ 
+-	down_write(&nm_i->nat_tree_lock);
+ 	while ((found = __gang_lookup_nat_set(nm_i,
+ 					set_idx, SETVEC_SIZE, setvec))) {
+ 		unsigned idx;
+@@ -1976,12 +1996,13 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
+ 			__adjust_nat_entry_set(setvec[idx], &sets,
+ 							MAX_NAT_JENTRIES(sum));
+ 	}
+-	up_write(&nm_i->nat_tree_lock);
+ 
+ 	/* flush dirty nats in nat entry set */
+ 	list_for_each_entry_safe(set, tmp, &sets, set_list)
+ 		__flush_nat_entry_set(sbi, set);
+ 
++	up_write(&nm_i->nat_tree_lock);
++
+ 	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
+ }
+ 
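
The node.c hunks hoist nat_tree_lock out of the leaf helpers (cache_nat_entry(), remove_nats_in_journal(), __flush_nat_entry_set()) and take it once around each whole lookup-and-cache or flush sequence, so no other thread can slip in between the lookup and the insert. A pthread sketch of the same lock hoisting, with the NAT cache reduced to one integer (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cache_entry = -1;	/* -1 means "not cached yet" */

/* Leaf helper: caller must already hold tree_lock for writing. */
static void cache_if_missing(int value)
{
	if (cache_entry == -1)
		cache_entry = value;
}

/* Before: the lock lived inside cache_if_missing(), so lookup and insert
 * could interleave.  After: one critical section covers lookup, decision,
 * and insert. */
static int get_node_info(int fresh_value)
{
	int v;

	pthread_rwlock_wrlock(&tree_lock);
	cache_if_missing(fresh_value);
	v = cache_entry;
	pthread_rwlock_unlock(&tree_lock);
	return v;
}

int main(void)
{
	printf("%d %d\n", get_node_info(42), get_node_info(7));	/* 42 42 */
	return 0;
}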
+diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
+index e4fffd2d98c4..0d6f0e3dc655 100644
+--- a/fs/f2fs/node.h
++++ b/fs/f2fs/node.h
+@@ -212,6 +212,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
+ 	f2fs_change_bit(block_off, nm_i->nat_bitmap);
+ }
+ 
++static inline nid_t ino_of_node(struct page *node_page)
++{
++	struct f2fs_node *rn = F2FS_NODE(node_page);
++	return le32_to_cpu(rn->footer.ino);
++}
++
++static inline nid_t nid_of_node(struct page *node_page)
++{
++	struct f2fs_node *rn = F2FS_NODE(node_page);
++	return le32_to_cpu(rn->footer.nid);
++}
++
++static inline unsigned int ofs_of_node(struct page *node_page)
++{
++	struct f2fs_node *rn = F2FS_NODE(node_page);
++	unsigned flag = le32_to_cpu(rn->footer.flag);
++	return flag >> OFFSET_BIT_SHIFT;
++}
++
++static inline __u64 cpver_of_node(struct page *node_page)
++{
++	struct f2fs_node *rn = F2FS_NODE(node_page);
++	return le64_to_cpu(rn->footer.cp_ver);
++}
++
++static inline block_t next_blkaddr_of_node(struct page *node_page)
++{
++	struct f2fs_node *rn = F2FS_NODE(node_page);
++	return le32_to_cpu(rn->footer.next_blkaddr);
++}
++
+ static inline void fill_node_footer(struct page *page, nid_t nid,
+ 				nid_t ino, unsigned int ofs, bool reset)
+ {
+@@ -242,40 +273,30 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
+ {
+ 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ 	struct f2fs_node *rn = F2FS_NODE(page);
++	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
++	__u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
+ 
+-	rn->footer.cp_ver = ckpt->checkpoint_ver;
++	if (is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
++		__u64 crc = le32_to_cpu(*((__le32 *)
++				((unsigned char *)ckpt + crc_offset)));
++		cp_ver |= (crc << 32);
++	}
++	rn->footer.cp_ver = cpu_to_le64(cp_ver);
+ 	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
+ }
+ 
+-static inline nid_t ino_of_node(struct page *node_page)
+-{
+-	struct f2fs_node *rn = F2FS_NODE(node_page);
+-	return le32_to_cpu(rn->footer.ino);
+-}
+-
+-static inline nid_t nid_of_node(struct page *node_page)
++static inline bool is_recoverable_dnode(struct page *page)
+ {
+-	struct f2fs_node *rn = F2FS_NODE(node_page);
+-	return le32_to_cpu(rn->footer.nid);
+-}
+-
+-static inline unsigned int ofs_of_node(struct page *node_page)
+-{
+-	struct f2fs_node *rn = F2FS_NODE(node_page);
+-	unsigned flag = le32_to_cpu(rn->footer.flag);
+-	return flag >> OFFSET_BIT_SHIFT;
+-}
+-
+-static inline unsigned long long cpver_of_node(struct page *node_page)
+-{
+-	struct f2fs_node *rn = F2FS_NODE(node_page);
+-	return le64_to_cpu(rn->footer.cp_ver);
+-}
++	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
++	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
++	__u64 cp_ver = cur_cp_version(ckpt);
+ 
+-static inline block_t next_blkaddr_of_node(struct page *node_page)
+-{
+-	struct f2fs_node *rn = F2FS_NODE(node_page);
+-	return le32_to_cpu(rn->footer.next_blkaddr);
++	if (is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
++		__u64 crc = le32_to_cpu(*((__le32 *)
++				((unsigned char *)ckpt + crc_offset)));
++		cp_ver |= (crc << 32);
++	}
++	return cpu_to_le64(cp_ver) == cpver_of_node(page);
+ }
+ 
+ /*
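
With CP_CRC_RECOVERY_FLAG set, the node footer no longer stores the bare checkpoint version: fill_node_footer_blkaddr() folds the checkpoint CRC into the upper 32 bits, and is_recoverable_dnode() compares the combined value, so a dnode written against a different checkpoint image is rejected even if the version number happens to match. The packing in isolation:

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit checkpoint CRC into the upper half of the version. */
static uint64_t pack_cp_ver(uint64_t cp_ver, uint32_t crc, int crc_recovery)
{
	if (crc_recovery)
		cp_ver |= (uint64_t)crc << 32;
	return cp_ver;
}

int main(void)
{
	uint64_t footer = pack_cp_ver(5, 0xdeadbeef, 1);

	/* recoverable only if both version and CRC match */
	printf("match: %d\n", footer == pack_cp_ver(5, 0xdeadbeef, 1));
	printf("stale: %d\n", footer == pack_cp_ver(5, 0x12345678, 1));
	return 0;
}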
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index e32f349f341b..2878be3e448f 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -67,7 +67,30 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
+ 	return NULL;
+ }
+ 
+-static int recover_dentry(struct inode *inode, struct page *ipage)
++static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
++							struct inode *inode)
++{
++	struct fsync_inode_entry *entry;
++
++	entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
++	if (!entry)
++		return NULL;
++
++	entry->inode = inode;
++	list_add_tail(&entry->list, head);
++
++	return entry;
++}
++
++static void del_fsync_inode(struct fsync_inode_entry *entry)
++{
++	iput(entry->inode);
++	list_del(&entry->list);
++	kmem_cache_free(fsync_entry_slab, entry);
++}
++
++static int recover_dentry(struct inode *inode, struct page *ipage,
++						struct list_head *dir_list)
+ {
+ 	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
+ 	nid_t pino = le32_to_cpu(raw_inode->i_pino);
+@@ -75,18 +98,29 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
+ 	struct qstr name;
+ 	struct page *page;
+ 	struct inode *dir, *einode;
++	struct fsync_inode_entry *entry;
+ 	int err = 0;
+ 
+-	dir = f2fs_iget(inode->i_sb, pino);
+-	if (IS_ERR(dir)) {
+-		err = PTR_ERR(dir);
+-		goto out;
++	entry = get_fsync_inode(dir_list, pino);
++	if (!entry) {
++		dir = f2fs_iget(inode->i_sb, pino);
++		if (IS_ERR(dir)) {
++			err = PTR_ERR(dir);
++			goto out;
++		}
++
++		entry = add_fsync_inode(dir_list, dir);
++		if (!entry) {
++			err = -ENOMEM;
++			iput(dir);
++			goto out;
++		}
+ 	}
+ 
+-	if (file_enc_name(inode)) {
+-		iput(dir);
++	dir = entry->inode;
++
++	if (file_enc_name(inode))
+ 		return 0;
+-	}
+ 
+ 	name.len = le32_to_cpu(raw_inode->i_namelen);
+ 	name.name = raw_inode->i_name;
+@@ -94,7 +128,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
+ 	if (unlikely(name.len > F2FS_NAME_LEN)) {
+ 		WARN_ON(1);
+ 		err = -ENAMETOOLONG;
+-		goto out_err;
++		goto out;
+ 	}
+ retry:
+ 	de = f2fs_find_entry(dir, &name, &page);
+@@ -120,23 +154,12 @@ retry:
+ 		goto retry;
+ 	}
+ 	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
+-	if (err)
+-		goto out_err;
+-
+-	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
+-		iput(dir);
+-	} else {
+-		add_dirty_dir_inode(dir);
+-		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
+-	}
+ 
+ 	goto out;
+ 
+ out_unmap_put:
+ 	f2fs_dentry_kunmap(dir, page);
+ 	f2fs_put_page(page, 0);
+-out_err:
+-	iput(dir);
+ out:
+ 	f2fs_msg(inode->i_sb, KERN_NOTICE,
+ 			"%s: ino = %x, name = %s, dir = %lx, err = %d",
+@@ -170,8 +193,8 @@ static void recover_inode(struct inode *inode, struct page *page)
+ 
+ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+ {
+-	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
+ 	struct curseg_info *curseg;
++	struct inode *inode;
+ 	struct page *page = NULL;
+ 	block_t blkaddr;
+ 	int err = 0;
+@@ -185,12 +208,12 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+ 	while (1) {
+ 		struct fsync_inode_entry *entry;
+ 
+-		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
++		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
+ 			return 0;
+ 
+ 		page = get_tmp_page(sbi, blkaddr);
+ 
+-		if (cp_ver != cpver_of_node(page))
++		if (!is_recoverable_dnode(page))
+ 			break;
+ 
+ 		if (!is_fsync_dnode(page))
+@@ -204,27 +227,27 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+ 					break;
+ 			}
+ 
+-			/* add this fsync inode to the list */
+-			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
+-			if (!entry) {
+-				err = -ENOMEM;
+-				break;
+-			}
+ 			/*
+ 			 * CP | dnode(F) | inode(DF)
+ 			 * For this case, we should not give up now.
+ 			 */
+-			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
+-			if (IS_ERR(entry->inode)) {
+-				err = PTR_ERR(entry->inode);
+-				kmem_cache_free(fsync_entry_slab, entry);
++			inode = f2fs_iget(sbi->sb, ino_of_node(page));
++			if (IS_ERR(inode)) {
++				err = PTR_ERR(inode);
+ 				if (err == -ENOENT) {
+ 					err = 0;
+ 					goto next;
+ 				}
+ 				break;
+ 			}
+-			list_add_tail(&entry->list, head);
++
++			/* add this fsync inode to the list */
++			entry = add_fsync_inode(head, inode);
++			if (!entry) {
++				err = -ENOMEM;
++				iput(inode);
++				break;
++			}
+ 		}
+ 		entry->blkaddr = blkaddr;
+ 
+@@ -248,11 +271,8 @@ static void destroy_fsync_dnodes(struct list_head *head)
+ {
+ 	struct fsync_inode_entry *entry, *tmp;
+ 
+-	list_for_each_entry_safe(entry, tmp, head, list) {
+-		iput(entry->inode);
+-		list_del(&entry->list);
+-		kmem_cache_free(fsync_entry_slab, entry);
+-	}
++	list_for_each_entry_safe(entry, tmp, head, list)
++		del_fsync_inode(entry);
+ }
+ 
+ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+@@ -423,7 +443,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ 		}
+ 
+ 		/* dest is valid block, try to recover from src to dest */
+-		if (is_valid_blkaddr(sbi, dest, META_POR)) {
++		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
+ 
+ 			if (src == NULL_ADDR) {
+ 				err = reserve_new_block(&dn);
+@@ -459,35 +479,34 @@ out:
+ 	return err;
+ }
+ 
+-static int recover_data(struct f2fs_sb_info *sbi,
+-				struct list_head *head, int type)
++static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
++						struct list_head *dir_list)
+ {
+-	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
+ 	struct curseg_info *curseg;
+ 	struct page *page = NULL;
+ 	int err = 0;
+ 	block_t blkaddr;
+ 
+ 	/* get node pages in the current segment */
+-	curseg = CURSEG_I(sbi, type);
++	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
+ 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+ 
+ 	while (1) {
+ 		struct fsync_inode_entry *entry;
+ 
+-		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
++		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
+ 			break;
+ 
+ 		ra_meta_pages_cond(sbi, blkaddr);
+ 
+ 		page = get_tmp_page(sbi, blkaddr);
+ 
+-		if (cp_ver != cpver_of_node(page)) {
++		if (!is_recoverable_dnode(page)) {
+ 			f2fs_put_page(page, 1);
+ 			break;
+ 		}
+ 
+-		entry = get_fsync_inode(head, ino_of_node(page));
++		entry = get_fsync_inode(inode_list, ino_of_node(page));
+ 		if (!entry)
+ 			goto next;
+ 		/*
+@@ -498,7 +517,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
+ 		if (entry->last_inode == blkaddr)
+ 			recover_inode(entry->inode, page);
+ 		if (entry->last_dentry == blkaddr) {
+-			err = recover_dentry(entry->inode, page);
++			err = recover_dentry(entry->inode, page, dir_list);
+ 			if (err) {
+ 				f2fs_put_page(page, 1);
+ 				break;
+@@ -510,11 +529,8 @@ static int recover_data(struct f2fs_sb_info *sbi,
+ 			break;
+ 		}
+ 
+-		if (entry->blkaddr == blkaddr) {
+-			iput(entry->inode);
+-			list_del(&entry->list);
+-			kmem_cache_free(fsync_entry_slab, entry);
+-		}
++		if (entry->blkaddr == blkaddr)
++			del_fsync_inode(entry);
+ next:
+ 		/* check next segment */
+ 		blkaddr = next_blkaddr_of_node(page);
+@@ -525,12 +541,14 @@ next:
+ 	return err;
+ }
+ 
+-int recover_fsync_data(struct f2fs_sb_info *sbi)
++int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ {
+ 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
+ 	struct list_head inode_list;
++	struct list_head dir_list;
+ 	block_t blkaddr;
+ 	int err;
++	int ret = 0;
+ 	bool need_writecp = false;
+ 
+ 	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
+@@ -539,6 +557,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
+ 		return -ENOMEM;
+ 
+ 	INIT_LIST_HEAD(&inode_list);
++	INIT_LIST_HEAD(&dir_list);
+ 
+ 	/* prevent checkpoint */
+ 	mutex_lock(&sbi->cp_mutex);
+@@ -547,21 +566,22 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
+ 
+ 	/* step #1: find fsynced inode numbers */
+ 	err = find_fsync_dnodes(sbi, &inode_list);
+-	if (err)
++	if (err || list_empty(&inode_list))
+ 		goto out;
+ 
+-	if (list_empty(&inode_list))
++	if (check_only) {
++		ret = 1;
+ 		goto out;
++	}
+ 
+ 	need_writecp = true;
+ 
+ 	/* step #2: recover data */
+-	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
++	err = recover_data(sbi, &inode_list, &dir_list);
+ 	if (!err)
+ 		f2fs_bug_on(sbi, !list_empty(&inode_list));
+ out:
+ 	destroy_fsync_dnodes(&inode_list);
+-	kmem_cache_destroy(fsync_entry_slab);
+ 
+ 	/* truncate meta pages to be used by the recovery */
+ 	truncate_inode_pages_range(META_MAPPING(sbi),
+@@ -573,31 +593,20 @@ out:
+ 	}
+ 
+ 	clear_sbi_flag(sbi, SBI_POR_DOING);
+-	if (err) {
+-		bool invalidate = false;
+-
+-		if (discard_next_dnode(sbi, blkaddr))
+-			invalidate = true;
+-
+-		/* Flush all the NAT/SIT pages */
+-		while (get_pages(sbi, F2FS_DIRTY_META))
+-			sync_meta_pages(sbi, META, LONG_MAX);
++	if (err)
++		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
++	mutex_unlock(&sbi->cp_mutex);
+ 
+-		/* invalidate temporary meta page */
+-		if (invalidate)
+-			invalidate_mapping_pages(META_MAPPING(sbi),
+-							blkaddr, blkaddr);
++	/* let's drop all the directory inodes for clean checkpoint */
++	destroy_fsync_dnodes(&dir_list);
+ 
+-		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+-		mutex_unlock(&sbi->cp_mutex);
+-	} else if (need_writecp) {
++	if (!err && need_writecp) {
+ 		struct cp_control cpc = {
+ 			.reason = CP_RECOVERY,
+ 		};
+-		mutex_unlock(&sbi->cp_mutex);
+ 		write_checkpoint(sbi, &cpc);
+-	} else {
+-		mutex_unlock(&sbi->cp_mutex);
+ 	}
+-	return err;
++
++	kmem_cache_destroy(fsync_entry_slab);
++	return ret ? ret : err;
+ }
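
recover_dentry() used to f2fs_iget()/iput() the parent directory for every fsynced file (hence the FI_DELAY_IPUT juggling removed above); the rework keeps each directory open in dir_list via add_fsync_inode() and drops them all in one destroy_fsync_dnodes() pass after the recovery checkpoint. A linked-list sketch of that open-once cache, with the inode reduced to an id:

#include <stdio.h>
#include <stdlib.h>

struct dir_entry {
	int ino;			/* stands in for the held inode */
	struct dir_entry *next;
};

static struct dir_entry *dir_list;
static int iget_calls;

static struct dir_entry *get_dir(int ino)
{
	struct dir_entry *e;

	for (e = dir_list; e; e = e->next)	/* cache hit: no iget */
		if (e->ino == ino)
			return e;

	iget_calls++;				/* miss: "open" it once */
	e = malloc(sizeof(*e));
	if (!e)
		return NULL;
	e->ino = ino;
	e->next = dir_list;
	dir_list = e;
	return e;
}

static void drop_dirs(void)	/* the final destroy_fsync_dnodes() pass */
{
	while (dir_list) {
		struct dir_entry *e = dir_list;

		dir_list = e->next;
		free(e);		/* iput() would go here */
	}
}

int main(void)
{
	int parents[] = { 3, 3, 7, 3 };	/* four dentries, two directories */

	for (int i = 0; i < 4; i++)
		get_dir(parents[i]);
	printf("iget calls: %d\n", iget_calls);	/* 2, not 4 */
	drop_dirs();
	return 0;
}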
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 39ec9da08bb5..6802cd754eda 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -519,28 +519,6 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
+ 	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
+ }
+ 
+-bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
+-{
+-	int err = -ENOTSUPP;
+-
+-	if (test_opt(sbi, DISCARD)) {
+-		struct seg_entry *se = get_seg_entry(sbi,
+-				GET_SEGNO(sbi, blkaddr));
+-		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+-
+-		if (f2fs_test_bit(offset, se->discard_map))
+-			return false;
+-
+-		err = f2fs_issue_discard(sbi, blkaddr, 1);
+-	}
+-
+-	if (err) {
+-		update_meta_page(sbi, NULL, blkaddr);
+-		return true;
+-	}
+-	return false;
+-}
+-
+ static void __add_discard_entry(struct f2fs_sb_info *sbi,
+ 		struct cp_control *cpc, struct seg_entry *se,
+ 		unsigned int start, unsigned int end)
+@@ -774,7 +752,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
+ 	struct seg_entry *se;
+ 	bool is_cp = false;
+ 
+-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
++	if (!is_valid_data_blkaddr(sbi, blkaddr))
+ 		return true;
+ 
+ 	mutex_lock(&sit_i->sentry_lock);
+@@ -1488,7 +1466,7 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
+ {
+ 	struct page *cpage;
+ 
+-	if (blkaddr == NEW_ADDR)
++	if (!is_valid_data_blkaddr(sbi, blkaddr))
+ 		return;
+ 
+ 	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
+@@ -2123,7 +2101,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
+ 	return restore_curseg_summaries(sbi);
+ }
+ 
+-static void build_sit_entries(struct f2fs_sb_info *sbi)
++static int build_sit_entries(struct f2fs_sb_info *sbi)
+ {
+ 	struct sit_info *sit_i = SIT_I(sbi);
+ 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
+@@ -2132,6 +2110,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
+ 	unsigned int i, start, end;
+ 	unsigned int readed, start_blk = 0;
+ 	int nrpages = MAX_BIO_BLOCKS(sbi);
++	int err = 0;
+ 
+ 	do {
+ 		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
+@@ -2145,36 +2124,62 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
+ 			struct f2fs_sit_entry sit;
+ 			struct page *page;
+ 
+-			mutex_lock(&curseg->curseg_mutex);
+-			for (i = 0; i < sits_in_cursum(sum); i++) {
+-				if (le32_to_cpu(segno_in_journal(sum, i))
+-								== start) {
+-					sit = sit_in_journal(sum, i);
+-					mutex_unlock(&curseg->curseg_mutex);
+-					goto got_it;
+-				}
+-			}
+-			mutex_unlock(&curseg->curseg_mutex);
+-
+ 			page = get_current_sit_page(sbi, start);
+ 			sit_blk = (struct f2fs_sit_block *)page_address(page);
+ 			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
+ 			f2fs_put_page(page, 1);
+-got_it:
+-			check_block_count(sbi, start, &sit);
++
++			err = check_block_count(sbi, start, &sit);
++			if (err)
++				return err;
+ 			seg_info_from_raw_sit(se, &sit);
+ 
+ 			/* build discard map only one time */
+ 			memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
+ 			sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
+ 
+-			if (sbi->segs_per_sec > 1) {
+-				struct sec_entry *e = get_sec_entry(sbi, start);
+-				e->valid_blocks += se->valid_blocks;
+-			}
++			if (sbi->segs_per_sec > 1)
++				get_sec_entry(sbi, start)->valid_blocks +=
++							se->valid_blocks;
+ 		}
+ 		start_blk += readed;
+ 	} while (start_blk < sit_blk_cnt);
++
++	mutex_lock(&curseg->curseg_mutex);
++	for (i = 0; i < sits_in_cursum(sum); i++) {
++		struct f2fs_sit_entry sit;
++		struct seg_entry *se;
++		unsigned int old_valid_blocks;
++
++		start = le32_to_cpu(segno_in_journal(sum, i));
++		if (start >= MAIN_SEGS(sbi)) {
++			f2fs_msg(sbi->sb, KERN_ERR,
++					"Wrong journal entry on segno %u",
++					start);
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
++			err = -EINVAL;
++			break;
++		}
++
++		se = &sit_i->sentries[start];
++		sit = sit_in_journal(sum, i);
++
++		old_valid_blocks = se->valid_blocks;
++
++		err = check_block_count(sbi, start, &sit);
++		if (err)
++			break;
++		seg_info_from_raw_sit(se, &sit);
++
++		memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
++		sbi->discard_blks += old_valid_blocks - se->valid_blocks;
++
++		if (sbi->segs_per_sec > 1)
++			get_sec_entry(sbi, start)->valid_blocks +=
++				se->valid_blocks - old_valid_blocks;
++	}
++	mutex_unlock(&curseg->curseg_mutex);
++	return err;
+ }
+ 
+ static void init_free_segmap(struct f2fs_sb_info *sbi)
+@@ -2336,7 +2341,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
+ 		return err;
+ 
+ 	/* reinit free segmap based on SIT */
+-	build_sit_entries(sbi);
++	err = build_sit_entries(sbi);
++	if (err)
++		return err;
+ 
+ 	init_free_segmap(sbi);
+ 	err = build_dirty_segmap(sbi);
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index bfa1d31f79aa..08b08ae6ba9d 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -17,6 +17,8 @@
+ 
+ #define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
+ 
++#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
++
+ /* L: Logical segment # in volume, R: Relative segment # in main area */
+ #define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
+ #define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)
+@@ -46,13 +48,19 @@
+ 	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
+ 	  sbi->segs_per_sec))	\
+ 
+-#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
+-#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)
++#define MAIN_BLKADDR(sbi)						\
++	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : 				\
++		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
++#define SEG0_BLKADDR(sbi)						\
++	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : 				\
++		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
+ 
+ #define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
+ #define MAIN_SECS(sbi)	(sbi->total_sections)
+ 
+-#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
++#define TOTAL_SEGS(sbi)							\
++	(SM_I(sbi) ? SM_I(sbi)->segment_count : 				\
++		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
+ #define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
+ 
+ #define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
+@@ -72,7 +80,7 @@
+ 	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
+ 
+ #define GET_SEGNO(sbi, blk_addr)					\
+-	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
++	((!is_valid_data_blkaddr(sbi, blk_addr)) ?			\
+ 	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
+ 		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
+ #define GET_SECNO(sbi, segno)					\
+@@ -574,16 +582,20 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
+ 	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
+ }
+ 
+-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
++static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
+ {
+-	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
+-					|| blk_addr >= MAX_BLKADDR(sbi));
++	struct f2fs_sb_info *sbi = fio->sbi;
++
++	if (__is_meta_io(fio))
++		verify_blkaddr(sbi, blk_addr, META_GENERIC);
++	else
++		verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
+ }
+ 
+ /*
+  * Summary block is always treated as an invalid block
+  */
+-static inline void check_block_count(struct f2fs_sb_info *sbi,
++static inline int check_block_count(struct f2fs_sb_info *sbi,
+ 		int segno, struct f2fs_sit_entry *raw_sit)
+ {
+ #ifdef CONFIG_F2FS_CHECK_FS
+@@ -605,11 +617,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
+ 		cur_pos = next_pos;
+ 		is_valid = !is_valid;
+ 	} while (cur_pos < sbi->blocks_per_seg);
+-	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
++
++	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
++		f2fs_msg(sbi->sb, KERN_ERR,
++				"Mismatch valid blocks %d vs. %d",
++					GET_SIT_VBLOCKS(raw_sit), valid_blocks);
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		return -EINVAL;
++	}
+ #endif
+ 	/* check segment usage, and check boundary of a given segment number */
+-	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+-					|| segno > TOTAL_SEGS(sbi) - 1);
++	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
++					|| segno > TOTAL_SEGS(sbi) - 1)) {
++		f2fs_msg(sbi->sb, KERN_ERR,
++				"Wrong valid blocks %d or segno %u",
++					GET_SIT_VBLOCKS(raw_sit), segno);
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		return -EINVAL;
++	}
++	return 0;
+ }
+ 
+ static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
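
check_block_count() now reports a bad SIT entry with -EINVAL plus SBI_NEED_FSCK instead of BUG()-ing the box, and the CONFIG_F2FS_CHECK_FS build cross-checks the recorded valid-block count against the validity bitmap. The cross-check, reduced to a plain bit count (the segment size is a placeholder):

#include <stdio.h>
#include <errno.h>

#define BLOCKS_PER_SEG 16

/* Count set bits in the validity bitmap and compare with the SIT count. */
static int check_block_count(const unsigned char *map, int sit_vblocks)
{
	int valid = 0;

	for (int i = 0; i < BLOCKS_PER_SEG; i++)
		valid += (map[i / 8] >> (i % 8)) & 1;

	if (valid != sit_vblocks || sit_vblocks > BLOCKS_PER_SEG) {
		fprintf(stderr, "Mismatch valid blocks %d vs. %d\n",
			sit_vblocks, valid);
		return -EINVAL;	/* was BUG_ON(); now the mount just fails */
	}
	return 0;
}

int main(void)
{
	unsigned char map[2] = { 0x0f, 0x00 };	/* blocks 0-3 valid */

	printf("%d %d\n", check_block_count(map, 4),
	       check_block_count(map, 9));
	return 0;
}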
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 2ffc53d0c9c7..dbd7adff8b5a 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -994,6 +994,8 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
+ static int sanity_check_raw_super(struct super_block *sb,
+ 			struct f2fs_super_block *raw_super)
+ {
++	block_t segment_count, segs_per_sec, secs_per_zone;
++	block_t total_sections, blocks_per_seg;
+ 	unsigned int blocksize;
+ 
+ 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+@@ -1047,6 +1049,68 @@ static int sanity_check_raw_super(struct super_block *sb,
+ 		return 1;
+ 	}
+ 
++	segment_count = le32_to_cpu(raw_super->segment_count);
++	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
++	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
++	total_sections = le32_to_cpu(raw_super->section_count);
++
++	/* blocks_per_seg should be 512, given the above check */
++	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
++
++	if (segment_count > F2FS_MAX_SEGMENT ||
++				segment_count < F2FS_MIN_SEGMENTS) {
++		f2fs_msg(sb, KERN_INFO,
++			"Invalid segment count (%u)",
++			segment_count);
++		return 1;
++	}
++
++	if (total_sections > segment_count ||
++			total_sections < F2FS_MIN_SEGMENTS ||
++			segs_per_sec > segment_count || !segs_per_sec) {
++		f2fs_msg(sb, KERN_INFO,
++			"Invalid segment/section count (%u, %u x %u)",
++			segment_count, total_sections, segs_per_sec);
++		return 1;
++	}
++
++	if ((segment_count / segs_per_sec) < total_sections) {
++		f2fs_msg(sb, KERN_INFO,
++			"Small segment_count (%u < %u * %u)",
++			segment_count, segs_per_sec, total_sections);
++		return 1;
++	}
++
++	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
++		f2fs_msg(sb, KERN_INFO,
++			"Wrong segment_count / block_count (%u > %llu)",
++			segment_count, le64_to_cpu(raw_super->block_count));
++		return 1;
++	}
++
++	if (secs_per_zone > total_sections || !secs_per_zone) {
++		f2fs_msg(sb, KERN_INFO,
++			"Wrong secs_per_zone / total_sections (%u, %u)",
++			secs_per_zone, total_sections);
++		return 1;
++	}
++	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION) {
++		f2fs_msg(sb, KERN_INFO,
++			"Corrupted extension count (%u > %u)",
++			le32_to_cpu(raw_super->extension_count),
++			F2FS_MAX_EXTENSION);
++		return 1;
++	}
++
++	if (le32_to_cpu(raw_super->cp_payload) >
++				(blocks_per_seg - F2FS_CP_PACKS)) {
++		f2fs_msg(sb, KERN_INFO,
++			"Insane cp_payload (%u > %u)",
++			le32_to_cpu(raw_super->cp_payload),
++			blocks_per_seg - F2FS_CP_PACKS);
++		return 1;
++	}
++
+ 	/* check reserved ino info */
+ 	if (le32_to_cpu(raw_super->node_ino) != 1 ||
+ 		le32_to_cpu(raw_super->meta_ino) != 2 ||
+@@ -1059,13 +1123,6 @@ static int sanity_check_raw_super(struct super_block *sb,
+ 		return 1;
+ 	}
+ 
+-	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
+-		f2fs_msg(sb, KERN_INFO,
+-			"Invalid segment count (%u)",
+-			le32_to_cpu(raw_super->segment_count));
+-		return 1;
+-	}
+-
+ 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
+ 	if (sanity_check_area_boundary(sb, raw_super))
+ 		return 1;
+@@ -1073,15 +1130,19 @@ static int sanity_check_raw_super(struct super_block *sb,
+ 	return 0;
+ }
+ 
+-static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
++int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ {
+ 	unsigned int total, fsmeta;
+ 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
++	unsigned int ovp_segments, reserved_segments;
+ 	unsigned int main_segs, blocks_per_seg;
+ 	unsigned int sit_segs, nat_segs;
+ 	unsigned int sit_bitmap_size, nat_bitmap_size;
+ 	unsigned int log_blocks_per_seg;
++	unsigned int segment_count_main;
++	unsigned int cp_pack_start_sum, cp_payload;
++	block_t user_block_count;
+ 	int i;
+ 
+ 	total = le32_to_cpu(raw_super->segment_count);
+@@ -1096,6 +1157,26 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 	if (unlikely(fsmeta >= total))
+ 		return 1;
+ 
++	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
++	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
++
++	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
++			ovp_segments == 0 || reserved_segments == 0)) {
++		f2fs_msg(sbi->sb, KERN_ERR,
++			"Wrong layout: check mkfs.f2fs version");
++		return 1;
++	}
++
++	user_block_count = le64_to_cpu(ckpt->user_block_count);
++	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
++	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
++	if (!user_block_count || user_block_count >=
++			segment_count_main << log_blocks_per_seg) {
++		f2fs_msg(sbi->sb, KERN_ERR,
++			"Wrong user_block_count: %u", user_block_count);
++		return 1;
++	}
++
+ 	main_segs = le32_to_cpu(raw_super->segment_count_main);
+ 	blocks_per_seg = sbi->blocks_per_seg;
+ 
+@@ -1112,7 +1193,6 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 
+ 	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+ 	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+-	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ 
+ 	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
+ 		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
+@@ -1122,6 +1202,17 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 		return 1;
+ 	}
+ 
++	cp_pack_start_sum = __start_sum_addr(sbi);
++	cp_payload = __cp_payload(sbi);
++	if (cp_pack_start_sum < cp_payload + 1 ||
++		cp_pack_start_sum > blocks_per_seg - 1 -
++			NR_CURSEG_TYPE) {
++		f2fs_msg(sbi->sb, KERN_ERR,
++			"Wrong cp_pack_start_sum: %u",
++			cp_pack_start_sum);
++		return 1;
++	}
++
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+ 		return 1;
+@@ -1358,13 +1449,6 @@ try_onemore:
+ 		goto free_meta_inode;
+ 	}
+ 
+-	/* sanity checking of checkpoint */
+-	err = -EINVAL;
+-	if (sanity_check_ckpt(sbi)) {
+-		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
+-		goto free_cp;
+-	}
+-
+ 	sbi->total_valid_node_count =
+ 				le32_to_cpu(sbi->ckpt->valid_node_count);
+ 	sbi->total_valid_inode_count =
+@@ -1464,14 +1548,27 @@ try_onemore:
+ 		if (need_fsck)
+ 			set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 
+-		err = recover_fsync_data(sbi);
+-		if (err) {
++		if (!retry)
++			goto skip_recovery;
++
++		err = recover_fsync_data(sbi, false);
++		if (err < 0) {
+ 			need_fsck = true;
+ 			f2fs_msg(sb, KERN_ERR,
+ 				"Cannot recover all fsync data errno=%ld", err);
+ 			goto free_kobj;
+ 		}
++	} else {
++		err = recover_fsync_data(sbi, true);
++
++		if (!f2fs_readonly(sb) && err > 0) {
++			err = -EINVAL;
++			f2fs_msg(sb, KERN_ERR,
++				"Need to recover fsync data");
++			goto free_kobj;
++		}
+ 	}
++skip_recovery:
+ 	/* recover_fsync_data() cleared this already */
+ 	clear_sbi_flag(sbi, SBI_POR_DOING);
+ 
+@@ -1517,7 +1614,6 @@ free_nm:
+ 	destroy_node_manager(sbi);
+ free_sm:
+ 	destroy_segment_manager(sbi);
+-free_cp:
+ 	kfree(sbi->ckpt);
+ free_meta_inode:
+ 	make_bad_inode(sbi->meta_inode);
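
The f2fs hunks above front-load geometry validation: every field read from the on-disk superblock is bounded against the others before anything trusts it. A minimal userspace sketch of that idea, assuming a simplified superblock (the struct, field names, and limits here are stand-ins, not the real f2fs definitions):

#include <stdint.h>
#include <stdio.h>

/* toy stand-ins for the on-disk fields checked by sanity_check_raw_super() */
struct toy_super {
	uint32_t segment_count;
	uint32_t segs_per_sec;
	uint32_t total_sections;
};

#define TOY_MIN_SEGMENTS 9		/* hypothetical limits */
#define TOY_MAX_SEGMENT  (16 * 1024 * 1024)

/* 0 if the geometry is internally consistent, -1 if the image is bogus */
static int toy_sanity_check(const struct toy_super *sb)
{
	if (sb->segment_count < TOY_MIN_SEGMENTS ||
	    sb->segment_count > TOY_MAX_SEGMENT)
		return -1;
	/* reject a zero divisor before it is ever used */
	if (!sb->segs_per_sec || sb->segs_per_sec > sb->segment_count)
		return -1;
	/* the sections claimed must be coverable by the segments present */
	if (sb->segment_count / sb->segs_per_sec < sb->total_sections)
		return -1;
	return 0;
}

int main(void)
{
	struct toy_super bad = { .segment_count = 8, .segs_per_sec = 1,
				 .total_sections = 8 };
	printf("bad image rejected: %d\n", toy_sanity_check(&bad));
	return 0;
}
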
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index 1544f530ccd0..023e7f32ee1b 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
+ 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ 
+ #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+-	cancel_delayed_work_sync(&c->wbuf_dwork);
++	if (jffs2_is_writebuffered(c))
++		cancel_delayed_work_sync(&c->wbuf_dwork);
+ #endif
+ 
+ 	mutex_lock(&c->alloc_sem);
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index 0a4457fb0711..85111d740c9d 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
+ 	if (num_used
+ 	    || alloc->id1.bitmap1.i_used
+ 	    || alloc->id1.bitmap1.i_total
+-	    || la->la_bm_off)
+-		mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
++	    || la->la_bm_off) {
++		mlog(ML_ERROR, "inconsistency detected: clean journal with"
++		     " unrecovered local alloc, please run fsck.ocfs2!\n"
+ 		     "found = %u, set = %u, taken = %u, off = %u\n",
+ 		     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
+ 		     le32_to_cpu(alloc->id1.bitmap1.i_total),
+ 		     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
+ 
++		status = -EINVAL;
++		goto bail;
++	}
++
+ 	osb->local_alloc_bh = alloc_bh;
+ 	osb->local_alloc_state = OCFS2_LA_ENABLED;
+ 
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index cb71cbae606d..60cbaa821164 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -333,7 +333,7 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+ #ifdef CONFIG_SECCOMP
+ 	seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
+ #endif
+-	seq_printf(m, "\nSpeculation_Store_Bypass:\t");
++	seq_printf(m, "Speculation_Store_Bypass:\t");
+ 	switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+ 	case -EINVAL:
+ 		seq_printf(m, "unknown");
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index bd21795ce657..679d75a864d0 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -445,6 +445,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+ 	sig ^= PERSISTENT_RAM_SIG;
+ 
+ 	if (prz->buffer->sig == sig) {
++		if (buffer_size(prz) == 0) {
++			pr_debug("found existing empty buffer\n");
++			return 0;
++		}
++
+ 		if (buffer_size(prz) > prz->buffer_size ||
+ 		    buffer_start(prz) > buffer_size(prz))
+ 			pr_info("found existing invalid buffer, size %zu, start %zu\n",
+diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
+index fb9636cc927c..5d8d12746e6e 100644
+--- a/fs/xfs/libxfs/xfs_attr.c
++++ b/fs/xfs/libxfs/xfs_attr.c
+@@ -528,7 +528,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
+ 		if (args->flags & ATTR_CREATE)
+ 			return retval;
+ 		retval = xfs_attr_shortform_remove(args);
+-		ASSERT(retval == 0);
++		if (retval)
++			return retval;
++		/*
++		 * Since we have removed the old attr, clear ATTR_REPLACE so
++		 * that the leaf format add routine won't trip over the attr
++		 * not being around.
++		 */
++		args->flags &= ~ATTR_REPLACE;
+ 	}
+ 
+ 	if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
+diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
+index a307c37c2e6c..072501a0ac86 100644
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -225,6 +225,14 @@ static inline void wb_get(struct bdi_writeback *wb)
+  */
+ static inline void wb_put(struct bdi_writeback *wb)
+ {
++	if (WARN_ON_ONCE(!wb->bdi)) {
++		/*
++		 * A driver bug might cause a file to be removed before bdi was
++		 * initialized.
++		 */
++		return;
++	}
++
+ 	if (wb != &wb->bdi->wb)
+ 		percpu_ref_put(&wb->refcnt);
+ }
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index 3d6e6ce44c5c..520fd854e7b3 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -99,6 +99,7 @@ struct f2fs_super_block {
+ /*
+  * For checkpoint
+  */
++#define CP_CRC_RECOVERY_FLAG	0x00000040
+ #define CP_FASTBOOT_FLAG	0x00000020
+ #define CP_FSCK_FLAG		0x00000010
+ #define CP_ERROR_FLAG		0x00000008
+@@ -497,4 +498,6 @@ enum {
+ 	F2FS_FT_MAX
+ };
+ 
++#define S_SHIFT 12
++
+ #endif  /* _LINUX_F2FS_FS_H */
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 3309dbda7ffa..0bc7fa21db85 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2151,6 +2151,7 @@ int write_cache_pages(struct address_space *mapping,
+ {
+ 	int ret = 0;
+ 	int done = 0;
++	int error;
+ 	struct pagevec pvec;
+ 	int nr_pages;
+ 	pgoff_t uninitialized_var(writeback_index);
+@@ -2247,25 +2248,31 @@ continue_unlock:
+ 				goto continue_unlock;
+ 
+ 			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
+-			ret = (*writepage)(page, wbc, data);
+-			if (unlikely(ret)) {
+-				if (ret == AOP_WRITEPAGE_ACTIVATE) {
++			error = (*writepage)(page, wbc, data);
++			if (unlikely(error)) {
++				/*
++				 * Handle errors according to the type of
++				 * writeback. There's no need to continue for
++				 * background writeback. Just push done_index
++				 * past this page so media errors won't choke
++				 * writeout for the entire file. For integrity
++				 * writeback, we must process the entire dirty
++				 * set regardless of errors because the fs may
++				 * still have state to clear for each page. In
++				 * that case we continue processing and return
++				 * the first error.
++				 */
++				if (error == AOP_WRITEPAGE_ACTIVATE) {
+ 					unlock_page(page);
+-					ret = 0;
+-				} else {
+-					/*
+-					 * done_index is set past this page,
+-					 * so media errors will not choke
+-					 * background writeout for the entire
+-					 * file. This has consequences for
+-					 * range_cyclic semantics (ie. it may
+-					 * not be suitable for data integrity
+-					 * writeout).
+-					 */
++					error = 0;
++				} else if (wbc->sync_mode != WB_SYNC_ALL) {
++					ret = error;
+ 					done_index = page->index + 1;
+ 					done = 1;
+ 					break;
+ 				}
++				if (!ret)
++					ret = error;
+ 			}
+ 
+ 			/*
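
The rewritten write_cache_pages() error handling above encodes one policy: background writeback bails at the first error, while integrity writeback (WB_SYNC_ALL) keeps going but reports the first error it saw. A standalone sketch of the same policy, with hypothetical names standing in for the page and writeback machinery:

#include <stdio.h>

enum sync_mode { SYNC_NONE, SYNC_ALL };

/* pretend page writer: pages 2 and 4 hit a media error */
static int write_one(int page)
{
	printf("  writing page %d\n", page);
	return (page == 2 || page == 4) ? -5 /* -EIO */ : 0;
}

static int write_pages(enum sync_mode mode, int npages)
{
	int ret = 0;

	for (int page = 0; page < npages; page++) {
		int error = write_one(page);

		if (!error)
			continue;
		if (mode != SYNC_ALL)
			return error;	/* background: stop at first error */
		if (!ret)
			ret = error;	/* integrity: remember first error... */
		/* ...but keep writing the remaining dirty pages */
	}
	return ret;
}

int main(void)
{
	printf("background writeback -> %d\n", write_pages(SYNC_NONE, 6));
	printf("integrity writeback  -> %d\n", write_pages(SYNC_ALL, 6));
	return 0;
}
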
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 55dcb2b20b59..6def85d75b1d 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -267,7 +267,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
+ 		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ 		int ret;
+ 
+-		if (neigh->hh.hh_len) {
++		if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
+ 			neigh_hh_bridge(&neigh->hh, skb);
+ 			skb->dev = nf_bridge->physindev;
+ 			ret = br_handle_frame_finish(net, sk, skb);
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 77c8af4047ef..81650affa3fa 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -418,13 +418,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ 	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+ 		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
+ 
+-	/* check for checksum updates when the CAN frame has been modified */
++	/* Has the CAN frame been modified? */
+ 	if (modidx) {
+-		if (gwj->mod.csumfunc.crc8)
++		/* get available space for the processed CAN frame type */
++		int max_len = nskb->len - offsetof(struct can_frame, data);
++
++		/* dlc may have changed, make sure it fits to the CAN frame */
++		if (cf->can_dlc > max_len)
++			goto out_delete;
++
++		/* check for checksum updates in classic CAN length only */
++		if (gwj->mod.csumfunc.crc8) {
++			if (cf->can_dlc > 8)
++				goto out_delete;
++
+ 			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
++		}
++
++		if (gwj->mod.csumfunc.xor) {
++			if (cf->can_dlc > 8)
++				goto out_delete;
+ 
+-		if (gwj->mod.csumfunc.xor)
+ 			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
++		}
+ 	}
+ 
+ 	/* clear the skb timestamp if not configured the other way */
+@@ -436,6 +452,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ 		gwj->dropped_frames++;
+ 	else
+ 		gwj->handled_frames++;
++
++	return;
++
++ out_delete:
++	/* delete frame due to misconfiguration */
++	gwj->deleted_frames++;
++	kfree_skb(nskb);
++	return;
+ }
+ 
+ static inline int cgw_register_filter(struct cgw_job *gwj)
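
The can/gw checks above verify that a modified frame's claimed length still fits the allocated frame (and classic CAN's 8 bytes) before any checksum helper walks data[0..can_dlc-1]. In miniature, with a hypothetical toy frame type rather than the real struct can_frame:

#include <stdint.h>
#include <stdio.h>

struct toy_frame {			/* hypothetical, not struct can_frame */
	uint8_t can_dlc;		/* payload length claimed by modifiers */
	uint8_t data[8];		/* classic CAN payload */
};

/* XOR "checksum" over the payload, but only when the claimed length
 * actually fits -- mirroring the out_delete checks in can_can_gw_rcv() */
static int xor_csum(const struct toy_frame *cf, uint8_t *out)
{
	if (cf->can_dlc > sizeof(cf->data))
		return -1;		/* misconfigured frame: drop it */

	uint8_t x = 0;
	for (int i = 0; i < cf->can_dlc; i++)
		x ^= cf->data[i];
	*out = x;
	return 0;
}

int main(void)
{
	struct toy_frame f = { .can_dlc = 9 };	/* too long for classic CAN */
	uint8_t csum;

	printf("oversized frame rejected: %d\n", xor_csum(&f, &csum));
	return 0;
}
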
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 9703924ed071..8a57bbaf7452 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2388,12 +2388,15 @@ EXPORT_SYMBOL(skb_queue_purge);
+  */
+ void skb_rbtree_purge(struct rb_root *root)
+ {
+-	struct sk_buff *skb, *next;
++	struct rb_node *p = rb_first(root);
+ 
+-	rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+-		kfree_skb(skb);
++	while (p) {
++		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+ 
+-	*root = RB_ROOT;
++		p = rb_next(p);
++		rb_erase(&skb->rbnode, root);
++		kfree_skb(skb);
++	}
+ }
+ 
+ /**
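
The skb_rbtree_purge() rewrite above swaps postorder traversal of a tree that is being torn down for the safer order: fetch the next node, erase the current one, then free it. The same discipline applies to any intrusive structure; a linked-list sketch of it (node type hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

/* Safe purge: read the link to the next node before the current one is
 * freed, mirroring the rb_next()-then-rb_erase() order used above. */
static void purge(struct node **head)
{
	struct node *p = *head;

	while (p) {
		struct node *next = p->next;	/* fetch the link first */
		free(p);			/* only then release the node */
		p = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->val = i;
		n->next = head;
		head = n;
	}
	purge(&head);
	printf("list emptied: %p\n", (void *)head);
	return 0;
}
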
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 9fb1c073d0c4..8aa4a5f89572 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -732,6 +732,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 	case SO_DONTROUTE:
+ 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
++		sk_dst_reset(sk);
+ 		break;
+ 	case SO_BROADCAST:
+ 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 3f8caf7d19b8..1ea36bf778e6 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -133,19 +133,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
+ 
+ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ {
++	__be16 _ports[2], *ports;
+ 	struct sockaddr_in sin;
+-	__be16 *ports;
+-	int end;
+-
+-	end = skb_transport_offset(skb) + 4;
+-	if (end > 0 && !pskb_may_pull(skb, end))
+-		return;
+ 
+ 	/* All current transport protocols have the port numbers in the
+ 	 * first four bytes of the transport header and this function is
+ 	 * written with this assumption in mind.
+ 	 */
+-	ports = (__be16 *)skb_transport_header(skb);
++	ports = skb_header_pointer(skb, skb_transport_offset(skb),
++				   sizeof(_ports), &_ports);
++	if (!ports)
++		return;
+ 
+ 	sin.sin_family = AF_INET;
+ 	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 637a0e41b0aa..d6f2dab28d14 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -292,6 +292,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 
+ 	/* Check if the address belongs to the host. */
+ 	if (addr_type == IPV6_ADDR_MAPPED) {
++		struct net_device *dev = NULL;
+ 		int chk_addr_ret;
+ 
+ 		/* Binding to v4-mapped address on a v6-only socket
+@@ -302,9 +303,20 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 			goto out;
+ 		}
+ 
++		rcu_read_lock();
++		if (sk->sk_bound_dev_if) {
++			dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
++			if (!dev) {
++				err = -ENODEV;
++				goto out_unlock;
++			}
++		}
++
+ 		/* Reproduce AF_INET checks to make the bindings consistent */
+ 		v4addr = addr->sin6_addr.s6_addr32[3];
+-		chk_addr_ret = inet_addr_type(net, v4addr);
++		chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
++		rcu_read_unlock();
++
+ 		if (!net->ipv4.sysctl_ip_nonlocal_bind &&
+ 		    !(inet->freebind || inet->transparent) &&
+ 		    v4addr != htonl(INADDR_ANY) &&
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 9f6e57ded338..27cdf543c539 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -290,6 +290,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
+ 	skb_reset_network_header(skb);
+ 	iph = ipv6_hdr(skb);
+ 	iph->daddr = fl6->daddr;
++	ip6_flow_hdr(iph, 0, 0);
+ 
+ 	serr = SKB_EXT_ERR(skb);
+ 	serr->ee.ee_errno = err;
+@@ -657,17 +658,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
+ 	}
+ 	if (np->rxopt.bits.rxorigdstaddr) {
+ 		struct sockaddr_in6 sin6;
+-		__be16 *ports;
+-		int end;
++		__be16 _ports[2], *ports;
+ 
+-		end = skb_transport_offset(skb) + 4;
+-		if (end <= 0 || pskb_may_pull(skb, end)) {
++		ports = skb_header_pointer(skb, skb_transport_offset(skb),
++					   sizeof(_ports), &_ports);
++		if (ports) {
+ 			/* All current transport protocols have the port numbers in the
+ 			 * first four bytes of the transport header and this function is
+ 			 * written with this assumption in mind.
+ 			 */
+-			ports = (__be16 *)skb_transport_header(skb);
+-
+ 			sin6.sin6_family = AF_INET6;
+ 			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
+ 			sin6.sin6_port = ports[1];
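
Both this IPv6 hunk and the earlier IPv4 one replace a raw cast of the transport header with skb_header_pointer(), which copies the port bytes out only when the packet really contains them. A userspace analogue under that assumption (function name hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy len bytes at offset into buf, or return NULL if the packet is too
 * short; the caller never dereferences past the end of the buffer. */
static void *header_pointer(const uint8_t *pkt, size_t pkt_len,
			    size_t offset, size_t len, void *buf)
{
	if (len > pkt_len || offset > pkt_len - len)
		return NULL;
	memcpy(buf, pkt + offset, len);
	return buf;
}

int main(void)
{
	uint8_t pkt[6] = { 0xde, 0xad, 0x00, 0x50, 0x1f, 0x90 };
	uint16_t ports[2];

	if (header_pointer(pkt, sizeof(pkt), 2, sizeof(ports), ports))
		printf("ports copied safely\n");
	if (!header_pointer(pkt, sizeof(pkt), 4, sizeof(ports), ports))
		printf("truncated packet rejected\n");
	return 0;
}
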
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 0f50977ed53b..753b2837318d 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2514,7 +2514,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
+ 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+ 		if (addr && dev && saddr->sll_halen < dev->addr_len)
+-			goto out;
++			goto out_put;
+ 	}
+ 
+ 	err = -ENXIO;
+@@ -2683,7 +2683,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
+ 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+ 		if (addr && dev && saddr->sll_halen < dev->addr_len)
+-			goto out;
++			goto out_unlock;
+ 	}
+ 
+ 	err = -ENXIO;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 7dffc97a953c..9fa0b0dc3868 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
+ 
+ 	switch (ev) {
+ 	case NETDEV_UP:
+-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
++		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ 		if (addr) {
+ 			addr->a.v6.sin6_family = AF_INET6;
+-			addr->a.v6.sin6_port = 0;
+-			addr->a.v6.sin6_flowinfo = 0;
+ 			addr->a.v6.sin6_addr = ifa->addr;
+ 			addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
+ 			addr->valid = 1;
+@@ -412,7 +410,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
+ 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ 		if (addr) {
+ 			addr->a.v6.sin6_family = AF_INET6;
+-			addr->a.v6.sin6_port = 0;
+ 			addr->a.v6.sin6_addr = ifp->addr;
+ 			addr->a.v6.sin6_scope_id = dev->ifindex;
+ 			addr->valid = 1;
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index dc030efa4447..9f2f3c48b7b6 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -151,7 +151,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
+ 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ 		if (addr) {
+ 			addr->a.v4.sin_family = AF_INET;
+-			addr->a.v4.sin_port = 0;
+ 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+ 			addr->valid = 1;
+ 			INIT_LIST_HEAD(&addr->list);
+@@ -775,10 +774,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
+ 
+ 	switch (ev) {
+ 	case NETDEV_UP:
+-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
++		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ 		if (addr) {
+ 			addr->a.v4.sin_family = AF_INET;
+-			addr->a.v4.sin_port = 0;
+ 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+ 			addr->valid = 1;
+ 			spin_lock_bh(&net->sctp.local_addr_lock);
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index cf5770d8f49a..c89626b2afff 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -772,6 +772,12 @@ void rpcb_getport_async(struct rpc_task *task)
+ 	case RPCBVERS_3:
+ 		map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
+ 		map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
++		if (!map->r_addr) {
++			status = -ENOMEM;
++			dprintk("RPC: %5u %s: no memory available\n",
++				task->tk_pid, __func__);
++			goto bailout_free_args;
++		}
+ 		map->r_owner = "";
+ 		break;
+ 	case RPCBVERS_2:
+@@ -794,6 +800,8 @@ void rpcb_getport_async(struct rpc_task *task)
+ 	rpc_put_task(child);
+ 	return;
+ 
++bailout_free_args:
++	kfree(map);
+ bailout_release_client:
+ 	rpc_release_client(rpcb_clnt);
+ bailout_nofree:
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index f86c6555a539..e9653c42cdd1 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
+ 	return limit;
+ }
+ 
++static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
++{
++	return TLV_GET_LEN(tlv) - TLV_SPACE(0);
++}
++
+ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ {
+ 	struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
+@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
+ 	return buf;
+ }
+ 
++static inline bool string_is_valid(char *s, int len)
++{
++	return memchr(s, '\0', len) ? true : false;
++}
++
+ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ 				   struct tipc_nl_compat_msg *msg,
+ 				   struct sk_buff *arg)
+@@ -364,6 +374,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ 	struct nlattr *prop;
+ 	struct nlattr *bearer;
+ 	struct tipc_bearer_config *b;
++	int len;
+ 
+ 	b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
+ 
+@@ -371,6 +382,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ 	if (!bearer)
+ 		return -EMSGSIZE;
+ 
++	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++	if (!string_is_valid(b->name, len))
++		return -EINVAL;
++
+ 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
+ 		return -EMSGSIZE;
+ 
+@@ -396,6 +411,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ 	char *name;
+ 	struct nlattr *bearer;
++	int len;
+ 
+ 	name = (char *)TLV_DATA(msg->req);
+ 
+@@ -403,6 +419,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ 	if (!bearer)
+ 		return -EMSGSIZE;
+ 
++	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++	if (!string_is_valid(name, len))
++		return -EINVAL;
++
+ 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
+ 		return -EMSGSIZE;
+ 
+@@ -462,6 +482,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ 	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
+ 	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
+ 	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
++	int len;
+ 
+ 	nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+ 
+@@ -472,6 +493,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ 			 NULL);
+ 
+ 	name = (char *)TLV_DATA(msg->req);
++
++	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++	if (!string_is_valid(name, len))
++		return -EINVAL;
++
+ 	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
+ 		return 0;
+ 
+@@ -605,6 +631,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ 	struct nlattr *prop;
+ 	struct nlattr *media;
+ 	struct tipc_link_config *lc;
++	int len;
+ 
+ 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ 
+@@ -612,6 +639,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ 	if (!media)
+ 		return -EMSGSIZE;
+ 
++	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
++	if (!string_is_valid(lc->name, len))
++		return -EINVAL;
++
+ 	if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
+ 		return -EMSGSIZE;
+ 
+@@ -632,6 +663,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ 	struct nlattr *prop;
+ 	struct nlattr *bearer;
+ 	struct tipc_link_config *lc;
++	int len;
+ 
+ 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ 
+@@ -639,6 +671,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ 	if (!bearer)
+ 		return -EMSGSIZE;
+ 
++	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
++	if (!string_is_valid(lc->name, len))
++		return -EINVAL;
++
+ 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
+ 		return -EMSGSIZE;
+ 
+@@ -687,9 +723,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+ 	struct tipc_link_config *lc;
+ 	struct tipc_bearer *bearer;
+ 	struct tipc_media *media;
++	int len;
+ 
+ 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ 
++	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++	if (!string_is_valid(lc->name, len))
++		return -EINVAL;
++
+ 	media = tipc_media_find(lc->name);
+ 	if (media) {
+ 		cmd->doit = &tipc_nl_media_set;
+@@ -711,6 +752,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ 	char *name;
+ 	struct nlattr *link;
++	int len;
+ 
+ 	name = (char *)TLV_DATA(msg->req);
+ 
+@@ -718,6 +760,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ 	if (!link)
+ 		return -EMSGSIZE;
+ 
++	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++	if (!string_is_valid(name, len))
++		return -EINVAL;
++
+ 	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
+ 		return -EMSGSIZE;
+ 
+@@ -739,6 +785,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
+ 	};
+ 
+ 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
++	if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
++		return -EINVAL;
+ 
+ 	depth = ntohl(ntq->depth);
+ 
+@@ -1117,7 +1165,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+-	if (len && !TLV_OK(msg.req, len)) {
++	if (!len || !TLV_OK(msg.req, len)) {
+ 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ 		err = -EOPNOTSUPP;
+ 		goto send;
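
Each tipc compat handler above now clamps the name length to the TLV payload and refuses strings with no terminating NUL before they reach strcmp()/nla_put_string(). The core check is a single memchr(), as in this sketch:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* True iff s holds a '\0' within its first len bytes, so string functions
 * cannot run off the end of the received buffer. */
static bool string_is_valid(const char *s, size_t len)
{
	return memchr(s, '\0', len) != NULL;
}

int main(void)
{
	char good[8] = "eth0";			/* zero-padded by C rules */
	char evil[4] = { 'e', 't', 'h', '0' };	/* no terminator */

	printf("good: %d\n", string_is_valid(good, sizeof(good)));
	printf("evil: %d\n", string_is_valid(evil, sizeof(evil)));
	return 0;
}
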
+diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
+index c410d257da06..0c7800112ff5 100644
+--- a/scripts/kconfig/zconf.l
++++ b/scripts/kconfig/zconf.l
+@@ -71,7 +71,7 @@ static void warn_ignored_character(char chr)
+ {
+ 	fprintf(stderr,
+ 	        "%s:%d:warning: ignoring unsupported character '%c'\n",
+-	        zconf_curname(), zconf_lineno(), chr);
++	        current_file->name, yylineno, chr);
+ }
+ %}
+ 
+@@ -191,6 +191,8 @@ n	[A-Za-z0-9_-]
+ 	}
+ 	<<EOF>>	{
+ 		BEGIN(INITIAL);
++		yylval.string = text;
++		return T_WORD_QUOTE;
+ 	}
+ }
+ 
+diff --git a/security/security.c b/security/security.c
+index 46f405ce6b0f..0dde287db5c5 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -861,6 +861,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+ 
+ void security_cred_free(struct cred *cred)
+ {
++	/*
++	 * There is a failure case in prepare_creds() that
++	 * may result in a call here with ->security being NULL.
++	 */
++	if (unlikely(cred->security == NULL))
++		return;
++
+ 	call_void_hook(cred_free, cred);
+ }
+ 
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index 992a31530825..965a55eacaba 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -726,7 +726,8 @@ static int sens_destroy(void *key, void *datum, void *p)
+ 	kfree(key);
+ 	if (datum) {
+ 		levdatum = datum;
+-		ebitmap_destroy(&levdatum->level->cat);
++		if (levdatum->level)
++			ebitmap_destroy(&levdatum->level->cat);
+ 		kfree(levdatum->level);
+ 	}
+ 	kfree(datum);
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index cb6ed10816d4..0a8808954bd8 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -288,7 +288,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
+ 			break;
+ 		case YAMA_SCOPE_RELATIONAL:
+ 			rcu_read_lock();
+-			if (!task_is_descendant(current, child) &&
++			if (!pid_alive(child))
++				rc = -EPERM;
++			if (!rc && !task_is_descendant(current, child) &&
+ 			    !ptracer_exception_found(current, child) &&
+ 			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ 				rc = -EPERM;
+diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
+index 091290d1f3ea..1898fa4228ad 100644
+--- a/sound/firewire/bebob/bebob.c
++++ b/sound/firewire/bebob/bebob.c
+@@ -382,7 +382,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
+ 	/* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
+ 	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
+ 	/* Apogee Electronics, Ensemble */
+-	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
++	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
+ 	/* ESI, Quatafire610 */
+ 	SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
+ 	/* AcousticReality, eARMasterOne */
+diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
+index c53f78767568..df21da796fa7 100644
+--- a/tools/perf/arch/x86/util/intel-pt.c
++++ b/tools/perf/arch/x86/util/intel-pt.c
+@@ -471,10 +471,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
+ 				    struct perf_evsel *evsel)
+ {
+ 	int err;
++	char c;
+ 
+ 	if (!evsel)
+ 		return 0;
+ 
++	/*
++	 * If supported, force pass-through config term (pt=1) even if user
++	 * sets pt=0, which avoids senseless kernel errors.
++	 */
++	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
++	    !(evsel->attr.config & 1)) {
++		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
++		evsel->attr.config |= 1;
++	}
++
+ 	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
+ 				       "cyc_thresh", "caps/psb_cyc",
+ 				       evsel->attr.config);
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index e81dfb2e239c..9351738df703 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1903,7 +1903,7 @@ restart:
+ 		if (!name_only && strlen(syms->alias))
+ 			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
+ 		else
+-			strncpy(name, syms->symbol, MAX_NAME_LEN);
++			strlcpy(name, syms->symbol, MAX_NAME_LEN);
+ 
+ 		evt_list[evt_i] = strdup(name);
+ 		if (evt_list[evt_i] == NULL)
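
The two perf hunks (here and in svghelper.c below) swap strncpy() for strlcpy() because strncpy() leaves the destination unterminated when the source fills it, while strlcpy() always NUL-terminates. strlcpy() is not available in every libc, so this sketch carries a minimal local version:

#include <stdio.h>
#include <string.h>

/* minimal strlcpy(): always NUL-terminates dst when size > 0 */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;	/* length the caller tried to create */
}

int main(void)
{
	char buf[4];

	my_strlcpy(buf, "cycles", sizeof(buf));
	printf("truncated but terminated: \"%s\"\n", buf);
	return 0;
}
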
+diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
+index eec6c1149f44..132878d4847a 100644
+--- a/tools/perf/util/svghelper.c
++++ b/tools/perf/util/svghelper.c
+@@ -333,7 +333,7 @@ static char *cpu_model(void)
+ 	if (file) {
+ 		while (fgets(buf, 255, file)) {
+ 			if (strstr(buf, "model name")) {
+-				strncpy(cpu_m, &buf[13], 255);
++				strlcpy(cpu_m, &buf[13], 255);
+ 				break;
+ 			}
+ 		}

