diff -ur a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
--- a/arch/arc/include/asm/cmpxchg.h	2017-03-23 14:35:24.000000000 +0100
+++ b/arch/arc/include/asm/cmpxchg.h	2017-03-14 02:09:14.000000000 +0100
@@ -25,10 +25,11 @@
 	"	scond   %3, [%1]	\n"
 	"	bnz     1b		\n"
 	"2:				\n"
-	: "=&r"(prev)
-	: "r"(ptr), "ir"(expected),
-	  "r"(new) /* can't be "ir". scond can't take limm for "b" */
-	: "cc");
+	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
+	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
+	  "ir"(expected),
+	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
+	: "cc", "memory"); /* so that gcc knows memory is being written here */
 
 	return prev;
 }
diff -ur a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
--- a/arch/arc/include/asm/ptrace.h	2017-03-23 14:35:20.000000000 +0100
+++ b/arch/arc/include/asm/ptrace.h	2017-03-14 02:09:11.000000000 +0100
@@ -83,7 +83,7 @@
 	long r13;
 };
 
-#define instruction_pointer(regs)	((regs)->ret)
+#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
 #define profile_pc(regs)		instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
diff -ur a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
--- a/arch/arc/kernel/signal.c	2017-03-23 14:35:16.000000000 +0100
+++ b/arch/arc/kernel/signal.c	2017-03-14 02:09:08.000000000 +0100
@@ -131,6 +131,15 @@
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
 
+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobbers the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
+
 	return regs->r0;
 
 badframe:
@@ -234,8 +243,11 @@
 
 	/*
 	 * handler returns using sigreturn stub provided already by userpsace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
+	if (!(ka->sa.sa_flags & SA_RESTORER))
+		return 1;
+
 	regs->blink = (unsigned long)ka->sa.sa_restorer;
 
 	/* User Stack for signal handler will be above the frame just carved */
@@ -302,12 +314,12 @@
 	      struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;
 
 	/* Set up the stack frame */
-	ret = setup_rt_frame(sig, ka, info, oldset, regs);
+	failed = setup_rt_frame(sig, ka, info, oldset, regs);
 
-	if (ret)
+	if (failed)
 		force_sigsegv(sig, current);
 	else
 		signal_delivered(sig, info, ka, regs, 0);
diff -ur a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
--- a/arch/arc/kernel/unwind.c	2017-03-23 14:35:18.000000000 +0100
+++ b/arch/arc/kernel/unwind.c	2017-03-14 02:09:09.000000000 +0100
@@ -984,42 +984,13 @@
 							    (const u8 *)(fde +
 									 1) +
 							    *fde, ptrType);
-				if (pc >= endLoc)
+				if (pc >= endLoc) {
 					fde = NULL;
-			} else
-				fde = NULL;
-		}
-		if (fde == NULL) {
-			for (fde = table->address, tableSize = table->size;
-			     cie = NULL, tableSize > sizeof(*fde)
-			     && tableSize - sizeof(*fde) >= *fde;
-			     tableSize -= sizeof(*fde) + *fde,
-			     fde += 1 + *fde / sizeof(*fde)) {
-				cie = cie_for_fde(fde, table);
-				if (cie == &bad_cie) {
 					cie = NULL;
-					break;
 				}
-				if (cie == NULL
-				    || cie == &not_fde
-				    || (ptrType = fde_pointer_type(cie)) < 0)
-					continue;
-				ptr = (const u8 *)(fde + 2);
-				startLoc = read_pointer(&ptr,
-							(const u8 *)(fde + 1) +
-							*fde, ptrType);
-				if (!startLoc)
-					continue;
-				if (!(ptrType & DW_EH_PE_indirect))
-					ptrType &=
-					    DW_EH_PE_FORM | DW_EH_PE_signed;
-				endLoc =
-				    startLoc + read_pointer(&ptr,
-							    (const u8 *)(fde +
-									 1) +
-							    *fde, ptrType);
-				if (pc >= startLoc && pc < endLoc)
-					break;
+			} else {
+				fde = NULL;
+				cie = NULL;
 			}
 		}
 	}
diff -ur a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts	2016-07-29 05:48:09.000000000 +0200
@@ -32,6 +32,10 @@
 			  0xf0000000 0 0xf0000000 0x8000000     /* Device Bus, NOR 128MiB   */>;
 
 		internal-regs {
+			rtc@10300 {
+				/* No crystal connected to the internal RTC */
+				status = "disabled";
+			};
 			serial@12000 {
 				clock-frequency = <250000000>;
 				status = "okay";
diff -ur a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
--- a/arch/arm/boot/dts/dove.dtsi	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/arm/boot/dts/dove.dtsi	2016-07-29 05:48:09.000000000 +0200
@@ -75,7 +75,7 @@
 
 		uart2: serial@12200 {
 			compatible = "ns16550a";
-			reg = <0x12000 0x100>;
+			reg = <0x12200 0x100>;
 			reg-shift = <2>;
 			interrupts = <9>;
 			clocks = <&core_clk 0>;
@@ -84,7 +84,7 @@
 
 		uart3: serial@12300 {
 			compatible = "ns16550a";
-			reg = <0x12100 0x100>;
+			reg = <0x12300 0x100>;
 			reg-shift = <2>;
 			interrupts = <10>;
 			clocks = <&core_clk 0>;
diff -ur a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
--- a/arch/arm/boot/dts/imx23-olinuxino.dts	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts	2016-07-29 05:48:09.000000000 +0200
@@ -89,6 +89,7 @@
 
 	ahb@80080000 {
 		usb0: usb@80080000 {
+			dr_mode = "host";
 			vbus-supply = <&reg_usb0_vbus>;
 			status = "okay";
 		};
diff -ur a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
--- a/arch/arm/boot/dts/imx25.dtsi	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/arm/boot/dts/imx25.dtsi	2016-07-29 05:48:09.000000000 +0200
@@ -393,6 +393,7 @@
 
 			pwm4: pwm@53fc8000 {
 				compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+				#pwm-cells = <2>;
 				reg = <0x53fc8000 0x4000>;
 				clocks = <&clks 108>, <&clks 52>;
 				clock-names = "ipg", "per";
diff -ur a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
--- a/arch/arm/boot/dts/imx27.dtsi	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/arm/boot/dts/imx27.dtsi	2016-07-29 05:48:09.000000000 +0200
@@ -290,7 +290,7 @@
 
 			fec: ethernet@1002b000 {
 				compatible = "fsl,imx27-fec";
-				reg = <0x1002b000 0x4000>;
+				reg = <0x1002b000 0x1000>;
 				interrupts = <50>;
 				clocks = <&clks 48>, <&clks 67>, <&clks 0>;
 				clock-names = "ipg", "ahb", "ptp";
diff -ur a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
--- a/arch/arm/boot/dts/imx28.dtsi	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/arm/boot/dts/imx28.dtsi	2016-07-29 05:48:09.000000000 +0200
@@ -691,7 +691,7 @@
 					      80 81 68 69
 					      70 71 72 73
 					      74 75 76 77>;
-				interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+				interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
 						  "saif0", "saif1", "i2c0", "i2c1",
 						  "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
 						  "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
diff -ur a/arch/arm/common/icst.c b/arch/arm/common/icst.c
--- a/arch/arm/common/icst.c	2017-03-23 14:23:09.000000000 +0100
+++ b/arch/arm/common/icst.c	2017-03-14 02:00:25.000000000 +0100
@@ -16,7 +16,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
-
+#include <asm/div64.h>
 #include <asm/hardware/icst.h>
 
 /*
@@ -29,7 +29,11 @@
 
 unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
 {
-	return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+	u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+	u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+	do_div(dividend, divisor);
+	return (unsigned long)dividend;
 }
 
 EXPORT_SYMBOL(icst_hz);
@@ -58,6 +62,7 @@
 
 		if (f > p->vco_min && f <= p->vco_max)
 			break;
+		i++;
 	} while (i < 8);
 
 	if (i >= 8)
diff -ur a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
--- a/arch/arm/kernel/entry-armv.S	2017-03-23 14:21:48.000000000 +0100
+++ b/arch/arm/kernel/entry-armv.S	2017-03-14 01:59:11.000000000 +0100
@@ -358,7 +358,8 @@
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff -ur a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
--- a/arch/arm/kernel/entry-common.S	2017-03-23 14:21:47.000000000 +0100
+++ b/arch/arm/kernel/entry-common.S	2017-03-14 01:59:10.000000000 +0100
@@ -31,7 +31,9 @@
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)
 	disable_irq				@ disable interrupts
-	ldr	r1, [tsk, #TI_FLAGS]
+	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
+	tst	r1, #_TIF_SYSCALL_WORK
+	bne	__sys_trace_return
 	tst	r1, #_TIF_WORK_MASK
 	bne	fast_work_pending
 	asm_trace_hardirqs_on
diff -ur a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
--- a/arch/arm/kernel/fiq.c	2017-03-23 14:21:51.000000000 +0100
+++ b/arch/arm/kernel/fiq.c	2017-03-14 01:59:15.000000000 +0100
@@ -84,17 +84,14 @@
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;
 
 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range((unsigned long)base + offset, offset +
+				   length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
diff -ur a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
--- a/arch/arm/kvm/interrupts_head.S	2017-03-23 14:20:39.000000000 +0100
+++ b/arch/arm/kvm/interrupts_head.S	2017-03-14 01:57:52.000000000 +0100
@@ -570,8 +570,13 @@
 .endm
 
 /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
- * (hardware reset value is 0). Keep previous value in r2. */
-.macro set_hcptr operation, mask
+ * (hardware reset value is 0). Keep previous value in r2.
+ * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
+ * VFP wasn't already enabled (always executed on vmtrap).
+ * If a label is specified with vmexit, it is branched to if VFP wasn't
+ * enabled.
+ */
+.macro set_hcptr operation, mask, label = none
 	mrc	p15, 4, r2, c1, c1, 2
 	ldr	r3, =\mask
 	.if \operation == vmentry
@@ -580,6 +585,17 @@
 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
 	.endif
 	mcr	p15, 4, r3, c1, c1, 2
+	.if \operation != vmentry
+	.if \operation == vmexit
+	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+	beq	1f
+	.endif
+	isb
+	.if \label != none
+	b	\label
+	.endif
+1:
+	.endif
 .endm
 
 /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
diff -ur a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
--- a/arch/arm/kvm/interrupts.S	2017-03-23 14:20:38.000000000 +0100
+++ b/arch/arm/kvm/interrupts.S	2017-03-14 01:57:52.000000000 +0100
@@ -158,13 +158,9 @@
 	@ Don't trap coprocessor accesses for host kernel
 	set_hstr vmexit
 	set_hdcr vmexit
-	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
 
 #ifdef CONFIG_VFPv3
-	@ Save floating point registers we if let guest use them.
-	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
-	bne	after_vfp_restore
-
 	@ Switch VFP/NEON hardware state to the host's
 	add	r7, vcpu, #VCPU_VFP_GUEST
 	store_vfp_state r7
@@ -176,6 +172,8 @@
 	@ Restore FPEXC_EN which we clobbered on entry
 	pop	{r2}
 	VFPFMXR FPEXC, r2
+#else
+after_vfp_restore:
 #endif
 
 	@ Reset Hyp-role
@@ -457,7 +455,7 @@
 	push	{r3-r7}
 
 	@ NEON/VFP used.  Turn on VFP access.
-	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
 
 	@ Switch VFP/NEON hardware state to the guest's
 	add	r7, r0, #VCPU_VFP_HOST
diff -ur a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
--- a/arch/arm/mach-dove/common.c	2017-03-23 14:20:25.000000000 +0100
+++ b/arch/arm/mach-dove/common.c	2017-03-14 01:57:40.000000000 +0100
@@ -226,7 +226,7 @@
 	orion_time_set_base(TIMER_VIRT_BASE);
 	mvebu_mbus_init("marvell,dove-mbus",
 			BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
-			DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ);
+			DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ, 0);
 }
 
 static int __init dove_find_tclk(void)
diff -ur a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
--- a/arch/arm/mach-imx/clk-imx6q.c	2017-03-23 14:21:01.000000000 +0100
+++ b/arch/arm/mach-imx/clk-imx6q.c	2017-03-14 01:58:14.000000000 +0100
@@ -515,7 +515,7 @@
 	clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
 	clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
 	clk[rom]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
-	clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
+	clk[sata]         = imx_clk_gate2("sata",          "ahb",               base + 0x7c, 4);
 	clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
 	clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
 	clk[ssi1_ipg]     = imx_clk_gate2("ssi1_ipg",      "ipg",               base + 0x7c, 18);
diff -ur a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
--- a/arch/arm/mach-kirkwood/common.c	2017-03-23 14:22:44.000000000 +0100
+++ b/arch/arm/mach-kirkwood/common.c	2017-03-14 02:00:09.000000000 +0100
@@ -517,7 +517,7 @@
 
 	mvebu_mbus_init("marvell,kirkwood-mbus",
 			BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
-			DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
+			DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ, 0);
 }
 
 int kirkwood_tclk;
diff -ur a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
--- a/arch/arm/mach-mv78xx0/common.c	2017-03-23 14:21:10.000000000 +0100
+++ b/arch/arm/mach-mv78xx0/common.c	2017-03-14 01:58:24.000000000 +0100
@@ -326,11 +326,11 @@
 	if (mv78xx0_core_index() == 0)
 		mvebu_mbus_init("marvell,mv78xx0-mbus",
 				BRIDGE_WINS_CPU0_BASE, BRIDGE_WINS_SZ,
-				DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ);
+				DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ, 0);
 	else
 		mvebu_mbus_init("marvell,mv78xx0-mbus",
 				BRIDGE_WINS_CPU1_BASE, BRIDGE_WINS_SZ,
-				DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ);
+				DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ, 0);
 }
 
 void __init_refok mv78xx0_timer_init(void)
diff -ur a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
--- a/arch/arm/mach-mvebu/armada-370-xp.c	2017-03-23 14:22:53.000000000 +0100
+++ b/arch/arm/mach-mvebu/armada-370-xp.c	2017-03-14 02:00:16.000000000 +0100
@@ -66,7 +66,8 @@
 			ARMADA_370_XP_MBUS_WINS_BASE,
 			ARMADA_370_XP_MBUS_WINS_SIZE,
 			ARMADA_370_XP_SDRAM_WINS_BASE,
-			ARMADA_370_XP_SDRAM_WINS_SIZE);
+			ARMADA_370_XP_SDRAM_WINS_SIZE,
+			coherency_available());
 
 #ifdef CONFIG_CACHE_L2X0
 	l2x0_of_init(0, ~0UL);
diff -ur a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
--- a/arch/arm/mach-mvebu/coherency.c	2017-03-23 14:22:53.000000000 +0100
+++ b/arch/arm/mach-mvebu/coherency.c	2017-03-14 02:00:16.000000000 +0100
@@ -136,6 +136,20 @@
 	.notifier_call = mvebu_hwcc_platform_notifier,
 };
 
+/*
+ * Keep track of whether we have IO hardware coherency enabled or not.
+ * On Armada 370, for example, we will not be using it. We need to make
+ * that available [through coherency_available()] so the mbus controller
+ * doesn't enable the IO coherency bit in the attribute bits of the
+ * chip selects.
+ */
+static int coherency_enabled;
+
+int coherency_available(void)
+{
+	return coherency_enabled;
+}
+
 int __init coherency_init(void)
 {
 	struct device_node *np;
@@ -169,6 +183,7 @@
 		coherency_base = of_iomap(np, 0);
 		coherency_cpu_base = of_iomap(np, 1);
 		set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
+		coherency_enabled = 1;
 		bus_register_notifier(&platform_bus_type,
 					&mvebu_hwcc_platform_nb);
 	}
diff -ur a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
--- a/arch/arm/mach-mvebu/coherency.h	2017-03-23 14:22:53.000000000 +0100
+++ b/arch/arm/mach-mvebu/coherency.h	2017-03-14 02:00:16.000000000 +0100
@@ -19,6 +19,7 @@
 #endif
 
 int set_cpu_coherent(int cpu_id, int smp_group_id);
+int coherency_available(void);
 int coherency_init(void);
 
 #endif	/* __MACH_370_XP_COHERENCY_H */
diff -ur a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
--- a/arch/arm/mach-omap2/cpuidle34xx.c	2017-03-23 14:21:25.000000000 +0100
+++ b/arch/arm/mach-omap2/cpuidle34xx.c	2017-03-14 01:58:44.000000000 +0100
@@ -34,6 +34,7 @@
 #include "pm.h"
 #include "control.h"
 #include "common.h"
+#include "soc.h"
 
 /* Mach specific information to be recorded in the C-state driver_data */
 struct omap3_idle_statedata {
@@ -322,6 +323,69 @@
 	.safe_state_index = 0,
 };
 
+/*
+ * Numbers based on measurements made in October 2009 for PM optimized kernel
+ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
+ * and worst case latencies).
+ */
+static struct cpuidle_driver omap3430_idle_driver = {
+	.name             = "omap3430_idle",
+	.owner            = THIS_MODULE,
+	.states = {
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 110 + 162,
+			.target_residency = 5,
+			.name		  = "C1",
+			.desc		  = "MPU ON + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 106 + 180,
+			.target_residency = 309,
+			.name		  = "C2",
+			.desc		  = "MPU ON + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 107 + 410,
+			.target_residency = 46057,
+			.name		  = "C3",
+			.desc		  = "MPU RET + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 121 + 3374,
+			.target_residency = 46057,
+			.name		  = "C4",
+			.desc		  = "MPU OFF + CORE ON",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 855 + 1146,
+			.target_residency = 46057,
+			.name		  = "C5",
+			.desc		  = "MPU RET + CORE RET",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 7580 + 4134,
+			.target_residency = 484329,
+			.name		  = "C6",
+			.desc		  = "MPU OFF + CORE RET",
+		},
+		{
+			.enter		  = omap3_enter_idle_bm,
+			.exit_latency	  = 7505 + 15274,
+			.target_residency = 484329,
+			.name		  = "C7",
+			.desc		  = "MPU OFF + CORE OFF",
+		},
+	},
+	.state_count = ARRAY_SIZE(omap3_idle_data),
+	.safe_state_index = 0,
+};
+
 /* Public functions */
 
 /**
@@ -340,5 +404,8 @@
 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
 		return -ENODEV;
 
-	return cpuidle_register(&omap3_idle_driver, NULL);
+	if (cpu_is_omap3430())
+		return cpuidle_register(&omap3430_idle_driver, NULL);
+	else
+		return cpuidle_register(&omap3_idle_driver, NULL);
 }
diff -ur a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
--- a/arch/arm/mach-omap2/sleep34xx.S	2017-03-23 14:21:19.000000000 +0100
+++ b/arch/arm/mach-omap2/sleep34xx.S	2017-03-14 01:58:37.000000000 +0100
@@ -202,23 +202,8 @@
 	 */
 	ldr	r1, kernel_flush
 	blx	r1
-	/*
-	 * The kernel doesn't interwork: v7_flush_dcache_all in particluar will
-	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
-	 * This sequence switches back to ARM.  Note that .align may insert a
-	 * nop: bx pc needs to be word-aligned in order to work.
-	 */
- THUMB(	.thumb		)
- THUMB(	.align		)
- THUMB(	bx	pc	)
- THUMB(	nop		)
-	.arm
-
 	b	omap3_do_wfi
-
-/*
- * Local variables
- */
+ENDPROC(omap34xx_cpu_suspend)
 omap3_do_wfi_sram_addr:
 	.word omap3_do_wfi_sram
 kernel_flush:
@@ -363,10 +348,7 @@
  * ===================================
  */
 	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
-
-/*
- * Local variables
- */
+ENDPROC(omap3_do_wfi)
 sdrc_power:
 	.word	SDRC_POWER_V
 cm_idlest1_core:
diff -ur a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
--- a/arch/arm/mach-orion5x/common.c	2017-03-23 14:21:15.000000000 +0100
+++ b/arch/arm/mach-orion5x/common.c	2017-03-14 01:58:32.000000000 +0100
@@ -204,7 +204,7 @@
 		mbus_soc_name = NULL;
 	mvebu_mbus_init(mbus_soc_name, ORION5X_BRIDGE_WINS_BASE,
 			ORION5X_BRIDGE_WINS_SZ,
-			ORION5X_DDR_WINS_BASE, ORION5X_DDR_WINS_SZ);
+			ORION5X_DDR_WINS_BASE, ORION5X_DDR_WINS_SZ, 0);
 }
 
 void orion5x_setup_wins(void)
diff -ur a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
--- a/arch/arm/mach-realview/include/mach/memory.h	2017-03-23 14:20:42.000000000 +0100
+++ b/arch/arm/mach-realview/include/mach/memory.h	2017-03-14 01:57:56.000000000 +0100
@@ -56,6 +56,8 @@
 #define PAGE_OFFSET1	(PAGE_OFFSET + 0x10000000)
 #define PAGE_OFFSET2	(PAGE_OFFSET + 0x30000000)
 
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+
 #define __phys_to_virt(phys)						\
 	((phys) >= 0x80000000 ?	(phys) - 0x80000000 + PAGE_OFFSET2 :	\
 	 (phys) >= 0x20000000 ?	(phys) - 0x20000000 + PAGE_OFFSET1 :	\
diff -ur a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
--- a/arch/arm/mach-socfpga/headsmp.S	2017-03-23 14:19:41.000000000 +0100
+++ b/arch/arm/mach-socfpga/headsmp.S	2017-03-14 01:56:57.000000000 +0100
@@ -12,6 +12,7 @@
 
 	__CPUINIT
 	.arch	armv7-a
+	.arm
 
 ENTRY(secondary_trampoline)
 	movw	r2, #:lower16:cpu1start_addr
diff -ur a/arch/arm/Makefile b/arch/arm/Makefile
--- a/arch/arm/Makefile	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/arm/Makefile	2016-07-29 05:48:09.000000000 +0200
@@ -55,6 +55,14 @@
 
 comma = ,
 
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS	+= $(call cc-option,-fno-ipa-sra)
+
 # This selects which instruction set is used.
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
diff -ur a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
--- a/arch/arm/mm/dma-mapping.c	2017-03-23 14:23:24.000000000 +0100
+++ b/arch/arm/mm/dma-mapping.c	2017-03-14 02:00:34.000000000 +0100
@@ -1353,12 +1353,19 @@
 	unsigned long uaddr = vma->vm_start;
 	unsigned long usize = vma->vm_end - vma->vm_start;
 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
 	if (!pages)
 		return -ENXIO;
 
+	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+		return -ENXIO;
+
+	pages += off;
+
 	do {
 		int ret = vm_insert_page(vma, uaddr, *pages++);
 		if (ret) {
diff -ur a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
--- a/arch/arm/plat-orion/common.c	2017-03-23 14:22:43.000000000 +0100
+++ b/arch/arm/plat-orion/common.c	2017-03-14 02:00:08.000000000 +0100
@@ -498,7 +498,7 @@
 
 	d->netdev = &orion_ge00.dev;
 	for (i = 0; i < d->nr_chips; i++)
-		d->chip[i].mii_bus = &orion_ge00_shared.dev;
+		d->chip[i].mii_bus = &orion_ge_mvmdio.dev;
 	orion_switch_device.dev.platform_data = d;
 
 	platform_device_register(&orion_switch_device);
diff -ur a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
--- a/arch/arm64/include/asm/ptrace.h	2017-03-23 14:24:50.000000000 +0100
+++ b/arch/arm64/include/asm/ptrace.h	2017-03-14 02:01:30.000000000 +0100
@@ -70,14 +70,14 @@
 #define compat_sp	regs[13]
 #define compat_lr	regs[14]
 #define compat_sp_hyp	regs[15]
-#define compat_sp_irq	regs[16]
-#define compat_lr_irq	regs[17]
-#define compat_sp_svc	regs[18]
-#define compat_lr_svc	regs[19]
-#define compat_sp_abt	regs[20]
-#define compat_lr_abt	regs[21]
-#define compat_sp_und	regs[22]
-#define compat_lr_und	regs[23]
+#define compat_lr_irq	regs[16]
+#define compat_sp_irq	regs[17]
+#define compat_lr_svc	regs[18]
+#define compat_sp_svc	regs[19]
+#define compat_lr_abt	regs[20]
+#define compat_sp_abt	regs[21]
+#define compat_lr_und	regs[22]
+#define compat_sp_und	regs[23]
 #define compat_r8_fiq	regs[24]
 #define compat_r9_fiq	regs[25]
 #define compat_r10_fiq	regs[26]
diff -ur a/arch/arm64/Kconfig b/arch/arm64/Kconfig
--- a/arch/arm64/Kconfig	2016-10-20 04:32:09.000000000 +0200
+++ b/arch/arm64/Kconfig	2016-07-29 05:48:09.000000000 +0200
@@ -56,6 +56,10 @@
 config STACKTRACE_SUPPORT
 	def_bool y
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
diff -ur a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
--- a/arch/arm64/kernel/head.S	2017-03-23 14:24:44.000000000 +0100
+++ b/arch/arm64/kernel/head.S	2017-03-14 02:01:27.000000000 +0100
@@ -184,6 +184,11 @@
 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
 #endif
 
+	/* EL2 debug */
+	mrs	x0, pmcr_el0			// Disable debug access traps
+	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
+	msr	mdcr_el2, x0			// all PMU counters from EL1
+
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
 
diff -ur a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
--- a/arch/arm64/kernel/ptrace.c	2017-03-23 14:24:46.000000000 +0100
+++ b/arch/arm64/kernel/ptrace.c	2017-03-14 02:01:28.000000000 +0100
@@ -51,6 +51,12 @@
  */
 void ptrace_disable(struct task_struct *child)
 {
+	/*
+	 * This would be better off in core code, but PTRACE_DETACH has
+	 * grown its fair share of arch-specific warts and changing it
+	 * is likely to cause regressions on obscure architectures.
+	 */
+	user_disable_single_step(child);
 }
 
 /*
diff -ur a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
--- a/arch/arm64/kernel/signal32.c	2017-03-23 14:24:47.000000000 +0100
+++ b/arch/arm64/kernel/signal32.c	2017-03-14 02:01:28.000000000 +0100
@@ -193,7 +193,8 @@
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitely for the right codes here.
 		 */
-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+		if (from->si_signo == SIGBUS &&
+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 		break;
@@ -220,8 +221,6 @@
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE))
@@ -232,14 +231,32 @@
 
 /*
  * VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
  */
+union __fpsimd_vreg {
+	__uint128_t	raw;
+	struct {
+#ifdef __AARCH64EB__
+		u64	hi;
+		u64	lo;
+#else
+		u64	lo;
+		u64	hi;
+#endif
+	};
+};
+
 static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
 {
 	struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
 	compat_ulong_t magic = VFP_MAGIC;
 	compat_ulong_t size = VFP_STORAGE_SIZE;
 	compat_ulong_t fpscr, fpexc;
-	int err = 0;
+	int i, err = 0;
 
 	/*
 	 * Save the hardware registers to the fpsimd_state structure.
@@ -255,10 +272,15 @@
 	/*
 	 * Now copy the FP registers. Since the registers are packed,
 	 * we can copy the prefix we want (V0-V15) as it is.
-	 * FIXME: Won't work if big endian.
 	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
-			      sizeof(frame->ufp.fpregs));
+	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+		union __fpsimd_vreg vreg = {
+			.raw = fpsimd->vregs[i >> 1],
+		};
+
+		__put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+		__put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+	}
 
 	/* Create an AArch32 fpscr from the fpsr and the fpcr. */
 	fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -283,7 +305,7 @@
 	compat_ulong_t magic = VFP_MAGIC;
 	compat_ulong_t size = VFP_STORAGE_SIZE;
 	compat_ulong_t fpscr;
-	int err = 0;
+	int i, err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
 	__get_user_error(size, &frame->size, err);
@@ -293,12 +315,14 @@
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	/*
-	 * Copy the FP registers into the start of the fpsimd_state.
-	 * FIXME: Won't work if big endian.
-	 */
-	err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
-				sizeof(frame->ufp.fpregs));
+	/* Copy the FP registers into the start of the fpsimd_state. */
+	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+		union __fpsimd_vreg vreg;
+
+		__get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+		__get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+		fpsimd.vregs[i >> 1] = vreg.raw;
+	}
 
 	/* Extract the fpsr and the fpcr from the fpscr */
 	__get_user_error(fpscr, &frame->ufp.fpscr, err);
diff -ur a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
--- a/arch/arm64/kernel/stacktrace.c	2017-03-23 14:24:45.000000000 +0100
+++ b/arch/arm64/kernel/stacktrace.c	2017-03-14 02:01:28.000000000 +0100
@@ -48,11 +48,7 @@
 
 	frame->sp = fp + 0x10;
 	frame->fp = *(unsigned long *)(fp);
-	/*
-	 * -4 here because we care about the PC at time of bl,
-	 * not where the return will go.
-	 */
-	frame->pc = *(unsigned long *)(fp + 8) - 4;
+	frame->pc = *(unsigned long *)(fp + 8);
 
 	return 0;
 }
diff -ur a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
--- a/arch/arm64/kernel/vdso/Makefile	2016-10-20 04:32:09.000000000 +0200
+++ b/arch/arm64/kernel/vdso/Makefile	2016-07-29 05:48:09.000000000 +0200
@@ -15,6 +15,10 @@
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 
+# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
+# down to collect2, resulting in silent corruption of the vDSO image.
+ccflags-y += -Wl,-shared
+
 obj-y += vdso.o
 extra-y += vdso.lds vdso-offsets.h
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff -ur a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
--- a/arch/arm64/mm/context.c	2017-03-23 14:25:00.000000000 +0100
+++ b/arch/arm64/mm/context.c	2017-03-14 02:01:35.000000000 +0100
@@ -92,6 +92,14 @@
 	unsigned int cpu = smp_processor_id();
 	struct mm_struct *mm = current->active_mm;
 
+	/*
+	 * current->active_mm could be init_mm for the idle thread immediately
+	 * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
+	 * the reserved value, so no need to reset any context.
+	 */
+	if (mm == &init_mm)
+		return;
+
 	smp_rmb();
 	asid = cpu_last_asid + cpu;
 
diff -ur a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
--- a/arch/arm64/mm/fault.c	2017-03-23 14:25:00.000000000 +0100
+++ b/arch/arm64/mm/fault.c	2017-03-14 02:01:35.000000000 +0100
@@ -278,6 +278,7 @@
 			 * starvation.
 			 */
 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
diff -ur a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
--- a/arch/arm64/mm/init.c	2017-03-23 14:25:01.000000000 +0100
+++ b/arch/arm64/mm/init.c	2017-03-14 02:01:35.000000000 +0100
@@ -262,7 +262,7 @@
 		 * memmap entries are valid from the bank end aligned to
 		 * MAX_ORDER_NR_PAGES.
 		 */
-		prev_end = ALIGN(start + __phys_to_pfn(reg->size),
+		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
 				 MAX_ORDER_NR_PAGES);
 	}
 
diff -ur a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
--- a/arch/arm64/mm/mmap.c	2017-03-23 14:25:00.000000000 +0100
+++ b/arch/arm64/mm/mmap.c	2017-03-14 02:01:35.000000000 +0100
@@ -47,22 +47,14 @@
 	return sysctl_legacy_va_layout;
 }
 
-/*
- * Since get_random_int() returns the same value within a 1 jiffy window, we
- * will almost always get the same randomisation for the stack and mmap
- * region. This will mean the relative distance between stack and mmap will be
- * the same.
- *
- * To avoid this we can shift the randomness by 1 bit.
- */
 static unsigned long mmap_rnd(void)
 {
 	unsigned long rnd = 0;
 
 	if (current->flags & PF_RANDOMIZE)
-		rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
+		rnd = (long)get_random_int() & STACK_RND_MASK;
 
-	return rnd << (PAGE_SHIFT + 1);
+	return rnd << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base(void)
diff -ur a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
--- a/arch/arm64/mm/mmu.c	2017-03-23 14:25:00.000000000 +0100
+++ b/arch/arm64/mm/mmu.c	2017-03-14 02:01:35.000000000 +0100
@@ -348,6 +348,9 @@
 
 	empty_zero_page = virt_to_page(zero_page);
 
+	/* Ensure the zero page is visible to the page table walker */
+	dsb();
+
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
diff -ur a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
--- a/arch/m32r/kernel/setup.c	2017-03-23 14:16:22.000000000 +0100
+++ b/arch/m32r/kernel/setup.c	2017-03-14 01:54:33.000000000 +0100
@@ -81,7 +81,10 @@
 };
 
 unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
+
 unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
 
 void __init setup_arch(char **);
 int get_cpuinfo(char *);
diff -ur a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
--- a/arch/m68k/include/asm/linkage.h	2017-03-23 14:36:00.000000000 +0100
+++ b/arch/m68k/include/asm/linkage.h	2017-03-14 02:09:50.000000000 +0100
@@ -4,4 +4,34 @@
 #define __ALIGN .align 4
 #define __ALIGN_STR ".align 4"
 
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+	__asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+	__asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+	__asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4), "m" (arg5), "m" (arg6))
+
 #endif
diff -ur a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
--- a/arch/m68k/include/asm/uaccess_mm.h	2017-03-23 14:36:16.000000000 +0100
+++ b/arch/m68k/include/asm/uaccess_mm.h	2017-03-14 02:10:06.000000000 +0100
@@ -90,7 +90,7 @@
 		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
 		break;							\
 	case 2:								\
-		__put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT);	\
+		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
 		break;							\
 	case 4:								\
 		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
@@ -157,7 +157,7 @@
 		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
 		break;							\
 	case 2:								\
-		__get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT);	\
+		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
 		break;							\
 	case 4:								\
 		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
@@ -244,7 +244,7 @@
 		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
 		break;
 	case 2:
-		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
+		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2);
 		break;
 	case 3:
 		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
@@ -325,7 +325,7 @@
 		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
 		break;
 	case 2:
-		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
+		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
 		break;
 	case 3:
 		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
diff -ur a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
--- a/arch/m68k/lib/uaccess.c	2017-03-23 14:35:58.000000000 +0100
+++ b/arch/m68k/lib/uaccess.c	2017-03-14 02:09:48.000000000 +0100
@@ -52,7 +52,7 @@
 		"	.long	3b,30b\n"
 		"	.long	5b,50b\n"
 		"	.previous"
-		: "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp)
+		: "=d" (res), "+a" (from), "+a" (to), "=&d" (tmp)
 		: "0" (n / 4), "d" (n & 3));
 
 	return res;
@@ -96,7 +96,7 @@
 		"	.long	7b,50b\n"
 		"	.long	8b,50b\n"
 		"	.previous"
-		: "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp)
+		: "=d" (res), "+a" (from), "+a" (to), "=&d" (tmp)
 		: "0" (n / 4), "d" (n & 3));
 
 	return res;
@@ -141,7 +141,7 @@
 		"	.long	7b,40b\n"
 		"	.previous"
 		: "=d" (res), "+a" (to)
-		: "r" (0), "0" (n / 4), "d" (n & 3));
+		: "d" (0), "0" (n / 4), "d" (n & 3));
 
     return res;
 }
diff -ur a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
--- a/arch/mips/include/asm/mach-generic/spaces.h	2017-03-23 14:29:00.000000000 +0100
+++ b/arch/mips/include/asm/mach-generic/spaces.h	2017-03-14 02:04:01.000000000 +0100
@@ -90,7 +90,11 @@
 #endif
 
 #ifndef FIXADDR_TOP
+#ifdef CONFIG_KVM_GUEST
+#define FIXADDR_TOP		((unsigned long)(long)(int)0x7ffe0000)
+#else
 #define FIXADDR_TOP		((unsigned long)(long)(int)0xfffe0000)
 #endif
+#endif
 
 #endif /* __ASM_MACH_GENERIC_SPACES_H */
diff -ur a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
--- a/arch/mips/include/asm/pgtable.h	2017-03-23 14:28:19.000000000 +0100
+++ b/arch/mips/include/asm/pgtable.h	2017-03-14 02:03:32.000000000 +0100
@@ -150,8 +150,39 @@
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+			"	.set	push\n"
+			"	.set	noreorder\n"
+			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
+			"	bnez	%[tmp], 2f\n"
+			"	 or	%[tmp], %[tmp], %[global]\n"
+			"	" SC_INSN "	%[tmp], %[buddy]\n"
+			"	beqz	%[tmp], 1b\n"
+			"	 nop\n"
+			"2:\n"
+			"	.set pop"
+			: [buddy] "+m" (buddy->pte),
+			  [tmp] "=&r" (tmp)
+			: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 	}
 #endif
 }
diff -ur a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
--- a/arch/mips/kernel/irq.c	2017-03-23 14:27:50.000000000 +0100
+++ b/arch/mips/kernel/irq.c	2017-03-14 02:03:15.000000000 +0100
@@ -110,7 +110,7 @@
 #endif
 }
 
-#ifdef DEBUG_STACKOVERFLOW
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
 static inline void check_stack_overflow(void)
 {
 	unsigned long sp;
diff -ur a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
--- a/arch/mips/kernel/mips-mt-fpaff.c	2017-03-23 14:27:53.000000000 +0100
+++ b/arch/mips/kernel/mips-mt-fpaff.c	2017-03-14 02:03:16.000000000 +0100
@@ -154,7 +154,7 @@
 				      unsigned long __user *user_mask_ptr)
 {
 	unsigned int real_len;
-	cpumask_t mask;
+	cpumask_t allowed, mask;
 	int retval;
 	struct task_struct *p;
 
@@ -173,7 +173,8 @@
 	if (retval)
 		goto out_unlock;
 
-	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+	cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
diff -ur a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
--- a/arch/mips/kernel/signal32.c	2017-03-23 14:27:55.000000000 +0100
+++ b/arch/mips/kernel/signal32.c	2017-03-14 02:03:17.000000000 +0100
@@ -368,8 +368,6 @@
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))
diff -ur a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
--- a/arch/mips/kvm/kvm_locore.S	2017-03-23 14:27:39.000000000 +0100
+++ b/arch/mips/kvm/kvm_locore.S	2017-03-14 02:03:08.000000000 +0100
@@ -154,9 +154,11 @@
 
 FEXPORT(__kvm_mips_load_asid)
     /* Set the ASID for the Guest Kernel */
-    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
-                                                /* addresses shift to 0x80000000 */
-    bltz        t0, 1f                          /* If kernel */
+    PTR_L	t0, VCPU_COP0(k1)
+    LONG_L	t0, COP0_STATUS(t0)
+    andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+    xori	t0, KSU_USER
+    bnez	t0, 1f		/* If kernel */
 	addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
     addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
 1:
@@ -437,9 +439,11 @@
 	mtc0		t0, CP0_EPC
 
     /* Set the ASID for the Guest Kernel */
-    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
-                                                /* addresses shift to 0x80000000 */
-    bltz        t0, 1f                          /* If kernel */
+    PTR_L	t0, VCPU_COP0(k1)
+    LONG_L	t0, COP0_STATUS(t0)
+    andi	t0, KSU_USER | ST0_ERL | ST0_EXL
+    xori	t0, KSU_USER
+    bnez	t0, 1f		/* If kernel */
 	addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
     addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
 1:
diff -ur a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
--- a/arch/mips/kvm/kvm_mips.c	2017-03-23 14:27:40.000000000 +0100
+++ b/arch/mips/kvm/kvm_mips.c	2017-03-14 02:03:09.000000000 +0100
@@ -307,7 +307,7 @@
 
 	if (!gebase) {
 		err = -ENOMEM;
-		goto out_free_cpu;
+		goto out_uninit_cpu;
 	}
 	kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 		 ALIGN(size, PAGE_SIZE), gebase);
@@ -367,6 +367,9 @@
 out_free_gebase:
 	kfree(gebase);
 
+out_uninit_cpu:
+	kvm_vcpu_uninit(vcpu);
+
 out_free_cpu:
 	kfree(vcpu);
 
diff -ur a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
--- a/arch/mips/kvm/kvm_mips_emul.c	2017-03-23 14:27:42.000000000 +0100
+++ b/arch/mips/kvm/kvm_mips_emul.c	2017-03-14 02:03:10.000000000 +0100
@@ -935,7 +935,7 @@
 
 	base = (inst >> 21) & 0x1f;
 	op_inst = (inst >> 16) & 0x1f;
-	offset = inst & 0xffff;
+	offset = (int16_t)inst;
 	cache = (inst >> 16) & 0x3;
 	op = (inst >> 18) & 0x7;
 
@@ -1626,7 +1626,7 @@
 		if (vcpu->mmio_needed == 2)
 			*gpr = *(int16_t *) run->mmio.data;
 		else
-			*gpr = *(int16_t *) run->mmio.data;
+			*gpr = *(uint16_t *)run->mmio.data;
 
 		break;
 	case 1:
diff -ur a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
--- a/arch/mips/mm/dma-default.c	2017-03-23 14:29:46.000000000 +0100
+++ b/arch/mips/mm/dma-default.c	2017-03-14 02:04:41.000000000 +0100
@@ -91,7 +91,7 @@
 	else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+	     if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
 		dma_flag = __GFP_DMA;
 	else
 #endif
diff -ur a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
--- a/arch/mn10300/Kconfig	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/mn10300/Kconfig	2016-07-29 05:48:09.000000000 +0200
@@ -2,6 +2,7 @@
 	def_bool y
 	select HAVE_OPROFILE
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_UID16
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@
 config NUMA
 	def_bool n
 
-config UID16
-	def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
diff -ur a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
--- a/arch/openrisc/Kconfig	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/openrisc/Kconfig	2016-07-29 05:48:09.000000000 +0200
@@ -17,6 +17,7 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IOMAP
 	select GENERIC_CPU_DEVICES
+	select HAVE_UID16
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_STRNCPY_FROM_USER
@@ -29,9 +30,6 @@
 config HAVE_DMA_ATTRS
 	def_bool y
 
-config UID16
-	def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
diff -ur a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
--- a/arch/parisc/include/uapi/asm/mman.h	2017-03-23 14:18:11.000000000 +0100
+++ b/arch/parisc/include/uapi/asm/mman.h	2017-03-14 01:55:40.000000000 +0100
@@ -46,16 +46,6 @@
 #define MADV_DONTFORK	10		/* don't inherit across fork */
 #define MADV_DOFORK	11		/* do inherit across fork */
 
-/* The range 12-64 is reserved for page size specification. */
-#define MADV_4K_PAGES   12              /* Use 4K pages  */
-#define MADV_16K_PAGES  14              /* Use 16K pages */
-#define MADV_64K_PAGES  16              /* Use 64K pages */
-#define MADV_256K_PAGES 18              /* Use 256K pages */
-#define MADV_1M_PAGES   20              /* Use 1 Megabyte pages */
-#define MADV_4M_PAGES   22              /* Use 4 Megabyte pages */
-#define MADV_16M_PAGES  24              /* Use 16 Megabyte pages */
-#define MADV_64M_PAGES  26              /* Use 64 Megabyte pages */
-
 #define MADV_MERGEABLE   65		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
 
diff -ur a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
--- a/arch/parisc/include/uapi/asm/siginfo.h	2017-03-23 14:18:12.000000000 +0100
+++ b/arch/parisc/include/uapi/asm/siginfo.h	2017-03-14 01:55:40.000000000 +0100
@@ -1,6 +1,10 @@
 #ifndef _PARISC_SIGINFO_H
 #define _PARISC_SIGINFO_H
 
+#if defined(__LP64__)
+#define __ARCH_SI_PREAMBLE_SIZE   (4 * sizeof(int))
+#endif
+
 #include <asm-generic/siginfo.h>
 
 #undef NSIGTRAP
diff -ur a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
--- a/arch/parisc/kernel/irq.c	2017-03-23 14:17:57.000000000 +0100
+++ b/arch/parisc/kernel/irq.c	2017-03-14 01:55:31.000000000 +0100
@@ -518,8 +518,8 @@
 	struct pt_regs *old_regs;
 	unsigned long eirr_val;
 	int irq, cpu = smp_processor_id();
-#ifdef CONFIG_SMP
 	struct irq_desc *desc;
+#ifdef CONFIG_SMP
 	cpumask_t dest;
 #endif
 
@@ -532,8 +532,12 @@
 		goto set_out;
 	irq = eirr_to_irq(eirr_val);
 
-#ifdef CONFIG_SMP
+	/* Filter out spurious interrupts, mostly from serial port at bootup */
 	desc = irq_to_desc(irq);
+	if (unlikely(!desc->action))
+		goto set_out;
+
+#ifdef CONFIG_SMP
 	cpumask_copy(&dest, desc->irq_data.affinity);
 	if (irqd_is_per_cpu(&desc->irq_data) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
diff -ur a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
--- a/arch/parisc/kernel/parisc_ksyms.c	2017-03-23 14:17:58.000000000 +0100
+++ b/arch/parisc/kernel/parisc_ksyms.c	2017-03-14 01:55:32.000000000 +0100
@@ -47,11 +47,11 @@
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
-/* Global fixups */
-extern void fixup_get_user_skip_1(void);
-extern void fixup_get_user_skip_2(void);
-extern void fixup_put_user_skip_1(void);
-extern void fixup_put_user_skip_2(void);
+/* Global fixups - defined as int to avoid creation of function pointers */
+extern int fixup_get_user_skip_1;
+extern int fixup_get_user_skip_2;
+extern int fixup_put_user_skip_1;
+extern int fixup_put_user_skip_2;
 EXPORT_SYMBOL(fixup_get_user_skip_1);
 EXPORT_SYMBOL(fixup_get_user_skip_2);
 EXPORT_SYMBOL(fixup_put_user_skip_1);
diff -ur a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
--- a/arch/parisc/kernel/signal.c	2017-03-23 14:18:01.000000000 +0100
+++ b/arch/parisc/kernel/signal.c	2017-03-14 01:55:33.000000000 +0100
@@ -445,6 +445,55 @@
 		regs->gr[28]);
 }
 
+/*
+ * Check how the syscall number gets loaded into %r20 within
+ * the delay branch in userspace and adjust as needed.
+ */
+
+static void check_syscallno_in_delay_branch(struct pt_regs *regs)
+{
+	u32 opcode, source_reg;
+	u32 __user *uaddr;
+	int err;
+
+	/* Usually we don't have to restore %r20 (the system call number)
+	 * because it gets loaded in the delay slot of the branch external
+	 * instruction via the ldi instruction.
+	 * In some cases a register-to-register copy instruction might have
+	 * been used instead, in which case we need to copy the syscall
+	 * number into the source register before returning to userspace.
+	 */
+
+	/* A syscall is just a branch, so all we have to do is fiddle the
+	 * return pointer so that the ble instruction gets executed again.
+	 */
+	regs->gr[31] -= 8; /* delayed branching */
+
+	/* Get assembler opcode of code in delay branch */
+	uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
+	err = get_user(opcode, uaddr);
+	if (err)
+		return;
+
+	/* Check if delay branch uses "ldi int,%r20" */
+	if ((opcode & 0xffff0000) == 0x34140000)
+		return;	/* everything ok, just return */
+
+	/* Check if delay branch uses "nop" */
+	if (opcode == INSN_NOP)
+		return;
+
+	/* Check if delay branch uses "copy %rX,%r20" */
+	if ((opcode & 0xffe0ffff) == 0x08000254) {
+		source_reg = (opcode >> 16) & 31;
+		regs->gr[source_reg] = regs->gr[20];
+		return;
+	}
+
+	pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
+		current->comm, task_pid_nr(current), opcode);
+}
+
 static inline void
 syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
 {
@@ -467,10 +516,7 @@
 		}
 		/* fallthrough */
 	case -ERESTARTNOINTR:
-		/* A syscall is just a branch, so all
-		 * we have to do is fiddle the return pointer.
-		 */
-		regs->gr[31] -= 8; /* delayed branching */
+		check_syscallno_in_delay_branch(regs);
 		break;
 	}
 }
@@ -519,15 +565,9 @@
 	}
 	case -ERESTARTNOHAND:
 	case -ERESTARTSYS:
-	case -ERESTARTNOINTR: {
-		/* Hooray for delayed branching.  We don't
-		 * have to restore %r20 (the system call
-		 * number) because it gets loaded in the delay
-		 * slot of the branch external instruction.
-		 */
-		regs->gr[31] -= 8;
+	case -ERESTARTNOINTR:
+		check_syscallno_in_delay_branch(regs);
 		return;
-	}
 	default:
 		break;
 	}
diff -ur a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
--- a/arch/parisc/kernel/traps.c	2017-03-23 14:17:59.000000000 +0100
+++ b/arch/parisc/kernel/traps.c	2017-03-14 01:55:33.000000000 +0100
@@ -809,6 +809,9 @@
 
 	    if (fault_space == 0 && !in_atomic())
 	    {
+		/* Clean up and return if in exception table. */
+		if (fixup_exception(regs))
+			return;
 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
 		parisc_terminate("Kernel Fault", regs, code, fault_address);
 	    }
diff -ur a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
--- a/arch/powerpc/include/asm/cmpxchg.h	2017-03-23 14:26:33.000000000 +0100
+++ b/arch/powerpc/include/asm/cmpxchg.h	2017-03-14 02:02:36.000000000 +0100
@@ -18,12 +18,12 @@
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
 	: "r" (p), "r" (val)
 	: "cc", "memory");
@@ -61,12 +61,12 @@
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
 	: "r" (p), "r" (val)
 	: "cc", "memory");
@@ -152,14 +152,14 @@
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%4,0,%2\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"
 	: "=&r" (prev), "+m" (*p)
@@ -198,13 +198,13 @@
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
 	stdcx.	%4,0,%2\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"
 	: "=&r" (prev), "+m" (*p)
diff -ur a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
--- a/arch/powerpc/include/asm/pgtable-ppc64.h	2017-03-23 14:26:15.000000000 +0100
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h	2017-03-14 02:02:23.000000000 +0100
@@ -127,7 +127,19 @@
 #define pte_iterate_hashed_end() } while(0)
 
 #ifdef CONFIG_PPC_HAS_HASH_64K
-#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte)			\
+	({							\
+		unsigned int psize;				\
+		if (is_kernel_addr(addr))			\
+			psize = MMU_PAGE_4K;			\
+		else						\
+			psize = get_slice_psize(mm, addr);	\
+		psize;						\
+	})
 #else
 #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
 #endif
diff -ur a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
--- a/arch/powerpc/include/asm/reg.h	2017-03-23 14:26:28.000000000 +0100
+++ b/arch/powerpc/include/asm/reg.h	2017-03-14 02:02:33.000000000 +0100
@@ -108,6 +108,7 @@
 #define MSR_TS_T	__MASK(MSR_TS_T_LG)	/*  Transaction Transactional */
 #define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
 #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
 #define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
 
diff -ur a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
--- a/arch/powerpc/include/asm/rtas.h	2017-03-23 14:26:28.000000000 +0100
+++ b/arch/powerpc/include/asm/rtas.h	2017-03-14 02:02:33.000000000 +0100
@@ -255,6 +255,7 @@
 extern void rtas_halt(void);
 extern void rtas_os_term(char *str);
 extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
 extern int rtas_get_power_level(int powerdomain, int *level);
 extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
 extern bool rtas_indicator_present(int token, int *maxindex);
diff -ur a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
--- a/arch/powerpc/include/asm/synch.h	2017-03-23 14:26:22.000000000 +0100
+++ b/arch/powerpc/include/asm/synch.h	2017-03-14 02:02:29.000000000 +0100
@@ -44,7 +44,7 @@
 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
 #define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
 #define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
 #define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
diff -ur a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
--- a/arch/powerpc/include/uapi/asm/cputable.h	2017-03-23 14:26:36.000000000 +0100
+++ b/arch/powerpc/include/uapi/asm/cputable.h	2017-03-14 02:02:38.000000000 +0100
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
 					0x00000040
 
+/* Reserved - do not use		0x00000004 */
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
 
diff -ur a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
--- a/arch/powerpc/kernel/module_64.c	2017-03-23 14:25:59.000000000 +0100
+++ b/arch/powerpc/kernel/module_64.c	2017-03-14 02:02:11.000000000 +0100
@@ -192,7 +192,7 @@
 		if (syms[i].st_shndx == SHN_UNDEF) {
 			char *name = strtab + syms[i].st_name;
 			if (name[0] == '.')
-				memmove(name, name+1, strlen(name));
+				syms[i].st_name++;
 		}
 	}
 }
diff -ur a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
--- a/arch/powerpc/kernel/prom.c	2017-03-23 14:26:02.000000000 +0100
+++ b/arch/powerpc/kernel/prom.c	2017-03-14 02:02:12.000000000 +0100
@@ -159,7 +159,7 @@
 	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
 	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
 	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 5, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, unsigned char *ftrs,
diff -ur a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
--- a/arch/powerpc/kernel/rtas.c	2017-03-23 14:25:57.000000000 +0100
+++ b/arch/powerpc/kernel/rtas.c	2017-03-14 02:02:08.000000000 +0100
@@ -582,6 +582,23 @@
 }
 EXPORT_SYMBOL(rtas_get_sensor);
 
+int rtas_get_sensor_fast(int sensor, int index, int *state)
+{
+	int token = rtas_token("get-sensor-state");
+	int rc;
+
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return -ENOENT;
+
+	rc = rtas_call(token, 2, 2, state, sensor, index);
+	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+				    rc <= RTAS_EXTENDED_DELAY_MAX));
+
+	if (rc < 0)
+		return rtas_error_rc(rc);
+	return rc;
+}
+
 bool rtas_indicator_present(int token, int *maxindex)
 {
 	int proplen, count, i;
@@ -1022,6 +1039,9 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	if (!rtas.entry)
+		return -EINVAL;
+
 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
 		return -EFAULT;
 
diff -ur a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
--- a/arch/powerpc/kernel/signal_32.c	2017-03-23 14:25:56.000000000 +0100
+++ b/arch/powerpc/kernel/signal_32.c	2017-03-14 02:02:09.000000000 +0100
@@ -858,6 +858,15 @@
 		return 1;
 #endif /* CONFIG_SPE */
 
+	/* Get the top half of the MSR from the user context */
+	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+		return 1;
+	msr_hi <<= 32;
+	/* If TM bits are set to the reserved value, it's an invalid context */
+	if (MSR_TM_RESV(msr_hi))
+		return 1;
+	/* Pull in the MSR TM bits from the user context */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
 	 * transactional versions should be loaded.
@@ -867,11 +876,6 @@
 	current->thread.tm_texasr |= TEXASR_FS;
 	/* This loads the checkpointed FP/VEC state, if used */
 	tm_recheckpoint(&current->thread, msr);
-	/* Get the top half of the MSR */
-	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
-		return 1;
-	/* Pull in MSR TM from user context */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
 
 	/* This loads the speculative FP/VEC state, if used */
 	if (msr & MSR_FP) {
@@ -949,8 +953,6 @@
 
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))
diff -ur a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
--- a/arch/powerpc/kernel/signal_64.c	2017-03-23 14:26:00.000000000 +0100
+++ b/arch/powerpc/kernel/signal_64.c	2017-03-14 02:02:11.000000000 +0100
@@ -416,6 +416,10 @@
 
 	/* get MSR separately, transfer the LE bit if doing signal return */
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+	/* Don't allow reserved mode. */
+	if (MSR_TM_RESV(msr))
+		return -EINVAL;
+
 	/* pull in MSR TM from user context */
 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
 
diff -ur a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
--- a/arch/powerpc/kernel/vmlinux.lds.S	2017-03-23 14:25:49.000000000 +0100
+++ b/arch/powerpc/kernel/vmlinux.lds.S	2017-03-14 02:02:03.000000000 +0100
@@ -213,6 +213,7 @@
 		*(.opd)
 	}
 
+	. = ALIGN(256);
 	.got : AT(ADDR(.got) - LOAD_OFFSET) {
 		__toc_start = .;
 #ifndef CONFIG_RELOCATABLE
diff -ur a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
--- a/arch/powerpc/kvm/book3s_hv.c	2017-03-23 14:25:31.000000000 +0100
+++ b/arch/powerpc/kvm/book3s_hv.c	2017-03-14 02:01:54.000000000 +0100
@@ -160,6 +160,12 @@
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
+	/*
+	 * Check for illegal transactional state bit combination
+	 * and if we find it, force the TS field to a safe state.
+	 */
+	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+		msr &= ~MSR_TS_MASK;
 	vcpu->arch.shregs.msr = msr;
 	kvmppc_end_cede(vcpu);
 }
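
Note on the hunk above: the TS (Transaction State) field in the MSR is two bits wide; setting only MSR_TS_S or only MSR_TS_T is valid, while both bits together is the reserved encoding that MSR_TM_RESV() (added to reg.h earlier in this patch) and the check here reject. A standalone sketch of that check, with illustrative bit positions only (the authoritative MSR_TS_*_LG values come from asm/reg.h):

    #include <assert.h>

    /* Illustrative bit positions; not the authoritative asm/reg.h values. */
    #define TS_S    (1ULL << 33)            /* suspended      */
    #define TS_T    (1ULL << 34)            /* transactional  */
    #define TS_MASK (TS_S | TS_T)

    /* Mirrors MSR_TM_RESV(): both TS bits set is the reserved state. */
    #define TM_RESV(msr) (((msr) & TS_MASK) == TS_MASK)

    int main(void)
    {
        assert(!TM_RESV(TS_S));             /* suspended: allowed            */
        assert(!TM_RESV(TS_T));             /* transactional: allowed        */
        assert(TM_RESV(TS_S | TS_T));       /* reserved: now refused/cleared */
        return 0;
    }
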
diff -ur a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
--- a/arch/powerpc/perf/core-book3s.c	2017-03-23 14:25:49.000000000 +0100
+++ b/arch/powerpc/perf/core-book3s.c	2017-03-14 02:02:04.000000000 +0100
@@ -112,7 +112,16 @@
 
 static bool regs_use_siar(struct pt_regs *regs)
 {
-	return !!regs->result;
+	/*
+	 * When we take a performance monitor exception the regs are setup
+	 * using perf_read_regs() which overloads some fields, in particular
+	 * regs->result to tell us whether to use SIAR.
+	 *
+	 * However if the regs are from another exception, eg. a syscall, then
+	 * they have not been setup using perf_read_regs() and so regs->result
+	 * is something random.
+	 */
+	return ((TRAP(regs) == 0xf00) && regs->result);
 }
 
 /*
diff -ur a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
--- a/arch/powerpc/platforms/powernv/pci.c	2017-03-23 14:26:50.000000000 +0100
+++ b/arch/powerpc/platforms/powernv/pci.c	2017-03-14 02:02:45.000000000 +0100
@@ -106,6 +106,7 @@
 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 	struct pnv_phb *phb = hose->private_data;
 	struct msi_desc *entry;
+	irq_hw_number_t hwirq;
 
 	if (WARN_ON(!phb))
 		return;
@@ -113,10 +114,10 @@
 	list_for_each_entry(entry, &pdev->msi_list, list) {
 		if (entry->irq == NO_IRQ)
 			continue;
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&phb->msi_bmp,
-			virq_to_hw(entry->irq) - phb->msi_base, 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
 	}
 }
 #endif /* CONFIG_PCI_MSI */
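
The reordering above, and the matching changes to fsl_msi, mpic_pasemi_msi, mpic_u3msi and ppc4xx_msi further down, caches the hardware IRQ number before irq_dispose_mapping() destroys the virq-to-hwirq mapping; reading it afterwards would free the wrong bit in the MSI bitmap. A stripped-down sketch of the hazard, using hypothetical stand-ins for the irq-domain helpers:

    #include <assert.h>

    /* Hypothetical stand-ins for virq_to_hw()/irq_dispose_mapping(). */
    static int hw_of_virq[8] = { [3] = 42 };

    static int  virq_to_hw(int virq)          { return hw_of_virq[virq]; }
    static void irq_dispose_mapping(int virq) { hw_of_virq[virq] = 0; }

    int main(void)
    {
        int virq = 3;
        int hwirq = virq_to_hw(virq);   /* capture first, as the patch now does */

        irq_dispose_mapping(virq);      /* mapping gone; a lookup now returns 0 */
        assert(hwirq == 42);            /* so this is the slot we must free     */
        return 0;
    }
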
diff -ur a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
--- a/arch/powerpc/platforms/pseries/ras.c	2017-03-23 14:27:05.000000000 +0100
+++ b/arch/powerpc/platforms/pseries/ras.c	2017-03-14 02:02:51.000000000 +0100
@@ -186,7 +186,8 @@
 	int state;
 	int critical;
 
-	status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
+	status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
+				      &state);
 
 	if (state > 3)
 		critical = 1;		/* Time Critical */
diff -ur a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
--- a/arch/powerpc/sysdev/fsl_msi.c	2017-03-23 14:26:05.000000000 +0100
+++ b/arch/powerpc/sysdev/fsl_msi.c	2017-03-14 02:02:14.000000000 +0100
@@ -108,15 +108,16 @@
 {
 	struct msi_desc *entry;
 	struct fsl_msi *msi_data;
+	irq_hw_number_t hwirq;
 
 	list_for_each_entry(entry, &pdev->msi_list, list) {
 		if (entry->irq == NO_IRQ)
 			continue;
+		hwirq = virq_to_hw(entry->irq);
 		msi_data = irq_get_chip_data(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
-				       virq_to_hw(entry->irq), 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
 	}
 
 	return;
diff -ur a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c	2017-03-23 14:26:05.000000000 +0100
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c	2017-03-14 02:02:15.000000000 +0100
@@ -73,6 +73,7 @@
 static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
 	struct msi_desc *entry;
+	irq_hw_number_t hwirq;
 
 	pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
 
@@ -80,10 +81,11 @@
 		if (entry->irq == NO_IRQ)
 			continue;
 
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-				       virq_to_hw(entry->irq), ALLOC_CHUNK);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+				       hwirq, ALLOC_CHUNK);
 	}
 
 	return;
diff -ur a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
--- a/arch/powerpc/sysdev/mpic_u3msi.c	2017-03-23 14:26:05.000000000 +0100
+++ b/arch/powerpc/sysdev/mpic_u3msi.c	2017-03-14 02:02:15.000000000 +0100
@@ -124,15 +124,16 @@
 static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
 	struct msi_desc *entry;
+	irq_hw_number_t hwirq;
 
         list_for_each_entry(entry, &pdev->msi_list, list) {
 		if (entry->irq == NO_IRQ)
 			continue;
 
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-				       virq_to_hw(entry->irq), 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
 	}
 
 	return;
diff -ur a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
--- a/arch/powerpc/sysdev/ppc4xx_msi.c	2017-03-23 14:26:07.000000000 +0100
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c	2017-03-14 02:02:17.000000000 +0100
@@ -121,16 +121,17 @@
 {
 	struct msi_desc *entry;
 	struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+	irq_hw_number_t hwirq;
 
 	dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
 
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		if (entry->irq == NO_IRQ)
 			continue;
+		hwirq = virq_to_hw(entry->irq);
 		irq_set_msi_desc(entry->irq, NULL);
-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
-				virq_to_hw(entry->irq), 1);
 		irq_dispose_mapping(entry->irq);
+		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
 	}
 }
 
diff -ur a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
--- a/arch/s390/crypto/ghash_s390.c	2017-03-23 14:34:28.000000000 +0100
+++ b/arch/s390/crypto/ghash_s390.c	2017-03-14 02:08:34.000000000 +0100
@@ -16,11 +16,12 @@
 #define GHASH_DIGEST_SIZE	16
 
 struct ghash_ctx {
-	u8 icv[16];
-	u8 key[16];
+	u8 key[GHASH_BLOCK_SIZE];
 };
 
 struct ghash_desc_ctx {
+	u8 icv[GHASH_BLOCK_SIZE];
+	u8 key[GHASH_BLOCK_SIZE];
 	u8 buffer[GHASH_BLOCK_SIZE];
 	u32 bytes;
 };
@@ -28,8 +29,10 @@
 static int ghash_init(struct shash_desc *desc)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
 	memset(dctx, 0, sizeof(*dctx));
+	memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
 
 	return 0;
 }
@@ -45,7 +48,6 @@
 	}
 
 	memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
-	memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
 
 	return 0;
 }
@@ -54,7 +56,6 @@
 			 const u8 *src, unsigned int srclen)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	unsigned int n;
 	u8 *buf = dctx->buffer;
 	int ret;
@@ -70,7 +71,7 @@
 		src += n;
 
 		if (!dctx->bytes) {
-			ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+			ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
 					      GHASH_BLOCK_SIZE);
 			if (ret != GHASH_BLOCK_SIZE)
 				return -EIO;
@@ -79,7 +80,7 @@
 
 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
 	if (n) {
-		ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+		ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
 		if (ret != n)
 			return -EIO;
 		src += n;
@@ -94,7 +95,7 @@
 	return 0;
 }
 
-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_desc_ctx *dctx)
 {
 	u8 *buf = dctx->buffer;
 	int ret;
@@ -104,24 +105,24 @@
 
 		memset(pos, 0, dctx->bytes);
 
-		ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+		ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
 		if (ret != GHASH_BLOCK_SIZE)
 			return -EIO;
+
+		dctx->bytes = 0;
 	}
 
-	dctx->bytes = 0;
 	return 0;
 }
 
 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	int ret;
 
-	ret = ghash_flush(ctx, dctx);
+	ret = ghash_flush(dctx);
 	if (!ret)
-		memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+		memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
 	return ret;
 }
 
diff -ur a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
--- a/arch/s390/kernel/sclp.S	2017-03-23 14:34:34.000000000 +0100
+++ b/arch/s390/kernel/sclp.S	2017-03-14 02:08:38.000000000 +0100
@@ -276,6 +276,8 @@
 	jno	.Lesa2
 	ahi	%r15,-80
 	stmh	%r6,%r15,96(%r15)		# store upper register halves
+	basr	%r13,0
+	lmh	%r0,%r15,.Lzeroes-.(%r13)	# clear upper register halves
 .Lesa2:
 #endif
 	lr	%r10,%r2			# save string pointer
@@ -299,6 +301,8 @@
 #endif
 	lm	%r6,%r15,120(%r15)		# restore registers
 	br	%r14
+.Lzeroes:
+	.fill	64,4,0
 
 .LwritedataS4:
 	.long	0x00760005			# SCLP command for write data
diff -ur a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
--- a/arch/s390/mm/extable.c	2017-03-23 14:35:10.000000000 +0100
+++ b/arch/s390/mm/extable.c	2017-03-14 02:09:04.000000000 +0100
@@ -52,12 +52,16 @@
 	int i;
 
 	/* Normalize entries to being relative to the start of the section */
-	for (p = start, i = 0; p < finish; p++, i += 8)
+	for (p = start, i = 0; p < finish; p++, i += 8) {
 		p->insn += i;
+		p->fixup += i + 4;
+	}
 	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
 	/* Denormalize all entries */
-	for (p = start, i = 0; p < finish; p++, i += 8)
+	for (p = start, i = 0; p < finish; p++, i += 8) {
 		p->insn -= i;
+		p->fixup -= i + 4;
+	}
 }
 
 #ifdef CONFIG_MODULES
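
Background for the hunk above: on s390 the exception table stores insn and fixup as offsets relative to the entry's own location, so before sort() may move entries around, both fields have to be normalized to section-relative values and denormalized again afterwards. The fixup field sits 4 bytes after insn inside each 8-byte entry, hence the extra "+ 4". A toy illustration (plain longs, not the kernel structures) of why self-relative values must be rebased when an entry moves:

    #include <stdio.h>

    /* 'rel' is stored relative to the entry's own position in the table. */
    static long to_abs(long rel, long pos) { return rel + pos; }
    static long to_rel(long absolute, long pos) { return absolute - pos; }

    int main(void)
    {
        long pos = 24, rel = 100;                 /* entry at 24, points to 124 */
        long absolute = to_abs(rel, pos);         /* normalize before sorting   */

        long newpos = 8;                          /* sort() moved the entry     */
        rel = to_rel(absolute, newpos);           /* denormalize afterwards     */

        printf("%ld\n", to_abs(rel, newpos));     /* still 124                  */
        return 0;
    }
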
diff -ur a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
--- a/arch/sh/include/uapi/asm/unistd_64.h	2017-03-23 14:24:39.000000000 +0100
+++ b/arch/sh/include/uapi/asm/unistd_64.h	2017-03-14 02:01:24.000000000 +0100
@@ -278,7 +278,7 @@
 #define __NR_fsetxattr		256
 #define __NR_getxattr		257
 #define __NR_lgetxattr		258
-#define __NR_fgetxattr		269
+#define __NR_fgetxattr		259
 #define __NR_listxattr		260
 #define __NR_llistxattr		261
 #define __NR_flistxattr		262
diff -ur a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
--- a/arch/sparc/crypto/aes_glue.c	2017-03-23 14:31:46.000000000 +0100
+++ b/arch/sparc/crypto/aes_glue.c	2017-03-14 02:06:21.000000000 +0100
@@ -433,6 +433,7 @@
 		.blkcipher = {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
@@ -452,6 +453,7 @@
 		.blkcipher = {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.encrypt	= ctr_crypt,
 			.decrypt	= ctr_crypt,
diff -ur a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
--- a/arch/sparc/crypto/camellia_glue.c	2017-03-23 14:31:46.000000000 +0100
+++ b/arch/sparc/crypto/camellia_glue.c	2017-03-14 02:06:21.000000000 +0100
@@ -274,6 +274,7 @@
 		.blkcipher = {
 			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
 			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
+			.ivsize		= CAMELLIA_BLOCK_SIZE,
 			.setkey		= camellia_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
diff -ur a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
--- a/arch/sparc/crypto/des_glue.c	2017-03-23 14:31:47.000000000 +0100
+++ b/arch/sparc/crypto/des_glue.c	2017-03-14 02:06:21.000000000 +0100
@@ -429,6 +429,7 @@
 		.blkcipher = {
 			.min_keysize	= DES_KEY_SIZE,
 			.max_keysize	= DES_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
 			.setkey		= des_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
@@ -485,6 +486,7 @@
 		.blkcipher = {
 			.min_keysize	= DES3_EDE_KEY_SIZE,
 			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES3_EDE_BLOCK_SIZE,
 			.setkey		= des3_ede_set_key,
 			.encrypt	= cbc3_encrypt,
 			.decrypt	= cbc3_decrypt,
diff -ur a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
--- a/arch/sparc/include/asm/visasm.h	2017-03-23 14:32:32.000000000 +0100
+++ b/arch/sparc/include/asm/visasm.h	2017-03-14 02:06:52.000000000 +0100
@@ -28,18 +28,20 @@
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
 
 #define VISEntryHalf					\
+	VISEntry
+
+#define VISExitHalf					\
+	VISExit
+
+#define VISEntryHalfFast(fail_label)			\
 	rd		%fprs, %o5;			\
 	andcc		%o5, FPRS_FEF, %g0;		\
 	be,pt		%icc, 297f;			\
-	 sethi		%hi(298f), %g7;			\
-	sethi		%hi(VISenterhalf), %g1;		\
-	jmpl		%g1 + %lo(VISenterhalf), %g0;	\
-	 or		%g7, %lo(298f), %g7;		\
-	clr		%o5;				\
-297:	wr		%o5, FPRS_FEF, %fprs;		\
-298:
+	 nop;						\
+	ba,a,pt		%xcc, fail_label;		\
+297:	wr		%o5, FPRS_FEF, %fprs;
 
-#define VISExitHalf					\
+#define VISExitHalfFast					\
 	wr		%o5, 0, %fprs;
 
 #ifndef __ASSEMBLY__
diff -ur a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
--- a/arch/sparc/kernel/ldc.c	2017-03-23 14:32:05.000000000 +0100
+++ b/arch/sparc/kernel/ldc.c	2017-03-14 02:06:31.000000000 +0100
@@ -2305,7 +2305,7 @@
 	if (len & (8UL - 1))
 		return ERR_PTR(-EINVAL);
 
-	buf = kzalloc(len, GFP_KERNEL);
+	buf = kzalloc(len, GFP_ATOMIC);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
diff -ur a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
--- a/arch/sparc/kernel/sys_sparc_64.c	2017-03-23 14:31:57.000000000 +0100
+++ b/arch/sparc/kernel/sys_sparc_64.c	2017-03-14 02:06:28.000000000 +0100
@@ -416,7 +416,7 @@
 
 SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
 {
-	int ret;
+	long ret;
 
 	if (personality(current->personality) == PER_LINUX32 &&
 	    personality(personality) == PER_LINUX)
diff -ur a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
--- a/arch/sparc/lib/ksyms.c	2017-03-23 14:32:20.000000000 +0100
+++ b/arch/sparc/lib/ksyms.c	2017-03-14 02:06:42.000000000 +0100
@@ -126,10 +126,6 @@
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
 		unsigned long *);
diff -ur a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
--- a/arch/sparc/lib/NG4memcpy.S	2017-03-23 14:32:18.000000000 +0100
+++ b/arch/sparc/lib/NG4memcpy.S	2017-03-14 02:06:40.000000000 +0100
@@ -41,6 +41,10 @@
 #endif
 #endif
 
+#if !defined(EX_LD) && !defined(EX_ST)
+#define NON_USER_COPY
+#endif
+
 #ifndef EX_LD
 #define EX_LD(x)	x
 #endif
@@ -197,9 +201,13 @@
 	 mov		EX_RETVAL(%o3), %o0
 
 .Llarge_src_unaligned:
+#ifdef NON_USER_COPY
+	VISEntryHalfFast(.Lmedium_vis_entry_fail)
+#else
+	VISEntryHalf
+#endif
 	andn		%o2, 0x3f, %o4
 	sub		%o2, %o4, %o2
-	VISEntryHalf
 	alignaddr	%o1, %g0, %g1
 	add		%o1, %o4, %o1
 	EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
@@ -232,14 +240,21 @@
 	add		%o0, 0x40, %o0
 	bne,pt		%icc, 1b
 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+	VISExitHalfFast
+#else
 	VISExitHalf
-
+#endif
 	brz,pn		%o2, .Lexit
 	 cmp		%o2, 19
 	ble,pn		%icc, .Lsmall_unaligned
 	 nop
 	ba,a,pt		%icc, .Lmedium_unaligned
 
+#ifdef NON_USER_COPY
+.Lmedium_vis_entry_fail:
+	 or		%o0, %o1, %g2
+#endif
 .Lmedium:
 	LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
 	andcc		%g2, 0x7, %g0
diff -ur a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
--- a/arch/sparc/lib/VISsave.S	2017-03-23 14:32:19.000000000 +0100
+++ b/arch/sparc/lib/VISsave.S	2017-03-14 02:06:41.000000000 +0100
@@ -44,9 +44,8 @@
 
 	 stx		%g3, [%g6 + TI_GSR]
 2:	add		%g6, %g1, %g3
-	cmp		%o5, FPRS_DU
-	be,pn		%icc, 6f
-	 sll		%g1, 3, %g1
+	mov		FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+	sll		%g1, 3, %g1
 	stb		%o5, [%g3 + TI_FPSAVED]
 	rd		%gsr, %g2
 	add		%g6, %g1, %g3
@@ -80,65 +79,3 @@
 	.align		32
 80:	jmpl		%g7 + %g0, %g0
 	 nop
-
-6:	ldub		[%g3 + TI_FPSAVED], %o5
-	or		%o5, FPRS_DU, %o5
-	add		%g6, TI_FPREGS+0x80, %g2
-	stb		%o5, [%g3 + TI_FPSAVED]
-
-	sll		%g1, 5, %g1
-	add		%g6, TI_FPREGS+0xc0, %g3
-	wr		%g0, FPRS_FEF, %fprs
-	membar		#Sync
-	stda		%f32, [%g2 + %g1] ASI_BLK_P
-	stda		%f48, [%g3 + %g1] ASI_BLK_P
-	membar		#Sync
-	ba,pt		%xcc, 80f
-	 nop
-
-	.align		32
-80:	jmpl		%g7 + %g0, %g0
-	 nop
-
-	.align		32
-VISenterhalf:
-	ldub		[%g6 + TI_FPDEPTH], %g1
-	brnz,a,pn	%g1, 1f
-	 cmp		%g1, 1
-	stb		%g0, [%g6 + TI_FPSAVED]
-	stx		%fsr, [%g6 + TI_XFSR]
-	clr		%o5
-	jmpl		%g7 + %g0, %g0
-	 wr		%g0, FPRS_FEF, %fprs
-
-1:	bne,pn		%icc, 2f
-	 srl		%g1, 1, %g1
-	ba,pt		%xcc, vis1
-	 sub		%g7, 8, %g7
-2:	addcc		%g6, %g1, %g3
-	sll		%g1, 3, %g1
-	andn		%o5, FPRS_DU, %g2
-	stb		%g2, [%g3 + TI_FPSAVED]
-
-	rd		%gsr, %g2
-	add		%g6, %g1, %g3
-	stx		%g2, [%g3 + TI_GSR]
-	add		%g6, %g1, %g2
-	stx		%fsr, [%g2 + TI_XFSR]
-	sll		%g1, 5, %g1
-3:	andcc		%o5, FPRS_DL, %g0
-	be,pn		%icc, 4f
-	 add		%g6, TI_FPREGS, %g2
-
-	add		%g6, TI_FPREGS+0x40, %g3
-	membar		#Sync
-	stda		%f0, [%g2 + %g1] ASI_BLK_P
-	stda		%f16, [%g3 + %g1] ASI_BLK_P
-	membar		#Sync
-	ba,pt		%xcc, 4f
-	 nop
-
-	.align		32
-4:	and		%o5, FPRS_DU, %o5
-	jmpl		%g7 + %g0, %g0
-	 wr		%o5, FPRS_FEF, %fprs
diff -ur a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
--- a/arch/tile/kernel/setup.c	2017-03-23 14:30:53.000000000 +0100
+++ b/arch/tile/kernel/setup.c	2017-03-14 02:05:34.000000000 +0100
@@ -1064,7 +1064,7 @@
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-	free_bootmem(__pa(begin), end - begin);
+	free_bootmem_late(__pa(begin), end - begin);
 }
 
 #else
diff -ur a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
--- a/arch/um/os-Linux/start_up.c	2017-03-23 14:17:22.000000000 +0100
+++ b/arch/um/os-Linux/start_up.c	2017-03-14 01:55:07.000000000 +0100
@@ -95,6 +95,8 @@
 {
 	int pid, n, status;
 
+	fflush(stdout);
+
 	pid = fork();
 	if (pid == 0)
 		ptrace_child();
diff -ur a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
--- a/arch/x86/boot/compressed/head_32.S	2017-03-23 14:37:03.000000000 +0100
+++ b/arch/x86/boot/compressed/head_32.S	2017-03-14 02:10:52.000000000 +0100
@@ -54,7 +54,7 @@
 	call	reloc
 reloc:
 	popl	%ecx
-	subl	reloc, %ecx
+	subl	$reloc, %ecx
 	movl	%ecx, BP_code32_start(%eax)
 
 	sub	$0x4, %esp
Only in b/arch/x86/crypto: crct10dif-pcl-asm_64.S
Only in b/arch/x86/crypto: crct10dif-pclmul_glue.c
diff -ur a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c	2017-03-23 14:36:37.000000000 +0100
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c	2017-03-14 02:10:24.000000000 +0100
@@ -291,6 +291,7 @@
 			.cra_name		= "ghash",
 			.cra_driver_name	= "ghash-clmulni",
 			.cra_priority		= 400,
+			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
 			.cra_flags		= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
 			.cra_blocksize		= GHASH_BLOCK_SIZE,
 			.cra_type		= &crypto_ahash_type,
diff -ur a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
--- a/arch/x86/crypto/Makefile	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/x86/crypto/Makefile	2016-06-23 08:51:53.000000000 +0200
@@ -29,6 +29,7 @@
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
@@ -87,3 +88,4 @@
 crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
 sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
 sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
+crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
diff -ur a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
--- a/arch/x86/ia32/ia32entry.S	2017-03-23 14:37:04.000000000 +0100
+++ b/arch/x86/ia32/ia32entry.S	2017-03-14 02:10:53.000000000 +0100
@@ -422,6 +422,7 @@
 	/*CFI_REL_OFFSET	cs,CS-RIP*/
 	CFI_REL_OFFSET	rip,RIP-RIP
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	ASM_CLAC			/* Do this early to minimize exposure */
 	SWAPGS
 	/*
 	 * No need to follow this irqs on/off section: the syscall
diff -ur a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
--- a/arch/x86/include/asm/boot.h	2017-03-23 14:37:41.000000000 +0100
+++ b/arch/x86/include/asm/boot.h	2017-03-14 02:11:52.000000000 +0100
@@ -26,7 +26,7 @@
 #define BOOT_HEAP_SIZE             0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#define BOOT_HEAP_SIZE	0x8000
+#define BOOT_HEAP_SIZE	0x10000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
diff -ur a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h	2017-03-23 14:37:41.000000000 +0100
+++ b/arch/x86/include/asm/kvm_host.h	2017-03-14 02:11:51.000000000 +0100
@@ -59,7 +59,7 @@
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
-			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
+			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -539,7 +539,7 @@
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
-	int vapics_in_nmi_mode;
+	atomic_t vapics_in_nmi_mode;
 	struct mutex apic_map_lock;
 	struct kvm_apic_map *apic_map;
 
diff -ur a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
--- a/arch/x86/include/asm/segment.h	2017-03-23 14:37:26.000000000 +0100
+++ b/arch/x86/include/asm/segment.h	2017-03-14 02:11:25.000000000 +0100
@@ -211,8 +211,19 @@
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
 #ifdef __KERNEL__
+
+/*
+ * early_idt_handler_array is an array of entry points referenced in the
+ * early IDT.  For simplicity, it's a real array with one entry point
+ * every nine bytes.  That leaves room for an optional 'push $0' if the
+ * vector has no error code (two bytes), a 'push $vector_number' (two
+ * bytes), and a jump to the common entry code (up to five bytes).
+ */
+#define EARLY_IDT_HANDLER_SIZE 9
+
 #ifndef __ASSEMBLY__
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 
 /*
  * Load a segment. Fall back on loading the zero
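
A quick sanity check of the stub size described above (the head_32.S / head_64.S hunks later in this patch pad each stub to exactly this size with a .fill of 0xcc bytes); this is just local arithmetic, not kernel code:

    /* Worst case per early IDT stub: 'push $0' (2 bytes, only for vectors
     * without an error code) + 'push $vector' (2 bytes) + 'jmp rel32' (5 bytes). */
    _Static_assert(2 + 2 + 5 == 9, "EARLY_IDT_HANDLER_SIZE covers the worst case");

    int main(void) { return 0; }
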
diff -ur a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
--- a/arch/x86/include/asm/uaccess_64.h	2017-03-23 14:37:25.000000000 +0100
+++ b/arch/x86/include/asm/uaccess_64.h	2017-03-14 02:11:23.000000000 +0100
@@ -77,11 +77,10 @@
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -121,11 +120,17 @@
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -165,6 +170,13 @@
 }
 
 static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
@@ -220,13 +232,13 @@
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
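
The refactoring above splits each helper into a *_nocheck body plus a thin checked wrapper, so might_fault() runs once on the normal, sleepable paths while the *_inatomic variants reach the copy body without it. A rough sketch of that wrapper shape, with hypothetical names standing in for the real uaccess routines:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for might_fault(): only meaningful where sleeping is allowed. */
    static void might_fault_stub(void) { puts("might_fault()"); }

    static int copy_nocheck(void *dst, const void *src, unsigned size)
    {
        memcpy(dst, src, size);                /* stands in for copy_user_generic() */
        return 0;
    }

    static int copy_checked(void *dst, const void *src, unsigned size)
    {
        might_fault_stub();                    /* debug check on the sleepable path only */
        return copy_nocheck(dst, src, size);
    }

    static int copy_inatomic(void *dst, const void *src, unsigned size)
    {
        return copy_nocheck(dst, src, size);   /* atomic context: skip the check */
    }

    int main(void)
    {
        char dst[4], src[4] = "abc";
        copy_checked(dst, src, sizeof(src));
        copy_inatomic(dst, src, sizeof(src));
        return 0;
    }
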
diff -ur a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
--- a/arch/x86/include/asm/xen/hypervisor.h	2017-03-23 14:37:42.000000000 +0100
+++ b/arch/x86/include/asm/xen/hypervisor.h	2017-03-14 02:11:54.000000000 +0100
@@ -71,4 +71,6 @@
 }
 #endif
 
+extern void xen_set_iopl_mask(unsigned mask);
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff -ur a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
--- a/arch/x86/include/uapi/asm/processor-flags.h	2017-03-23 14:37:43.000000000 +0100
+++ b/arch/x86/include/uapi/asm/processor-flags.h	2017-03-14 02:11:56.000000000 +0100
@@ -2,75 +2,129 @@
 #define _UAPI_ASM_X86_PROCESSOR_FLAGS_H
 /* Various flags defined: can be included from assembler. */
 
+#include <linux/const.h>
+
 /*
  * EFLAGS bits
  */
-#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
-#define X86_EFLAGS_FIXED 0x00000002 /* Bit 1 - always on */
-#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
+#define X86_EFLAGS_CF_BIT	0 /* Carry Flag */
+#define X86_EFLAGS_CF		_BITUL(X86_EFLAGS_CF_BIT)
+#define X86_EFLAGS_FIXED_BIT	1 /* Bit 1 - always on */
+#define X86_EFLAGS_FIXED	_BITUL(X86_EFLAGS_FIXED_BIT)
+#define X86_EFLAGS_PF_BIT	2 /* Parity Flag */
+#define X86_EFLAGS_PF		_BITUL(X86_EFLAGS_PF_BIT)
+#define X86_EFLAGS_AF_BIT	4 /* Auxiliary carry Flag */
+#define X86_EFLAGS_AF		_BITUL(X86_EFLAGS_AF_BIT)
+#define X86_EFLAGS_ZF_BIT	6 /* Zero Flag */
+#define X86_EFLAGS_ZF		_BITUL(X86_EFLAGS_ZF_BIT)
+#define X86_EFLAGS_SF_BIT	7 /* Sign Flag */
+#define X86_EFLAGS_SF		_BITUL(X86_EFLAGS_SF_BIT)
+#define X86_EFLAGS_TF_BIT	8 /* Trap Flag */
+#define X86_EFLAGS_TF		_BITUL(X86_EFLAGS_TF_BIT)
+#define X86_EFLAGS_IF_BIT	9 /* Interrupt Flag */
+#define X86_EFLAGS_IF		_BITUL(X86_EFLAGS_IF_BIT)
+#define X86_EFLAGS_DF_BIT	10 /* Direction Flag */
+#define X86_EFLAGS_DF		_BITUL(X86_EFLAGS_DF_BIT)
+#define X86_EFLAGS_OF_BIT	11 /* Overflow Flag */
+#define X86_EFLAGS_OF		_BITUL(X86_EFLAGS_OF_BIT)
+#define X86_EFLAGS_IOPL_BIT	12 /* I/O Privilege Level (2 bits) */
+#define X86_EFLAGS_IOPL		(_AC(3,UL) << X86_EFLAGS_IOPL_BIT)
+#define X86_EFLAGS_NT_BIT	14 /* Nested Task */
+#define X86_EFLAGS_NT		_BITUL(X86_EFLAGS_NT_BIT)
+#define X86_EFLAGS_RF_BIT	16 /* Resume Flag */
+#define X86_EFLAGS_RF		_BITUL(X86_EFLAGS_RF_BIT)
+#define X86_EFLAGS_VM_BIT	17 /* Virtual Mode */
+#define X86_EFLAGS_VM		_BITUL(X86_EFLAGS_VM_BIT)
+#define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
+#define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
+#define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
+#define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
+#define X86_EFLAGS_VIF_BIT	19 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIF		_BITUL(X86_EFLAGS_VIF_BIT)
+#define X86_EFLAGS_VIP_BIT	20 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_VIP		_BITUL(X86_EFLAGS_VIP_BIT)
+#define X86_EFLAGS_ID_BIT	21 /* CPUID detection */
+#define X86_EFLAGS_ID		_BITUL(X86_EFLAGS_ID_BIT)
 
 /*
  * Basic CPU control in CR0
  */
-#define X86_CR0_PE	0x00000001 /* Protection Enable */
-#define X86_CR0_MP	0x00000002 /* Monitor Coprocessor */
-#define X86_CR0_EM	0x00000004 /* Emulation */
-#define X86_CR0_TS	0x00000008 /* Task Switched */
-#define X86_CR0_ET	0x00000010 /* Extension Type */
-#define X86_CR0_NE	0x00000020 /* Numeric Error */
-#define X86_CR0_WP	0x00010000 /* Write Protect */
-#define X86_CR0_AM	0x00040000 /* Alignment Mask */
-#define X86_CR0_NW	0x20000000 /* Not Write-through */
-#define X86_CR0_CD	0x40000000 /* Cache Disable */
-#define X86_CR0_PG	0x80000000 /* Paging */
+#define X86_CR0_PE_BIT		0 /* Protection Enable */
+#define X86_CR0_PE		_BITUL(X86_CR0_PE_BIT)
+#define X86_CR0_MP_BIT		1 /* Monitor Coprocessor */
+#define X86_CR0_MP		_BITUL(X86_CR0_MP_BIT)
+#define X86_CR0_EM_BIT		2 /* Emulation */
+#define X86_CR0_EM		_BITUL(X86_CR0_EM_BIT)
+#define X86_CR0_TS_BIT		3 /* Task Switched */
+#define X86_CR0_TS		_BITUL(X86_CR0_TS_BIT)
+#define X86_CR0_ET_BIT		4 /* Extension Type */
+#define X86_CR0_ET		_BITUL(X86_CR0_ET_BIT)
+#define X86_CR0_NE_BIT		5 /* Numeric Error */
+#define X86_CR0_NE		_BITUL(X86_CR0_NE_BIT)
+#define X86_CR0_WP_BIT		16 /* Write Protect */
+#define X86_CR0_WP		_BITUL(X86_CR0_WP_BIT)
+#define X86_CR0_AM_BIT		18 /* Alignment Mask */
+#define X86_CR0_AM		_BITUL(X86_CR0_AM_BIT)
+#define X86_CR0_NW_BIT		29 /* Not Write-through */
+#define X86_CR0_NW		_BITUL(X86_CR0_NW_BIT)
+#define X86_CR0_CD_BIT		30 /* Cache Disable */
+#define X86_CR0_CD		_BITUL(X86_CR0_CD_BIT)
+#define X86_CR0_PG_BIT		31 /* Paging */
+#define X86_CR0_PG		_BITUL(X86_CR0_PG_BIT)
 
 /*
  * Paging options in CR3
  */
-#define X86_CR3_PWT	0x00000008 /* Page Write Through */
-#define X86_CR3_PCD	0x00000010 /* Page Cache Disable */
-#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
+#define X86_CR3_PWT_BIT		3 /* Page Write Through */
+#define X86_CR3_PWT		_BITUL(X86_CR3_PWT_BIT)
+#define X86_CR3_PCD_BIT		4 /* Page Cache Disable */
+#define X86_CR3_PCD		_BITUL(X86_CR3_PCD_BIT)
+#define X86_CR3_PCID_MASK	_AC(0x00000fff,UL) /* PCID Mask */
 
 /*
  * Intel CPU features in CR4
  */
-#define X86_CR4_VME	0x00000001 /* enable vm86 extensions */
-#define X86_CR4_PVI	0x00000002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD	0x00000004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE	0x00000008 /* enable debugging extensions */
-#define X86_CR4_PSE	0x00000010 /* enable page size extensions */
-#define X86_CR4_PAE	0x00000020 /* enable physical address extensions */
-#define X86_CR4_MCE	0x00000040 /* Machine check enable */
-#define X86_CR4_PGE	0x00000080 /* enable global pages */
-#define X86_CR4_PCE	0x00000100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR	0x00000200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
-#define X86_CR4_VMXE	0x00002000 /* enable VMX virtualization */
-#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
-#define X86_CR4_PCIDE	0x00020000 /* enable PCID support */
-#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
-#define X86_CR4_SMEP	0x00100000 /* enable SMEP support */
-#define X86_CR4_SMAP	0x00200000 /* enable SMAP support */
+#define X86_CR4_VME_BIT		0 /* enable vm86 extensions */
+#define X86_CR4_VME		_BITUL(X86_CR4_VME_BIT)
+#define X86_CR4_PVI_BIT		1 /* virtual interrupts flag enable */
+#define X86_CR4_PVI		_BITUL(X86_CR4_PVI_BIT)
+#define X86_CR4_TSD_BIT		2 /* disable time stamp at ipl 3 */
+#define X86_CR4_TSD		_BITUL(X86_CR4_TSD_BIT)
+#define X86_CR4_DE_BIT		3 /* enable debugging extensions */
+#define X86_CR4_DE		_BITUL(X86_CR4_DE_BIT)
+#define X86_CR4_PSE_BIT		4 /* enable page size extensions */
+#define X86_CR4_PSE		_BITUL(X86_CR4_PSE_BIT)
+#define X86_CR4_PAE_BIT		5 /* enable physical address extensions */
+#define X86_CR4_PAE		_BITUL(X86_CR4_PAE_BIT)
+#define X86_CR4_MCE_BIT		6 /* Machine check enable */
+#define X86_CR4_MCE		_BITUL(X86_CR4_MCE_BIT)
+#define X86_CR4_PGE_BIT		7 /* enable global pages */
+#define X86_CR4_PGE		_BITUL(X86_CR4_PGE_BIT)
+#define X86_CR4_PCE_BIT		8 /* enable performance counters at ipl 3 */
+#define X86_CR4_PCE		_BITUL(X86_CR4_PCE_BIT)
+#define X86_CR4_OSFXSR_BIT	9 /* enable fast FPU save and restore */
+#define X86_CR4_OSFXSR		_BITUL(X86_CR4_OSFXSR_BIT)
+#define X86_CR4_OSXMMEXCPT_BIT	10 /* enable unmasked SSE exceptions */
+#define X86_CR4_OSXMMEXCPT	_BITUL(X86_CR4_OSXMMEXCPT_BIT)
+#define X86_CR4_VMXE_BIT	13 /* enable VMX virtualization */
+#define X86_CR4_VMXE		_BITUL(X86_CR4_VMXE_BIT)
+#define X86_CR4_SMXE_BIT	14 /* enable safer mode (TXT) */
+#define X86_CR4_SMXE		_BITUL(X86_CR4_SMXE_BIT)
+#define X86_CR4_FSGSBASE_BIT	16 /* enable RDWRFSGS support */
+#define X86_CR4_FSGSBASE	_BITUL(X86_CR4_FSGSBASE_BIT)
+#define X86_CR4_PCIDE_BIT	17 /* enable PCID support */
+#define X86_CR4_PCIDE		_BITUL(X86_CR4_PCIDE_BIT)
+#define X86_CR4_OSXSAVE_BIT	18 /* enable xsave and xrestore */
+#define X86_CR4_OSXSAVE		_BITUL(X86_CR4_OSXSAVE_BIT)
+#define X86_CR4_SMEP_BIT	20 /* enable SMEP support */
+#define X86_CR4_SMEP		_BITUL(X86_CR4_SMEP_BIT)
+#define X86_CR4_SMAP_BIT	21 /* enable SMAP support */
+#define X86_CR4_SMAP		_BITUL(X86_CR4_SMAP_BIT)
 
 /*
  * x86-64 Task Priority Register, CR8
  */
-#define X86_CR8_TPR	0x0000000F /* task priority register */
+#define X86_CR8_TPR		_AC(0x0000000f,UL) /* task priority register */
 
 /*
  * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
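
The rewrite above is a change of spelling only: _BITUL(n) from <linux/const.h> expands to an unsigned long 1 shifted left by n (via _AC(), so the same header also works in assembler), and each definition that replaces an old literal evaluates to the same value. A standalone spot check with a local BITUL stand-in, not the kernel macro itself:

    /* Local stand-in for _BITUL(). */
    #define BITUL(x) (1UL << (x))

    _Static_assert(BITUL(0)    == 0x00000001UL, "X86_EFLAGS_CF unchanged");
    _Static_assert(BITUL(18)   == 0x00040000UL, "X86_EFLAGS_AC unchanged");
    _Static_assert((3UL << 12) == 0x00003000UL, "X86_EFLAGS_IOPL field unchanged");
    _Static_assert(BITUL(31)   == 0x80000000UL, "X86_CR0_PG unchanged");

    int main(void) { return 0; }
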
diff -ur a/arch/x86/Kconfig b/arch/x86/Kconfig
--- a/arch/x86/Kconfig	2016-10-20 04:32:08.000000000 +0200
+++ b/arch/x86/Kconfig	2016-07-29 05:48:09.000000000 +0200
@@ -154,7 +154,7 @@
 
 config NEED_DMA_MAP_STATE
 	def_bool y
-	depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
+	depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
 
 config NEED_SG_DMA_LENGTH
 	def_bool y
diff -ur a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
--- a/arch/x86/kernel/acpi/sleep.c	2017-03-23 14:37:17.000000000 +0100
+++ b/arch/x86/kernel/acpi/sleep.c	2017-03-14 02:11:11.000000000 +0100
@@ -16,6 +16,7 @@
 #include <asm/cacheflush.h>
 #include <asm/realmode.h>
 
+#include <linux/ftrace.h>
 #include "../../realmode/rm/wakeup.h"
 #include "sleep.h"
 
@@ -96,7 +97,13 @@
        saved_magic = 0x123456789abcdef0L;
 #endif /* CONFIG_64BIT */
 
+	/*
+	 * Pause/unpause graph tracing around do_suspend_lowlevel as it has
+	 * inconsistent call/return info after it jumps to the wakeup vector.
+	 */
+	pause_graph_tracing();
 	do_suspend_lowlevel();
+	unpause_graph_tracing();
 	return 0;
 }
 
diff -ur a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
--- a/arch/x86/kernel/apic/apic.c	2017-03-23 14:37:19.000000000 +0100
+++ b/arch/x86/kernel/apic/apic.c	2017-03-14 02:11:12.000000000 +0100
@@ -350,6 +350,13 @@
 	apic_write(APIC_LVTT, lvtt_value);
 
 	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+		/*
+		 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+		 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+		 * According to Intel, MFENCE can do the serialization here.
+		 */
+		asm volatile("mfence" : : : "memory");
+
 		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
 		return;
 	}
diff -ur a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c	2017-03-23 14:37:20.000000000 +0100
+++ b/arch/x86/kernel/cpu/common.c	2017-03-14 02:11:14.000000000 +0100
@@ -280,10 +280,9 @@
 
 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 {
-	unsigned long eflags;
+	unsigned long eflags = native_save_fl();
 
 	/* This should have been cleared long ago */
-	raw_local_save_flags(eflags);
 	BUG_ON(eflags & X86_EFLAGS_AC);
 
 	if (cpu_has(c, X86_FEATURE_SMAP)) {
diff -ur a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c	2017-03-23 14:37:21.000000000 +0100
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c	2017-03-14 02:11:17.000000000 +0100
@@ -181,11 +181,6 @@
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package",
 				state->count);
-		else
-			printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
-				this_cpu,
-				level == CORE_LEVEL ? "Core" : "Package",
-				state->count);
 		return 1;
 	}
 	if (old_event) {
@@ -193,10 +188,6 @@
 			printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package");
-		else
-			printk(KERN_INFO "CPU%d: %s power limit normal\n",
-				this_cpu,
-				level == CORE_LEVEL ? "Core" : "Package");
 		return 1;
 	}
 
diff -ur a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
--- a/arch/x86/kernel/cpu/perf_event_amd.c	2017-03-23 14:37:20.000000000 +0100
+++ b/arch/x86/kernel/cpu/perf_event_amd.c	2017-03-14 02:11:15.000000000 +0100
@@ -648,48 +648,48 @@
 	.cpu_dead		= amd_pmu_cpu_dead,
 };
 
-static int setup_event_constraints(void)
+static int __init amd_core_pmu_init(void)
 {
-	if (boot_cpu_data.x86 == 0x15)
+	if (!cpu_has_perfctr_core)
+		return 0;
+
+	switch (boot_cpu_data.x86) {
+	case 0x15:
+		pr_cont("Fam15h ");
 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
-	return 0;
-}
+		break;
 
-static int setup_perfctr_core(void)
-{
-	if (!cpu_has_perfctr_core) {
-		WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
-		     KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
+	default:
+		pr_err("core perfctr but no constraints; unknown hardware!\n");
 		return -ENODEV;
 	}
 
-	WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
-	     KERN_ERR "hw perf events core counters need constraints handler!");
-
 	/*
 	 * If core performance counter extensions exists, we must use
 	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
-	 * x86_pmu_addr_offset().
+	 * amd_pmu_addr_offset().
 	 */
 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
 	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
 
-	printk(KERN_INFO "perf: AMD core performance counters detected\n");
-
+	pr_cont("core perfctr, ");
 	return 0;
 }
 
 __init int amd_pmu_init(void)
 {
+	int ret;
+
 	/* Performance-monitoring supported from K7 and later: */
 	if (boot_cpu_data.x86 < 6)
 		return -ENODEV;
 
 	x86_pmu = amd_pmu;
 
-	setup_event_constraints();
-	setup_perfctr_core();
+	ret = amd_core_pmu_init();
+	if (ret)
+		return ret;
 
 	/* Events are common for all AMDs */
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
diff -ur a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
--- a/arch/x86/kernel/cpu/perf_event.h	2017-03-23 14:37:21.000000000 +0100
+++ b/arch/x86/kernel/cpu/perf_event.h	2017-03-14 02:11:17.000000000 +0100
@@ -665,6 +665,8 @@
 
 void intel_pmu_lbr_init_snb(void);
 
+void intel_pmu_pebs_data_source_nhm(void);
+
 int intel_pmu_setup_lbr_filter(struct perf_event *event);
 
 int p4_pmu_init(void);
diff -ur a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
--- a/arch/x86/kernel/cpu/perf_event_intel.c	2017-03-23 14:37:23.000000000 +0100
+++ b/arch/x86/kernel/cpu/perf_event_intel.c	2017-03-14 02:11:18.000000000 +0100
@@ -2088,6 +2088,7 @@
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
+		intel_pmu_pebs_data_source_nhm();
 		x86_add_quirk(intel_nehalem_quirk);
 
 		pr_cont("Nehalem events, ");
@@ -2133,6 +2134,7 @@
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
+		intel_pmu_pebs_data_source_nhm();
 		pr_cont("Westmere events, ");
 		break;
 
diff -ur a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c	2017-03-23 14:37:19.000000000 +0100
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c	2017-03-14 02:11:13.000000000 +0100
@@ -49,7 +49,8 @@
 #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
 #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
 
-static const u64 pebs_data_source[] = {
+/* Version for Sandy Bridge and later */
+static u64 pebs_data_source[] = {
 	P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
 	OP_LH | P(LVL, L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
 	OP_LH | P(LVL, LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
@@ -68,6 +69,14 @@
 	OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
 };
 
+/* Patch up minor differences in the bits */
+void __init intel_pmu_pebs_data_source_nhm(void)
+{
+	pebs_data_source[0x05] = OP_LH | P(LVL, L3)  | P(SNOOP, HIT);
+	pebs_data_source[0x06] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
+	pebs_data_source[0x07] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
+}
+
 static u64 precise_store_data(u64 status)
 {
 	union intel_x86_pebs_dse dse;
diff -ur a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
--- a/arch/x86/kernel/head_32.S	2017-03-23 14:37:06.000000000 +0100
+++ b/arch/x86/kernel/head_32.S	2017-03-14 02:10:55.000000000 +0100
@@ -497,21 +497,22 @@
 __INIT
 setup_once:
 	/*
-	 * Set up a idt with 256 entries pointing to ignore_int,
-	 * interrupt gates. It doesn't actually load idt - that needs
-	 * to be done on each CPU. Interrupts are enabled elsewhere,
-	 * when we can be relatively sure everything is ok.
+	 * Set up a idt with 256 interrupt gates that push zero if there
+	 * is no error code and then jump to early_idt_handler_common.
+	 * It doesn't actually load the idt - that needs to be done on
+	 * each CPU. Interrupts are enabled elsewhere, when we can be
+	 * relatively sure everything is ok.
 	 */
 
 	movl $idt_table,%edi
-	movl $early_idt_handlers,%eax
+	movl $early_idt_handler_array,%eax
 	movl $NUM_EXCEPTION_VECTORS,%ecx
 1:
 	movl %eax,(%edi)
 	movl %eax,4(%edi)
 	/* interrupt gate, dpl=0, present */
 	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
-	addl $9,%eax
+	addl $EARLY_IDT_HANDLER_SIZE,%eax
 	addl $8,%edi
 	loop 1b
 
@@ -543,26 +544,28 @@
 	andl $0,setup_once_ref	/* Once is enough, thanks */
 	ret
 
-ENTRY(early_idt_handlers)
+ENTRY(early_idt_handler_array)
 	# 36(%esp) %eflags
 	# 32(%esp) %cs
 	# 28(%esp) %eip
 	# 24(%rsp) error code
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
-	ASM_NOP2
-	.else
+	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
 	pushl $0		# Dummy error code, to make stack frame uniform
 	.endif
 	pushl $i		# 20(%esp) Vector number
-	jmp early_idt_handler
+	jmp early_idt_handler_common
 	i = i + 1
+	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
 	.endr
-ENDPROC(early_idt_handlers)
+ENDPROC(early_idt_handler_array)
 	
-	/* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+	/*
+	 * The stack is the hardware frame, an error code or zero, and the
+	 * vector number.
+	 */
 	cld
 
 	cmpl $2,(%esp)		# X86_TRAP_NMI
@@ -622,7 +625,7 @@
 is_nmi:
 	addl $8,%esp		/* drop vector number and error code */
 	iret
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
 /* This is the default interrupt "handler" :-) */
 	ALIGN
diff -ur a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
--- a/arch/x86/kernel/head64.c	2017-03-23 14:37:15.000000000 +0100
+++ b/arch/x86/kernel/head64.c	2017-03-14 02:11:06.000000000 +0100
@@ -162,7 +162,7 @@
 	clear_bss();
 
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
-		set_intr_gate(i, &early_idt_handlers[i]);
+		set_intr_gate(i, &early_idt_handler_array[i]);
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	copy_bootdata(__va(real_mode_data));
diff -ur a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
--- a/arch/x86/kernel/head_64.S	2017-03-23 14:37:05.000000000 +0100
+++ b/arch/x86/kernel/head_64.S	2017-03-14 02:10:54.000000000 +0100
@@ -64,6 +64,9 @@
 	 * tables and then reload them.
 	 */
 
+	/* Sanitize CPU configuration */
+	call verify_cpu
+
 	/*
 	 * Compute the delta between the address I am compiled to run at and the
 	 * address I am actually running at.
@@ -173,6 +176,9 @@
 	 * after the boot processor executes this code.
 	 */
 
+	/* Sanitize CPU configuration */
+	call verify_cpu
+
 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
 1:
 
@@ -287,6 +293,8 @@
 	pushq	%rax		# target address in negative space
 	lretq
 
+#include "verify_cpu.S"
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
@@ -320,26 +328,28 @@
 	jmp bad_address
 
 	__INIT
-	.globl early_idt_handlers
-early_idt_handlers:
+ENTRY(early_idt_handler_array)
 	# 104(%rsp) %rflags
 	#  96(%rsp) %cs
 	#  88(%rsp) %rip
 	#  80(%rsp) error code
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
-	ASM_NOP2
-	.else
+	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
 	pushq $0		# Dummy error code, to make stack frame uniform
 	.endif
 	pushq $i		# 72(%rsp) Vector number
-	jmp early_idt_handler
+	jmp early_idt_handler_common
 	i = i + 1
+	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
 	.endr
+ENDPROC(early_idt_handler_array)
 
-/* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+	/*
+	 * The stack is the hardware frame, an error code or zero, and the
+	 * vector number.
+	 */
 	cld
 
 	cmpl $2,(%rsp)		# X86_TRAP_NMI
@@ -411,7 +421,7 @@
 is_nmi:
 	addq $16,%rsp		# drop vector number and error code
 	INTERRUPT_RETURN
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
 	__INITDATA
 
diff -ur a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
--- a/arch/x86/kernel/ioport.c	2017-03-23 14:37:16.000000000 +0100
+++ b/arch/x86/kernel/ioport.c	2017-03-14 02:11:08.000000000 +0100
@@ -96,9 +96,14 @@
 SYSCALL_DEFINE1(iopl, unsigned int, level)
 {
 	struct pt_regs *regs = current_pt_regs();
-	unsigned int old = (regs->flags >> 12) & 3;
 	struct thread_struct *t = &current->thread;
 
+	/*
+	 * Careful: the IOPL bits in regs->flags are undefined under Xen PV
+	 * and changing them has no effect.
+	 */
+	unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
+
 	if (level > 3)
 		return -EINVAL;
 	/* Trying to gain more privileges? */
@@ -106,8 +111,9 @@
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
 	}
-	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
-	t->iopl = level << 12;
+	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
+		(level << X86_EFLAGS_IOPL_BIT);
+	t->iopl = level << X86_EFLAGS_IOPL_BIT;
 	set_iopl_mask(t->iopl);
 
 	return 0;
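
The substitution above is mechanical: IOPL is the two-bit field at EFLAGS bits 12-13, so shifting the level left by X86_EFLAGS_IOPL_BIT (12) stores it and shifting right by the same amount reads it back, which is exactly what the removed "(regs->flags >> 12) & 3" used to compute. A tiny check of that round trip:

    #include <assert.h>

    #define EFLAGS_IOPL_BIT 12                    /* IOPL: bits 12-13 of EFLAGS */

    int main(void)
    {
        for (unsigned level = 0; level <= 3; level++) {
            unsigned stored = level << EFLAGS_IOPL_BIT;      /* as kept in t->iopl */
            assert((stored >> EFLAGS_IOPL_BIT) == level);    /* new readback       */
            assert(((stored >> 12) & 3) == level);           /* old readback       */
        }
        return 0;
    }
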
diff -ur a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
--- a/arch/x86/kernel/microcode_intel_early.c	2017-03-23 14:37:15.000000000 +0100
+++ b/arch/x86/kernel/microcode_intel_early.c	2017-03-14 02:11:05.000000000 +0100
@@ -321,7 +321,7 @@
 	unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
 	int i;
 
-	while (leftover) {
+	while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
 		mc_header = (struct microcode_header_intel *)ucode_ptr;
 
 		mc_size = get_totalsize(mc_header);
diff -ur a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
--- a/arch/x86/kernel/pci-dma.c	2017-03-23 14:37:14.000000000 +0100
+++ b/arch/x86/kernel/pci-dma.c	2017-03-14 02:11:04.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/dmar.h>
@@ -30,7 +33,7 @@
 int force_iommu __read_mostly = 0;
 #endif
 
-#ifdef CONFIG_SYNO_IOMMU_PASSTHROUGH
+#ifdef MY_ABC_HERE
 int iommu_merge __read_mostly = 1;
 #else  
 int iommu_merge __read_mostly = 0;
@@ -40,7 +43,7 @@
  
 int iommu_detected __read_mostly = 0;
 
-#ifdef CONFIG_SYNO_IOMMU_PASSTHROUGH
+#ifdef MY_ABC_HERE
 int iommu_pass_through __read_mostly = 1;
 #else  
 int iommu_pass_through __read_mostly;
diff -ur a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
--- a/arch/x86/kernel/process_64.c	2017-03-23 14:37:07.000000000 +0100
+++ b/arch/x86/kernel/process_64.c	2017-03-14 02:10:56.000000000 +0100
@@ -49,6 +49,7 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
+#include <asm/xen/hypervisor.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -412,6 +413,17 @@
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
+#ifdef CONFIG_XEN
+	/*
+	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
+	 * current_pt_regs()->flags may not match the current task's
+	 * intended IOPL.  We need to switch it manually.
+	 */
+	if (unlikely(xen_pv_domain() &&
+		     prev->iopl != next->iopl))
+		xen_set_iopl_mask(next->iopl);
+#endif
+
 	return prev_p;
 }
 
diff -ur a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
--- a/arch/x86/kernel/reboot.c	2017-03-23 14:37:15.000000000 +0100
+++ b/arch/x86/kernel/reboot.c	2017-03-14 02:11:05.000000000 +0100
@@ -365,6 +365,14 @@
 			DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
 		},
 	},
+	{	/* Handle problems with rebooting on the iMac10,1. */
+		.callback = set_pci_reboot,
+		.ident = "Apple iMac10,1",
+		.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
+		},
+	},
 
 	{	/* Handle reboot issue on Acer Aspire one */
 		.callback = set_kbd_reboot,
diff -ur a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
--- a/arch/x86/kernel/setup.c	2017-03-23 14:37:13.000000000 +0100
+++ b/arch/x86/kernel/setup.c	2017-03-14 02:11:03.000000000 +0100
@@ -1247,6 +1247,10 @@
 	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
 			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
 			KERNEL_PGD_PTRS);
+
+	clone_pgd_range(initial_page_table,
+			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
 #endif
 
 	tboot_probe();
diff -ur a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
--- a/arch/x86/kernel/signal.c	2017-03-23 14:37:16.000000000 +0100
+++ b/arch/x86/kernel/signal.c	2017-03-14 02:11:08.000000000 +0100
@@ -686,12 +686,15 @@
 	signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
-#ifdef CONFIG_X86_32
-#define NR_restart_syscall	__NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall	\
-	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+	return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}
 
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -720,7 +723,7 @@
 			break;
 
 		case -ERESTART_RESTARTBLOCK:
-			regs->ax = NR_restart_syscall;
+			regs->ax = get_nr_restart_syscall(regs);
 			regs->ip -= 2;
 			break;
 		}
diff -ur a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
--- a/arch/x86/kernel/tsc.c	2017-03-23 14:37:12.000000000 +0100
+++ b/arch/x86/kernel/tsc.c	2017-03-14 02:11:02.000000000 +0100
@@ -20,6 +20,7 @@
 #include <asm/hypervisor.h>
 #include <asm/nmi.h>
 #include <asm/x86_init.h>
+#include <asm/geode.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -804,15 +805,17 @@
 
 static void __init check_system_tsc_reliable(void)
 {
-#ifdef CONFIG_MGEODE_LX
-	/* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+	if (is_geode_lx()) {
+		/* RTSC counts during suspend */
 #define RTSC_SUSP 0x100
-	unsigned long res_low, res_high;
+		unsigned long res_low, res_high;
 
-	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
-	/* Geode_LX - the OLPC CPU has a very reliable TSC */
-	if (res_low & RTSC_SUSP)
-		tsc_clocksource_reliable = 1;
+		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+		/* Geode_LX - the OLPC CPU has a very reliable TSC */
+		if (res_low & RTSC_SUSP)
+			tsc_clocksource_reliable = 1;
+	}
 #endif
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
 		tsc_clocksource_reliable = 1;
diff -ur a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
--- a/arch/x86/kernel/verify_cpu.S	2017-03-23 14:37:05.000000000 +0100
+++ b/arch/x86/kernel/verify_cpu.S	2017-03-14 02:10:54.000000000 +0100
@@ -34,10 +34,11 @@
 #include <asm/msr-index.h>
 
 verify_cpu:
-	pushfl				# Save caller passed flags
-	pushl	$0			# Kill any dangerous flags
-	popfl
+	pushf				# Save caller passed flags
+	push	$0			# Kill any dangerous flags
+	popf
 
+#ifndef __x86_64__
 	pushfl				# standard way to check for cpuid
 	popl	%eax
 	movl	%eax,%ebx
@@ -48,6 +49,7 @@
 	popl	%eax
 	cmpl	%eax,%ebx
 	jz	verify_cpu_no_longmode	# cpu has no cpuid
+#endif
 
 	movl	$0x0,%eax		# See if cpuid 1 is implemented
 	cpuid
@@ -130,10 +132,10 @@
 	jmp	verify_cpu_sse_test	# try again
 
 verify_cpu_no_longmode:
-	popfl				# Restore caller passed flags
+	popf				# Restore caller passed flags
 	movl $1,%eax
 	ret
 verify_cpu_sse_ok:
-	popfl				# Restore caller passed flags
+	popf				# Restore caller passed flags
 	xorl %eax, %eax
 	ret
diff -ur a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
--- a/arch/x86/kvm/i8254.c	2017-03-23 14:36:53.000000000 +0100
+++ b/arch/x86/kvm/i8254.c	2017-03-14 02:10:42.000000000 +0100
@@ -244,7 +244,7 @@
 		 * PIC is being reset.  Handle it gracefully here
 		 */
 		atomic_inc(&ps->pending);
-	else if (value > 0)
+	else if (value > 0 && ps->reinject)
 		/* in this case, we had multiple outstanding pit interrupts
 		 * that we needed to inject.  Reinject
 		 */
@@ -287,7 +287,9 @@
 	 * last one has been acked.
 	 */
 	spin_lock(&ps->inject_lock);
-	if (ps->irq_ack) {
+	if (!ps->reinject)
+		inject = 1;
+	else if (ps->irq_ack) {
 		ps->irq_ack = 0;
 		inject = 1;
 	}
@@ -305,7 +307,7 @@
 		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
 		 * VCPU0, and only if its LVT0 is in EXTINT mode.
 		 */
-		if (kvm->arch.vapics_in_nmi_mode > 0)
+		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
 			kvm_for_each_vcpu(i, vcpu, kvm)
 				kvm_apic_nmi_wd_deliver(vcpu);
 	}
@@ -316,10 +318,10 @@
 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
 	struct kvm_pit *pt = ps->kvm->arch.vpit;
 
-	if (ps->reinject || !atomic_read(&ps->pending)) {
+	if (ps->reinject)
 		atomic_inc(&ps->pending);
-		queue_kthread_work(&pt->worker, &pt->expired);
-	}
+
+	queue_kthread_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
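
Note on the i8254.c hunks above: they separate two tick policies for the emulated PIT. With reinject enabled, an expiry is only delivered once the previous interrupt has been acked, and missed expiries are accumulated in ps->pending; with reinject disabled, every expiry is delivered and nothing is accumulated. A compilable sketch of just that gate (illustrative helper, not the KVM API):

    /* Decide whether a PIT timer expiry may inject an interrupt right now. */
    int pit_may_inject(int reinject, int *irq_ack)
    {
        if (!reinject)
            return 1;       /* discard mode: inject on every expiry */
        if (*irq_ack) {
            *irq_ack = 0;   /* reinject mode: gate on the previous ack */
            return 1;
        }
        return 0;           /* previous tick not acked yet; stays pending */
    }
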
diff -ur a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
--- a/arch/x86/kvm/lapic.c	2017-03-23 14:36:54.000000000 +0100
+++ b/arch/x86/kvm/lapic.c	2017-03-14 02:10:43.000000000 +0100
@@ -1123,10 +1123,10 @@
 		if (!nmi_wd_enabled) {
 			apic_debug("Receive NMI setting on APIC_LVT0 "
 				   "for cpu %d\n", apic->vcpu->vcpu_id);
-			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
+			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
 		}
 	} else if (nmi_wd_enabled)
-		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
+		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
 }
 
 static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
diff -ur a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
--- a/arch/x86/kvm/lapic.h	2017-03-23 14:36:53.000000000 +0100
+++ b/arch/x86/kvm/lapic.h	2017-03-14 02:10:43.000000000 +0100
@@ -165,7 +165,7 @@
 
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.apic->pending_events;
+	return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
 }
 
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
diff -ur a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c	2017-03-23 14:36:58.000000000 +0100
+++ b/arch/x86/kvm/mmu.c	2017-03-14 02:10:46.000000000 +0100
@@ -3968,7 +3968,7 @@
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
+	mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
diff -ur a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c	2017-03-23 14:36:58.000000000 +0100
+++ b/arch/x86/kvm/svm.c	2017-03-14 02:10:47.000000000 +0100
@@ -495,8 +495,10 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (svm->vmcb->control.next_rip != 0)
+	if (svm->vmcb->control.next_rip != 0) {
+		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
 		svm->next_rip = svm->vmcb->control.next_rip;
+	}
 
 	if (!svm->next_rip) {
 		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
@@ -4228,7 +4230,9 @@
 		break;
 	}
 
-	vmcb->control.next_rip  = info->next_rip;
+	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
+	if (static_cpu_has(X86_FEATURE_NRIPS))
+		vmcb->control.next_rip  = info->next_rip;
 	vmcb->control.exit_code = icpt_info.exit_code;
 	vmexit = nested_svm_exit_handled(svm);
 
diff -ur a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c	2017-03-23 14:37:07.000000000 +0100
+++ b/arch/x86/kvm/vmx.c	2017-03-14 02:10:52.000000000 +0100
@@ -1486,6 +1486,13 @@
 			return;
 		}
 		break;
+	case MSR_IA32_PEBS_ENABLE:
+		/* PEBS needs a quiescent period after being disabled (to write
+		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
+		 * provide that period, so a CPU could write host's record into
+		 * guest's memory.
+		 */
+		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
 	for (i = 0; i < m->nr; ++i)
diff -ur a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c	2017-03-23 14:37:02.000000000 +0100
+++ b/arch/x86/kvm/x86.c	2017-03-14 02:10:48.000000000 +0100
@@ -586,7 +586,7 @@
 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
 		return 1;
 
-	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
+	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;
 
 	if (is_long_mode(vcpu)) {
@@ -1690,6 +1690,8 @@
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+	accumulate_steal_time(vcpu);
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
@@ -1821,12 +1823,6 @@
 		if (!(data & KVM_MSR_ENABLED))
 			break;
 
-		vcpu->arch.st.last_steal = current->sched_info.run_delay;
-
-		preempt_disable();
-		accumulate_steal_time(vcpu);
-		preempt_enable();
-
 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
 		break;
@@ -2444,7 +2440,6 @@
 		vcpu->cpu = cpu;
 	}
 
-	accumulate_steal_time(vcpu);
 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 }
 
diff -ur a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
--- a/arch/x86/mm/init_32.c	2017-03-23 14:37:53.000000000 +0100
+++ b/arch/x86/mm/init_32.c	2017-03-14 02:12:11.000000000 +0100
@@ -137,6 +137,7 @@
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
+	pmd_idx = pmd_index(vaddr);
 
 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
diff -ur a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
--- a/arch/x86/mm/init_64.c	2017-03-23 14:37:53.000000000 +0100
+++ b/arch/x86/mm/init_64.c	2017-03-14 02:12:10.000000000 +0100
@@ -1163,7 +1163,7 @@
 	 * has been zapped already via cleanup_highmem().
 	 */
 	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
-	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
 
 	rodata_test();
 
diff -ur a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
--- a/arch/x86/net/bpf_jit_comp.c	2017-03-23 14:37:48.000000000 +0100
+++ b/arch/x86/net/bpf_jit_comp.c	2017-03-14 02:12:03.000000000 +0100
@@ -175,7 +175,12 @@
 	}
 	cleanup_addr = proglen; /* epilogue address */
 
-	for (pass = 0; pass < 10; pass++) {
+	/* JITed image shrinks with every pass and the loop iterates
+	 * until the image stops shrinking. Very large bpf programs
+	 * may converge on the last pass. In such a case, do one more
+	 * pass to emit the final image.
+	 */
+	for (pass = 0; pass < 10 || image; pass++) {
 		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
 		/* no prologue/epilogue for trivial filters (RET something) */
 		proglen = 0;
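
Note on the bpf_jit_comp.c hunk above: the pass loop depends on the generated image only ever shrinking, so it iterates until the length stops changing; the added "|| image" term ensures that a program which converges only on the very last pass still gets one more pass to emit into the freshly allocated buffer. A self-contained sketch of that converge-then-emit shape (gen_pass() is a placeholder, not the real code generator):

    #include <stdlib.h>

    /* Placeholder: would emit instructions and return the resulting length. */
    static int gen_pass(int prev_len, unsigned char *image)
    {
        (void)image;
        return prev_len > 64 ? prev_len - 8 : 64;   /* lengths only shrink */
    }

    static unsigned char *jit_compile(int initial_len)
    {
        unsigned char *image = NULL;
        int oldlen = -1, len = initial_len;

        for (int pass = 0; pass < 10 || image; pass++) {
            len = gen_pass(len, image);
            if (image)
                break;               /* final pass emitted into the buffer */
            if (len == oldlen)       /* converged, possibly only on pass 9 */
                image = malloc(len);
            oldlen = len;
        }
        return image;
    }

    int main(void)
    {
        free(jit_compile(96));
        return 0;
    }
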
diff -ur a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
--- a/arch/x86/pci/acpi.c	2017-03-23 14:37:49.000000000 +0100
+++ b/arch/x86/pci/acpi.c	2017-03-14 02:12:04.000000000 +0100
@@ -84,6 +84,17 @@
 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
 		},
 	},
+	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
+	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
+	{
+		.callback = set_use_crs,
+		.ident = "Foxconn K8M890-8237A",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
+			DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+		},
+	},
 
 	/* Now for the blacklist.. */
 
diff -ur a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
--- a/arch/x86/platform/efi/efi_32.c	2017-03-23 14:37:51.000000000 +0100
+++ b/arch/x86/platform/efi/efi_32.c	2017-03-14 02:12:07.000000000 +0100
@@ -33,19 +33,15 @@
 
 /*
  * To make EFI call EFI runtime service in physical addressing mode we need
- * prelog/epilog before/after the invocation to disable interrupt, to
- * claim EFI runtime service handler exclusively and to duplicate a memory in
- * low memory space say 0 - 3G.
+ * prolog/epilog before/after the invocation to claim the EFI runtime service
+ * handler exclusively and to duplicate a memory mapping in low memory space,
+ * say 0 - 3G.
  */
 
-static unsigned long efi_rt_eflags;
-
 void efi_call_phys_prelog(void)
 {
 	struct desc_ptr gdt_descr;
 
-	local_irq_save(efi_rt_eflags);
-
 	load_cr3(initial_page_table);
 	__flush_tlb_all();
 
@@ -64,6 +60,4 @@
 
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
-
-	local_irq_restore(efi_rt_eflags);
 }
diff -ur a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
--- a/arch/x86/platform/efi/efi_64.c	2017-03-23 14:37:51.000000000 +0100
+++ b/arch/x86/platform/efi/efi_64.c	2017-03-14 02:12:07.000000000 +0100
@@ -40,7 +40,6 @@
 #include <asm/fixmap.h>
 
 static pgd_t *save_pgd __initdata;
-static unsigned long efi_flags __initdata;
 
 static void __init early_code_mapping_set_exec(int executable)
 {
@@ -66,7 +65,6 @@
 	int n_pgds;
 
 	early_code_mapping_set_exec(1);
-	local_irq_save(efi_flags);
 
 	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
 	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
@@ -90,7 +88,6 @@
 		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
 	kfree(save_pgd);
 	__flush_tlb_all();
-	local_irq_restore(efi_flags);
 	early_code_mapping_set_exec(0);
 }
 
diff -ur a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
--- a/arch/x86/platform/efi/efi.c	2017-03-23 14:37:52.000000000 +0100
+++ b/arch/x86/platform/efi/efi.c	2017-03-14 02:12:09.000000000 +0100
@@ -232,12 +232,18 @@
 	efi_memory_desc_t *virtual_map)
 {
 	efi_status_t status;
+	unsigned long flags;
 
 	efi_call_phys_prelog();
+
+	local_irq_save(flags);
 	status = efi_call_phys4(efi_phys.set_virtual_address_map,
 				memory_map_size, descriptor_size,
 				descriptor_version, virtual_map);
+	local_irq_restore(flags);
+
 	efi_call_phys_epilog();
+
 	return status;
 }
 
diff -ur a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
--- a/arch/x86/xen/enlighten.c	2017-03-23 14:36:58.000000000 +0100
+++ b/arch/x86/xen/enlighten.c	2017-03-14 02:10:47.000000000 +0100
@@ -33,6 +33,10 @@
 #include <linux/memblock.h>
 #include <linux/edd.h>
 
+#ifdef CONFIG_KEXEC
+#include <linux/kexec.h>
+#endif
+
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
@@ -481,6 +485,7 @@
 	pte_t pte;
 	unsigned long pfn;
 	struct page *page;
+	unsigned char dummy;
 
 	ptep = lookup_address((unsigned long)v, &level);
 	BUG_ON(ptep == NULL);
@@ -490,6 +495,32 @@
 
 	pte = pfn_pte(pfn, prot);
 
+	/*
+	 * Careful: update_va_mapping() will fail if the virtual address
+	 * we're poking isn't populated in the page tables.  We don't
+	 * need to worry about the direct map (that's always in the page
+	 * tables), but we need to be careful about vmap space.  In
+	 * particular, the top level page table can lazily propagate
+	 * entries between processes, so if we've switched mms since we
+	 * vmapped the target in the first place, we might not have the
+	 * top-level page table entry populated.
+	 *
+	 * We disable preemption because we want the same mm active when
+	 * we probe the target and when we issue the hypercall.  We'll
+	 * have the same nominal mm, but if we're a kernel thread, lazy
+	 * mm dropping could change our pgd.
+	 *
+	 * Out of an abundance of caution, this uses __get_user() to fault
+	 * in the target address just in case there's some obscure case
+	 * in which the target address isn't readable.
+	 */
+
+	preempt_disable();
+
+	pagefault_disable();	/* Avoid warnings due to being atomic. */
+	__get_user(dummy, (unsigned char __user __force *)v);
+	pagefault_enable();
+
 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
 		BUG();
 
@@ -501,6 +532,8 @@
 				BUG();
 	} else
 		kmap_flush_unused();
+
+	preempt_enable();
 }
 
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -508,6 +541,17 @@
 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;
 
+	/*
+	 * We need to mark all the aliases of the LDT pages RO.  We
+	 * don't need to call vm_flush_aliases(), though, since that's
+	 * only responsible for flushing aliases out of the TLBs, not the
+	 * page tables, and Xen will flush the TLB for us if needed.
+	 *
+	 * To avoid confusing future readers: none of this is necessary
+	 * to load the LDT.  The hypervisor only checks this when the
+	 * LDT is faulted in due to subsequent descriptor access.
+	 */
+
 	for(i = 0; i < entries; i += entries_per_page)
 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
@@ -908,7 +952,7 @@
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
-static void xen_set_iopl_mask(unsigned mask)
+void xen_set_iopl_mask(unsigned mask)
 {
 	struct physdev_set_iopl set_iopl;
 
@@ -1704,6 +1748,21 @@
 	.notifier_call	= xen_hvm_cpu_notify,
 };
 
+#ifdef CONFIG_KEXEC
+static void xen_hvm_shutdown(void)
+{
+	native_machine_shutdown();
+	if (kexec_in_progress)
+		xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+	native_machine_crash_shutdown(regs);
+	xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
 static void __init xen_hvm_guest_init(void)
 {
 	init_hvm_pv_info();
@@ -1718,6 +1777,10 @@
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	xen_hvm_init_time_ops();
 	xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC
+	machine_ops.shutdown = xen_hvm_shutdown;
+	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
 }
 
 static bool __init xen_hvm_platform(void)
diff -ur a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
--- a/arch/x86/xen/suspend.c	2017-03-23 14:36:54.000000000 +0100
+++ b/arch/x86/xen/suspend.c	2017-03-14 02:10:44.000000000 +0100
@@ -30,7 +30,8 @@
 {
 #ifdef CONFIG_XEN_PVHVM
 	int cpu;
-	xen_hvm_init_shared_info();
+	if (!suspend_cancelled)
+	    xen_hvm_init_shared_info();
 	xen_callback_vector();
 	xen_unplug_emulated_devices();
 	if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff -ur a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
--- a/arch/xtensa/boot/Makefile	2016-10-20 04:32:09.000000000 +0200
+++ b/arch/xtensa/boot/Makefile	2016-07-29 05:48:09.000000000 +0200
@@ -12,7 +12,7 @@
 KBUILD_CFLAGS	+= -fno-builtin -Iarch/$(ARCH)/boot/include
 HOSTFLAGS	+= -Iarch/$(ARCH)/boot/include
 
-BIG_ENDIAN	:= $(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
+BIG_ENDIAN	:= $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
 
 export ccflags-y
 export BIG_ENDIAN
diff -ur a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
--- a/arch/xtensa/include/asm/traps.h	2017-03-23 14:19:02.000000000 +0100
+++ b/arch/xtensa/include/asm/traps.h	2017-03-14 01:56:22.000000000 +0100
@@ -24,30 +24,39 @@
 {
 #if XCHAL_NUM_AREGS > 16
 	__asm__ __volatile__ (
-		"	call12	1f\n"
+		"	call8	1f\n"
 		"	_j	2f\n"
 		"	retw\n"
 		"	.align	4\n"
 		"1:\n"
+#if XCHAL_NUM_AREGS == 32
+		"	_entry	a1, 32\n"
+		"	addi	a8, a0, 3\n"
+		"	_entry	a1, 16\n"
+		"	mov	a12, a12\n"
+		"	retw\n"
+#else
 		"	_entry	a1, 48\n"
-		"	addi	a12, a0, 3\n"
-#if XCHAL_NUM_AREGS > 32
-		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+		"	call12	1f\n"
+		"	retw\n"
+		"	.align	4\n"
+		"1:\n"
+		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
 		"	_entry	a1, 48\n"
 		"	mov	a12, a0\n"
 		"	.endr\n"
-#endif
-		"	_entry	a1, 48\n"
+		"	_entry	a1, 16\n"
 #if XCHAL_NUM_AREGS % 12 == 0
-		"	mov	a8, a8\n"
-#elif XCHAL_NUM_AREGS % 12 == 4
 		"	mov	a12, a12\n"
-#elif XCHAL_NUM_AREGS % 12 == 8
+#elif XCHAL_NUM_AREGS % 12 == 4
 		"	mov	a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+		"	mov	a8, a8\n"
 #endif
 		"	retw\n"
+#endif
 		"2:\n"
-		: : : "a12", "a13", "memory");
+		: : : "a8", "a9", "memory");
 #else
 	__asm__ __volatile__ (
 		"	mov	a12, a12\n"
diff -ur a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
--- a/arch/xtensa/kernel/entry.S	2017-03-23 14:19:02.000000000 +0100
+++ b/arch/xtensa/kernel/entry.S	2017-03-14 01:56:22.000000000 +0100
@@ -549,12 +549,13 @@
 	 *	 (if we have restored WSBITS-1 frames).
 	 */
 
+2:
 #if XCHAL_HAVE_THREADPTR
 	l32i	a3, a1, PT_THREADPTR
 	wur	a3, threadptr
 #endif
 
-2:	j	common_exception_exit
+	j	common_exception_exit
 
 	/* This is the kernel exception exit.
 	 * We avoided to do a MOVSP when we entered the exception, but we
@@ -1928,7 +1929,7 @@
 	mov	a12, a0
 	.endr
 #endif
-	_entry	a1, 48
+	_entry	a1, 16
 #if XCHAL_NUM_AREGS % 12 == 0
 	mov	a8, a8
 #elif XCHAL_NUM_AREGS % 12 == 4
@@ -1952,7 +1953,7 @@
 
 ENTRY(_switch_to)
 
-	entry	a1, 16
+	entry	a1, 48
 
 	mov	a11, a3			# and 'next' (a3)
 
diff -ur a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
--- a/arch/xtensa/kernel/head.S	2017-03-23 14:18:59.000000000 +0100
+++ b/arch/xtensa/kernel/head.S	2017-03-14 01:56:20.000000000 +0100
@@ -118,7 +118,7 @@
 	wsr	a0, icountlevel
 
 	.set	_index, 0
-	.rept	XCHAL_NUM_DBREAK - 1
+	.rept	XCHAL_NUM_DBREAK
 	wsr	a0, SREG_DBREAKC + _index
 	.set	_index, _index + 1
 	.endr
diff -ur a/arch/xtensa/Makefile b/arch/xtensa/Makefile
--- a/arch/xtensa/Makefile	2016-10-20 04:32:09.000000000 +0200
+++ b/arch/xtensa/Makefile	2016-07-29 05:48:09.000000000 +0200
@@ -55,10 +55,10 @@
 LDFLAGS := --no-relax
 endif
 
-ifeq ($(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
+ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
 CHECKFLAGS += -D__XTENSA_EB__
 endif
-ifeq ($(shell echo -e __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
+ifeq ($(shell echo __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
 CHECKFLAGS += -D__XTENSA_EL__
 endif
 
diff -ur a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
--- a/arch/xtensa/platforms/iss/console.c	2017-03-23 14:19:10.000000000 +0100
+++ b/arch/xtensa/platforms/iss/console.c	2017-03-14 01:56:29.000000000 +0100
@@ -98,20 +98,23 @@
 {
 	struct tty_port *port = (struct tty_port *)priv;
 	int i = 0;
+	int rd = 1;
 	unsigned char c;
 
 	spin_lock(&timer_lock);
 
 	while (simc_poll(0)) {
-		simc_read(0, &c, 1);
+		rd = simc_read(0, &c, 1);
+		if (rd <= 0)
+			break;
 		tty_insert_flip_char(port, c, TTY_NORMAL);
 		i++;
 	}
 
 	if (i)
 		tty_flip_buffer_push(port);
-
-	mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+	if (rd)
+		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
 	spin_unlock(&timer_lock);
 }
 
diff -ur a/block/blk-cgroup.c b/block/blk-cgroup.c
--- a/block/blk-cgroup.c	2017-03-23 15:04:16.000000000 +0100
+++ b/block/blk-cgroup.c	2017-03-14 02:42:06.000000000 +0100
@@ -727,8 +727,12 @@
 		return -EINVAL;
 
 	disk = get_gendisk(MKDEV(major, minor), &part);
-	if (!disk || part)
+	if (!disk)
 		return -EINVAL;
+	if (part) {
+		put_disk(disk);
+		return -EINVAL;
+	}
 
 	rcu_read_lock();
 	spin_lock_irq(disk->queue->queue_lock);
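
Note on the blk-cgroup.c hunk above: get_gendisk() returns the disk with a reference held even when the devt resolves to a partition, so the partition case must drop that reference with put_disk() rather than simply returning -EINVAL. The generic shape of the fix as a small stand-alone sketch (malloc/free standing in for the get/put pair):

    #include <stdlib.h>

    struct disk { int is_whole_disk; };

    static struct disk *get_disk_ref(int key)   /* stand-in for get_gendisk() */
    {
        struct disk *d = malloc(sizeof(*d));
        if (d)
            d->is_whole_disk = !(key & 1);
        return d;
    }

    static int configure(int key)
    {
        struct disk *d = get_disk_ref(key);
        if (!d)
            return -1;
        if (!d->is_whole_disk) {
            free(d);    /* the reference must be dropped on this path too */
            return -1;
        }
        /* ... operate on the whole disk ... */
        free(d);
        return 0;
    }

    int main(void) { return configure(2); }
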
diff -ur a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c	2017-03-23 15:04:16.000000000 +0100
+++ b/block/blk-core.c	2017-03-14 02:42:06.000000000 +0100
@@ -1556,6 +1556,10 @@
 {
 	int total_bytes;
 
+#ifdef MY_ABC_HERE
+	static unsigned long long blk_rq_pos_last = 0;
+#endif  
+
 	if (!req->bio)
 		return false;
 
@@ -1584,14 +1588,24 @@
 			break;
 		}
 #ifdef MY_ABC_HERE
-		if (printk_ratelimit()) {
-#endif  
+		if (blk_rq_pos_last == (unsigned long long)blk_rq_pos(req)) {
+			printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+					error_type, req->rq_disk ?
+					req->rq_disk->disk_name : "?",
+					blk_rq_pos_last);
+		} else {
+			blk_rq_pos_last = (unsigned long long)blk_rq_pos(req);
+			printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector in range %llu + 0-2(%d)\n",
+					error_type, req->rq_disk ?
+					req->rq_disk->disk_name : "?",
+					(blk_rq_pos_last >> CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT) << CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT,
+					CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT);
+		}
+#else
 		printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
 				   error_type, req->rq_disk ?
 				   req->rq_disk->disk_name : "?",
 				   (unsigned long long)blk_rq_pos(req));
-#ifdef MY_ABC_HERE
-		}
 #endif  
 
 	}
@@ -2062,6 +2076,9 @@
 {
 	int ret = 0;
 
+	if (!q->dev)
+		return ret;
+
 	spin_lock_irq(q->queue_lock);
 	if (q->nr_pending) {
 		ret = -EBUSY;
@@ -2076,6 +2093,9 @@
 
 void blk_post_runtime_suspend(struct request_queue *q, int err)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_SUSPENDED;
@@ -2089,6 +2109,9 @@
 
 void blk_pre_runtime_resume(struct request_queue *q)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	q->rpm_status = RPM_RESUMING;
 	spin_unlock_irq(q->queue_lock);
@@ -2097,6 +2120,9 @@
 
 void blk_post_runtime_resume(struct request_queue *q, int err)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_ACTIVE;
diff -ur a/block/genhd.c b/block/genhd.c
--- a/block/genhd.c	2017-03-23 15:04:16.000000000 +0100
+++ b/block/genhd.c	2017-03-14 02:42:05.000000000 +0100
@@ -327,9 +327,9 @@
 
 	idr_preload(GFP_KERNEL);
 
-	spin_lock(&ext_devt_lock);
+	spin_lock_bh(&ext_devt_lock);
 	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
-	spin_unlock(&ext_devt_lock);
+	spin_unlock_bh(&ext_devt_lock);
 
 	idr_preload_end();
 	if (idx < 0)
@@ -345,9 +345,9 @@
 		return;
 
 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
 	}
 }
 
@@ -403,7 +403,7 @@
 	struct hd_struct *part;
 	int err;
 #ifdef MY_ABC_HERE
-	int error;
+	int error = -1;
 #endif  
 
 	ddev->parent = disk->driverfs_dev;
@@ -583,13 +583,13 @@
 	} else {
 		struct hd_struct *part;
 
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 		if (part && get_disk(part_to_disk(part))) {
 			*partno = part->partno;
 			disk = part_to_disk(part);
 		}
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
 	}
 
 	return disk;
diff -ur a/block/partition-generic.c b/block/partition-generic.c
--- a/block/partition-generic.c	2017-03-23 15:04:14.000000000 +0100
+++ b/block/partition-generic.c	2017-03-14 02:42:04.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
  
 #include <linux/init.h>
 #include <linux/module.h>
@@ -122,6 +125,30 @@
 		atomic_read(&p->in_flight[1]));
 }
 
+#ifdef MY_ABC_HERE
+extern void
+PartitionRemapModeSet(struct gendisk *gd,
+							struct hd_struct *phd,
+							unsigned char blAutoRemap);
+ssize_t part_auto_remap_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", dev_to_part(dev)->auto_remap);
+}
+
+ssize_t part_auto_remap_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	unsigned val = 0;
+	struct hd_struct *p = dev_to_part(dev);
+	struct gendisk *disk = part_to_disk(p);
+
+	sscanf(buf, "%d", &val);
+	PartitionRemapModeSet(disk, p, val ? 1 : 0);
+	return count;
+}
+#endif  
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 ssize_t part_fail_show(struct device *dev,
 		       struct device_attribute *attr, char *buf)
@@ -158,6 +185,9 @@
 static struct device_attribute dev_attr_fail =
 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
 #endif
+#ifdef MY_ABC_HERE
+	static DEVICE_ATTR(auto_remap, S_IRUGO|S_IWUSR, part_auto_remap_show, part_auto_remap_store);
+#endif  
 
 static struct attribute *part_attrs[] = {
 	&dev_attr_partition.attr,
@@ -171,6 +201,9 @@
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	&dev_attr_fail.attr,
 #endif
+#ifdef MY_ABC_HERE
+	&dev_attr_auto_remap.attr,
+#endif  
 	NULL
 };
 
@@ -331,6 +364,9 @@
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
 
+#ifdef MY_ABC_HERE
+	PartitionRemapModeSet(disk, p, 0);
+#endif  
 	hd_ref_init(p);
 	return p;
 
diff -ur a/block/partitions/mac.c b/block/partitions/mac.c
--- a/block/partitions/mac.c	2017-03-23 15:04:16.000000000 +0100
+++ b/block/partitions/mac.c	2017-03-14 02:42:06.000000000 +0100
@@ -32,7 +32,7 @@
 	Sector sect;
 	unsigned char *data;
 	int slot, blocks_in_map;
-	unsigned secsize;
+	unsigned secsize, datasize, partoffset;
 #ifdef CONFIG_PPC_PMAC
 	int found_root = 0;
 	int found_root_goodness = 0;
@@ -50,10 +50,14 @@
 	}
 	secsize = be16_to_cpu(md->block_size);
 	put_dev_sector(sect);
-	data = read_part_sector(state, secsize/512, &sect);
+	datasize = round_down(secsize, 512);
+	data = read_part_sector(state, datasize / 512, &sect);
 	if (!data)
 		return -1;
-	part = (struct mac_partition *) (data + secsize%512);
+	partoffset = secsize % 512;
+	if (partoffset + sizeof(*part) > datasize)
+		return -1;
+	part = (struct mac_partition *) (data + partoffset);
 	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
 		put_dev_sector(sect);
 		return 0;		/* not a MacOS disk */
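
Note on the partitions/mac.c hunk above: block_size comes from the on-disk driver-descriptor block, so the offset derived from it has to be checked against the amount of data actually read before the partition entry is dereferenced. A minimal sketch of that kind of bounds check (illustrative struct, not the mac_partition layout):

    #include <stddef.h>
    #include <stdint.h>

    struct entry { uint16_t signature; uint16_t flags; };

    /* Return the entry at 'offset', or NULL if it would not fit entirely
     * inside the 'buflen' bytes that were actually read. */
    const struct entry *entry_at(const uint8_t *buf, size_t buflen, size_t offset)
    {
        if (offset > buflen || buflen - offset < sizeof(struct entry))
            return NULL;
        return (const struct entry *)(const void *)(buf + offset);
    }
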
diff -ur a/crypto/ablkcipher.c b/crypto/ablkcipher.c
--- a/crypto/ablkcipher.c	2017-03-23 14:15:40.000000000 +0100
+++ b/crypto/ablkcipher.c	2017-03-14 01:53:57.000000000 +0100
@@ -700,7 +700,7 @@
 err:
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}
diff -ur a/crypto/af_alg.c b/crypto/af_alg.c
--- a/crypto/af_alg.c	2017-03-23 14:15:45.000000000 +0100
+++ b/crypto/af_alg.c	2017-03-14 01:54:00.000000000 +0100
@@ -247,10 +247,8 @@
 	security_sk_clone(sk, sk2);
 
 	err = type->accept(ask->private, sk2);
-	if (err) {
-		sk_free(sk2);
+	if (err)
 		goto unlock;
-	}
 
 	sk2->sk_family = PF_ALG;
 
diff -ur a/crypto/ahash.c b/crypto/ahash.c
--- a/crypto/ahash.c	2017-03-23 14:15:36.000000000 +0100
+++ b/crypto/ahash.c	2017-03-14 01:53:53.000000000 +0100
@@ -64,8 +64,9 @@
 	struct scatterlist *sg;
 
 	sg = walk->sg;
-	walk->pg = sg_page(sg);
 	walk->offset = sg->offset;
+	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+	walk->offset = offset_in_page(walk->offset);
 	walk->entrylen = sg->length;
 
 	if (walk->entrylen > walk->total)
@@ -462,7 +463,8 @@
 	struct crypto_alg *base = &alg->halg.base;
 
 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-	    alg->halg.statesize > PAGE_SIZE / 8)
+	    alg->halg.statesize > PAGE_SIZE / 8 ||
+	    alg->halg.statesize == 0)
 		return -EINVAL;
 
 	base->cra_type = &crypto_ahash_type;
diff -ur a/crypto/algapi.c b/crypto/algapi.c
--- a/crypto/algapi.c	2017-03-23 14:15:43.000000000 +0100
+++ b/crypto/algapi.c	2017-03-14 01:53:58.000000000 +0100
@@ -325,7 +325,7 @@
 		crypto_alg_tested(larval->alg.cra_driver_name, 0);
 	}
 
-	err = wait_for_completion_interruptible(&larval->completion);
+	err = wait_for_completion_killable(&larval->completion);
 	WARN_ON(err);
 
 out:
diff -ur a/crypto/algif_hash.c b/crypto/algif_hash.c
--- a/crypto/algif_hash.c	2017-03-23 14:15:39.000000000 +0100
+++ b/crypto/algif_hash.c	2017-03-14 01:53:56.000000000 +0100
@@ -51,7 +51,8 @@
 
 	lock_sock(sk);
 	if (!ctx->more) {
-		err = crypto_ahash_init(&ctx->req);
+		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
+						&ctx->completion);
 		if (err)
 			goto unlock;
 	}
@@ -131,6 +132,7 @@
 	} else {
 		if (!ctx->more) {
 			err = crypto_ahash_init(&ctx->req);
+			err = af_alg_wait_for_completion(err, &ctx->completion);
 			if (err)
 				goto unlock;
 		}
@@ -192,9 +194,14 @@
 	struct sock *sk2;
 	struct alg_sock *ask2;
 	struct hash_ctx *ctx2;
+	bool more;
 	int err;
 
-	err = crypto_ahash_export(req, state);
+	lock_sock(sk);
+	more = ctx->more;
+	err = more ? crypto_ahash_export(req, state) : 0;
+	release_sock(sk);
+
 	if (err)
 		return err;
 
@@ -205,7 +212,10 @@
 	sk2 = newsock->sk;
 	ask2 = alg_sk(sk2);
 	ctx2 = ask2->private;
-	ctx2->more = 1;
+	ctx2->more = more;
+
+	if (!more)
+		return err;
 
 	err = crypto_ahash_import(&ctx2->req, state);
 	if (err) {
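
Note on the algif_hash.c hunks above: they touch the user-visible AF_ALG "hash" socket, where crypto_ahash_init() may now complete asynchronously and accept() on an operation socket only forks the intermediate state when a hash is actually in progress. A rough user-space sketch of the path involved (error handling omitted; "sha1" is just an example algorithm name):

    #include <linux/if_alg.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_alg sa = { .salg_family = AF_ALG, .salg_type = "hash" };
        unsigned char digest[20];
        int tfm, op;

        strcpy((char *)sa.salg_name, "sha1");
        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));

        op = accept(tfm, NULL, NULL);       /* operation socket */
        send(op, "abc", 3, 0);              /* hash_sendmsg(): triggers ahash init */
        read(op, digest, sizeof(digest));   /* recvmsg path returns the digest */

        close(op);
        close(tfm);
        return 0;
    }
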
diff -ur a/crypto/api.c b/crypto/api.c
--- a/crypto/api.c	2017-03-23 14:15:36.000000000 +0100
+++ b/crypto/api.c	2017-03-14 01:53:54.000000000 +0100
@@ -172,7 +172,7 @@
 	struct crypto_larval *larval = (void *)alg;
 	long timeout;
 
-	timeout = wait_for_completion_interruptible_timeout(
+	timeout = wait_for_completion_killable_timeout(
 		&larval->completion, 60 * HZ);
 
 	alg = larval->adult;
@@ -435,7 +435,7 @@
 err:
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}
@@ -552,7 +552,7 @@
 err:
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}
Only in b/crypto: crct10dif_common.c.
Only in b/crypto: crct10dif_generic.c.
diff -ur a/crypto/crypto_user.c b/crypto/crypto_user.c
--- a/crypto/crypto_user.c	2017-03-23 14:15:38.000000000 +0100
+++ b/crypto/crypto_user.c	2017-03-14 01:53:55.000000000 +0100
@@ -361,7 +361,7 @@
 		err = PTR_ERR(alg);
 		if (err != -EAGAIN)
 			break;
-		if (signal_pending(current)) {
+		if (fatal_signal_pending(current)) {
 			err = -EINTR;
 			break;
 		}
@@ -477,6 +477,7 @@
 		if (link->dump == NULL)
 			return -EINVAL;
 
+		down_read(&crypto_alg_sem);
 		list_for_each_entry(alg, &crypto_alg_list, cra_list)
 			dump_alloc += CRYPTO_REPORT_MAXSIZE;
 
@@ -486,8 +487,11 @@
 				.done = link->done,
 				.min_dump_alloc = dump_alloc,
 			};
-			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
 		}
+		up_read(&crypto_alg_sem);
+
+		return err;
 	}
 
 	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
diff -ur a/crypto/gcm.c b/crypto/gcm.c
--- a/crypto/gcm.c	2017-03-23 14:15:42.000000000 +0100
+++ b/crypto/gcm.c	2017-03-14 01:53:58.000000000 +0100
@@ -1173,6 +1173,9 @@
 	aead_request_set_tfm(subreq, ctx->child);
 	aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done,
 				  req);
+	if (!enc)
+		aead_request_set_callback(subreq, req->base.flags,
+					  req->base.complete, req->base.data);
 	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
 	aead_request_set_assoc(subreq, assoc, assoclen);
 
diff -ur a/crypto/Kconfig b/crypto/Kconfig
--- a/crypto/Kconfig	2016-10-20 04:32:11.000000000 +0200
+++ b/crypto/Kconfig	2016-06-23 08:51:53.000000000 +0200
@@ -376,6 +376,25 @@
 	  which will enable any routine to use the CRC-32-IEEE 802.3 checksum
 	  and gain better performance as compared with the table implementation.
 
+config CRYPTO_CRCT10DIF
+	tristate "CRCT10DIF algorithm"
+	select CRYPTO_HASH
+	help
+	  CRC T10 Data Integrity Field computation is being cast as
+	  a crypto transform.  This allows for faster crc t10 diff
+	  transforms to be used if they are available.
+
+config CRYPTO_CRCT10DIF_PCLMUL
+	tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
+	depends on X86 && 64BIT && CRC_T10DIF
+	select CRYPTO_HASH
+	help
+	  For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
+	  CRC T10 DIF PCLMULQDQ computation can be hardware
+	  accelerated PCLMULQDQ instruction. This option will create
+	  'crct10dif-plcmul' module, which is faster when computing the
+	  crct10dif checksum as compared with the generic table implementation.
+
 config CRYPTO_GHASH
 	tristate "GHASH digest algorithm"
 	select CRYPTO_GF128MUL
diff -ur a/crypto/Makefile b/crypto/Makefile
--- a/crypto/Makefile	2016-10-20 04:32:11.000000000 +0200
+++ b/crypto/Makefile	2016-06-23 08:51:53.000000000 +0200
@@ -83,6 +83,7 @@
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
 obj-$(CONFIG_CRYPTO_842) += 842.o
diff -ur a/crypto/tcrypt.c b/crypto/tcrypt.c
--- a/crypto/tcrypt.c	2017-03-23 14:15:44.000000000 +0100
+++ b/crypto/tcrypt.c	2017-03-14 01:53:59.000000000 +0100
@@ -1174,6 +1174,10 @@
 		ret += tcrypt_test("ghash");
 		break;
 
+	case 47:
+		ret += tcrypt_test("crct10dif");
+		break;
+
 	case 100:
 		ret += tcrypt_test("hmac(md5)");
 		break;
@@ -1498,6 +1502,10 @@
 		test_hash_speed("crc32c", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
 
+	case 320:
+		test_hash_speed("crct10dif", sec, generic_hash_speed_template);
+		if (mode > 300 && mode < 400) break;
+
 	case 399:
 		break;
 
diff -ur a/crypto/testmgr.c b/crypto/testmgr.c
--- a/crypto/testmgr.c	2017-03-23 14:15:47.000000000 +0100
+++ b/crypto/testmgr.c	2017-03-14 01:54:01.000000000 +0100
@@ -1973,6 +1973,16 @@
 			}
 		}
 	}, {
+		.alg = "crct10dif",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = crct10dif_tv_template,
+				.count = CRCT10DIF_TEST_VECTORS
+			}
+		}
+	}, {
 		.alg = "cryptd(__driver-cbc-aes-aesni)",
 		.test = alg_test_null,
 		.fips_allowed = 1,
diff -ur a/crypto/testmgr.h b/crypto/testmgr.h
--- a/crypto/testmgr.h	2017-03-23 14:17:01.000000000 +0100
+++ b/crypto/testmgr.h	2017-03-14 01:54:51.000000000 +0100
@@ -450,6 +450,39 @@
 	}
 };
 
+#define CRCT10DIF_TEST_VECTORS	3
+static struct hash_testvec crct10dif_tv_template[] = {
+	{
+		.plaintext = "abc",
+		.psize  = 3,
+#ifdef __LITTLE_ENDIAN
+		.digest = "\x3b\x44",
+#else
+		.digest = "\x44\x3b",
+#endif
+	}, {
+		.plaintext = "1234567890123456789012345678901234567890"
+			     "123456789012345678901234567890123456789",
+		.psize	= 79,
+#ifdef __LITTLE_ENDIAN
+		.digest	= "\x70\x4b",
+#else
+		.digest	= "\x4b\x70",
+#endif
+	}, {
+		.plaintext =
+		"abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
+		.psize  = 56,
+#ifdef __LITTLE_ENDIAN
+		.digest = "\xe3\x9c",
+#else
+		.digest = "\x9c\xe3",
+#endif
+		.np     = 2,
+		.tap    = { 28, 28 }
+	}
+};
+
 /*
  * SHA1 test vectors  from from FIPS PUB 180-1
  * Long vector from CAVS 5.0
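
Note on the crct10dif test vectors added above: CRC-T10DIF is the 16-bit CRC (polynomial 0x8BB7, no reflection, zero init) used by the T10 Data Integrity Field, and the digests are stored in CPU byte order, hence the paired __LITTLE_ENDIAN/__BIG_ENDIAN forms. The in-kernel crct10dif_common.c is table-driven; a bit-at-a-time user-space reference under the same assumptions, which should reproduce the vectors above (e.g. 0x443b for "abc"):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
    {
        uint16_t crc = 0;

        while (len--) {
            crc ^= (uint16_t)(*buf++) << 8;
            for (int bit = 0; bit < 8; bit++)
                crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
                                     : (uint16_t)(crc << 1);
        }
        return crc;
    }

    int main(void)
    {
        printf("%#06x\n", crc_t10dif((const uint8_t *)"abc", 3));  /* 0x443b */
        return 0;
    }
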
diff -ur a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt	2016-10-20 04:32:07.000000000 +0200
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt	2016-07-29 05:48:09.000000000 +0200
@@ -38,7 +38,7 @@
 		      80 81 68 69
 		      70 71 72 73
 		      74 75 76 77>;
-	interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+	interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
 			  "saif0", "saif1", "i2c0", "i2c1",
 			  "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
 			  "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
diff -ur a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt	2016-10-20 04:32:07.000000000 +0200
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt	2016-07-29 05:48:09.000000000 +0200
@@ -91,5 +91,5 @@
 mpp62         62       gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
                        audio(mclk), uart0(cts)
 mpp63         63       gpo, spi0(sck), tclk
-mpp64         64       gpio, spi0(miso), spi0-1(cs1)
-mpp65         65       gpio, spi0(mosi), spi0-1(cs2)
+mpp64         64       gpio, spi0(miso), spi0(cs1)
+mpp65         65       gpio, spi0(mosi), spi0(cs2)
diff -ur a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt	2016-10-20 04:32:07.000000000 +0200
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt	2016-07-29 05:48:09.000000000 +0200
@@ -41,15 +41,15 @@
 mpp21         21       gpio, ge0(rxd5), ge1(rxd3), lcd(d21), mem(bat)
 mpp22         22       gpio, ge0(rxd6), ge1(rxctl), lcd(d22), sata0(prsnt)
 mpp23         23       gpio, ge0(rxd7), ge1(rxclk), lcd(d23), sata1(prsnt)
-mpp24         24       gpio, lcd(hsync), sata1(prsnt), nf(bootcs-re), tdm(rst)
-mpp25         25       gpio, lcd(vsync), sata0(prsnt), nf(bootcs-we), tdm(pclk)
-mpp26         26       gpio, lcd(clk), tdm(fsync), vdd(cpu1-pd)
+mpp24         24       gpio, lcd(hsync), sata1(prsnt), tdm(rst)
+mpp25         25       gpio, lcd(vsync), sata0(prsnt), tdm(pclk)
+mpp26         26       gpio, lcd(clk), tdm(fsync)
 mpp27         27       gpio, lcd(e), tdm(dtx), ptp(trig)
 mpp28         28       gpio, lcd(pwm), tdm(drx), ptp(evreq)
-mpp29         29       gpio, lcd(ref-clk), tdm(int0), ptp(clk), vdd(cpu0-pd)
+mpp29         29       gpio, lcd(ref-clk), tdm(int0), ptp(clk)
 mpp30         30       gpio, tdm(int1), sd0(clk)
-mpp31         31       gpio, tdm(int2), sd0(cmd), vdd(cpu0-pd)
-mpp32         32       gpio, tdm(int3), sd0(d0), vdd(cpu1-pd)
+mpp31         31       gpio, tdm(int2), sd0(cmd)
+mpp32         32       gpio, tdm(int3), sd0(d0)
 mpp33         33       gpio, tdm(int4), sd0(d1), mem(bat)
 mpp34         34       gpio, tdm(int5), sd0(d2), sata0(prsnt)
 mpp35         35       gpio, tdm(int6), sd0(d3), sata1(prsnt)
@@ -57,21 +57,18 @@
 mpp37         37       gpio, spi(miso)
 mpp38         38       gpio, spi(sck)
 mpp39         39       gpio, spi(cs0)
-mpp40         40       gpio, spi(cs1), uart2(cts), lcd(vga-hsync), vdd(cpu1-pd),
-                       pcie(clkreq0)
+mpp40         40       gpio, spi(cs1), uart2(cts), lcd(vga-hsync), pcie(clkreq0)
 mpp41         41       gpio, spi(cs2), uart2(rts), lcd(vga-vsync), sata1(prsnt),
                        pcie(clkreq1)
-mpp42         42       gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer),
-                       vdd(cpu0-pd)
-mpp43         43       gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout),
-                       vdd(cpu2-3-pd){1}
+mpp42         42       gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer)
+mpp43         43       gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout)
 mpp44         44       gpio, uart2(cts), uart3(rxd), spi(cs4), pcie(clkreq2),
                        mem(bat)
 mpp45         45       gpio, uart2(rts), uart3(txd), spi(cs5), sata1(prsnt)
 mpp46         46       gpio, uart3(rts), uart1(rts), spi(cs6), sata0(prsnt)
 mpp47         47       gpio, uart3(cts), uart1(cts), spi(cs7), pcie(clkreq3),
                        ref(clkout)
-mpp48         48       gpio, tclk, dev(burst/last)
+mpp48         48       gpio, dev(clkout), dev(burst/last)
 
 * Marvell Armada XP (mv78260 and mv78460 only)
 
@@ -83,9 +80,9 @@
 mpp52         52       gpio, dev(ad17)
 mpp53         53       gpio, dev(ad18)
 mpp54         54       gpio, dev(ad19)
-mpp55         55       gpio, dev(ad20), vdd(cpu0-pd)
-mpp56         56       gpio, dev(ad21), vdd(cpu1-pd)
-mpp57         57       gpio, dev(ad22), vdd(cpu2-3-pd){1}
+mpp55         55       gpio, dev(ad20)
+mpp56         56       gpio, dev(ad21)
+mpp57         57       gpio, dev(ad22)
 mpp58         58       gpio, dev(ad23)
 mpp59         59       gpio, dev(ad24)
 mpp60         60       gpio, dev(ad25)
@@ -95,6 +92,3 @@
 mpp64         64       gpio, dev(ad29)
 mpp65         65       gpio, dev(ad30)
 mpp66         66       gpio, dev(ad31)
-
-Notes:
-* {1} vdd(cpu2-3-pd) only available on mv78460.
diff -ur a/Documentation/devicetree/bindings/spi/spi_pl022.txt b/Documentation/devicetree/bindings/spi/spi_pl022.txt
--- a/Documentation/devicetree/bindings/spi/spi_pl022.txt	2016-10-20 04:32:07.000000000 +0200
+++ b/Documentation/devicetree/bindings/spi/spi_pl022.txt	2016-07-29 05:48:09.000000000 +0200
@@ -4,9 +4,9 @@
 - compatible : "arm,pl022", "arm,primecell"
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain SPI controller interrupt
+- num-cs : total number of chipselects
 
 Optional properties:
-- num-cs : total number of chipselects
 - cs-gpios : should specify GPIOs used for chipselects.
   The gpios will be referred to as reg = <index> in the SPI child nodes.
   If unspecified, a single SPI device without a chip select can be used.
diff -ur a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
--- a/Documentation/DocBook/device-drivers.tmpl	2016-10-20 04:32:07.000000000 +0200
+++ b/Documentation/DocBook/device-drivers.tmpl	2017-02-14 17:25:11.000000000 +0100
@@ -84,7 +84,7 @@
 
      <sect1><title>Kernel utility functions</title>
 !Iinclude/linux/kernel.h
-!Ekernel/printk.c
+!Ekernel/printk/printk.c
 !Ekernel/panic.c
 !Ekernel/sys.c
 !Ekernel/rcupdate.c
diff -ur a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
--- a/Documentation/filesystems/efivarfs.txt	2016-10-20 04:32:07.000000000 +0200
+++ b/Documentation/filesystems/efivarfs.txt	2016-07-29 05:48:09.000000000 +0200
@@ -14,3 +14,10 @@
 efivarfs is typically mounted like this,
 
 	mount -t efivarfs none /sys/firmware/efi/efivars
+
+Due to the presence of numerous firmware bugs where removing non-standard
+UEFI variables causes the system firmware to fail to POST, efivarfs
+files that are not well-known standardized variables are created
+as immutable files.  This doesn't prevent removal - "chattr -i" will work -
+but it does prevent this kind of failure from being accomplished
+accidentally.
diff -ur a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
--- a/drivers/acpi/acpica/acmacros.h	2017-03-23 14:48:16.000000000 +0100
+++ b/drivers/acpi/acpica/acmacros.h	2017-03-14 02:24:52.000000000 +0100
@@ -63,19 +63,15 @@
 #define ACPI_SET64(ptr, val)            (*ACPI_CAST64 (ptr) = (u64) (val))
 
 /*
- * printf() format helpers
+ * printf() format helper. This macro is a workaround for the difficulties
+ * with emitting 64-bit integers and 64-bit pointers with the same code
+ * for both 32-bit and 64-bit hosts.
  */
 
 /* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
 
 #define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)
 
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
-#else
-#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
-#endif
-
 /*
  * Macros for moving data around to/from buffers that are possibly unaligned.
  * If the hardware supports the transfer of unaligned data, just do the store.
diff -ur a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
--- a/drivers/acpi/acpica/dsmethod.c	2017-03-23 14:48:10.000000000 +0100
+++ b/drivers/acpi/acpica/dsmethod.c	2017-03-14 02:24:44.000000000 +0100
@@ -267,6 +267,9 @@
 				obj_desc->method.mutex->mutex.
 				    original_sync_level =
 				    obj_desc->method.mutex->mutex.sync_level;
+
+				obj_desc->method.mutex->mutex.thread_id =
+				    acpi_os_get_thread_id();
 			}
 		}
 
diff -ur a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
--- a/drivers/acpi/acpica/dsopcode.c	2017-03-23 14:48:09.000000000 +0100
+++ b/drivers/acpi/acpica/dsopcode.c	2017-03-14 02:24:43.000000000 +0100
@@ -446,7 +446,7 @@
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
 			  obj_desc,
-			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+			  ACPI_FORMAT_UINT64(obj_desc->region.address),
 			  obj_desc->region.length));
 
 	/* Now the address and length are valid for this opregion */
@@ -539,13 +539,12 @@
 		return_ACPI_STATUS(AE_NOT_EXIST);
 	}
 
-	obj_desc->region.address =
-	    (acpi_physical_address) ACPI_TO_INTEGER(table);
+	obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
 	obj_desc->region.length = table->length;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
 			  obj_desc,
-			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+			  ACPI_FORMAT_UINT64(obj_desc->region.address),
 			  obj_desc->region.length));
 
 	/* Now the address and length are valid for this opregion */
diff -ur a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
--- a/drivers/acpi/acpica/evregion.c	2017-03-23 14:48:07.000000000 +0100
+++ b/drivers/acpi/acpica/evregion.c	2017-03-14 02:24:40.000000000 +0100
@@ -276,7 +276,7 @@
 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
 			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
 			  &region_obj->region.handler->address_space, handler,
-			  ACPI_FORMAT_NATIVE_UINT(address),
+			  ACPI_FORMAT_UINT64(address),
 			  acpi_ut_get_region_name(region_obj->region.
 						  space_id)));
 
diff -ur a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
--- a/drivers/acpi/acpica/exdump.c	2017-03-23 14:48:11.000000000 +0100
+++ b/drivers/acpi/acpica/exdump.c	2017-03-14 02:24:45.000000000 +0100
@@ -621,8 +621,8 @@
 			acpi_os_printf("\n");
 		} else {
 			acpi_os_printf(" base %8.8X%8.8X Length %X\n",
-				       ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
-							       address),
+				       ACPI_FORMAT_UINT64(obj_desc->region.
+							  address),
 				       obj_desc->region.length);
 		}
 		break;
diff -ur a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
--- a/drivers/acpi/acpica/exfldio.c	2017-03-23 14:48:10.000000000 +0100
+++ b/drivers/acpi/acpica/exfldio.c	2017-03-14 02:24:44.000000000 +0100
@@ -269,17 +269,15 @@
 	}
 
 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
-			      " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
+			      " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
 			      acpi_ut_get_region_name(rgn_desc->region.
 						      space_id),
 			      rgn_desc->region.space_id,
 			      obj_desc->common_field.access_byte_width,
 			      obj_desc->common_field.base_byte_offset,
-			      field_datum_byte_offset, ACPI_CAST_PTR(void,
-								     (rgn_desc->
-								      region.
-								      address +
-								      region_offset))));
+			      field_datum_byte_offset,
+			      ACPI_FORMAT_UINT64(rgn_desc->region.address +
+						 region_offset)));
 
 	/* Invoke the appropriate address_space/op_region handler */
 
diff -ur a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
--- a/drivers/acpi/acpica/exregion.c	2017-03-23 14:48:07.000000000 +0100
+++ b/drivers/acpi/acpica/exregion.c	2017-03-14 02:24:40.000000000 +0100
@@ -176,7 +176,7 @@
 		if (!mem_info->mapped_logical_address) {
 			ACPI_ERROR((AE_INFO,
 				    "Could not map memory at 0x%8.8X%8.8X, size %u",
-				    ACPI_FORMAT_NATIVE_UINT(address),
+				    ACPI_FORMAT_UINT64(address),
 				    (u32) map_length));
 			mem_info->mapped_length = 0;
 			return_ACPI_STATUS(AE_NO_MEMORY);
@@ -197,8 +197,7 @@
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			  "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
-			  bit_width, function,
-			  ACPI_FORMAT_NATIVE_UINT(address)));
+			  bit_width, function, ACPI_FORMAT_UINT64(address)));
 
 	/*
 	 * Perform the memory read or write
@@ -300,8 +299,7 @@
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			  "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
-			  bit_width, function,
-			  ACPI_FORMAT_NATIVE_UINT(address)));
+			  bit_width, function, ACPI_FORMAT_UINT64(address)));
 
 	/* Decode the function parameter */
 
diff -ur a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
--- a/drivers/acpi/acpica/hwvalid.c	2017-03-23 14:48:09.000000000 +0100
+++ b/drivers/acpi/acpica/hwvalid.c	2017-03-14 02:24:43.000000000 +0100
@@ -142,17 +142,17 @@
 	byte_width = ACPI_DIV_8(bit_width);
 	last_address = address + byte_width - 1;
 
-	ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
-			  ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
-								      last_address),
-			  byte_width));
+	ACPI_DEBUG_PRINT((ACPI_DB_IO,
+			  "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
+			  ACPI_FORMAT_UINT64(address),
+			  ACPI_FORMAT_UINT64(last_address), byte_width));
 
 	/* Maximum 16-bit address in I/O space */
 
 	if (last_address > ACPI_UINT16_MAX) {
 		ACPI_ERROR((AE_INFO,
-			    "Illegal I/O port address/length above 64K: %p/0x%X",
-			    ACPI_CAST_PTR(void, address), byte_width));
+			    "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
+			    ACPI_FORMAT_UINT64(address), byte_width));
 		return_ACPI_STATUS(AE_LIMIT);
 	}
 
@@ -181,8 +181,8 @@
 
 			if (acpi_gbl_osi_data >= port_info->osi_dependency) {
 				ACPI_DEBUG_PRINT((ACPI_DB_IO,
-						  "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
-						  ACPI_CAST_PTR(void, address),
+						  "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+						  ACPI_FORMAT_UINT64(address),
 						  byte_width, port_info->name,
 						  port_info->start,
 						  port_info->end));
diff -ur a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
--- a/drivers/acpi/acpica/nsdump.c	2017-03-23 14:48:12.000000000 +0100
+++ b/drivers/acpi/acpica/nsdump.c	2017-03-14 02:24:47.000000000 +0100
@@ -258,12 +258,11 @@
 		switch (type) {
 		case ACPI_TYPE_PROCESSOR:
 
-			acpi_os_printf("ID %02X Len %02X Addr %p\n",
+			acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
 				       obj_desc->processor.proc_id,
 				       obj_desc->processor.length,
-				       ACPI_CAST_PTR(void,
-						     obj_desc->processor.
-						     address));
+				       ACPI_FORMAT_UINT64(obj_desc->processor.
+							  address));
 			break;
 
 		case ACPI_TYPE_DEVICE:
@@ -334,8 +333,9 @@
 							       space_id));
 			if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
 				acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
-					       ACPI_FORMAT_NATIVE_UINT
-					       (obj_desc->region.address),
+					       ACPI_FORMAT_UINT64(obj_desc->
+								  region.
+								  address),
 					       obj_desc->region.length);
 			} else {
 				acpi_os_printf
diff -ur a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
--- a/drivers/acpi/acpica/tbinstal.c	2017-03-23 14:48:10.000000000 +0100
+++ b/drivers/acpi/acpica/tbinstal.c	2017-03-14 02:24:45.000000000 +0100
@@ -301,8 +301,7 @@
 			ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
 					"%4.4s %p Attempted physical table override failed",
 					table_header->signature,
-					ACPI_CAST_PTR(void,
-						      table_desc->address)));
+					ACPI_PHYSADDR_TO_PTR(table_desc->address)));
 			return (NULL);
 		}
 
@@ -318,7 +317,7 @@
 	ACPI_INFO((AE_INFO,
 		   "%4.4s %p %s table override, new table: %p",
 		   table_header->signature,
-		   ACPI_CAST_PTR(void, table_desc->address),
+		   ACPI_PHYSADDR_TO_PTR(table_desc->address),
 		   override_type, new_table));
 
 	/* We can now unmap/delete the original table (if fully mapped) */
diff -ur a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
--- a/drivers/acpi/acpica/tbutils.c	2017-03-23 14:48:15.000000000 +0100
+++ b/drivers/acpi/acpica/tbutils.c	2017-03-14 02:24:51.000000000 +0100
@@ -246,16 +246,12 @@
 {
 	struct acpi_table_header local_header;
 
-	/*
-	 * The reason that the Address is cast to a void pointer is so that we
-	 * can use %p which will work properly on both 32-bit and 64-bit hosts.
-	 */
 	if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
 
 		/* FACS only has signature and length fields */
 
-		ACPI_INFO((AE_INFO, "%4.4s %p %05X",
-			   header->signature, ACPI_CAST_PTR(void, address),
+		ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X %05X",
+			   header->signature, ACPI_FORMAT_UINT64(address),
 			   header->length));
 	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
 
@@ -266,8 +262,8 @@
 					  header)->oem_id, ACPI_OEM_ID_SIZE);
 		acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
 
-		ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
-			   ACPI_CAST_PTR (void, address),
+		ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %05X (v%.2d %6.6s)",
+			   ACPI_FORMAT_UINT64(address),
 			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
 			    revision >
 			    0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -281,8 +277,8 @@
 		acpi_tb_cleanup_table_header(&local_header, header);
 
 		ACPI_INFO((AE_INFO,
-			   "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
-			   local_header.signature, ACPI_CAST_PTR(void, address),
+			   "%-4.4s 0x%8.8X%8.8X %05X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
+			   local_header.signature, ACPI_FORMAT_UINT64(address),
 			   local_header.length, local_header.revision,
 			   local_header.oem_id, local_header.oem_table_id,
 			   local_header.oem_revision,
@@ -474,8 +470,8 @@
 	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
 	if (!table) {
 		ACPI_ERROR((AE_INFO,
-			    "Could not map memory for table [%s] at %p",
-			    signature, ACPI_CAST_PTR(void, address)));
+			    "Could not map memory for table [%s] at %8.8X%8.8X",
+			    signature, ACPI_FORMAT_UINT64(address)));
 		return;
 	}
 
diff -ur a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
--- a/drivers/acpi/acpica/tbxfroot.c	2017-03-23 14:48:14.000000000 +0100
+++ b/drivers/acpi/acpica/tbxfroot.c	2017-03-14 02:24:50.000000000 +0100
@@ -118,7 +118,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_find_root_pointer(acpi_size *table_address)
+acpi_status acpi_find_root_pointer(acpi_physical_address * table_address)
 {
 	u8 *table_ptr;
 	u8 *mem_rover;
@@ -176,7 +176,8 @@
 			physical_address +=
 			    (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
 
-			*table_address = physical_address;
+			*table_address =
+			    (acpi_physical_address) physical_address;
 			return_ACPI_STATUS(AE_OK);
 		}
 	}
@@ -209,7 +210,7 @@
 		    (ACPI_HI_RSDP_WINDOW_BASE +
 		     ACPI_PTR_DIFF(mem_rover, table_ptr));
 
-		*table_address = physical_address;
+		*table_address = (acpi_physical_address) physical_address;
 		return_ACPI_STATUS(AE_OK);
 	}
 
diff -ur a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
--- a/drivers/acpi/acpica/utaddress.c	2017-03-23 14:48:09.000000000 +0100
+++ b/drivers/acpi/acpica/utaddress.c	2017-03-14 02:24:43.000000000 +0100
@@ -107,10 +107,10 @@
 	acpi_gbl_address_range_list[space_id] = range_info;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-			  "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+			  "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
 			  acpi_ut_get_node_name(range_info->region_node),
-			  ACPI_CAST_PTR(void, address),
-			  ACPI_CAST_PTR(void, range_info->end_address)));
+			  ACPI_FORMAT_UINT64(address),
+			  ACPI_FORMAT_UINT64(range_info->end_address)));
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_ACPI_STATUS(AE_OK);
@@ -160,15 +160,13 @@
 			}
 
 			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+					  "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
 					  acpi_ut_get_node_name(range_info->
 								region_node),
-					  ACPI_CAST_PTR(void,
-							range_info->
-							start_address),
-					  ACPI_CAST_PTR(void,
-							range_info->
-							end_address)));
+					  ACPI_FORMAT_UINT64(range_info->
+							     start_address),
+					  ACPI_FORMAT_UINT64(range_info->
+							     end_address)));
 
 			ACPI_FREE(range_info);
 			return_VOID;
@@ -244,9 +242,9 @@
 								  region_node);
 
 				ACPI_WARNING((AE_INFO,
-					      "0x%p-0x%p %s conflicts with Region %s %d",
-					      ACPI_CAST_PTR(void, address),
-					      ACPI_CAST_PTR(void, end_address),
+					      "0x%8.8X%8.8X-0x%8.8X%8.8X %s conflicts with Region %s %d",
+					      ACPI_FORMAT_UINT64(address),
+					      ACPI_FORMAT_UINT64(end_address),
 					      acpi_ut_get_region_name(space_id),
 					      pathname, overlap_count));
 				ACPI_FREE(pathname);
diff -ur a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
--- a/drivers/acpi/acpica/utxfinit.c	2017-03-23 14:48:09.000000000 +0100
+++ b/drivers/acpi/acpica/utxfinit.c	2017-03-14 02:24:42.000000000 +0100
@@ -165,10 +165,12 @@
 	 * Obtain a permanent mapping for the FACS. This is required for the
 	 * Global Lock and the Firmware Waking Vector
 	 */
-	status = acpi_tb_initialize_facs();
-	if (ACPI_FAILURE(status)) {
-		ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
-		return_ACPI_STATUS(status);
+	if (!(flags & ACPI_NO_FACS_INIT)) {
+		status = acpi_tb_initialize_facs();
+		if (ACPI_FAILURE(status)) {
+			ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+			return_ACPI_STATUS(status);
+		}
 	}
 #endif				/* !ACPI_REDUCED_HARDWARE */
 
diff -ur a/drivers/acpi/osl.c b/drivers/acpi/osl.c
--- a/drivers/acpi/osl.c	2017-03-23 14:48:03.000000000 +0100
+++ b/drivers/acpi/osl.c	2017-03-14 02:24:35.000000000 +0100
@@ -173,7 +173,7 @@
 		request_mem_region(addr, length, desc);
 }
 
-static int __init acpi_reserve_resources(void)
+static void __init acpi_reserve_resources(void)
 {
 	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
 		"ACPI PM1a_EVT_BLK");
@@ -202,10 +202,7 @@
 	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
 		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
 			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
-
-	return 0;
 }
-device_initcall(acpi_reserve_resources);
 
 void acpi_os_printf(const char *fmt, ...)
 {
@@ -1726,6 +1723,7 @@
 
 acpi_status __init acpi_os_initialize1(void)
 {
+	acpi_reserve_resources();
 	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
 	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
 	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
diff -ur a/drivers/ata/ahci.c b/drivers/ata/ahci.c
--- a/drivers/ata/ahci.c	2017-03-23 14:42:22.000000000 +0100
+++ b/drivers/ata/ahci.c	2017-03-14 02:18:12.000000000 +0100
@@ -251,6 +251,26 @@
 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci },  
 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci },  
 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci },  
+	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci },  
 	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci },  
 	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci },  
 	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci },  
@@ -727,12 +747,14 @@
 	reg_val = syno_mv_9xxx_reg_get(host, 0x0E, addr_offset, data_offset);
 	syno_mv_9xxx_reg_set(host, 0xE, reg_val & ~0x100, addr_offset, data_offset);
 	reg_val = syno_mv_9xxx_reg_get(host, reg_addr, addr_offset, data_offset);
+	 
+	val &= 0xFBE;
 	reg_val &= ~0xFBE;
 	reg_val |= val;
 	syno_mv_9xxx_reg_set(host, reg_addr, reg_val, addr_offset, data_offset);
 }
 
-void syno_mv_9xxx_amp_adjust(struct ata_host *host)
+void syno_mv_9xxx_amp_adjust(struct ata_host *host, struct pci_dev *pdev)
 {
 	int port = 0;
 
@@ -762,6 +784,75 @@
 		syno_mv_9xxx_amp_adjust_by_port(host, 0xE3E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
 		port = 1;
 		syno_mv_9xxx_amp_adjust_by_port(host, 0xE3E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+	} else if (syno_is_hw_version(HW_DS1517p)) {
+		if (0x02 == PCI_SLOT(pdev->bus->self->devfn)) {
+			 
+			port = 0;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xD75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 1;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 2;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xD75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+		} else if (0x03 == PCI_SLOT(pdev->bus->self->devfn)) {
+			 
+			port = 0;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xD75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 1;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+		}
+	} else if (syno_is_hw_version(HW_DS1817p)) {
+		if (0x02 == PCI_SLOT(pdev->bus->self->devfn)) {
+			 
+			port = 0;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xD75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 1;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 2;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 3;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+		} else if (0x03 == PCI_SLOT(pdev->bus->self->devfn)) {
+			 
+			port = 0;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xCF5, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 1;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 2;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+			port = 3;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xE75, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xAE7E, mv_port_addr[port], mv_port_data[port], mv_sata_gen[1]);
+		}
+	} else if (syno_is_hw_version(HW_DS3017xs)) {
+		if (0x03 == PCI_SLOT(pdev->bus->self->devfn) && (0x00 == PCI_FUNC(pdev->bus->self->devfn) || 0x01 == PCI_FUNC(pdev->bus->self->devfn))) {
+			 
+			port = 0;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xF7B, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			port = 1;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xF7B, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			port = 2;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xFF5, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			port = 3;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xF77, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+		} else if (0x03 == PCI_SLOT(pdev->bus->self->devfn) && 0x02 == PCI_FUNC(pdev->bus->self->devfn)) {
+			 
+			port = 0;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xD7F, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+			port = 1;
+			syno_mv_9xxx_amp_adjust_by_port(host, 0xD7F, mv_port_addr[port], mv_port_data[port], mv_sata_gen[2]);
+		}
 	}
 }
 
@@ -1616,7 +1707,7 @@
 		syno_mv_9235_gpio_active_init(host);
 	}
 	if (pdev->vendor == 0x1b4b && (pdev->device == 0x9235 || pdev->device == 0x9215 || pdev->device == 0x9170)) {
-		syno_mv_9xxx_amp_adjust(host);
+		syno_mv_9xxx_amp_adjust(host, pdev);
 	}
 #endif  
 
diff -ur a/drivers/ata/libahci.c b/drivers/ata/libahci.c
--- a/drivers/ata/libahci.c	2017-03-23 14:42:26.000000000 +0100
+++ b/drivers/ata/libahci.c	2017-03-14 02:18:17.000000000 +0100
@@ -27,6 +27,9 @@
 extern void syno_ledtrig_active_set(int iLedNum);
 extern int *gpGreenLedMap;
 #endif  
+#ifdef MY_DEF_HERE
+#include <linux/synolib.h>
+#endif  
 
 static int ahci_skip_host_reset;
 int ahci_ignore_sss;
@@ -255,9 +258,6 @@
 #ifdef MY_DEF_HERE
 	&dev_attr_syno_wcache,
 #endif  
-#ifdef CONFIG_SYNO_SATA_DISK_SERIAL
-	&dev_attr_syno_disk_serial,
-#endif  
 #ifdef MY_DEF_HERE
 	&dev_attr_sw_locate,
 	&dev_attr_sw_fault,
@@ -574,7 +574,11 @@
 		}
 	}
 
+#ifdef MY_ABC_HERE
 	if (!port_map) {
+#else  
+	if (!port_map && vers < 0x10300) {
+#endif  
 		port_map = (1 << ahci_nr_ports(cap)) - 1;
 		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
 
@@ -890,6 +894,25 @@
 	return ret;
 }
 EXPORT_SYMBOL(syno_ahci_disk_led_enable);
+
+#ifdef MY_DEF_HERE
+ 
+int syno_ahci_disk_led_enable_by_port(const unsigned short diskPort, const int iValue)
+{
+	int i = 0;
+	unsigned short scsiHostNum = 0;
+	 
+	for (i = 0; i < SATA_REMAP_MAX; i++) {
+		if ((unsigned short)syno_get_remap_idx(i) == (diskPort - 1)) {
+			scsiHostNum = (unsigned short) i;
+			break;
+		}
+	}
+
+	return syno_ahci_disk_led_enable(scsiHostNum, iValue);
+}
+EXPORT_SYMBOL(syno_ahci_disk_led_enable_by_port);
+#endif  
 #endif  
 
 static void ahci_start_port(struct ata_port *ap)
@@ -1412,6 +1435,14 @@
 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
 
+	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
+		tmp = readl(port_mmio + PORT_FBS);
+		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+		tmp |= pmp << PORT_FBS_DEV_OFFSET;
+		writel(tmp, port_mmio + PORT_FBS);
+		pp->fbs_last_dev = pmp;
+	}
+
 	writel(1, port_mmio + PORT_CMD_ISSUE);
 
 	if (timeout_msec) {
@@ -1651,24 +1682,38 @@
 {
 	struct ata_link *link = qc->dev->link;
 	struct ata_port *ap = link->ap;
+	u8 prot = qc->tf.protocol;
 
-	if (ap->excl_link == NULL || ap->excl_link == link) {
-		if (ap->nr_active_links == 0 || ata_link_active(link)) {
-			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
-			return ata_std_qc_defer(qc);
-		}
+	int is_excl = (ata_is_atapi(prot) ||
+		       (qc->flags & ATA_QCFLAG_RESULT_TF));
 
-		ap->excl_link = link;
-	} else {
-		 
-		if (!ap->nr_active_links) {
-			ap->excl_link = link;
+	if (unlikely(ap->excl_link)) {
+		if (link == ap->excl_link) {
+			if (ap->nr_active_links)
+				return ATA_DEFER_PORT;
 			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
-			return ata_std_qc_defer(qc);
+		} else {
+			if (!ap->nr_active_links) {
+				 
+				if (is_excl) {
+					ap->excl_link = link;
+					qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+				} else {
+					 
+					ap->excl_link = NULL;
+				}
+			} else {
+				return ATA_DEFER_PORT;
+			}
 		}
+	} else if (unlikely(is_excl)) {
+		ap->excl_link = link;
+		if (ap->nr_active_links)
+			return ATA_DEFER_PORT;
+		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
 	}
 
-	return ATA_DEFER_PORT;
+	return ata_std_qc_defer(qc);
 }
 EXPORT_SYMBOL_GPL(sata_syno_ahci_defer_cmd);
 #endif  
@@ -1877,7 +1922,7 @@
 	if (unlikely(resetting))
 		status &= ~PORT_IRQ_BAD_PMP;
 
-	if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
+	if (sata_lpm_ignore_phy_events(&ap->link)) {
 		status &= ~PORT_IRQ_PHYRDY;
 		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
 	}
@@ -1888,7 +1933,7 @@
 	}
 
 	if (status & PORT_IRQ_SDB_FIS) {
-		 
+			 
 		if (hpriv->cap & HOST_CAP_SNTF)
 			sata_async_notification(ap);
 		else {
diff -ur a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
--- a/drivers/ata/libata-core.c	2017-03-23 14:42:28.000000000 +0100
+++ b/drivers/ata/libata-core.c	2017-03-14 02:18:19.000000000 +0100
@@ -87,6 +87,10 @@
 
 static char ata_force_param_buf[PAGE_SIZE] __initdata;
 
+#ifdef MY_DEF_HERE
+extern int g_syno_ds1815p_speed_limit;
+#endif  
+
 #if defined(MY_DEF_HERE)
 #include <linux/syno_gpio.h>
 
@@ -458,6 +462,11 @@
 	bool did_spd = false;
 	int linkno = link->pmp;
 	int i;
+#ifdef MY_DEF_HERE
+	struct ata_port *ap = link->ap;
+
+	WARN_ON(!ap);
+#endif  
 
 	if (ata_is_host_link(link))
 		linkno += 15;
@@ -471,7 +480,11 @@
 		if (fe->device != -1 && fe->device != linkno)
 			continue;
 
+#ifdef MY_DEF_HERE
+		if (!did_spd && fe->param.spd_limit && !(IS_SYNOLOGY_DX517(ap->PMSynoUnique) && syno_is_hw_version(HW_DS916p))) {
+#else
 		if (!did_spd && fe->param.spd_limit) {
+#endif  
 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
 					fe->param.name);
@@ -1969,14 +1982,16 @@
 	}
 #endif  
 
-#ifdef MY_ABC_HERE
-	 
-	if (syno_is_hw_version(HW_DS1815p)) {
-		if(ap && (ap->print_id == 7 || ap->print_id == 8)) {
-			dev->horkage |= ATA_HORKAGE_1_5_GBPS;
+#if defined(MY_DEF_HERE) && defined(MY_ABC_HERE)
+	if (1 == g_syno_ds1815p_speed_limit) {
+		 
+		if (syno_is_hw_version(HW_DS1815p)) {
+			if(ap && (ap->print_id == 7 || ap->print_id == 8)) {
+				dev->horkage |= ATA_HORKAGE_1_5_GBPS;
+			}
 		}
 	}
-#endif
+#endif  
 	ata_force_horkage(dev);
 
 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
@@ -3431,6 +3446,7 @@
 
 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
 
 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
@@ -3480,6 +3496,8 @@
 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 
+	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
+
 	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
 	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
 	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
@@ -3667,7 +3685,7 @@
 	else  
 		return 0;
 
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -5400,6 +5418,22 @@
 	return tmp;
 }
 
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
+{
+	unsigned long lpm_timeout = link->last_lpm_change +
+				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
+
+	if (link->lpm_policy > ATA_LPM_MAX_POWER)
+		return true;
+
+	if ((link->flags & ATA_LFLAG_CHANGED) &&
+	    time_before(jiffies, lpm_timeout))
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+
 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
 {
 	return AC_ERR_SYSTEM;
diff -ur a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
--- a/drivers/ata/libata-eh.c	2017-03-23 14:42:30.000000000 +0100
+++ b/drivers/ata/libata-eh.c	2017-03-14 02:18:22.000000000 +0100
@@ -536,6 +536,7 @@
 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
 {
 	unsigned long flags;
+	int iDeepTries = 0;
 
 #ifdef MY_DEF_HERE
 	spin_lock_irqsave(ap->lock, flags);
@@ -2644,6 +2645,9 @@
 		}
 	}
 
+	link->last_lpm_change = jiffies;
+	link->flags |= ATA_LFLAG_CHANGED;
+
 	return 0;
 
 fail:
diff -ur a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
--- a/drivers/ata/libata-pmp.c	2017-03-23 14:42:24.000000000 +0100
+++ b/drivers/ata/libata-pmp.c	2017-03-14 02:18:15.000000000 +0100
@@ -113,7 +113,8 @@
 
 		sata_pmp_write(&(ap->link), SATA_PMP_GSCR_9705_GPI_POLARITY, 0xFFFFF);
 
-		sata_pmp_write(&(ap->link), SATA_PMP_GSCR_9705_SATA_BLINK_RATE, 0x2082082);
+		sata_pmp_write(&(ap->link), SATA_PMP_GSCR_9705_SATA_0_TO_3_BLINK_RATE, 0x2082082);
+		sata_pmp_write(&(ap->link), SATA_PMP_GSCR_9705_SATA_4_BLINK_RATE, 0x00000082);
 
 		sata_pmp_write(&(ap->link), 0x090, 0x00001F1F);
 		sata_pmp_write(&(ap->link), 0x091, 0xFFF0003A);
@@ -176,6 +177,9 @@
 			syno_pm_device_config_set(ap, 2, 0x91, 0xF7F);
 		}
 	}
+	if (IS_SYNOLOGY_DX517(ap->PMSynoUnique)) {
+		syno_pm_device_config_set(ap, 4, 0x91, 0xE7F);
+	}
 }
 
 void
@@ -442,7 +446,8 @@
 	if (!IS_SYNOLOGY_RX413(ap->PMSynoUnique) &&
 		!IS_SYNOLOGY_RX1214(ap->PMSynoUnique) &&
 		!IS_SYNOLOGY_RX1217(ap->PMSynoUnique) &&
-		!IS_SYNOLOGY_DX1215(ap->PMSynoUnique)) {
+		!IS_SYNOLOGY_DX1215(ap->PMSynoUnique) &&
+		!IS_SYNOLOGY_DX517(ap->PMSynoUnique)) {
 		goto END;
 	}
 
@@ -750,6 +755,19 @@
 			ret = 0;
 		}
 #endif  
+#ifdef MY_DEF_HERE
+		if (syno_is_hw_version(HW_DS1517p) && syno_pm_is_synology_3xxx(ap)) {
+			ata_port_printk(ap, KERN_ERR, "This expansion unit is unsupported\n");
+			ret = 0;
+		}
+#endif  
+#if defined (MY_DEF_HERE) && defined (MY_DEF_HERE)
+		 
+		if (IS_SYNOLOGY_DX517(ap->PMSynoUnique) && (ap->link.uiStsFlags & SYNO_STATUS_IS_SIL)) {
+			ata_port_printk(ap, KERN_ERR, "This expansion unit is unsupported\n");
+			ret = 0;
+		}
+#endif  
 	}
 	return ret;
 }
@@ -1277,6 +1295,13 @@
 				       ATA_LFLAG_NO_SRST |
 				       ATA_LFLAG_ASSUME_ATA;
 		}
+	} else if (vendor == 0x11ab && devid == 0x4140) {
+		 
+		ata_for_each_link(link, ap, EDGE) {
+			 
+			if (link->pmp == 4)
+				link->flags |= ATA_LFLAG_DISABLED;
+		}
 	}
 }
 
@@ -1321,6 +1346,9 @@
 	syno_pm_gpio_config(ap);
 	syno_prepare_custom_info(ap);
 #ifdef MY_ABC_HERE
+	if (0 == ap->PMSynoEMID) {
+		ap->pflags |= ATA_PFLAG_SYNO_BOOT_PROBE;
+	}
 	 
 	if (IS_SYNOLOGY_DX510(ap->PMSynoUnique)) {
 		target = 1;
@@ -1366,6 +1394,18 @@
 
 			link->sata_spd_limit = target_limit;
 		}
+	} else if (IS_SYNOLOGY_DX517(ap->PMSynoUnique) &&
+			(syno_is_hw_version(HW_DS916p))) {
+		target_limit = 0;
+
+		if(link->sata_spd_limit != target_limit) {
+			ata_dev_printk(dev, KERN_ERR,
+					"recover DX517 link speed to default configuration and reattach\n");
+
+			link->sata_spd_limit = 0;
+			rc = -EAGAIN;
+			goto fail;
+		}
 	}
 #endif  
 #endif  
diff -ur a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
--- a/drivers/ata/libata-scsi.c	2017-03-23 14:42:25.000000000 +0100
+++ b/drivers/ata/libata-scsi.c	2017-03-14 02:18:15.000000000 +0100
@@ -583,7 +583,8 @@
 				IS_SYNOLOGY_DX513(ap->PMSynoUnique) ||
 				IS_SYNOLOGY_DX213(ap->PMSynoUnique) ||
 				IS_SYNOLOGY_RX413(ap->PMSynoUnique) ||
-				IS_SYNOLOGY_RX415(ap->PMSynoUnique)) {
+				IS_SYNOLOGY_RX415(ap->PMSynoUnique) ||
+				IS_SYNOLOGY_DX517(ap->PMSynoUnique)) {
 			if (ap->host == pAp_master->host && ap->port_no == pAp_master->port_no) {
 				unsigned long flags;
 				spin_lock_irqsave(ap->lock, flags);
@@ -778,7 +779,7 @@
 				 "\"\n");
 
 		strncat(szTmp1, szTmp, BDEVNAME_SIZE);
-
+		 
 		if (IS_SYNOLOGY_RX410(ap->PMSynoUnique)) {
 			snprintf(szTmp,
 					BDEVNAME_SIZE,
@@ -909,6 +910,14 @@
 					EBOX_INFO_UNIQUE_DX1215,
 					EBOX_INFO_EMID_KEY,
 					ap->PMSynoEMID);
+		} else if(IS_SYNOLOGY_DX517(ap->PMSynoUnique)) {
+			snprintf(szTmp,
+					BDEVNAME_SIZE,
+					"%s=\"%s\"\n%s=\"%d\"\n",
+					EBOX_INFO_UNIQUE_KEY,
+					EBOX_INFO_UNIQUE_DX517,
+					EBOX_INFO_EMID_KEY,
+					ap->PMSynoEMID);
 		} else {
 			snprintf(szTmp,
 					BDEVNAME_SIZE,
@@ -1129,6 +1138,54 @@
 EXPORT_SYMBOL_GPL(dev_attr_syno_sata_disk_led_ctrl);
 #endif  
 
+#ifdef MY_DEF_HERE
+ 
+static int syno_shift_remap_table(struct ata_host *host)
+{
+	int i = 0;
+	int iRet = -1;
+	char szPciAddress[PCI_ADDR_LEN_MAX + 1];
+	struct pci_dev *pdev = NULL;
+	struct pci_dev *pdev_cur = NULL;
+
+	if (NULL == host) {
+		printk("Bad parameter!\n");
+		goto END;
+	}
+
+	if (0 == host->n_ports || NULL == host->ports) {
+		printk("Error: ata port information is needed.");
+		goto END;
+	}
+
+	pdev = to_pci_dev(host->dev);
+	pdev_cur = pdev;
+
+	while (NULL != pdev_cur) {
+		snprintf(szPciAddress, sizeof(szPciAddress),"%04x%02x%02x%x",
+				pci_domain_nr(pdev_cur->bus), pdev_cur->bus->number,
+				PCI_SLOT(pdev_cur->devfn), PCI_FUNC(pdev_cur->devfn));
+
+		for (i = 0; i < gPciAddrNum; i++) {
+			if (0 == strncmp(szPciAddress, gszPciAddrList[i], PCI_ADDR_LEN_MAX)) {
+				syno_insert_sata_index_remap(
+					host->ports[0]->print_id - 1,
+					host->n_ports,
+					gPciDeferStart);
+
+				gPciDeferStart += host->n_ports;
+				iRet = 1;
+				goto END;
+			}
+		}
+		pdev_cur = pdev_cur->bus->self;
+	}
+	iRet = 0;
+END:
+	return iRet;
+}
+#endif  
+
 static ssize_t ata_scsi_park_show(struct device *device,
 				  struct device_attribute *attr, char *buf)
 {
@@ -1322,9 +1379,6 @@
 #ifdef MY_DEF_HERE
 	&dev_attr_syno_wcache,
 #endif  
-#ifdef CONFIG_SYNO_SATA_DISK_SERIAL
-	&dev_attr_syno_disk_serial,
-#endif  
 #ifdef MY_DEF_HERE
 	&dev_attr_syno_sata_disk_led_ctrl,
 #endif  
@@ -1604,22 +1658,21 @@
 int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
 		     int cmd, void __user *arg)
 {
-	int val = -EINVAL, rc = -EINVAL;
+	unsigned long val;
+	int rc = -EINVAL;
 	unsigned long flags;
 #ifdef MY_DEF_HERE
 	struct ata_device *dev;
 #endif  
 
 	switch (cmd) {
-	case ATA_IOC_GET_IO32:
+	case HDIO_GET_32BIT:
 		spin_lock_irqsave(ap->lock, flags);
 		val = ata_ioc32(ap);
 		spin_unlock_irqrestore(ap->lock, flags);
-		if (copy_to_user(arg, &val, 1))
-			return -EFAULT;
-		return 0;
+		return put_user(val, (unsigned long __user *)arg);
 
-	case ATA_IOC_SET_IO32:
+	case HDIO_SET_32BIT:
 		val = (unsigned long) arg;
 		rc = 0;
 		spin_lock_irqsave(ap->lock, flags);
@@ -2064,6 +2117,13 @@
 		queue_depth = 1;
 	}
 	spin_unlock_irqrestore(ap->lock, flags);
+	
+#ifdef MY_DEF_HERE	
+	 
+	if (!ata_ncq_enabled(dev) && 1 == sdev->queue_depth) {
+		return sdev->queue_depth;
+	}
+#endif  
 
 	queue_depth = min(queue_depth, sdev->host->can_queue);
 	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
@@ -2429,11 +2489,22 @@
 	struct ata_port *ap = qc->ap;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	u8 *cdb = cmd->cmnd;
+#ifdef MY_ABC_HERE
+	u8 *desc = NULL;
+#endif  
 	int need_sense = (qc->err_mask != 0);
 
 #ifdef MY_DEF_HERE
+#ifdef MY_ABC_HERE
 	 
-	syno_result_tf_lba_restore(qc);
+	if (ata_is_ncq(qc->tf.protocol) &&
+			!(qc->err_mask & AC_ERR_NCQ)) {
+#endif  
+		 
+		syno_result_tf_lba_restore(qc);
+#ifdef MY_ABC_HERE
+	}
+#endif  
 #endif  
 
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
@@ -2445,6 +2516,15 @@
 		} else {
 			 
 			ata_gen_ata_sense(qc);
+#ifdef MY_ABC_HERE
+			 
+			if ( (qc->result_tf.feature & ATA_UNC) &&
+					ata_is_ncq(qc->tf.protocol) &&
+					!(qc->err_mask & AC_ERR_NCQ) ) {
+				desc = qc->scsicmd->sense_buffer + 8;
+				desc[SYNO_DESCRIPTOR_RESERVED_INDEX] |= SYNO_NCQ_FAKE_UNC;
+			}
+#endif  
 		}
 	}
 
@@ -3180,7 +3260,8 @@
 		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
 		rbuf[15] = lowest_aligned;
 
-		if (ata_id_has_trim(args->id)) {
+		if (ata_id_has_trim(args->id) &&
+		    !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
 			rbuf[14] |= 0x80;  
 
 			if (ata_id_has_zero_after_trim(args->id))
@@ -4085,6 +4166,13 @@
 int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
 {
 	int i, rc;
+#ifdef MY_DEF_HERE
+	int isCacheSSD = 0;
+
+	if (1 == syno_shift_remap_table(host)) {
+		isCacheSSD = 1;
+	}
+#endif  
 
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
@@ -4109,6 +4197,9 @@
 
 		shost->max_host_blocked = 1;
 
+#ifdef MY_DEF_HERE
+		shost->isCacheSSD = isCacheSSD;
+#endif
 		rc = scsi_add_host_with_dma(ap->scsi_host,
 						&ap->tdev, ap->host->dev);
 		if (rc)
@@ -4772,12 +4863,6 @@
 #endif  
 
 #ifdef MY_DEF_HERE
-	if (syno_is_synology_pm(ap)) {
-		mapped_idx = ((mapped_idx+1)*26) + channel;  
-	} else {
-#endif  
-
-#ifdef MY_DEF_HERE
 	if (!blMapped) {
 		mapped_idx = syno_get_remap_idx(index);
 
@@ -4788,6 +4873,12 @@
 	}
 #endif  
 
+#ifdef MY_DEF_HERE
+	if (syno_is_synology_pm(ap)) {
+		mapped_idx = ((mapped_idx+1)*26) + channel;  
+	} else {
+#endif  
+
 	if (0 <= (reversed_idx = syno_is_reversed_scsi_host_model(host->host_no))) {
 		mapped_idx = reversed_idx;
 		blMapped = true;
diff -ur a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
--- a/drivers/ata/libata-sff.c	2017-03-23 14:42:22.000000000 +0100
+++ b/drivers/ata/libata-sff.c	2017-03-14 02:18:12.000000000 +0100
@@ -609,12 +609,10 @@
 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 {
 	struct ata_port *ap = qc->ap;
-	unsigned long flags;
 
 	if (ap->ops->error_handler) {
 		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
-
+			 
 			qc = ata_qc_from_tag(ap, qc->tag);
 			if (qc) {
 #ifdef MY_DEF_HERE
@@ -649,8 +647,6 @@
 					ata_port_freeze(ap);
 #endif  
 			}
-
-			spin_unlock_irqrestore(ap->lock, flags);
 		} else {
 #ifdef MY_DEF_HERE
 			if (IS_SYNO_SPINUP_CMD(qc)) {
@@ -684,10 +680,8 @@
 		}
 	} else {
 		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
 			ata_sff_irq_on(ap);
 			ata_qc_complete(qc);
-			spin_unlock_irqrestore(ap->lock, flags);
 		} else
 			ata_qc_complete(qc);
 	}
@@ -701,9 +695,10 @@
 #if defined(MY_DEF_HERE) && defined(MY_DEF_HERE)
 	struct ata_taskfile *tf = &qc->tf;
 #endif  
-	unsigned long flags = 0;
 	int poll_next;
 
+	lockdep_assert_held(ap->lock);
+
 #ifdef MY_DEF_HERE
 	 
 	if (IS_SYNO_SPINUP_CMD(qc)
@@ -715,6 +710,8 @@
 	}
 #else  
 
+	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
 	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 #endif  
 
@@ -755,9 +752,6 @@
 			}
 		}
 
-		if (in_wq)
-			spin_lock_irqsave(ap->lock, flags);
-
 		if (qc->tf.protocol == ATA_PROT_PIO) {
 			 
 			ap->hsm_task_state = HSM_ST;
@@ -766,9 +760,6 @@
 			 
 			atapi_send_cdb(ap, qc);
 
-		if (in_wq)
-			spin_unlock_irqrestore(ap->lock, flags);
-
 		break;
 
 	case HSM_ST:
@@ -950,12 +941,14 @@
 	u8 status;
 	int poll_next;
 
+	spin_lock_irq(ap->lock);
+
 	BUG_ON(ap->sff_pio_task_link == NULL);
 	 
 	qc = ata_qc_from_tag(ap, link->active_tag);
 	if (!qc) {
 		ap->sff_pio_task_link = NULL;
-		return;
+		goto out_unlock;
 	}
 
 fsm_start:
@@ -963,11 +956,14 @@
 
 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
 	if (status & ATA_BUSY) {
+		spin_unlock_irq(ap->lock);
 		ata_msleep(ap, 2);
+		spin_lock_irq(ap->lock);
+
 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
 		if (status & ATA_BUSY) {
 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
-			return;
+			goto out_unlock;
 		}
 	}
 
@@ -977,6 +973,8 @@
 
 	if (poll_next)
 		goto fsm_start;
+out_unlock:
+	spin_unlock_irq(ap->lock);
 }
 
 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
diff -ur a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
--- a/drivers/ata/pata_octeon_cf.c	2017-03-23 14:42:24.000000000 +0100
+++ b/drivers/ata/pata_octeon_cf.c	2017-03-14 02:18:15.000000000 +0100
@@ -1062,7 +1062,7 @@
 	},
 	{},
 };
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
 
 static struct platform_driver octeon_cf_driver = {
 	.probe		= octeon_cf_probe,
diff -ur a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
--- a/drivers/ata/sata_sil24.c	2017-03-23 14:42:20.000000000 +0100
+++ b/drivers/ata/sata_sil24.c	2017-03-14 02:18:10.000000000 +0100
@@ -26,6 +26,10 @@
 extern int *gpGreenLedMap;
 #endif  
 
+#ifdef MY_DEF_HERE
+extern int g_syno_ds1815p_speed_limit;
+#endif  
+
 struct sil24_prb {
 	__le16	ctrl;
 	__le16	prot;
@@ -1295,9 +1299,28 @@
 		tmp = readl(port + PORT_PHY_CFG);
 		tmp &= ~0x1f;
 		if (syno_is_hw_version(HW_DS1815p)) {
+#ifdef MY_DEF_HERE
+			 
+			if (1 == g_syno_ds1815p_speed_limit) {
+				dev_info(host->dev, "Increase sil3132 swing to 0x15\n");
+				 
+				tmp |= 0x15;
+			} else {
+				if (2 == host->host_no) {  
+					dev_info(host->dev, "Increase sil3132 swing to 0xa\n");
+					 
+					tmp |= 0x0a;
+				} else {  
+					dev_info(host->dev, "Increase sil3132 swing to 0x15\n");
+					 
+					tmp |= 0x15;
+				}
+			}
+#else  
 			dev_info(host->dev, "Increase sil3132 swing to 0x15\n");
 			 
 			tmp |= 0x15;
+#endif  
 		} else if (syno_is_hw_version(HW_DS1515p)) {
 			dev_info(host->dev, "Increase sil3132 swing to 0x13\n");
 			 
@@ -1394,6 +1417,18 @@
 	}
 
 	pci_set_master(pdev);
+
+#ifdef MY_DEF_HERE
+	if (pdev->vendor == 0x1095 && (pdev->device == 0x3132 || pdev->device == 0x3531)) {
+		int i=0;
+
+		for (i = 0; i < host->n_ports; i++) {
+			struct ata_port *ap = host->ports[i];
+			ap->link.uiStsFlags |= SYNO_STATUS_IS_SIL;
+		}
+	}
+#endif  
+
 	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
 				 &sil24_sht);
 }
diff -ur a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
--- a/drivers/ata/sata_sil.c	2017-03-23 14:42:22.000000000 +0100
+++ b/drivers/ata/sata_sil.c	2017-03-14 02:18:13.000000000 +0100
@@ -628,6 +628,9 @@
 	unsigned int n, quirks = 0;
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 
+	/* This controller doesn't support trim */
+	dev->horkage |= ATA_HORKAGE_NOTRIM;
+
 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 
 	for (n = 0; sil_blacklist[n].product; n++)
diff -ur a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
--- a/drivers/auxdisplay/ks0108.c	2017-03-23 14:48:42.000000000 +0100
+++ b/drivers/auxdisplay/ks0108.c	2017-03-14 02:25:34.000000000 +0100
@@ -139,6 +139,7 @@
 
 	ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
 		NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
+	parport_put_port(ks0108_parport);
 	if (ks0108_pardevice == NULL) {
 		printk(KERN_ERR KS0108_NAME ": ERROR: "
 			"parport didn't register new device\n");
diff -ur a/drivers/base/devres.c b/drivers/base/devres.c
--- a/drivers/base/devres.c	2017-03-23 14:49:42.000000000 +0100
+++ b/drivers/base/devres.c	2017-03-14 02:26:48.000000000 +0100
@@ -296,10 +296,10 @@
 	if (!dr) {
 		add_dr(dev, &new_dr->node);
 		dr = new_dr;
-		new_dr = NULL;
+		new_res = NULL;
 	}
 	spin_unlock_irqrestore(&dev->devres_lock, flags);
-	devres_free(new_dr);
+	devres_free(new_res);
 
 	return dr->data;
 }
diff -ur a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
--- a/drivers/base/firmware_class.c	2017-03-23 14:49:44.000000000 +0100
+++ b/drivers/base/firmware_class.c	2017-03-14 02:26:50.000000000 +0100
@@ -512,10 +512,8 @@
 	module_put(THIS_MODULE);
 }
 
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
 {
-	struct firmware_priv *fw_priv = to_firmware_priv(dev);
-
 	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
 		return -ENOMEM;
 	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -526,6 +524,18 @@
 	return 0;
 }
 
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct firmware_priv *fw_priv = to_firmware_priv(dev);
+	int err = 0;
+
+	mutex_lock(&fw_lock);
+	if (fw_priv->buf)
+		err = do_firmware_uevent(fw_priv, env);
+	mutex_unlock(&fw_lock);
+	return err;
+}
+
 static struct class firmware_class = {
 	.name		= "firmware",
 	.class_attrs	= firmware_class_attrs,
diff -ur a/drivers/base/platform.c b/drivers/base/platform.c
--- a/drivers/base/platform.c	2017-03-23 14:49:42.000000000 +0100
+++ b/drivers/base/platform.c	2017-03-14 02:26:49.000000000 +0100
@@ -347,9 +347,7 @@
 
 	while (--i >= 0) {
 		struct resource *r = &pdev->resource[i];
-		unsigned long type = resource_type(r);
-
-		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+		if (r->parent)
 			release_resource(r);
 	}
 
@@ -380,9 +378,7 @@
 
 		for (i = 0; i < pdev->num_resources; i++) {
 			struct resource *r = &pdev->resource[i];
-			unsigned long type = resource_type(r);
-
-			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+			if (r->parent)
 				release_resource(r);
 		}
 	}
diff -ur a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
--- a/drivers/base/regmap/regmap.c	2017-03-23 14:49:46.000000000 +0100
+++ b/drivers/base/regmap/regmap.c	2017-03-14 02:26:53.000000000 +0100
@@ -1586,7 +1586,7 @@
 					  &ival);
 			if (ret != 0)
 				return ret;
-			memcpy(val + (i * val_bytes), &ival, val_bytes);
+			map->format.format_val(val + (i * val_bytes), ival, 0);
 		}
 	}
 
diff -ur a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
--- a/drivers/base/regmap/regmap-debugfs.c	2017-03-23 14:49:45.000000000 +0100
+++ b/drivers/base/regmap/regmap-debugfs.c	2017-03-14 02:26:51.000000000 +0100
@@ -23,8 +23,7 @@
 /* Calculate the length of a fixed format  */
 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
 {
-	snprintf(buf, buf_size, "%x", max_val);
-	return strlen(buf);
+	return snprintf(NULL, 0, "%x", max_val);
 }
 
 static ssize_t regmap_name_read_file(struct file *file,
@@ -419,7 +418,7 @@
 		/* If we're in the region the user is trying to read */
 		if (p >= *ppos) {
 			/* ...but not beyond it */
-			if (buf_pos >= count - 1 - tot_len)
+			if (buf_pos + tot_len + 1 >= count)
 				break;
 
 			/* Format the register */
diff -ur a/drivers/block/nbd.c b/drivers/block/nbd.c
--- a/drivers/block/nbd.c	2017-03-23 14:48:46.000000000 +0100
+++ b/drivers/block/nbd.c	2017-03-14 02:25:39.000000000 +0100
@@ -580,8 +580,8 @@
 		BUG_ON(nbd->magic != NBD_MAGIC);
 
 		if (unlikely(!nbd->sock)) {
-			dev_err(disk_to_dev(nbd->disk),
-				"Attempted send on closed socket\n");
+			dev_err_ratelimited(disk_to_dev(nbd->disk),
+					    "Attempted send on closed socket\n");
 			req->errors++;
 			nbd_end_request(req);
 			spin_lock_irq(q->queue_lock);
diff -ur a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
--- a/drivers/block/paride/pd.c	2017-03-23 14:48:51.000000000 +0100
+++ b/drivers/block/paride/pd.c	2017-03-14 02:25:46.000000000 +0100
@@ -125,7 +125,7 @@
 */
 #include <linux/types.h>
 
-static bool verbose = 0;
+static int verbose = 0;
 static int major = PD_MAJOR;
 static char *name = PD_NAME;
 static int cluster = 64;
@@ -160,7 +160,7 @@
 static DEFINE_MUTEX(pd_mutex);
 static DEFINE_SPINLOCK(pd_lock);
 
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
 module_param(major, int, 0);
 module_param(name, charp, 0);
 module_param(cluster, int, 0);
diff -ur a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
--- a/drivers/block/paride/pt.c	2017-03-23 14:48:50.000000000 +0100
+++ b/drivers/block/paride/pt.c	2017-03-14 02:25:45.000000000 +0100
@@ -117,7 +117,7 @@
 
 */
 
-static bool verbose = 0;
+static int verbose = 0;
 static int major = PT_MAJOR;
 static char *name = PT_NAME;
 static int disable = 0;
@@ -152,7 +152,7 @@
 
 #include <asm/uaccess.h>
 
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
 module_param(major, int, 0);
 module_param(name, charp, 0);
 module_param_array(drive0, int, NULL, 0);
diff -ur a/drivers/block/rbd.c b/drivers/block/rbd.c
--- a/drivers/block/rbd.c	2017-03-23 14:48:53.000000000 +0100
+++ b/drivers/block/rbd.c	2017-03-14 02:25:46.000000000 +0100
@@ -90,6 +90,8 @@
 
 #define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */
 
+#define RBD_MAX_PARENT_CHAIN_LEN	16
+
 #define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
 #define RBD_MAX_SNAP_NAME_LEN	\
 			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
@@ -391,7 +393,7 @@
 		       size_t count);
 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
 			  size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
 static void rbd_spec_put(struct rbd_spec *spec);
 
 static struct bus_attribute rbd_bus_attrs[] = {
@@ -454,6 +456,7 @@
 #  define rbd_assert(expr)	((void) 0)
 #endif /* !RBD_DEBUG */
 
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1667,6 +1670,16 @@
 	obj_request_done_set(obj_request);
 }
 
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+	dout("%s: obj %p\n", __func__, obj_request);
+
+	if (obj_request_img_data_test(obj_request))
+		rbd_osd_copyup_callback(obj_request);
+	else
+		obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 				struct ceph_msg *msg)
 {
@@ -1705,6 +1718,8 @@
 		rbd_osd_stat_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_CALL:
+		rbd_osd_call_callback(obj_request);
+		break;
 	case CEPH_OSD_OP_NOTIFY_ACK:
 	case CEPH_OSD_OP_WATCH:
 		rbd_osd_trivial_callback(obj_request);
@@ -1847,11 +1862,11 @@
 	rbd_assert(obj_request_type_valid(type));
 
 	size = strlen(object_name) + 1;
-	name = kmalloc(size, GFP_KERNEL);
+	name = kmalloc(size, GFP_NOIO);
 	if (!name)
 		return NULL;
 
-	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
+	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
 	if (!obj_request) {
 		kfree(name);
 		return NULL;
@@ -2111,6 +2126,11 @@
 			result, xferred);
 		if (!img_request->result)
 			img_request->result = result;
+		/*
+		 * Need to end I/O on the entire obj_request worth of
+		 * bytes in case of error.
+		 */
+		xferred = obj_request->length;
 	}
 
 	/* Image object requests don't own their page array */
@@ -2296,13 +2316,15 @@
 }
 
 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
 	struct rbd_img_request *img_request;
 	struct rbd_device *rbd_dev;
 	struct page **pages;
 	u32 page_count;
 
+	dout("%s: obj %p\n", __func__, obj_request);
+
 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
 	rbd_assert(obj_request_img_data_test(obj_request));
 	img_request = obj_request->img_request;
@@ -2328,9 +2350,7 @@
 	if (!obj_request->result)
 		obj_request->xferred = obj_request->length;
 
-	/* Finish up with the normal image object callback */
-
-	rbd_img_obj_callback(obj_request);
+	obj_request_done_set(obj_request);
 }
 
 static void
@@ -2427,7 +2447,6 @@
 
 	/* All set, send it off. */
 
-	orig_request->callback = rbd_img_obj_copyup_callback;
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	img_result = rbd_obj_request_submit(osdc, orig_request);
 	if (!img_result)
@@ -3432,6 +3451,9 @@
 	blk_queue_io_opt(q, segment_size);
 
 	blk_queue_merge_bvec(q, rbd_merge_bvec);
+	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
+		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+
 	disk->queue = q;
 
 	q->queuedata = rbd_dev;
@@ -4807,45 +4829,50 @@
 	return ret;
 }
 
-static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+/*
+ * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
+ * rbd_dev_image_probe() recursion depth, which means it's also the
+ * length of the already discovered part of the parent chain.
+ */
+static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
 {
 	struct rbd_device *parent = NULL;
-	struct rbd_spec *parent_spec;
-	struct rbd_client *rbdc;
 	int ret;
 
 	if (!rbd_dev->parent_spec)
 		return 0;
-	/*
-	 * We need to pass a reference to the client and the parent
-	 * spec when creating the parent rbd_dev.  Images related by
-	 * parent/child relationships always share both.
-	 */
-	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
-	rbdc = __rbd_get_client(rbd_dev->rbd_client);
 
-	ret = -ENOMEM;
-	parent = rbd_dev_create(rbdc, parent_spec);
-	if (!parent)
+	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
+		pr_info("parent chain is too long (%d)\n", depth);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
+	if (!parent) {
+		ret = -ENOMEM;
 		goto out_err;
+	}
 
-	ret = rbd_dev_image_probe(parent, false);
+	/*
+	 * Images related by parent/child relationships always share
+	 * rbd_client and spec/parent_spec, so bump their refcounts.
+	 */
+	__rbd_get_client(rbd_dev->rbd_client);
+	rbd_spec_get(rbd_dev->parent_spec);
+
+	ret = rbd_dev_image_probe(parent, depth);
 	if (ret < 0)
 		goto out_err;
+
 	rbd_dev->parent = parent;
 	atomic_set(&rbd_dev->parent_ref, 1);
-
 	return 0;
+
 out_err:
-	if (parent) {
-		rbd_dev_unparent(rbd_dev);
-		kfree(rbd_dev->header_name);
+	rbd_dev_unparent(rbd_dev);
+	if (parent)
 		rbd_dev_destroy(parent);
-	} else {
-		rbd_put_client(rbdc);
-		rbd_spec_put(parent_spec);
-	}
-
 	return ret;
 }
 
@@ -4951,7 +4978,7 @@
  * parent), initiate a watch on its header object before using that
  * object to get detailed information about the rbd image.
  */
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 {
 	int ret;
 	int tmp;
@@ -4972,7 +4999,7 @@
 	if (ret)
 		goto err_out_format;
 
-	if (mapping) {
+	if (!depth) {
 		ret = rbd_dev_header_watch_sync(rbd_dev, true);
 		if (ret)
 			goto out_header_name;
@@ -4989,7 +5016,7 @@
 	if (ret)
 		goto err_out_probe;
 
-	ret = rbd_dev_probe_parent(rbd_dev);
+	ret = rbd_dev_probe_parent(rbd_dev, depth);
 	if (ret)
 		goto err_out_probe;
 
@@ -5000,7 +5027,7 @@
 err_out_probe:
 	rbd_dev_unprobe(rbd_dev);
 err_out_watch:
-	if (mapping) {
+	if (!depth) {
 		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
 		if (tmp)
 			rbd_warn(rbd_dev, "unable to tear down "
@@ -5071,7 +5098,7 @@
 	rbdc = NULL;		/* rbd_dev now owns this */
 	spec = NULL;		/* rbd_dev now owns this */
 
-	rc = rbd_dev_image_probe(rbd_dev, true);
+	rc = rbd_dev_image_probe(rbd_dev, 0);
 	if (rc < 0)
 		goto err_out_rbd_dev;
 
diff -ur a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
--- a/drivers/block/virtio_blk.c	2017-03-23 14:48:45.000000000 +0100
+++ b/drivers/block/virtio_blk.c	2017-03-14 02:25:39.000000000 +0100
@@ -656,7 +656,6 @@
 	struct request_queue *q;
 	int err, index;
 	int pool_size;
-
 	u64 cap;
 	u32 v, blk_size, sg_elems, opt_io_size;
 	u16 min_io_size;
diff -ur a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
--- a/drivers/block/xen-blkfront.c	2017-03-23 14:48:48.000000000 +0100
+++ b/drivers/block/xen-blkfront.c	2017-03-14 02:25:41.000000000 +0100
@@ -1585,7 +1585,8 @@
 			break;
 		/* Missed the backend's Closing state -- fallthrough */
 	case XenbusStateClosing:
-		blkfront_closing(info);
+		if (info)
+			blkfront_closing(info);
 		break;
 	}
 }
diff -ur a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
--- a/drivers/bluetooth/btusb.c	2017-03-23 14:55:02.000000000 +0100
+++ b/drivers/bluetooth/btusb.c	2017-03-14 02:32:08.000000000 +0100
@@ -1234,6 +1234,8 @@
 	}
 	fw_ptr = fw->data;
 
+	kfree_skb(skb);
+
 	/* This Intel specific command enables the manufacturer mode of the
 	 * controller.
 	 *
diff -ur a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
--- a/drivers/bluetooth/hci_vhci.c	2017-03-23 14:54:59.000000000 +0100
+++ b/drivers/bluetooth/hci_vhci.c	2017-03-14 02:32:06.000000000 +0100
@@ -265,6 +265,7 @@
 	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 
+	skb_queue_purge(&data->readq);
 	file->private_data = NULL;
 	kfree(data);
 
diff -ur a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
--- a/drivers/bus/mvebu-mbus.c	2017-03-23 14:57:55.000000000 +0100
+++ b/drivers/bus/mvebu-mbus.c	2017-03-14 02:34:51.000000000 +0100
@@ -836,7 +836,7 @@
 int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
 			   size_t mbuswins_size,
 			   phys_addr_t sdramwins_phys_base,
-			   size_t sdramwins_size)
+			   size_t sdramwins_size, int is_coherent)
 {
 	struct mvebu_mbus_state *mbus = &mbus_state;
 	const struct of_device_id *of_id;
@@ -863,8 +863,7 @@
 		return -ENOMEM;
 	}
 
-	if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
-		mbus->hw_io_coherency = 1;
+	mbus->hw_io_coherency = is_coherent;
 
 	for (win = 0; win < mbus->soc->num_wins; win++)
 		mvebu_mbus_disable_window(mbus, win);
diff -ur a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
--- a/drivers/char/agp/intel-gtt.c	2017-03-23 14:40:36.000000000 +0100
+++ b/drivers/char/agp/intel-gtt.c	2017-03-14 02:15:57.000000000 +0100
@@ -583,7 +583,7 @@
 	/* Query intel_iommu to see if we need the workaround. Presumably that
 	 * was loaded first.
 	 */
-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
 	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
 	     intel_iommu_gfx_mapped)
 		return 1;
diff -ur a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
--- a/drivers/char/ipmi/ipmi_si_intf.c	2017-03-23 14:40:37.000000000 +0100
+++ b/drivers/char/ipmi/ipmi_si_intf.c	2017-03-14 02:15:57.000000000 +0100
@@ -1141,14 +1141,14 @@
 
 	new_smi->intf = intf;
 
-	/* Try to claim any interrupts. */
-	if (new_smi->irq_setup)
-		new_smi->irq_setup(new_smi);
-
 	/* Set up the timer that drives the interface. */
 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
 	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
+	/* Try to claim any interrupts. */
+	if (new_smi->irq_setup)
+		new_smi->irq_setup(new_smi);
+
 	/*
 	 * Check if the user forcefully enabled the daemon.
 	 */
@@ -2713,7 +2713,7 @@
 		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
 			schedule_timeout_uninterruptible(1);
 			smi_result = smi_info->handlers->event(
-				smi_info->si_sm, 100);
+				smi_info->si_sm, jiffies_to_usecs(1));
 		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
 			smi_result = smi_info->handlers->event(
 				smi_info->si_sm, 0);
diff -ur a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
--- a/drivers/char/tpm/tpm_ibmvtpm.c	2017-03-23 14:40:40.000000000 +0100
+++ b/drivers/char/tpm/tpm_ibmvtpm.c	2017-03-14 02:16:02.000000000 +0100
@@ -529,7 +529,7 @@
 			}
 			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
 			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
-						    GFP_KERNEL);
+						    GFP_ATOMIC);
 			if (!ibmvtpm->rtce_buf) {
 				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
 				return;
@@ -618,6 +618,9 @@
 		goto cleanup;
 	}
 
+	ibmvtpm->dev = dev;
+	ibmvtpm->vdev = vio_dev;
+
 	crq_q = &ibmvtpm->crq_queue;
 	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
 	if (!crq_q->crq_addr) {
@@ -662,8 +665,6 @@
 
 	crq_q->index = 0;
 
-	ibmvtpm->dev = dev;
-	ibmvtpm->vdev = vio_dev;
 	TPM_VPRIV(chip) = (void *)ibmvtpm;
 
 	spin_lock_init(&ibmvtpm->rtce_lock);
diff -ur a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
--- a/drivers/clk/versatile/clk-sp810.c	2017-03-23 14:42:15.000000000 +0100
+++ b/drivers/clk/versatile/clk-sp810.c	2017-03-14 02:18:01.000000000 +0100
@@ -128,8 +128,8 @@
 {
 	struct clk_sp810 *sp810 = data;
 
-	if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
-			ARRAY_SIZE(sp810->timerclken)))
+	if (WARN_ON(clkspec->args_count != 1 ||
+		    clkspec->args[0] >=	ARRAY_SIZE(sp810->timerclken)))
 		return NULL;
 
 	return sp810->timerclken[clkspec->args[0]].clk;
@@ -141,6 +141,7 @@
 	const char *parent_names[2];
 	char name[12];
 	struct clk_init_data init;
+	static int instance;
 	int i;
 
 	if (!sp810) {
@@ -172,7 +173,7 @@
 	init.num_parents = ARRAY_SIZE(parent_names);
 
 	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
-		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
+		snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
 
 		sp810->timerclken[i].sp810 = sp810;
 		sp810->timerclken[i].channel = i;
@@ -184,5 +185,6 @@
 	}
 
 	of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
+	instance++;
 }
 CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
diff -ur a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
--- a/drivers/clocksource/vt8500_timer.c	2017-03-23 14:49:39.000000000 +0100
+++ b/drivers/clocksource/vt8500_timer.c	2017-03-14 02:26:45.000000000 +0100
@@ -50,6 +50,8 @@
 
 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
 
+#define MIN_OSCR_DELTA		16
+
 static void __iomem *regbase;
 
 static cycle_t vt8500_timer_read(struct clocksource *cs)
@@ -80,7 +82,7 @@
 		cpu_relax();
 	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
 
-	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+	if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
 		return -ETIME;
 
 	writel(1, regbase + TIMER_IER_VAL);
@@ -162,7 +164,7 @@
 		pr_err("%s: setup_irq failed for %s\n", __func__,
 							clockevent.name);
 	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
-					4, 0xf0000000);
+					MIN_OSCR_DELTA * 2, 0xf0000000);
 }
 
 CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff -ur a/drivers/connector/connector.c b/drivers/connector/connector.c
--- a/drivers/connector/connector.c	2017-03-23 14:47:54.000000000 +0100
+++ b/drivers/connector/connector.c	2017-03-14 02:24:22.000000000 +0100
@@ -154,26 +154,21 @@
  *
  * It checks skb, netlink header and msg sizes, and calls callback helper.
  */
-static void cn_rx_skb(struct sk_buff *__skb)
+static void cn_rx_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh;
-	struct sk_buff *skb;
 	int len, err;
 
-	skb = skb_get(__skb);
-
 	if (skb->len >= NLMSG_HDRLEN) {
 		nlh = nlmsg_hdr(skb);
 		len = nlmsg_len(nlh);
 
 		if (len < (int)sizeof(struct cn_msg) ||
 		    skb->len < nlh->nlmsg_len ||
-		    len > CONNECTOR_MAX_MSG_SIZE) {
-			kfree_skb(skb);
+		    len > CONNECTOR_MAX_MSG_SIZE)
 			return;
-		}
 
-		err = cn_call_callback(skb);
+		err = cn_call_callback(skb_get(skb));
 		if (err < 0)
 			kfree_skb(skb);
 	}
diff -ur a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
--- a/drivers/cpuidle/cpuidle.c	2017-03-23 14:46:45.000000000 +0100
+++ b/drivers/cpuidle/cpuidle.c	2017-03-14 02:22:47.000000000 +0100
@@ -135,6 +135,9 @@
 
 	/* ask the governor for the next state */
 	next_state = cpuidle_curr_governor->select(drv, dev);
+	if (next_state < 0)
+		return -EBUSY;
+
 	if (need_resched()) {
 		dev->last_residency = 0;
 		/* give the governor an opportunity to reflect on the outcome */
diff -ur a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
--- a/drivers/cpuidle/governors/menu.c	2017-03-23 14:46:45.000000000 +0100
+++ b/drivers/cpuidle/governors/menu.c	2017-03-14 02:22:48.000000000 +0100
@@ -266,7 +266,7 @@
 		data->needs_update = 0;
 	}
 
-	data->last_state_idx = 0;
+	data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
 	data->exit_us = 0;
 
 	/* Special case when user has set very strict latency requirement */
diff -ur a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
--- a/drivers/crypto/caam/caamhash.c	2017-03-23 14:39:09.000000000 +0100
+++ b/drivers/crypto/caam/caamhash.c	2017-03-14 02:14:05.000000000 +0100
@@ -895,13 +895,14 @@
 			  state->buflen_1;
 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
-	int sec4_sg_bytes;
+	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = 0;
 	int sh_len;
 
-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -928,7 +929,7 @@
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
 			  LDST_SGF);
diff -ur a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
--- a/drivers/crypto/caam/caamrng.c	2017-03-23 14:39:06.000000000 +0100
+++ b/drivers/crypto/caam/caamrng.c	2017-03-14 02:14:02.000000000 +0100
@@ -56,7 +56,7 @@
 
 /* Buffer, its dma address and lock */
 struct buf_data {
-	u8 buf[RN_BUF_SIZE];
+	u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
 	dma_addr_t addr;
 	struct completion filled;
 	u32 hw_desc[DESC_JOB_O_LEN];
diff -ur a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
--- a/drivers/crypto/ixp4xx_crypto.c	2017-03-23 14:39:04.000000000 +0100
+++ b/drivers/crypto/ixp4xx_crypto.c	2017-03-14 02:13:58.000000000 +0100
@@ -915,7 +915,6 @@
 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
 		/* This was never tested by Intel
 		 * for more than one dst buffer, I think. */
-		BUG_ON(req->dst->length < nbytes);
 		req_ctx->dst = NULL;
 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
 					flags, DMA_FROM_DEVICE))
diff -ur a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
--- a/drivers/crypto/talitos.c	2017-03-23 14:39:07.000000000 +0100
+++ b/drivers/crypto/talitos.c	2017-03-14 02:14:01.000000000 +0100
@@ -935,7 +935,8 @@
 		sg_count--;
 		link_tbl_ptr--;
 	}
-	be16_add_cpu(&link_tbl_ptr->len, cryptlen);
+	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
+					+ cryptlen);
 
 	/* tag end of link table */
 	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
@@ -2620,6 +2621,7 @@
 		break;
 	default:
 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+		kfree(t_alg);
 		return ERR_PTR(-EINVAL);
 	}
 
diff -ur a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
--- a/drivers/dma/mv_xor.c	2017-03-23 14:48:23.000000000 +0100
+++ b/drivers/dma/mv_xor.c	2017-03-14 02:25:01.000000000 +0100
@@ -392,7 +392,8 @@
 	dma_cookie_t cookie = 0;
 	int busy = mv_chan_is_busy(mv_chan);
 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
-	int seen_current = 0;
+	int current_cleaned = 0;
+	struct mv_xor_desc *hw_desc;
 
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
@@ -404,38 +405,57 @@
 
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 					chain_node) {
-		prefetch(_iter);
-		prefetch(&_iter->async_tx);
 
-		/* do not advance past the current descriptor loaded into the
-		 * hardware channel, subsequent descriptors are either in
-		 * process or have not been submitted
-		 */
-		if (seen_current)
-			break;
-
-		/* stop the search if we reach the current descriptor and the
-		 * channel is busy
-		 */
-		if (iter->async_tx.phys == current_desc) {
-			seen_current = 1;
-			if (busy)
+		/* clean finished descriptors */
+		hw_desc = iter->hw_desc;
+		if (hw_desc->status & XOR_DESC_SUCCESS) {
+			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
+								cookie);
+
+			/* done processing desc, clean slot */
+			mv_xor_clean_slot(iter, mv_chan);
+
+			/* break if we cleaned the current descriptor */
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 1;
 				break;
+			}
+		} else {
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 0;
+				break;
+			}
 		}
-
-		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
-
-		if (mv_xor_clean_slot(iter, mv_chan))
-			break;
 	}
 
 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
-		struct mv_xor_desc_slot *chain_head;
-		chain_head = list_entry(mv_chan->chain.next,
-					struct mv_xor_desc_slot,
-					chain_node);
-
-		mv_xor_start_new_chain(mv_chan, chain_head);
+		if (current_cleaned) {
+			/*
+			 * current descriptor cleaned and removed, run
+			 * from list head
+			 */
+			iter = list_entry(mv_chan->chain.next,
+					  struct mv_xor_desc_slot,
+					  chain_node);
+			mv_xor_start_new_chain(mv_chan, iter);
+		} else {
+			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
+				/*
+				 * descriptors are still waiting after
+				 * current, trigger them
+				 */
+				iter = list_entry(iter->chain_node.next,
+						  struct mv_xor_desc_slot,
+						  chain_node);
+				mv_xor_start_new_chain(mv_chan, iter);
+			} else {
+				/*
+				 * some descriptors are still waiting
+				 * to be cleaned
+				 */
+				tasklet_schedule(&mv_chan->irq_tasklet);
+			}
+		}
 	}
 
 	if (cookie > 0)
diff -ur a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
--- a/drivers/dma/mv_xor.h	2017-03-23 14:48:23.000000000 +0100
+++ b/drivers/dma/mv_xor.h	2017-03-14 02:25:03.000000000 +0100
@@ -32,6 +32,7 @@
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
 #define XOR_OPERATION_MODE_MEMSET	4
+#define XOR_DESC_SUCCESS		0x40000000
 
 #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
 #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
diff -ur a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
--- a/drivers/edac/amd64_edac.c	2017-03-23 14:53:32.000000000 +0100
+++ b/drivers/edac/amd64_edac.c	2017-03-14 02:30:53.000000000 +0100
@@ -1266,7 +1266,7 @@
 	u64 chan_off;
 	u64 dram_base		= get_dram_base(pvt, range);
 	u64 hole_off		= f10_dhar_offset(pvt);
-	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
 
 	if (hi_rng) {
 		/*
diff -ur a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
--- a/drivers/edac/edac_mc_sysfs.c	2017-03-23 14:53:28.000000000 +0100
+++ b/drivers/edac/edac_mc_sysfs.c	2017-03-14 02:30:49.000000000 +0100
@@ -973,21 +973,26 @@
  */
 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
 {
+	char *name;
 	int i, err;
 
 	/*
 	 * The memory controller needs its own bus, in order to avoid
 	 * namespace conflicts at /sys/bus/edac.
 	 */
-	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
-	if (!mci->bus->name)
+	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+	if (!name)
 		return -ENOMEM;
 
+	mci->bus->name = name;
+
 	edac_dbg(0, "creating bus %s\n", mci->bus->name);
 
 	err = bus_register(mci->bus);
-	if (err < 0)
+	if (err < 0) {
+		kfree(name);
 		return err;
+	}
 
 	/* get the /sys/devices/system/edac subsys reference */
 	mci->dev.type = &mci_attr_type;
@@ -1071,7 +1076,8 @@
 fail2:
 	device_unregister(&mci->dev);
 	bus_unregister(mci->bus);
-	kfree(mci->bus->name);
+	kfree(name);
+
 	return err;
 }
 
@@ -1102,10 +1108,12 @@
 
 void edac_unregister_sysfs(struct mem_ctl_info *mci)
 {
+	const char *name = mci->bus->name;
+
 	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
 	device_unregister(&mci->dev);
 	bus_unregister(mci->bus);
-	kfree(mci->bus->name);
+	kfree(name);
 }
 
 static void mc_attr_release(struct device *dev)
diff -ur a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
--- a/drivers/edac/i7core_edac.c	2017-03-23 14:53:30.000000000 +0100
+++ b/drivers/edac/i7core_edac.c	2017-03-14 02:30:50.000000000 +0100
@@ -1872,7 +1872,7 @@
 
 	i7_dev = get_i7core_dev(mce->socketid);
 	if (!i7_dev)
-		return NOTIFY_BAD;
+		return NOTIFY_DONE;
 
 	mci = i7_dev->mci;
 	pvt = mci->pvt_info;
diff -ur a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
--- a/drivers/edac/ppc4xx_edac.c	2017-03-23 14:53:27.000000000 +0100
+++ b/drivers/edac/ppc4xx_edac.c	2017-03-14 02:30:48.000000000 +0100
@@ -921,7 +921,7 @@
 	 */
 
 	for (row = 0; row < mci->nr_csrows; row++) {
-		struct csrow_info *csi = &mci->csrows[row];
+		struct csrow_info *csi = mci->csrows[row];
 
 		/*
 		 * Get the configuration settings for this
diff -ur a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
--- a/drivers/edac/sb_edac.c	2017-03-23 14:53:31.000000000 +0100
+++ b/drivers/edac/sb_edac.c	2017-03-14 02:30:52.000000000 +0100
@@ -621,7 +621,7 @@
 	u32 reg;
 	u64 limit, prv = 0;
 	u64 tmp_mb;
-	u32 mb, kb;
+	u32 gb, mb;
 	u32 rir_way;
 
 	/*
@@ -634,8 +634,9 @@
 	pvt->tolm = GET_TOLM(reg);
 	tmp_mb = (1 + pvt->tolm) >> 20;
 
-	mb = div_u64_rem(tmp_mb, 1000, &kb);
-	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
+	gb = div_u64_rem(tmp_mb, 1024, &mb);
+	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
+		gb, (mb*1000)/1024, (u64)pvt->tolm);
 
 	/* Address range is already 45:25 */
 	pci_read_config_dword(pvt->pci_sad1, TOHM,
@@ -643,8 +644,9 @@
 	pvt->tohm = GET_TOHM(reg);
 	tmp_mb = (1 + pvt->tohm) >> 20;
 
-	mb = div_u64_rem(tmp_mb, 1000, &kb);
-	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
+	gb = div_u64_rem(tmp_mb, 1024, &mb);
+	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
+		gb, (mb*1000)/1024, (u64)pvt->tohm);
 
 	/*
 	 * Step 2) Get SAD range and SAD Interleave list
@@ -666,11 +668,11 @@
 			break;
 
 		tmp_mb = (limit + 1) >> 20;
-		mb = div_u64_rem(tmp_mb, 1000, &kb);
+		gb = div_u64_rem(tmp_mb, 1024, &mb);
 		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
 			 n_sads,
 			 get_dram_attr(reg),
-			 mb, kb,
+			 gb, (mb*1000)/1024,
 			 ((u64)tmp_mb) << 20L,
 			 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
 			 reg);
@@ -700,9 +702,9 @@
 			break;
 		tmp_mb = (limit + 1) >> 20;
 
-		mb = div_u64_rem(tmp_mb, 1000, &kb);
+		gb = div_u64_rem(tmp_mb, 1024, &mb);
 		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
-			 n_tads, mb, kb,
+			 n_tads, gb, (mb*1000)/1024,
 			 ((u64)tmp_mb) << 20L,
 			 (u32)TAD_SOCK(reg),
 			 (u32)TAD_CH(reg),
@@ -725,10 +727,10 @@
 					      tad_ch_nilv_offset[j],
 					      &reg);
 			tmp_mb = TAD_OFFSET(reg) >> 20;
-			mb = div_u64_rem(tmp_mb, 1000, &kb);
+			gb = div_u64_rem(tmp_mb, 1024, &mb);
 			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
 				 i, j,
-				 mb, kb,
+				 gb, (mb*1000)/1024,
 				 ((u64)tmp_mb) << 20L,
 				 reg);
 		}
@@ -750,10 +752,10 @@
 
 			tmp_mb = RIR_LIMIT(reg) >> 20;
 			rir_way = 1 << RIR_WAY(reg);
-			mb = div_u64_rem(tmp_mb, 1000, &kb);
+			gb = div_u64_rem(tmp_mb, 1024, &mb);
 			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
 				 i, j,
-				 mb, kb,
+				 gb, (mb*1000)/1024,
 				 ((u64)tmp_mb) << 20L,
 				 rir_way,
 				 reg);
@@ -764,10 +766,10 @@
 						      &reg);
 				tmp_mb = RIR_OFFSET(reg) << 6;
 
-				mb = div_u64_rem(tmp_mb, 1000, &kb);
+				gb = div_u64_rem(tmp_mb, 1024, &mb);
 				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
 					 i, j, k,
-					 mb, kb,
+					 gb, (mb*1000)/1024,
 					 ((u64)tmp_mb) << 20L,
 					 (u32)RIR_RNK_TGT(reg),
 					 reg);
@@ -804,7 +806,7 @@
 	u8			ch_way,sck_way;
 	u32			tad_offset;
 	u32			rir_way;
-	u32			mb, kb;
+	u32			mb, gb;
 	u64			ch_addr, offset, limit, prv = 0;
 
 	/*
@@ -1019,10 +1021,10 @@
 			continue;
 
 		limit = RIR_LIMIT(reg);
-		mb = div_u64_rem(limit >> 20, 1000, &kb);
+		gb = div_u64_rem(limit >> 20, 1024, &mb);
 		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
 			 n_rir,
-			 mb, kb,
+			 gb, (mb*1000)/1024,
 			 limit,
 			 1 << RIR_WAY(reg));
 		if  (ch_addr <= limit)
@@ -1532,7 +1534,7 @@
 
 	mci = get_mci_for_node_id(mce->socketid);
 	if (!mci)
-		return NOTIFY_BAD;
+		return NOTIFY_DONE;
 	pvt = mci->pvt_info;
 
 	/*
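For reference, the reworked debug output above splits the size with a divisor of 1024
rather than 1000 and rescales the binary remainder to thousandths for printing. A minimal
standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

static void print_gb(unsigned long long tmp_mb)
{
	unsigned int gb = tmp_mb / 1024;	/* whole gigabytes (binary) */
	unsigned int mb = tmp_mb % 1024;	/* leftover megabytes */

	/* (mb * 1000) / 1024 turns the binary remainder into a
	 * three-digit decimal fraction for the "%u.%03u GB" format */
	printf("%u.%03u GB\n", gb, (mb * 1000) / 1024);
}

int main(void)
{
	print_gb(1536);		/* 1.500 GB */
	print_gb(8192);		/* 8.000 GB */
	print_gb(8191);		/* 7.999 GB */
	return 0;
}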
diff -ur a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
--- a/drivers/firewire/ohci.c	2017-03-23 14:49:42.000000000 +0100
+++ b/drivers/firewire/ohci.c	2017-03-14 02:26:47.000000000 +0100
@@ -3670,6 +3670,11 @@
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
 	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+	/* JMicron JMB38x often shows 0 at first read, just ignore it */
+	if (!ohci->it_context_support) {
+		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
+		ohci->it_context_support = 0xf;
+	}
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
 	ohci->it_context_mask = ohci->it_context_support;
 	ohci->n_it = hweight32(ohci->it_context_mask);
diff -ur a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
--- a/drivers/firmware/efi/efivars.c	2017-03-23 14:42:59.000000000 +0100
+++ b/drivers/firmware/efi/efivars.c	2017-03-14 02:19:01.000000000 +0100
@@ -219,7 +219,8 @@
 	}
 
 	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
-	    efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) {
+	    efivar_validate(new_var->VendorGuid, new_var->VariableName,
+			    new_var->Data, new_var->DataSize) == false) {
 		printk(KERN_ERR "efivars: Malformed variable content\n");
 		return -EINVAL;
 	}
@@ -334,7 +335,8 @@
 		return -EACCES;
 
 	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
-	    efivar_validate(new_var, new_var->Data, new_var->DataSize) == false) {
+	    efivar_validate(new_var->VendorGuid, new_var->VariableName,
+			    new_var->Data, new_var->DataSize) == false) {
 		printk(KERN_ERR "efivars: Malformed variable content\n");
 		return -EINVAL;
 	}
@@ -405,35 +407,27 @@
 {
 	int i, short_name_size;
 	char *short_name;
-	unsigned long variable_name_size;
-	efi_char16_t *variable_name;
-
-	variable_name = new_var->var.VariableName;
-	variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
+	unsigned long utf8_name_size;
+	efi_char16_t *variable_name = new_var->var.VariableName;
 
 	/*
-	 * Length of the variable bytes in ASCII, plus the '-' separator,
+	 * Length of the variable bytes in UTF8, plus the '-' separator,
 	 * plus the GUID, plus trailing NUL
 	 */
-	short_name_size = variable_name_size / sizeof(efi_char16_t)
-				+ 1 + EFI_VARIABLE_GUID_LEN + 1;
-
-	short_name = kzalloc(short_name_size, GFP_KERNEL);
+	utf8_name_size = ucs2_utf8size(variable_name);
+	short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
 
+	short_name = kmalloc(short_name_size, GFP_KERNEL);
 	if (!short_name)
 		return 1;
 
-	/* Convert Unicode to normal chars (assume top bits are 0),
-	   ala UTF-8 */
-	for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
-		short_name[i] = variable_name[i] & 0xFF;
-	}
+	ucs2_as_utf8(short_name, variable_name, short_name_size);
+
 	/* This is ugly, but necessary to separate one vendor's
 	   private variables from another's.         */
-
-	*(short_name + strlen(short_name)) = '-';
+	short_name[utf8_name_size] = '-';
 	efi_guid_unparse(&new_var->var.VendorGuid,
-			 short_name + strlen(short_name));
+			 short_name + utf8_name_size + 1);
 
 	new_var->kobj.kset = efivars_kset;
 
diff -ur a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
--- a/drivers/firmware/efi/vars.c	2017-03-23 14:43:00.000000000 +0100
+++ b/drivers/firmware/efi/vars.c	2017-03-14 02:19:02.000000000 +0100
@@ -42,7 +42,7 @@
 EXPORT_SYMBOL_GPL(efivar_work);
 
 static bool
-validate_device_path(struct efi_variable *var, int match, u8 *buffer,
+validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
 		     unsigned long len)
 {
 	struct efi_generic_dev_path *node;
@@ -75,7 +75,7 @@
 }
 
 static bool
-validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
+validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer,
 		    unsigned long len)
 {
 	/* An array of 16-bit integers */
@@ -86,18 +86,18 @@
 }
 
 static bool
-validate_load_option(struct efi_variable *var, int match, u8 *buffer,
+validate_load_option(efi_char16_t *var_name, int match, u8 *buffer,
 		     unsigned long len)
 {
 	u16 filepathlength;
 	int i, desclength = 0, namelen;
 
-	namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
+	namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN);
 
 	/* Either "Boot" or "Driver" followed by four digits of hex */
 	for (i = match; i < match+4; i++) {
-		if (var->VariableName[i] > 127 ||
-		    hex_to_bin(var->VariableName[i] & 0xff) < 0)
+		if (var_name[i] > 127 ||
+		    hex_to_bin(var_name[i] & 0xff) < 0)
 			return true;
 	}
 
@@ -132,12 +132,12 @@
 	/*
 	 * And, finally, check the filepath
 	 */
-	return validate_device_path(var, match, buffer + desclength + 6,
+	return validate_device_path(var_name, match, buffer + desclength + 6,
 				    filepathlength);
 }
 
 static bool
-validate_uint16(struct efi_variable *var, int match, u8 *buffer,
+validate_uint16(efi_char16_t *var_name, int match, u8 *buffer,
 		unsigned long len)
 {
 	/* A single 16-bit integer */
@@ -148,7 +148,7 @@
 }
 
 static bool
-validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
+validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
 		      unsigned long len)
 {
 	int i;
@@ -165,67 +165,148 @@
 }
 
 struct variable_validate {
+	efi_guid_t vendor;
 	char *name;
-	bool (*validate)(struct efi_variable *var, int match, u8 *data,
+	bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
 			 unsigned long len);
 };
 
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine.  If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
 static const struct variable_validate variable_validate[] = {
-	{ "BootNext", validate_uint16 },
-	{ "BootOrder", validate_boot_order },
-	{ "DriverOrder", validate_boot_order },
-	{ "Boot*", validate_load_option },
-	{ "Driver*", validate_load_option },
-	{ "ConIn", validate_device_path },
-	{ "ConInDev", validate_device_path },
-	{ "ConOut", validate_device_path },
-	{ "ConOutDev", validate_device_path },
-	{ "ErrOut", validate_device_path },
-	{ "ErrOutDev", validate_device_path },
-	{ "Timeout", validate_uint16 },
-	{ "Lang", validate_ascii_string },
-	{ "PlatformLang", validate_ascii_string },
-	{ "", NULL },
+	{ EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+	{ EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+	{ EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+	{ EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+	{ EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+	{ EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+	{ LINUX_EFI_CRASH_GUID, "*", NULL },
+	{ NULL_GUID, "", NULL },
 };
 
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ *              final "*" character matches any trailing characters of @var_name,
+ *              including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ *         that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+		 int *match)
+{
+	for (*match = 0; ; (*match)++) {
+		char c = match_name[*match];
+
+		switch (c) {
+		case '*':
+			/* Wildcard in @match_name means we've matched. */
+			return true;
+
+		case '\0':
+			/* @match_name has ended. Has @var_name too? */
+			return (*match == len);
+
+		default:
+			/*
+			 * We've reached a non-wildcard char in @match_name.
+			 * Continue only if there's an identical character in
+			 * @var_name.
+			 */
+			if (*match < len && c == var_name[*match])
+				continue;
+			return false;
+		}
+	}
+}
+
 bool
-efivar_validate(struct efi_variable *var, u8 *data, unsigned long len)
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+		unsigned long data_size)
 {
 	int i;
-	u16 *unicode_name = var->VariableName;
+	unsigned long utf8_size;
+	u8 *utf8_name;
+
+	utf8_size = ucs2_utf8size(var_name);
+	utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+	if (!utf8_name)
+		return false;
+
+	ucs2_as_utf8(utf8_name, var_name, utf8_size);
+	utf8_name[utf8_size] = '\0';
 
-	for (i = 0; variable_validate[i].validate != NULL; i++) {
+	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
 		const char *name = variable_validate[i].name;
-		int match;
+		int match = 0;
 
-		for (match = 0; ; match++) {
-			char c = name[match];
-			u16 u = unicode_name[match];
-
-			/* All special variables are plain ascii */
-			if (u > 127)
-				return true;
-
-			/* Wildcard in the matching name means we've matched */
-			if (c == '*')
-				return variable_validate[i].validate(var,
-							     match, data, len);
+		if (efi_guidcmp(vendor, variable_validate[i].vendor))
+			continue;
 
-			/* Case sensitive match */
-			if (c != u)
+		if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+			if (variable_validate[i].validate == NULL)
 				break;
-
-			/* Reached the end of the string while matching */
-			if (!c)
-				return variable_validate[i].validate(var,
-							     match, data, len);
+			kfree(utf8_name);
+			return variable_validate[i].validate(var_name, match,
+							     data, data_size);
 		}
 	}
-
+	kfree(utf8_name);
 	return true;
 }
 EXPORT_SYMBOL_GPL(efivar_validate);
 
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+			     size_t len)
+{
+	int i;
+	bool found = false;
+	int match = 0;
+
+	/*
+	 * Check if our variable is in the validated variables list
+	 */
+	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+		if (efi_guidcmp(variable_validate[i].vendor, vendor))
+			continue;
+
+		if (variable_matches(var_name, len,
+				     variable_validate[i].name, &match)) {
+			found = true;
+			break;
+		}
+	}
+
+	/*
+	 * If it's in our list, it is removable.
+	 */
+	return found;
+}
+EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+
 static efi_status_t
 check_var_size(u32 attributes, unsigned long size)
 {
@@ -797,7 +878,7 @@
 
 	*set = false;
 
-	if (efivar_validate(&entry->var, data, *size) == false)
+	if (efivar_validate(*vendor, name, data, *size) == false)
 		return -EINVAL;
 
 	/*
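The matching semantics documented for variable_matches() above can be exercised on their
own; the sketch below restates the helper (types tweaked slightly for a standalone build)
with a tiny, purely illustrative harness:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool variable_matches(const char *var_name, size_t len,
			     const char *match_name, int *match)
{
	for (*match = 0; ; (*match)++) {
		char c = match_name[*match];

		switch (c) {
		case '*':
			return true;			/* wildcard: matched */
		case '\0':
			return *match == (int)len;	/* both strings ended together? */
		default:
			if ((size_t)*match < len && c == var_name[*match])
				continue;
			return false;
		}
	}
}

int main(void)
{
	int match;

	/* efivar_validate() passes utf8_size + 1, i.e. the NUL is counted */
	printf("%d\n", variable_matches("Boot0001", 9, "Boot*", &match));	/* 1 */
	printf("match=%d\n", match);						/* 4 */
	printf("%d\n", variable_matches("ConIn", 6, "ConOut", &match));		/* 0 */
	return 0;
}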
diff -ur a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
--- a/drivers/gpio/gpiolib.c	2017-03-23 14:40:09.000000000 +0100
+++ b/drivers/gpio/gpiolib.c	2017-03-14 02:15:30.000000000 +0100
@@ -637,6 +637,7 @@
 
 static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
 {
+	struct gpio_chip	*chip;
 	unsigned long		flags;
 	int			status;
 	const char		*ioname = NULL;
@@ -653,8 +654,15 @@
 		return -EINVAL;
 	}
 
+	chip = desc->chip;
+
 	mutex_lock(&sysfs_lock);
 
+	if (!chip || !chip->exported) {
+		status = -ENODEV;
+		goto fail_unlock;
+	}
+
 	spin_lock_irqsave(&gpio_lock, flags);
 	if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
 	     test_bit(FLAG_EXPORT, &desc->flags)) {
@@ -889,6 +897,8 @@
 {
 	int			status;
 	struct device		*dev;
+	struct gpio_desc *desc;
+	unsigned int i;
 
 	mutex_lock(&sysfs_lock);
 	dev = class_find_device(&gpio_class, NULL, chip, match_export);
@@ -896,6 +906,7 @@
 		sysfs_remove_group(&dev->kobj, &gpiochip_attr_group);
 		put_device(dev);
 		device_unregister(dev);
+		 
 		chip->exported = 0;
 		status = 0;
 	} else
@@ -905,6 +916,12 @@
 	if (status)
 		pr_debug("%s: chip %s status %d\n", __func__,
 				chip->label, status);
+
+	for (i = 0; i < chip->ngpio; i++) {
+		desc = &chip->desc[i];
+		if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
+			gpiod_free(desc);
+	}
 }
 
 static int __init gpiolib_sysfs_init(void)
@@ -1068,6 +1085,8 @@
 	int		status = 0;
 	unsigned	id;
 
+	gpiochip_unexport(chip);
+
 	spin_lock_irqsave(&gpio_lock, flags);
 
 	gpiochip_remove_pin_ranges(chip);
@@ -1088,9 +1107,6 @@
 
 	spin_unlock_irqrestore(&gpio_lock, flags);
 
-	if (status == 0)
-		gpiochip_unexport(chip);
-
 	return status;
 }
 EXPORT_SYMBOL_GPL(gpiochip_remove);
diff -ur a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
--- a/drivers/gpu/drm/ast/ast_drv.h	2017-03-23 14:54:31.000000000 +0100
+++ b/drivers/gpu/drm/ast/ast_drv.h	2017-03-14 02:31:39.000000000 +0100
@@ -294,6 +294,7 @@
 int ast_fbdev_init(struct drm_device *dev);
 void ast_fbdev_fini(struct drm_device *dev);
 void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
 
 struct ast_bo {
 	struct ttm_buffer_object bo;
diff -ur a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
--- a/drivers/gpu/drm/ast/ast_fb.c	2017-03-23 14:54:31.000000000 +0100
+++ b/drivers/gpu/drm/ast/ast_fb.c	2017-03-14 02:31:39.000000000 +0100
@@ -365,3 +365,10 @@
 
 	fb_set_suspend(ast->fbdev->helper.fbdev, state);
 }
+
+void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
+{
+	ast->fbdev->helper.fbdev->fix.smem_start =
+		ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
+	ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
+}
diff -ur a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
--- a/drivers/gpu/drm/ast/ast_main.c	2017-03-23 14:54:31.000000000 +0100
+++ b/drivers/gpu/drm/ast/ast_main.c	2017-03-14 02:31:39.000000000 +0100
@@ -121,7 +121,7 @@
 	} while (ast_read32(ast, 0x10000) != 0x01);
 	data = ast_read32(ast, 0x10004);
 
-	if (data & 0x400)
+	if (data & 0x40)
 		ast->dram_bus_width = 16;
 	else
 		ast->dram_bus_width = 32;
@@ -355,6 +355,7 @@
 	dev->mode_config.min_height = 0;
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
+	dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0);
 
 	if (ast->chip == AST2100 ||
 	    ast->chip == AST2200 ||
diff -ur a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
--- a/drivers/gpu/drm/ast/ast_mode.c	2017-03-23 14:54:32.000000000 +0100
+++ b/drivers/gpu/drm/ast/ast_mode.c	2017-03-14 02:31:40.000000000 +0100
@@ -508,6 +508,8 @@
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret)
 			DRM_ERROR("failed to kmap fbcon\n");
+		else
+			ast_fbdev_set_base(ast, gpu_addr);
 	}
 	ast_bo_unreserve(bo);
 
diff -ur a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
--- a/drivers/gpu/drm/drm_crtc.c	2017-03-23 14:53:44.000000000 +0100
+++ b/drivers/gpu/drm/drm_crtc.c	2017-03-14 02:31:03.000000000 +0100
@@ -2641,8 +2641,11 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	/* For some reason crtc x/y offsets are signed internally. */
-	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+	/*
+	 * Universal plane src offsets are only 16.16, prevent havoc for
+	 * drivers using universal plane code internally.
+	 */
+	if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
 		return -ERANGE;
 
 	drm_modeset_lock_all(dev);
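The new range check above encodes the fact that universal-plane source coordinates are
16.16 fixed point, so only offsets whose integer part fits in 16 bits can be represented.
A trivial standalone check with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x = 4096, y = 70000;

	/* any bit set in the top half means the value cannot be encoded as
	 * the integer part of a 16.16 fixed-point source offset */
	printf("x ok: %d, y ok: %d\n",
	       !(x & 0xffff0000), !(y & 0xffff0000));	/* x ok: 1, y ok: 0 */

	/* 16.16 encoding of x */
	printf("x as 16.16: 0x%08x\n", x << 16);	/* 0x10000000 */
	return 0;
}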
diff -ur a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
--- a/drivers/gpu/drm/drm_lock.c	2017-03-23 14:53:37.000000000 +0100
+++ b/drivers/gpu/drm/drm_lock.c	2017-03-14 02:30:58.000000000 +0100
@@ -61,6 +61,9 @@
 	struct drm_master *master = file_priv->master;
 	int ret = 0;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	++file_priv->lock_count;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@
 	struct drm_lock *lock = data;
 	struct drm_master *master = file_priv->master;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
 			  task_pid_nr(current), lock->context);
diff -ur a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
--- a/drivers/gpu/drm/i915/intel_i2c.c	2017-03-23 14:54:18.000000000 +0100
+++ b/drivers/gpu/drm/i915/intel_i2c.c	2017-03-14 02:31:30.000000000 +0100
@@ -435,7 +435,7 @@
 					       struct intel_gmbus,
 					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;
-	int i, reg_offset;
+	int i = 0, inc, try = 0, reg_offset;
 	int ret = 0;
 
 	intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@
 
 	reg_offset = dev_priv->gpio_mmio_base;
 
+retry:
 	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
-	for (i = 0; i < num; i++) {
+	for (; i < num; i += inc) {
+		inc = 1;
 		if (gmbus_is_index_read(msgs, i, num)) {
 			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
-			i += 1;  /* set i to the index of the read xfer */
+			inc = 2; /* an index read is two msgs */
 		} else if (msgs[i].flags & I2C_M_RD) {
 			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
 		} else {
@@ -525,6 +527,18 @@
 			 adapter->name, msgs[i].addr,
 			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
+	/*
+	 * Passive adapters sometimes NAK the first probe. Retry the first
+	 * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+	 * has retries internally. See also the retry loop in
+	 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+	 */
+	if (ret == -ENXIO && i == 0 && try++ == 0) {
+		DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+			      adapter->name);
+		goto retry;
+	}
+
 	goto out;
 
 timeout:
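The reworked loop above advances by a per-message step so that an index read can consume
two messages, and retries the transfer once from the failed index when the first message
NAKs. A compressed standalone sketch of that control flow (all names and the simulated
NAK are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct msg { bool index_read; };		/* stand-in for i2c_msg */

static int do_xfer(const struct msg *m, int i)
{
	static int attempts;
	(void)m; (void)i;
	/* simulate a passive adapter NAKing the very first attempt */
	return attempts++ == 0 ? -ENXIO : 0;
}

int main(void)
{
	struct msg msgs[] = { { true }, { false }, { false } };
	int num = 3, i = 0, inc = 1, try = 0, ret = 0;

retry:
	for (; i < num; i += inc) {
		inc = msgs[i].index_read ? 2 : 1;	/* index read spans two msgs */
		ret = do_xfer(msgs, i);
		if (ret < 0)
			break;
	}
	if (ret == -ENXIO && i == 0 && try++ == 0) {
		printf("NAK on first message, retrying once\n");
		goto retry;
	}
	printf("done, ret=%d\n", ret);			/* done, ret=0 */
	return 0;
}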
diff -ur a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
--- a/drivers/gpu/drm/i915/intel_lvds.c	2017-03-23 14:54:23.000000000 +0100
+++ b/drivers/gpu/drm/i915/intel_lvds.c	2017-03-14 02:31:33.000000000 +0100
@@ -809,12 +809,28 @@
 static const struct dmi_system_id intel_dual_link_lvds[] = {
 	{
 		.callback = intel_dual_link_lvds_callback,
-		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
+		.ident = "Apple MacBook Pro 15\" (2010)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
+		},
+	},
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro 15\" (2011)",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
 		},
 	},
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro 15\" (2012)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
+		},
+	},
 	{ }	/* terminating entry */
 };
 
diff -ur a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c	2017-03-23 14:54:31.000000000 +0100
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c	2017-03-14 02:31:39.000000000 +0100
@@ -1483,6 +1483,11 @@
 		return MODE_BANDWIDTH;
 	}
 
+	if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
+	    (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
+		return MODE_H_ILLEGAL;
+	}
+
 	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
 	    mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
 	    mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
diff -ur a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c	2017-03-23 14:54:34.000000000 +0100
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c	2017-03-14 02:31:42.000000000 +0100
@@ -177,11 +177,12 @@
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_vma *vma;
 
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+	if (is_power_of_2(nvbo->valid_domains))
+		rep->domain = nvbo->valid_domains;
+	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
 	rep->offset = nvbo->bo.offset;
 	if (cli->base.vm) {
 		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
diff -ur a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
--- a/drivers/gpu/drm/qxl/qxl_cmd.c	2017-03-23 14:54:03.000000000 +0100
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c	2017-03-14 02:31:18.000000000 +0100
@@ -499,6 +499,7 @@
 
 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_SURFACE_CMD_CREATE;
+	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
 	cmd->u.surface_create.format = surf->surf.format;
 	cmd->u.surface_create.width = surf->surf.width;
 	cmd->u.surface_create.height = surf->surf.height;
diff -ur a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
--- a/drivers/gpu/drm/radeon/atombios_encoders.c	2017-03-23 14:53:49.000000000 +0100
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c	2017-03-14 02:31:06.000000000 +0100
@@ -868,8 +868,6 @@
 			else
 				args.v1.ucLaneNum = 4;
 
-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
 			switch (radeon_encoder->encoder_id) {
 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -886,6 +884,10 @@
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
 			else
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
 			break;
 		case 2:
 		case 3:
diff -ur a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
--- a/drivers/gpu/drm/radeon/radeon_atombios.c	2017-03-23 14:53:56.000000000 +0100
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c	2017-03-14 02:31:12.000000000 +0100
@@ -451,7 +451,9 @@
 	}
 
 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
-	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+	if (((dev->pdev->device == 0x9802) ||
+	     (dev->pdev->device == 0x9805) ||
+	     (dev->pdev->device == 0x9806)) &&
 	    (dev->pdev->subsystem_vendor == 0x1734) &&
 	    (dev->pdev->subsystem_device == 0x11bd)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -462,14 +464,6 @@
 		}
 	}
 
-	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
-	if ((dev->pdev->device == 0x9805) &&
-	    (dev->pdev->subsystem_vendor == 0x1734) &&
-	    (dev->pdev->subsystem_device == 0x11bd)) {
-		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
-			return false;
-	}
-
 	return true;
 }
 
diff -ur a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
--- a/drivers/gpu/drm/radeon/radeon_combios.c	2017-03-23 14:53:48.000000000 +0100
+++ b/drivers/gpu/drm/radeon/radeon_combios.c	2017-03-14 02:31:07.000000000 +0100
@@ -1271,10 +1271,15 @@
 
 			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
 			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+				u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+				if (hss > lvds->native_mode.hdisplay)
+					hss = (10 - 1) * 8;
+
 				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
 					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
 				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+					hss;
 				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
 					(RBIOS8(tmp + 23) * 8);
 
@@ -3398,6 +3403,14 @@
 	    rdev->pdev->subsystem_device == 0x30ae)
 		return;
 
+	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x280a)
+		return;
+
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 	if (table)
diff -ur a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
--- a/drivers/gpu/drm/radeon/radeon_connectors.c	2017-03-23 14:53:45.000000000 +0100
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c	2017-03-14 02:31:05.000000000 +0100
@@ -78,6 +78,11 @@
 			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 			} else if (radeon_dp_needs_link_train(radeon_connector)) {
+				/* Don't try to start link training before we
+				 * have the dpcd */
+				if (!radeon_dp_getdpcd(radeon_connector))
+					return;
+
 				/* set it to OFF so that drm_helper_connector_dpms()
 				 * won't return immediately since the current state
 				 * is ON at this point.
diff -ur a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
--- a/drivers/gpu/drm/radeon/radeon_gart.c	2017-03-23 14:53:51.000000000 +0100
+++ b/drivers/gpu/drm/radeon/radeon_gart.c	2017-03-14 02:31:09.000000000 +0100
@@ -251,8 +251,10 @@
 			}
 		}
 	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
 }
 
 /**
@@ -294,8 +296,10 @@
 			}
 		}
 	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
 	return 0;
 }
 
diff -ur a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c	2017-03-23 14:53:43.000000000 +0100
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c	2017-03-14 02:31:03.000000000 +0100
@@ -73,10 +73,17 @@
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
 
+	/* we can race here at startup, some boards seem to trigger
+	 * hotplug irqs when they shouldn't. */
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
+	mutex_lock(&mode_config->mutex);
 	if (mode_config->num_connector) {
 		list_for_each_entry(connector, &mode_config->connector_list, head)
 			radeon_connector_hotplug(connector);
 	}
+	mutex_unlock(&mode_config->mutex);
 	/* Just fire off a uevent and let userspace tell us what to do */
 	drm_helper_hpd_irq_event(dev);
 }
diff -ur a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
--- a/drivers/gpu/drm/radeon/radeon_sa.c	2017-03-23 14:53:41.000000000 +0100
+++ b/drivers/gpu/drm/radeon/radeon_sa.c	2017-03-14 02:31:01.000000000 +0100
@@ -349,8 +349,15 @@
 			/* see if we can skip over some allocations */
 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (fences[i])
+				radeon_fence_ref(fences[i]);
+		}
+
 		spin_unlock(&sa_manager->wq.lock);
 		r = radeon_fence_wait_any(rdev, fences, false);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_unref(&fences[i]);
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT && block) {
diff -ur a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
--- a/drivers/gpu/drm/radeon/radeon_ttm.c	2017-03-23 14:53:46.000000000 +0100
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c	2017-03-14 02:31:05.000000000 +0100
@@ -618,7 +618,7 @@
 						       0, PAGE_SIZE,
 						       PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
-			while (--i) {
+			while (i--) {
 				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 				gtt->ttm.dma_address[i] = 0;
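The one-character change above fixes the unwind loop on a mapping failure: "while (--i)"
skips entry 0 and misbehaves when the very first mapping fails, whereas "while (i--)"
walks back over every successfully mapped page. A standalone comparison (illustrative):

#include <stdio.h>

static void unwind_pre(unsigned int i)	/* the old "while (--i)" form */
{
	printf("pre-decrement:");
	while (--i)
		printf(" %u", i);	/* never visits index 0; unsafe if i == 0 */
	printf("\n");
}

static void unwind_post(unsigned int i)	/* the fixed "while (i--)" form */
{
	printf("post-decrement:");
	while (i--)
		printf(" %u", i);	/* visits i-1 down to 0 */
	printf("\n");
}

int main(void)
{
	/* suppose mapping page 3 failed, so pages 0..2 must be unmapped */
	unwind_pre(3);	/* pre-decrement: 2 1    (page 0 stays mapped)   */
	unwind_post(3);	/* post-decrement: 2 1 0 (all three unmapped)    */
	return 0;
}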
diff -ur a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c	2017-03-23 14:54:28.000000000 +0100
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c	2017-03-14 02:31:37.000000000 +0100
@@ -25,6 +25,7 @@
  *
  **************************************************************************/
 #include <linux/module.h>
+#include <linux/console.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -1184,6 +1185,12 @@
 static int __init vmwgfx_init(void)
 {
 	int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force())
+		return -EINVAL;
+#endif
+
 	ret = drm_pci_init(&driver, &vmw_pci_driver);
 	if (ret)
 		DRM_ERROR("Failed initializing DRM.\n");
diff -ur a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
--- a/drivers/gpu/vga/vgaarb.c	2017-03-23 14:53:35.000000000 +0100
+++ b/drivers/gpu/vga/vgaarb.c	2017-03-14 02:30:56.000000000 +0100
@@ -389,8 +389,10 @@
 		set_current_state(interruptible ?
 				  TASK_INTERRUPTIBLE :
 				  TASK_UNINTERRUPTIBLE);
-		if (signal_pending(current)) {
-			rc = -EINTR;
+		if (interruptible && signal_pending(current)) {
+			__set_current_state(TASK_RUNNING);
+			remove_wait_queue(&vga_wait_queue, &wait);
+			rc = -ERESTARTSYS;
 			break;
 		}
 		schedule();
diff -ur a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
--- a/drivers/hid/hid-core.c	2017-03-23 14:48:38.000000000 +0100
+++ b/drivers/hid/hid-core.c	2017-03-14 02:25:25.000000000 +0100
@@ -1234,7 +1234,7 @@
 		"Multi-Axis Controller"
 	};
 	const char *type, *bus;
-	char buf[64];
+	char buf[64] = "";
 	unsigned int i;
 	int len;
 	int ret;
diff -ur a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
--- a/drivers/hid/usbhid/hid-core.c	2017-03-23 14:48:41.000000000 +0100
+++ b/drivers/hid/usbhid/hid-core.c	2017-03-14 02:25:31.000000000 +0100
@@ -180,7 +180,7 @@
 	if (time_after(jiffies, usbhid->stop_retry)) {
 
 		/* Retries failed, so do a port reset unless we lack bandwidth*/
-		if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
+		if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
 		     && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
 
 			schedule_work(&usbhid->reset_work);
@@ -490,8 +490,6 @@
 	struct usbhid_device *usbhid = hid->driver_data;
 	int unplug = 0, status = urb->status;
 
-	spin_lock(&usbhid->lock);
-
 	switch (status) {
 	case 0:			/* success */
 		if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
@@ -511,6 +509,8 @@
 		hid_warn(urb->dev, "ctrl urb status %d received\n", status);
 	}
 
+	spin_lock(&usbhid->lock);
+
 	if (unplug) {
 		usbhid->ctrltail = usbhid->ctrlhead;
 	} else {
@@ -988,14 +988,6 @@
 	return ret;
 }
 
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
-	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
-		usbhid_restart_out_queue(usbhid);
-	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
-		usbhid_restart_ctrl_queue(usbhid);
-}
-
 static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
@@ -1412,6 +1404,37 @@
 	usb_kill_urb(usbhid->urbout);
 }
 
+static void hid_restart_io(struct hid_device *hid)
+{
+	struct usbhid_device *usbhid = hid->driver_data;
+	int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+	int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+	spin_lock_irq(&usbhid->lock);
+	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+	usbhid_mark_busy(usbhid);
+
+	if (clear_halt || reset_pending)
+		schedule_work(&usbhid->reset_work);
+	usbhid->retry_delay = 0;
+	spin_unlock_irq(&usbhid->lock);
+
+	if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+		return;
+
+	if (!clear_halt) {
+		if (hid_start_in(hid) < 0)
+			hid_io_error(hid);
+	}
+
+	spin_lock_irq(&usbhid->lock);
+	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+		usbhid_restart_out_queue(usbhid);
+	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+		usbhid_restart_ctrl_queue(usbhid);
+	spin_unlock_irq(&usbhid->lock);
+}
+
 /* Treat USB reset pretty much the same as suspend/resume */
 static int hid_pre_reset(struct usb_interface *intf)
 {
@@ -1461,14 +1484,14 @@
 		return 1;
 	}
 
+	/* No need to do another reset or clear a halted endpoint */
 	spin_lock_irq(&usbhid->lock);
 	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+	clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
 	spin_unlock_irq(&usbhid->lock);
 	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
-	status = hid_start_in(hid);
-	if (status < 0)
-		hid_io_error(hid);
-	usbhid_restart_queues(usbhid);
+
+	hid_restart_io(hid);
 
 	return 0;
 }
@@ -1490,25 +1513,9 @@
 #ifdef CONFIG_PM
 static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
 {
-	struct usbhid_device *usbhid = hid->driver_data;
-	int status;
-
-	spin_lock_irq(&usbhid->lock);
-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
-	usbhid_mark_busy(usbhid);
-
-	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
-			test_bit(HID_RESET_PENDING, &usbhid->iofl))
-		schedule_work(&usbhid->reset_work);
-	usbhid->retry_delay = 0;
-
-	usbhid_restart_queues(usbhid);
-	spin_unlock_irq(&usbhid->lock);
-
-	status = hid_start_in(hid);
-	if (status < 0)
-		hid_io_error(hid);
+	int status = 0;
 
+	hid_restart_io(hid);
 	if (driver_suspended && hid->driver && hid->driver->resume)
 		status = hid->driver->resume(hid);
 	return status;
@@ -1575,12 +1582,8 @@
 static int hid_resume(struct usb_interface *intf)
 {
 	struct hid_device *hid = usb_get_intfdata (intf);
-	struct usbhid_device *usbhid = hid->driver_data;
 	int status;
 
-	if (!test_bit(HID_STARTED, &usbhid->iofl))
-		return 0;
-
 	status = hid_resume_common(hid, true);
 	dev_dbg(&intf->dev, "resume status %d\n", status);
 	return 0;
@@ -1589,10 +1592,8 @@
 static int hid_reset_resume(struct usb_interface *intf)
 {
 	struct hid_device *hid = usb_get_intfdata(intf);
-	struct usbhid_device *usbhid = hid->driver_data;
 	int status;
 
-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
 	status = hid_post_reset(intf);
 	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
 		int ret = hid->driver->reset_resume(hid);
diff -ur a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
--- a/drivers/hv/channel_mgmt.c	2017-03-23 14:43:20.000000000 +0100
+++ b/drivers/hv/channel_mgmt.c	2017-03-14 02:19:20.000000000 +0100
@@ -644,7 +644,7 @@
 {
 	struct vmbus_channel_message_header *msg;
 	struct vmbus_channel_msginfo *msginfo;
-	int ret, t;
+	int ret;
 
 	msginfo = kmalloc(sizeof(*msginfo) +
 			  sizeof(struct vmbus_channel_message_header),
@@ -652,8 +652,6 @@
 	if (!msginfo)
 		return -ENOMEM;
 
-	init_completion(&msginfo->waitevent);
-
 	msg = (struct vmbus_channel_message_header *)msginfo->msg;
 
 	msg->msgtype = CHANNELMSG_REQUESTOFFERS;
@@ -666,12 +664,6 @@
 		goto cleanup;
 	}
 
-	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
-	if (t == 0) {
-		ret = -ETIMEDOUT;
-		goto cleanup;
-	}
-
 cleanup:
 	kfree(msginfo);
 
diff -ur a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
--- a/drivers/hv/vmbus_drv.c	2017-03-23 14:43:20.000000000 +0100
+++ b/drivers/hv/vmbus_drv.c	2017-03-14 02:19:21.000000000 +0100
@@ -32,6 +32,7 @@
 #include <linux/acpi.h>
 #include <acpi/acpi_bus.h>
 #include <linux/completion.h>
+#include <linux/cpu.h>
 #include <linux/hyperv.h>
 #include <linux/kernel_stat.h>
 #include <asm/hyperv.h>
@@ -507,6 +508,39 @@
 	desc->action->handler(irq, desc->action->dev_id);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int hyperv_cpu_disable(void)
+{
+	return -ENOSYS;
+}
+
+static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
+{
+	static void *previous_cpu_disable;
+
+	/*
+	 * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
+	 * ...) is not supported at this moment as channel interrupts are
+	 * distributed across all of them.
+	 */
+
+	if ((vmbus_proto_version == VERSION_WS2008) ||
+	    (vmbus_proto_version == VERSION_WIN7))
+		return;
+
+	if (vmbus_loaded) {
+		previous_cpu_disable = smp_ops.cpu_disable;
+		smp_ops.cpu_disable = hyperv_cpu_disable;
+		pr_notice("CPU offlining is not supported by hypervisor\n");
+	} else if (previous_cpu_disable)
+		smp_ops.cpu_disable = previous_cpu_disable;
+}
+#else
+static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
+{
+}
+#endif
+
 /*
  * vmbus_bus_init -Main vmbus driver initialization routine.
  *
@@ -562,6 +596,7 @@
 	if (ret)
 		goto err_irq;
 
+	hv_cpu_hotplug_quirk(true);
 	vmbus_request_offers();
 
 	return 0;
@@ -796,6 +831,7 @@
 	bus_unregister(&hv_bus);
 	hv_cleanup();
 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
+	hv_cpu_hotplug_quirk(false);
 }
 
 MODULE_LICENSE("GPL");
diff -ur a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
--- a/drivers/hwmon/max1111.c	2017-03-23 14:43:02.000000000 +0100
+++ b/drivers/hwmon/max1111.c	2017-03-14 02:19:03.000000000 +0100
@@ -85,6 +85,9 @@
 
 int max1111_read_channel(int channel)
 {
+	if (!the_max1111 || !the_max1111->spi)
+		return -ENODEV;
+
 	return max1111_read(&the_max1111->spi->dev, channel);
 }
 EXPORT_SYMBOL(max1111_read_channel);
@@ -260,6 +263,9 @@
 {
 	struct max1111_data *data = spi_get_drvdata(spi);
 
+#ifdef CONFIG_SHARPSL_PM
+	the_max1111 = NULL;
+#endif
 	hwmon_device_unregister(data->hwmon_dev);
 	sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff -ur a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
--- a/drivers/hwmon/mcp3021.c	2017-03-23 14:43:17.000000000 +0100
+++ b/drivers/hwmon/mcp3021.c	2017-03-14 02:19:17.000000000 +0100
@@ -31,14 +31,11 @@
 /* output format */
 #define MCP3021_SAR_SHIFT	2
 #define MCP3021_SAR_MASK	0x3ff
-
 #define MCP3021_OUTPUT_RES	10	/* 10-bit resolution */
-#define MCP3021_OUTPUT_SCALE	4
 
 #define MCP3221_SAR_SHIFT	0
 #define MCP3221_SAR_MASK	0xfff
 #define MCP3221_OUTPUT_RES	12	/* 12-bit resolution */
-#define MCP3221_OUTPUT_SCALE	1
 
 enum chips {
 	mcp3021,
@@ -54,7 +51,6 @@
 	u16 sar_shift;
 	u16 sar_mask;
 	u8 output_res;
-	u8 output_scale;
 };
 
 static int mcp3021_read16(struct i2c_client *client)
@@ -84,13 +80,7 @@
 
 static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
 {
-	if (val == 0)
-		return 0;
-
-	val = val * data->output_scale - data->output_scale / 2;
-
-	return val * DIV_ROUND_CLOSEST(data->vdd,
-			(1 << data->output_res) * data->output_scale);
+	return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
 }
 
 static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
@@ -132,14 +122,12 @@
 		data->sar_shift = MCP3021_SAR_SHIFT;
 		data->sar_mask = MCP3021_SAR_MASK;
 		data->output_res = MCP3021_OUTPUT_RES;
-		data->output_scale = MCP3021_OUTPUT_SCALE;
 		break;
 
 	case mcp3221:
 		data->sar_shift = MCP3221_SAR_SHIFT;
 		data->sar_mask = MCP3221_SAR_MASK;
 		data->output_res = MCP3221_OUTPUT_RES;
-		data->output_scale = MCP3221_OUTPUT_SCALE;
 		break;
 	}
 
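The simplified volts_from_reg() above reduces the register-to-voltage conversion to a
single rounded division. A standalone sketch of the arithmetic (units and values here
are made up for illustration):

#include <stdio.h>

/* round-to-nearest division, the unsigned case of the kernel's DIV_ROUND_CLOSEST */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static unsigned int volts_from_reg(unsigned int vdd_mv, unsigned int val,
				   unsigned int output_res)
{
	return DIV_ROUND_CLOSEST(vdd_mv * val, 1u << output_res);
}

int main(void)
{
	/* assume a 10-bit result and a 3300 mV supply */
	printf("%u mV\n", volts_from_reg(3300, 0, 10));		/* 0 mV    */
	printf("%u mV\n", volts_from_reg(3300, 512, 10));	/* 1650 mV */
	printf("%u mV\n", volts_from_reg(3300, 1023, 10));	/* 3297 mV */
	return 0;
}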
diff -ur a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
--- a/drivers/hwmon/ntc_thermistor.c	2017-03-23 14:43:02.000000000 +0100
+++ b/drivers/hwmon/ntc_thermistor.c	2017-03-14 02:19:03.000000000 +0100
@@ -181,8 +181,10 @@
 ntc_thermistor_parse_dt(struct platform_device *pdev)
 {
 	struct iio_channel *chan;
+	enum iio_chan_type type;
 	struct device_node *np = pdev->dev.of_node;
 	struct ntc_thermistor_platform_data *pdata;
+	int ret;
 
 	if (!np)
 		return NULL;
@@ -195,6 +197,13 @@
 	if (IS_ERR(chan))
 		return ERR_CAST(chan);
 
+	ret = iio_get_channel_type(chan, &type);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	if (type != IIO_VOLTAGE)
+		return ERR_PTR(-EINVAL);
+
 	if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
 		return ERR_PTR(-ENODEV);
 	if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
diff -ur a/drivers/hwmon/syno_hddmon.c b/drivers/hwmon/syno_hddmon.c
--- a/drivers/hwmon/syno_hddmon.c	2017-03-23 14:43:08.000000000 +0100
+++ b/drivers/hwmon/syno_hddmon.c	2017-03-14 02:19:10.000000000 +0100
@@ -7,7 +7,6 @@
 #include <linux/syno.h>
 #include <linux/synobios.h>
 #include <linux/delay.h>
-#include <linux/libata.h>
 
 MODULE_LICENSE("Proprietary");
 
@@ -17,6 +16,7 @@
 #define SYNO_HDDMON_STR "Syno_HDDMon"
 #define SYNO_HDDMON_UPLG_STR "Syno_HDDMon_UPLGM"
 extern long g_internal_hd_num;
+extern long g_syno_hdd_powerup_seq;
 
 #ifdef MY_DEF_HERE
 extern long g_hdd_hotplug;
diff -ur a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
--- a/drivers/i2c/busses/i2c-at91.c	2017-03-23 14:39:14.000000000 +0100
+++ b/drivers/i2c/busses/i2c-at91.c	2017-03-14 02:14:16.000000000 +0100
@@ -63,6 +63,9 @@
 #define	AT91_TWI_UNRE		0x0080	/* Underrun Error */
 #define	AT91_TWI_NACK		0x0100	/* Not Acknowledged */
 
+#define	AT91_TWI_INT_MASK \
+	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
+
 #define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
 #define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
 #define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
@@ -118,13 +121,12 @@
 
 static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
 {
-	at91_twi_write(dev, AT91_TWI_IDR,
-		       AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
+	at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
 }
 
 static void at91_twi_irq_save(struct at91_twi_dev *dev)
 {
-	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
+	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
 	at91_disable_twi_interrupts(dev);
 }
 
@@ -214,6 +216,14 @@
 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
 			 dev->buf_len, DMA_TO_DEVICE);
 
+	/*
+	 * When this callback is called, THR/TX FIFO is likely not to be empty
+	 * yet. So we have to wait for the TXCOMP or NACK bit to be set in the
+	 * Status Register to be sure that the STOP bit has been sent and the
+	 * transfer is completed. The NACK interrupt has already been enabled;
+	 * we just have to enable the TXCOMP one.
+	 */
+	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
 	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 }
 
@@ -308,7 +318,7 @@
 	/* The last two bytes have to be read without using dma */
 	dev->buf += dev->buf_len - 2;
 	dev->buf_len = 2;
-	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
+	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
 }
 
 static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
@@ -369,7 +379,7 @@
 	/* catch error flags */
 	dev->transfer_status |= status;
 
-	if (irqstatus & AT91_TWI_TXCOMP) {
+	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
 		at91_disable_twi_interrupts(dev);
 		complete(&dev->cmd_complete);
 	}
@@ -382,6 +392,34 @@
 	int ret;
 	bool has_unre_flag = dev->pdata->has_unre_flag;
 
+	/*
+	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
+	 * read flag but shows the state of the transmission at the time the
+	 * Status Register is read. According to the programmer's datasheet,
+	 * TXCOMP is set when both holding register and internal shifter are
+	 * empty and STOP condition has been sent.
+	 * Consequently, we should enable NACK interrupt rather than TXCOMP to
+	 * detect transmission failure.
+	 *
+	 * Besides, the TXCOMP bit is already set before the i2c transaction
+	 * has been started. For read transactions, this bit is cleared when
+	 * writing the START bit into the Control Register. So the
+	 * corresponding interrupt can safely be enabled just after.
+	 * However for write transactions managed by the CPU, we first write
+	 * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
+	 * interrupt. If TXCOMP interrupt were enabled before writing into THR,
+	 * the interrupt handler would be called immediately and the i2c command
+	 * would be reported as completed.
+	 * Also when a write transaction is managed by the DMA controller,
+	 * enabling the TXCOMP interrupt in this function may lead to a race
+	 * condition since we don't know whether the TXCOMP interrupt is enabled
+	 * before or after the DMA has started to write into THR. So the TXCOMP
+	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
+	 * Immediately after, in that DMA callback, we still need to send the
+	 * STOP condition manually by writing the corresponding bit into the
+	 * Control Register.
+	 */
+
 	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
 		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
 
@@ -412,26 +450,24 @@
 		 * seems to be the best solution.
 		 */
 		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
 			at91_twi_read_data_dma(dev);
-			/*
-			 * It is important to enable TXCOMP irq here because
-			 * doing it only when transferring the last two bytes
-			 * will mask NACK errors since TXCOMP is set when a
-			 * NACK occurs.
-			 */
-			at91_twi_write(dev, AT91_TWI_IER,
-			       AT91_TWI_TXCOMP);
-		} else
+		} else {
 			at91_twi_write(dev, AT91_TWI_IER,
-			       AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
+				       AT91_TWI_TXCOMP |
+				       AT91_TWI_NACK |
+				       AT91_TWI_RXRDY);
+		}
 	} else {
 		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
 			at91_twi_write_data_dma(dev);
-			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
 		} else {
 			at91_twi_write_next_byte(dev);
 			at91_twi_write(dev, AT91_TWI_IER,
-				AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+				       AT91_TWI_TXCOMP |
+				       AT91_TWI_NACK |
+				       AT91_TWI_TXRDY);
 		}
 	}
 
diff -ur a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
--- a/drivers/i2c/busses/i2c-cpm.c	2017-03-23 14:39:15.000000000 +0100
+++ b/drivers/i2c/busses/i2c-cpm.c	2017-03-14 02:14:18.000000000 +0100
@@ -120,8 +120,8 @@
 	cbd_t __iomem *rbase;
 	u_char *txbuf[CPM_MAXBD];
 	u_char *rxbuf[CPM_MAXBD];
-	u32 txdma[CPM_MAXBD];
-	u32 rxdma[CPM_MAXBD];
+	dma_addr_t txdma[CPM_MAXBD];
+	dma_addr_t rxdma[CPM_MAXBD];
 };
 
 static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff -ur a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
--- a/drivers/i2c/busses/i2c-rcar.c	2017-03-23 14:39:14.000000000 +0100
+++ b/drivers/i2c/busses/i2c-rcar.c	2017-03-14 02:14:17.000000000 +0100
@@ -672,15 +672,16 @@
 		return ret;
 	}
 
+	pm_runtime_enable(dev);
+	platform_set_drvdata(pdev, priv);
+
 	ret = i2c_add_numbered_adapter(adap);
 	if (ret < 0) {
 		dev_err(dev, "reg adap failed: %d\n", ret);
+		pm_runtime_disable(dev);
 		return ret;
 	}
 
-	pm_runtime_enable(dev);
-	platform_set_drvdata(pdev, priv);
-
 	dev_info(dev, "probed\n");
 
 	return 0;
diff -ur a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
--- a/drivers/iio/adc/ad7793.c	2017-03-23 14:47:53.000000000 +0100
+++ b/drivers/iio/adc/ad7793.c	2017-03-14 02:24:19.000000000 +0100
@@ -101,7 +101,7 @@
 #define AD7795_CH_AIN1M_AIN1M	8 /* AIN1(-) - AIN1(-) */
 
 /* ID Register Bit Designations (AD7793_REG_ID) */
-#define AD7785_ID		0xB
+#define AD7785_ID		0x3
 #define AD7792_ID		0xA
 #define AD7793_ID		0xB
 #define AD7794_ID		0xF
diff -ur a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
--- a/drivers/iio/dac/ad5064.c	2017-03-23 14:47:54.000000000 +0100
+++ b/drivers/iio/dac/ad5064.c	2017-03-14 02:24:21.000000000 +0100
@@ -602,10 +602,16 @@
 	unsigned int addr, unsigned int val)
 {
 	struct i2c_client *i2c = to_i2c_client(st->dev);
+	int ret;
 
 	st->data.i2c[0] = (cmd << 4) | addr;
 	put_unaligned_be16(val, &st->data.i2c[1]);
-	return i2c_master_send(i2c, st->data.i2c, 3);
+
+	ret = i2c_master_send(i2c, st->data.i2c, 3);
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
 
 static int ad5064_i2c_probe(struct i2c_client *i2c,
diff -ur a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
--- a/drivers/iio/dac/ad5624r_spi.c	2017-03-23 14:47:53.000000000 +0100
+++ b/drivers/iio/dac/ad5624r_spi.c	2017-03-14 02:24:20.000000000 +0100
@@ -22,7 +22,7 @@
 #include "ad5624r.h"
 
 static int ad5624r_spi_write(struct spi_device *spi,
-			     u8 cmd, u8 addr, u16 val, u8 len)
+			     u8 cmd, u8 addr, u16 val, u8 shift)
 {
 	u32 data;
 	u8 msg[3];
@@ -35,7 +35,7 @@
 	 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
 	 * for the AD5664R, AD5644R, and AD5624R, respectively.
 	 */
-	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
 	msg[0] = data >> 16;
 	msg[1] = data >> 8;
 	msg[2] = data;
diff -ur a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
--- a/drivers/iio/dac/mcp4725.c	2017-03-23 14:47:53.000000000 +0100
+++ b/drivers/iio/dac/mcp4725.c	2017-03-14 02:24:19.000000000 +0100
@@ -166,6 +166,7 @@
 	data->client = client;
 
 	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = id->name;
 	indio_dev->info = &mcp4725_info;
 	indio_dev->channels = &mcp4725_channel;
 	indio_dev->num_channels = 1;
diff -ur a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
--- a/drivers/iio/imu/adis16400_core.c	2017-03-23 14:47:51.000000000 +0100
+++ b/drivers/iio/imu/adis16400_core.c	2017-03-14 02:24:15.000000000 +0100
@@ -438,6 +438,11 @@
 			*val = st->variant->temp_scale_nano / 1000000;
 			*val2 = (st->variant->temp_scale_nano % 1000000);
 			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_PRESSURE:
+			/* 20 uBar = 0.002kPascal */
+			*val = 0;
+			*val2 = 2000;
+			return IIO_VAL_INT_PLUS_MICRO;
 		default:
 			return -EINVAL;
 		}
@@ -480,10 +485,10 @@
 	}
 }
 
-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
 	.type = IIO_VOLTAGE, \
 	.indexed = 1, \
-	.channel = 0, \
+	.channel = chn, \
 	.extend_name = name, \
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
 		BIT(IIO_CHAN_INFO_SCALE), \
@@ -499,10 +504,10 @@
 }
 
 #define ADIS16400_SUPPLY_CHAN(addr, bits) \
-	ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
+	ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
 
 #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
-	ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
+	ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
 
 #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
 	.type = IIO_ANGL_VEL, \
@@ -819,11 +824,6 @@
 	.debugfs_reg_access = adis_debugfs_reg_access,
 };
 
-static const unsigned long adis16400_burst_scan_mask[] = {
-	~0UL,
-	0,
-};
-
 static const char * const adis16400_status_error_msgs[] = {
 	[ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
 	[ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
@@ -871,6 +871,20 @@
 		BIT(ADIS16400_DIAG_STAT_POWER_LOW),
 };
 
+static void adis16400_setup_chan_mask(struct adis16400_state *st)
+{
+	const struct adis16400_chip_info *chip_info = st->variant;
+	unsigned i;
+
+	for (i = 0; i < chip_info->num_channels; i++) {
+		const struct iio_chan_spec *ch = &chip_info->channels[i];
+
+		if (ch->scan_index >= 0 &&
+		    ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
+			st->avail_scan_mask[0] |= BIT(ch->scan_index);
+	}
+}
+
 static int adis16400_probe(struct spi_device *spi)
 {
 	struct adis16400_state *st;
@@ -894,8 +908,10 @@
 	indio_dev->info = &adis16400_info;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	if (!(st->variant->flags & ADIS16400_NO_BURST))
-		indio_dev->available_scan_masks = adis16400_burst_scan_mask;
+	if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+		adis16400_setup_chan_mask(st);
+		indio_dev->available_scan_masks = st->avail_scan_mask;
+	}
 
 	ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
 	if (ret)
diff -ur a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
--- a/drivers/iio/imu/adis16400.h	2017-03-23 14:47:51.000000000 +0100
+++ b/drivers/iio/imu/adis16400.h	2017-03-14 02:24:15.000000000 +0100
@@ -165,6 +165,7 @@
 	int				filt_int;
 
 	struct adis adis;
+	unsigned long avail_scan_mask[2];
 };
 
 /* At the moment triggers are only used for ring buffer
diff -ur a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
--- a/drivers/iio/imu/adis16480.c	2017-03-23 14:47:51.000000000 +0100
+++ b/drivers/iio/imu/adis16480.c	2017-03-14 02:24:15.000000000 +0100
@@ -110,6 +110,10 @@
 struct adis16480_chip_info {
 	unsigned int num_channels;
 	const struct iio_chan_spec *channels;
+	unsigned int gyro_max_val;
+	unsigned int gyro_max_scale;
+	unsigned int accel_max_val;
+	unsigned int accel_max_scale;
 };
 
 struct adis16480 {
@@ -533,19 +537,21 @@
 static int adis16480_read_raw(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan, int *val, int *val2, long info)
 {
+	struct adis16480 *st = iio_priv(indio_dev);
+
 	switch (info) {
 	case IIO_CHAN_INFO_RAW:
 		return adis_single_conversion(indio_dev, chan, 0, val);
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->type) {
 		case IIO_ANGL_VEL:
-			*val = 0;
-			*val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
-			return IIO_VAL_INT_PLUS_MICRO;
+			*val = st->chip_info->gyro_max_scale;
+			*val2 = st->chip_info->gyro_max_val;
+			return IIO_VAL_FRACTIONAL;
 		case IIO_ACCEL:
-			*val = 0;
-			*val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
-			return IIO_VAL_INT_PLUS_MICRO;
+			*val = st->chip_info->accel_max_scale;
+			*val2 = st->chip_info->accel_max_val;
+			return IIO_VAL_FRACTIONAL;
 		case IIO_MAGN:
 			*val = 0;
 			*val2 = 100; /* 0.0001 gauss */
@@ -702,18 +708,39 @@
 	[ADIS16375] = {
 		.channels = adis16485_channels,
 		.num_channels = ARRAY_SIZE(adis16485_channels),
+		/*
+		 * storing the value in rad/degree and the scale in degree
+		 * gives us the result in rad and better precision than
+		 * storing the scale directly in rad.
+		 */
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22887),
+		.gyro_max_scale = 300,
+		.accel_max_val = IIO_M_S_2_TO_G(21973),
+		.accel_max_scale = 18,
 	},
 	[ADIS16480] = {
 		.channels = adis16480_channels,
 		.num_channels = ARRAY_SIZE(adis16480_channels),
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+		.gyro_max_scale = 450,
+		.accel_max_val = IIO_M_S_2_TO_G(12500),
+		.accel_max_scale = 5,
 	},
 	[ADIS16485] = {
 		.channels = adis16485_channels,
 		.num_channels = ARRAY_SIZE(adis16485_channels),
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+		.gyro_max_scale = 450,
+		.accel_max_val = IIO_M_S_2_TO_G(20000),
+		.accel_max_scale = 5,
 	},
 	[ADIS16488] = {
 		.channels = adis16480_channels,
 		.num_channels = ARRAY_SIZE(adis16480_channels),
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+		.gyro_max_scale = 450,
+		.accel_max_val = IIO_M_S_2_TO_G(22500),
+		.accel_max_scale = 18,
 	},
 };
 
diff -ur a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
--- a/drivers/iio/imu/adis_buffer.c	2017-03-23 14:47:50.000000000 +0100
+++ b/drivers/iio/imu/adis_buffer.c	2017-03-14 02:24:14.000000000 +0100
@@ -43,7 +43,7 @@
 		return -ENOMEM;
 
 	rx = adis->buffer;
-	tx = rx + indio_dev->scan_bytes;
+	tx = rx + scan_count;
 
 	spi_message_init(&adis->msg);
 
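The single adis_buffer.c line changed above is a pointer-arithmetic fix: rx and tx are 16-bit (__be16) pointers, so the transmit half of the buffer is reached by adding the element count (scan_count), while adding the byte count (indio_dev->scan_bytes) would step twice as far and run past the allocation. A standalone sketch of the same arithmetic, using uint16_t in place of __be16:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t buffer[8];		/* e.g. scan_count = 4: 4 rx + 4 tx words */
	unsigned int scan_count = 4;
	uint16_t *rx = buffer;
	uint16_t *tx = rx + scan_count;	/* 4 elements = 8 bytes, mid-buffer */

	printf("tx starts %td bytes into the buffer\n",
	       (char *)tx - (char *)rx);
	return 0;
}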
diff -ur a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
--- a/drivers/infiniband/core/cm.c	2017-03-23 14:40:51.000000000 +0100
+++ b/drivers/infiniband/core/cm.c	2017-03-14 02:16:13.000000000 +0100
@@ -857,6 +857,11 @@
 	case IB_CM_SIDR_REQ_RCVD:
 		spin_unlock_irq(&cm_id_priv->lock);
 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
+		spin_lock_irq(&cm.lock);
+		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
+			rb_erase(&cm_id_priv->sidr_id_node,
+				 &cm.remote_sidr_table);
+		spin_unlock_irq(&cm.lock);
 		break;
 	case IB_CM_REQ_SENT:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
@@ -3093,7 +3098,10 @@
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
 	spin_lock_irqsave(&cm.lock, flags);
-	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
+		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
+	}
 	spin_unlock_irqrestore(&cm.lock, flags);
 	return 0;
 
diff -ur a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
--- a/drivers/infiniband/core/uverbs_cmd.c	2017-03-23 14:40:47.000000000 +0100
+++ b/drivers/infiniband/core/uverbs_cmd.c	2017-03-14 02:16:10.000000000 +0100
@@ -2106,6 +2106,12 @@
 		next->send_flags = user_wr->send_flags;
 
 		if (is_ud) {
+			if (next->opcode != IB_WR_SEND &&
+			    next->opcode != IB_WR_SEND_WITH_IMM) {
+				ret = -EINVAL;
+				goto out_put;
+			}
+
 			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
 						     file->ucontext);
 			if (!next->wr.ud.ah) {
@@ -2142,9 +2148,11 @@
 					user_wr->wr.atomic.compare_add;
 				next->wr.atomic.swap = user_wr->wr.atomic.swap;
 				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
+			case IB_WR_SEND:
 				break;
 			default:
-				break;
+				ret = -EINVAL;
+				goto out_put;
 			}
 		}
 
diff -ur a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
--- a/drivers/infiniband/core/uverbs.h	2017-03-23 14:40:46.000000000 +0100
+++ b/drivers/infiniband/core/uverbs.h	2017-03-14 02:16:09.000000000 +0100
@@ -69,7 +69,7 @@
  */
 
 struct ib_uverbs_device {
-	struct kref				ref;
+	atomic_t				refcount;
 	int					num_comp_vectors;
 	struct completion			comp;
 	struct device			       *dev;
@@ -78,6 +78,7 @@
 	struct cdev			        cdev;
 	struct rb_root				xrcd_tree;
 	struct mutex				xrcd_tree_mutex;
+	struct kobject				kobj;
 };
 
 struct ib_uverbs_event_file {
diff -ur a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
--- a/drivers/infiniband/core/uverbs_main.c	2017-03-23 14:40:44.000000000 +0100
+++ b/drivers/infiniband/core/uverbs_main.c	2017-03-14 02:16:07.000000000 +0100
@@ -119,14 +119,18 @@
 static void ib_uverbs_add_one(struct ib_device *device);
 static void ib_uverbs_remove_one(struct ib_device *device);
 
-static void ib_uverbs_release_dev(struct kref *ref)
+static void ib_uverbs_release_dev(struct kobject *kobj)
 {
 	struct ib_uverbs_device *dev =
-		container_of(ref, struct ib_uverbs_device, ref);
+		container_of(kobj, struct ib_uverbs_device, kobj);
 
-	complete(&dev->comp);
+	kfree(dev);
 }
 
+static struct kobj_type ib_uverbs_dev_ktype = {
+	.release = ib_uverbs_release_dev,
+};
+
 static void ib_uverbs_release_event_file(struct kref *ref)
 {
 	struct ib_uverbs_event_file *file =
@@ -282,13 +286,19 @@
 	return context->device->dealloc_ucontext(context);
 }
 
+static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
+{
+	complete(&dev->comp);
+}
+
 static void ib_uverbs_release_file(struct kref *ref)
 {
 	struct ib_uverbs_file *file =
 		container_of(ref, struct ib_uverbs_file, ref);
 
 	module_put(file->device->ib_dev->owner);
-	kref_put(&file->device->ref, ib_uverbs_release_dev);
+	if (atomic_dec_and_test(&file->device->refcount))
+		ib_uverbs_comp_dev(file->device);
 
 	kfree(file);
 }
@@ -629,9 +639,7 @@
 	int ret;
 
 	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
-	if (dev)
-		kref_get(&dev->ref);
-	else
+	if (!atomic_inc_not_zero(&dev->refcount))
 		return -ENXIO;
 
 	if (!try_module_get(dev->ib_dev->owner)) {
@@ -652,6 +660,7 @@
 	mutex_init(&file->mutex);
 
 	filp->private_data = file;
+	kobject_get(&dev->kobj);
 
 	return nonseekable_open(inode, filp);
 
@@ -659,13 +668,16 @@
 	module_put(dev->ib_dev->owner);
 
 err:
-	kref_put(&dev->ref, ib_uverbs_release_dev);
+	if (atomic_dec_and_test(&dev->refcount))
+		ib_uverbs_comp_dev(dev);
+
 	return ret;
 }
 
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
 	struct ib_uverbs_file *file = filp->private_data;
+	struct ib_uverbs_device *dev = file->device;
 
 	ib_uverbs_cleanup_ucontext(file, file->ucontext);
 
@@ -673,6 +685,7 @@
 		kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
 
 	kref_put(&file->ref, ib_uverbs_release_file);
+	kobject_put(&dev->kobj);
 
 	return 0;
 }
@@ -768,10 +781,11 @@
 	if (!uverbs_dev)
 		return;
 
-	kref_init(&uverbs_dev->ref);
+	atomic_set(&uverbs_dev->refcount, 1);
 	init_completion(&uverbs_dev->comp);
 	uverbs_dev->xrcd_tree = RB_ROOT;
 	mutex_init(&uverbs_dev->xrcd_tree_mutex);
+	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
 
 	spin_lock(&map_lock);
 	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -798,6 +812,7 @@
 	cdev_init(&uverbs_dev->cdev, NULL);
 	uverbs_dev->cdev.owner = THIS_MODULE;
 	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+	uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
 	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
 	if (cdev_add(&uverbs_dev->cdev, base, 1))
 		goto err_cdev;
@@ -828,9 +843,10 @@
 		clear_bit(devnum, overflow_map);
 
 err:
-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+	if (atomic_dec_and_test(&uverbs_dev->refcount))
+		ib_uverbs_comp_dev(uverbs_dev);
 	wait_for_completion(&uverbs_dev->comp);
-	kfree(uverbs_dev);
+	kobject_put(&uverbs_dev->kobj);
 	return;
 }
 
@@ -850,9 +866,10 @@
 	else
 		clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
 
-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+	if (atomic_dec_and_test(&uverbs_dev->refcount))
+		ib_uverbs_comp_dev(uverbs_dev);
 	wait_for_completion(&uverbs_dev->comp);
-	kfree(uverbs_dev);
+	kobject_put(&uverbs_dev->kobj);
 }
 
 static char *uverbs_devnode(struct device *dev, umode_t *mode)
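The uverbs_main.c changes above split the old single kref into two lifetimes: an atomic_t refcount that decides whether open() may still grab the device (and whose final drop completes removal), and a kobject that owns the memory and is released only once the cdev and all open files are gone. A minimal C11 userspace sketch of the inc-not-zero / dec-and-test half of that pattern; the names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcount = 1;		/* reference held by the registration */

static bool get_ref(void)		/* analogous to atomic_inc_not_zero() */
{
	int old = atomic_load(&refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&refcount, &old, old + 1))
			return true;
	return false;			/* device already going away */
}

static void put_ref(void)		/* analogous to atomic_dec_and_test() */
{
	if (atomic_fetch_sub(&refcount, 1) == 1)
		printf("last reference dropped: signal removal completion\n");
}

int main(void)
{
	if (get_ref())			/* an open() racing with removal */
		put_ref();		/* ...and its matching release */
	put_ref();			/* removal drops the registration ref */
	return 0;
}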
diff -ur a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c	2017-03-23 14:41:07.000000000 +0100
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c	2017-03-14 02:16:36.000000000 +0100
@@ -149,7 +149,7 @@
 	error = l2t_send(tdev, skb, l2e);
 	if (error < 0)
 		kfree_skb(skb);
-	return error;
+	return error < 0 ? error : 0;
 }
 
 int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
@@ -165,7 +165,7 @@
 	error = cxgb3_ofld_send(tdev, skb);
 	if (error < 0)
 		kfree_skb(skb);
-	return error;
+	return error < 0 ? error : 0;
 }
 
 static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
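Both iwch_cm.c hunks above normalise the return value because the underlying offload-send helpers may return a positive, non-error status that callers would otherwise treat as a failure. A tiny standalone sketch of the idiom; send_backend() is a made-up stand-in for l2t_send()/cxgb3_ofld_send().

#include <stdio.h>

static int send_backend(void)
{
	return 2;	/* hypothetical positive, non-error status */
}

static int ofld_send(void)
{
	int error = send_backend();

	return error < 0 ? error : 0;	/* only negative values are errors */
}

int main(void)
{
	printf("ofld_send() = %d\n", ofld_send());	/* 0, i.e. success */
	return 0;
}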
diff -ur a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
--- a/drivers/infiniband/hw/mlx4/ah.c	2017-03-23 14:40:59.000000000 +0100
+++ b/drivers/infiniband/hw/mlx4/ah.c	2017-03-14 02:16:27.000000000 +0100
@@ -169,9 +169,13 @@
 	enum rdma_link_layer ll;
 
 	memset(ah_attr, 0, sizeof *ah_attr);
-	ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
 	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
 	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+	if (ll == IB_LINK_LAYER_ETHERNET)
+		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
+	else
+		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+
 	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
 	if (ah->av.ib.stat_rate)
 		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
diff -ur a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
--- a/drivers/infiniband/hw/mlx4/sysfs.c	2017-03-23 14:40:59.000000000 +0100
+++ b/drivers/infiniband/hw/mlx4/sysfs.c	2017-03-14 02:16:26.000000000 +0100
@@ -562,6 +562,8 @@
 	struct mlx4_port *p;
 	int i;
 	int ret;
+	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
+			IB_LINK_LAYER_ETHERNET;
 
 	p = kzalloc(sizeof *p, GFP_KERNEL);
 	if (!p)
@@ -579,7 +581,8 @@
 
 	p->pkey_group.name  = "pkey_idx";
 	p->pkey_group.attrs =
-		alloc_group_attrs(show_port_pkey, store_port_pkey,
+		alloc_group_attrs(show_port_pkey,
+				  is_eth ? NULL : store_port_pkey,
 				  dev->dev->caps.pkey_table_len[port_num]);
 	if (!p->pkey_group.attrs)
 		goto err_alloc;
diff -ur a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
--- a/drivers/infiniband/hw/qib/qib.h	2017-03-23 14:40:59.000000000 +0100
+++ b/drivers/infiniband/hw/qib/qib.h	2017-03-14 02:16:26.000000000 +0100
@@ -1466,27 +1466,22 @@
  * first to avoid possible serial port delays from printk.
  */
 #define qib_early_err(dev, fmt, ...) \
-	do { \
-		dev_err(dev, fmt, ##__VA_ARGS__); \
-	} while (0)
+	dev_err(dev, fmt, ##__VA_ARGS__)
 
 #define qib_dev_err(dd, fmt, ...) \
-	do { \
-		dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
-			qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
-	} while (0)
+	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
 
-#define qib_dev_porterr(dd, port, fmt, ...) \
-	do { \
-		dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
-			qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
-			##__VA_ARGS__); \
-	} while (0)
+#define qib_dev_warn(dd, fmt, ...) \
+	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
 
+#define qib_dev_porterr(dd, port, fmt, ...) \
+	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+		qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+		##__VA_ARGS__)
 #define qib_devinfo(pcidev, fmt, ...) \
-	do { \
-		dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
-	} while (0)
+	dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
 
 /*
  * this is used for formatting hw error messages...
diff -ur a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
--- a/drivers/infiniband/hw/qib/qib_keys.c	2017-03-23 14:40:55.000000000 +0100
+++ b/drivers/infiniband/hw/qib/qib_keys.c	2017-03-14 02:16:21.000000000 +0100
@@ -86,6 +86,10 @@
 	 * unrestricted LKEY.
 	 */
 	rkt->gen++;
+	/*
+	 * bits are capped in qib_verbs.c to ensure enough bits
+	 * for generation number
+	 */
 	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
 		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
 		 << 8);
diff -ur a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
--- a/drivers/infiniband/hw/qib/qib_verbs.c	2017-03-23 14:41:00.000000000 +0100
+++ b/drivers/infiniband/hw/qib/qib_verbs.c	2017-03-14 02:16:26.000000000 +0100
@@ -40,6 +40,7 @@
 #include <linux/rculist.h>
 #include <linux/mm.h>
 #include <linux/random.h>
+#include <linux/vmalloc.h>
 
 #include "qib.h"
 #include "qib_common.h"
@@ -2084,10 +2085,16 @@
 	 * the LKEY).  The remaining bits act as a generation number or tag.
 	 */
 	spin_lock_init(&dev->lk_table.lock);
+	/* ensure generation is at least 4 bits; see keys.c */
+	if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+		qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+			ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+		ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+	}
 	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
 	dev->lk_table.table = (struct qib_mregion __rcu **)
-		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+		vmalloc(lk_tab_size);
 	if (dev->lk_table.table == NULL) {
 		ret = -ENOMEM;
 		goto err_lk;
@@ -2260,7 +2267,7 @@
 					sizeof(struct qib_pio_header),
 				  dev->pio_hdrs, dev->pio_hdrs_phys);
 err_hdrs:
-	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+	vfree(dev->lk_table.table);
 err_lk:
 	kfree(dev->qp_table);
 err_qpt:
@@ -2314,8 +2321,7 @@
 					sizeof(struct qib_pio_header),
 				  dev->pio_hdrs, dev->pio_hdrs_phys);
 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
-	free_pages((unsigned long) dev->lk_table.table,
-		   get_order(lk_tab_size));
+	vfree(dev->lk_table.table);
 	kfree(dev->qp_table);
 }
 
diff -ur a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
--- a/drivers/infiniband/hw/qib/qib_verbs.h	2017-03-23 14:40:59.000000000 +0100
+++ b/drivers/infiniband/hw/qib/qib_verbs.h	2017-03-14 02:16:25.000000000 +0100
@@ -644,6 +644,8 @@
 	struct qpn_map map[QPNMAP_ENTRIES];
 };
 
+#define MAX_LKEY_TABLE_BITS 23
+
 struct qib_lkey_table {
 	spinlock_t lock; /* protect changes in this struct */
 	u32 next;               /* next unused index (speeds search) */
diff -ur a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c	2017-03-23 14:40:54.000000000 +0100
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c	2017-03-14 02:16:19.000000000 +0100
@@ -286,15 +286,13 @@
 	struct qib_ibdev *dev = to_idev(ibqp->device);
 	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
 	struct qib_mcast *mcast = NULL;
-	struct qib_mcast_qp *p, *tmp;
+	struct qib_mcast_qp *p, *tmp, *delp = NULL;
 	struct rb_node *n;
 	int last = 0;
 	int ret;
 
-	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
-		ret = -EINVAL;
-		goto bail;
-	}
+	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+		return -EINVAL;
 
 	spin_lock_irq(&ibp->lock);
 
@@ -303,8 +301,7 @@
 	while (1) {
 		if (n == NULL) {
 			spin_unlock_irq(&ibp->lock);
-			ret = -EINVAL;
-			goto bail;
+			return -EINVAL;
 		}
 
 		mcast = rb_entry(n, struct qib_mcast, rb_node);
@@ -328,6 +325,7 @@
 		 */
 		list_del_rcu(&p->list);
 		mcast->n_attached--;
+		delp = p;
 
 		/* If this was the last attached QP, remove the GID too. */
 		if (list_empty(&mcast->qp_list)) {
@@ -338,15 +336,16 @@
 	}
 
 	spin_unlock_irq(&ibp->lock);
+	/* QP not attached */
+	if (!delp)
+		return -EINVAL;
+	/*
+	 * Wait for any list walkers to finish before freeing the
+	 * list element.
+	 */
+	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+	qib_mcast_qp_free(delp);
 
-	if (p) {
-		/*
-		 * Wait for any list walkers to finish before freeing the
-		 * list element.
-		 */
-		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
-		qib_mcast_qp_free(p);
-	}
 	if (last) {
 		atomic_dec(&mcast->refcount);
 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
@@ -355,11 +354,7 @@
 		dev->n_mcast_grps_allocated--;
 		spin_unlock_irq(&dev->n_mcast_grps_lock);
 	}
-
-	ret = 0;
-
-bail:
-	return ret;
+	return 0;
 }
 
 int qib_mcast_tree_empty(struct qib_ibport *ibp)
diff -ur a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
--- a/drivers/infiniband/ulp/isert/ib_isert.c	2017-03-23 14:40:46.000000000 +0100
+++ b/drivers/infiniband/ulp/isert/ib_isert.c	2017-03-14 02:16:09.000000000 +0100
@@ -49,6 +49,8 @@
 isert_rdma_accept(struct isert_conn *isert_conn);
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
+static void isert_release_work(struct work_struct *work);
+
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
 {
@@ -202,7 +204,7 @@
 static void
 isert_free_rx_descriptors(struct isert_conn *isert_conn)
 {
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
 	struct iser_rx_desc *rx_desc;
 	int i;
 
@@ -432,6 +434,7 @@
 	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
+	INIT_WORK(&isert_conn->release_work, isert_release_work);
 
 	isert_conn->conn_cm_id = cma_id;
 	isert_conn->responder_resources = event->param.conn.responder_resources;
@@ -527,14 +530,15 @@
 static void
 isert_connect_release(struct isert_conn *isert_conn)
 {
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct isert_device *device = isert_conn->conn_device;
 	int cq_index;
+	struct ib_device *ib_dev = device->ib_device;
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
 	isert_free_rx_descriptors(isert_conn);
-	rdma_destroy_id(isert_conn->conn_cm_id);
+	if (isert_conn->conn_cm_id)
+		rdma_destroy_id(isert_conn->conn_cm_id);
 
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
@@ -673,6 +677,7 @@
 {
 	struct isert_np *isert_np = cma_id->context;
 	struct isert_conn *isert_conn;
+	bool terminating = false;
 
 	if (isert_np->np_cm_id == cma_id)
 		return isert_np_cma_handler(cma_id->context, event);
@@ -680,21 +685,37 @@
 	isert_conn = cma_id->qp->qp_context;
 
 	mutex_lock(&isert_conn->conn_mutex);
+	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
 	isert_conn_terminate(isert_conn);
 	mutex_unlock(&isert_conn->conn_mutex);
 
 	pr_info("conn %p completing conn_wait\n", isert_conn);
 	complete(&isert_conn->conn_wait);
 
+	if (terminating)
+		goto out;
+
+	mutex_lock(&isert_np->np_accept_mutex);
+	if (!list_empty(&isert_conn->conn_accept_node)) {
+		list_del_init(&isert_conn->conn_accept_node);
+		isert_put_conn(isert_conn);
+		queue_work(isert_release_wq, &isert_conn->release_work);
+	}
+	mutex_unlock(&isert_np->np_accept_mutex);
+
+out:
 	return 0;
 }
 
-static void
+static int
 isert_connect_error(struct rdma_cm_id *cma_id)
 {
 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
+	isert_conn->conn_cm_id = NULL;
 	isert_put_conn(isert_conn);
+
+	return -1;
 }
 
 static int
@@ -724,7 +745,7 @@
 	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
 	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
-		isert_connect_error(cma_id);
+		ret = isert_connect_error(cma_id);
 		break;
 	default:
 		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
@@ -2418,7 +2439,6 @@
 
 	wait_for_completion(&isert_conn->conn_wait_comp_err);
 
-	INIT_WORK(&isert_conn->release_work, isert_release_work);
 	queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
diff -ur a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
--- a/drivers/input/misc/ati_remote2.c	2017-03-23 14:39:33.000000000 +0100
+++ b/drivers/input/misc/ati_remote2.c	2017-03-14 02:14:42.000000000 +0100
@@ -817,26 +817,49 @@
 
 	ar2->udev = udev;
 
+	/* Sanity check, first interface must have an endpoint */
+	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+		dev_err(&interface->dev,
+			"%s(): interface 0 must have an endpoint\n", __func__);
+		r = -ENODEV;
+		goto fail1;
+	}
 	ar2->intf[0] = interface;
 	ar2->ep[0] = &alt->endpoint[0].desc;
 
+	/* Sanity check, the device must have two interfaces */
 	ar2->intf[1] = usb_ifnum_to_if(udev, 1);
+	if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
+		dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
+			__func__, udev->actconfig->desc.bNumInterfaces);
+		r = -ENODEV;
+		goto fail1;
+	}
+
 	r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
 	if (r)
 		goto fail1;
+
+	/* Sanity check, second interface must have an endpoint */
 	alt = ar2->intf[1]->cur_altsetting;
+	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+		dev_err(&interface->dev,
+			"%s(): interface 1 must have an endpoint\n", __func__);
+		r = -ENODEV;
+		goto fail2;
+	}
 	ar2->ep[1] = &alt->endpoint[0].desc;
 
 	r = ati_remote2_urb_init(ar2);
 	if (r)
-		goto fail2;
+		goto fail3;
 
 	ar2->channel_mask = channel_mask;
 	ar2->mode_mask = mode_mask;
 
 	r = ati_remote2_setup(ar2, ar2->channel_mask);
 	if (r)
-		goto fail2;
+		goto fail3;
 
 	usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
 	strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
@@ -845,11 +868,11 @@
 
 	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
 	if (r)
-		goto fail2;
+		goto fail3;
 
 	r = ati_remote2_input_init(ar2);
 	if (r)
-		goto fail3;
+		goto fail4;
 
 	usb_set_intfdata(interface, ar2);
 
@@ -857,10 +880,11 @@
 
 	return 0;
 
- fail3:
+ fail4:
 	sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
- fail2:
+ fail3:
 	ati_remote2_urb_cleanup(ar2);
+ fail2:
 	usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
  fail1:
 	kfree(ar2);
diff -ur a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
--- a/drivers/input/misc/ims-pcu.c	2017-03-23 14:39:33.000000000 +0100
+++ b/drivers/input/misc/ims-pcu.c	2017-03-14 02:14:41.000000000 +0100
@@ -1428,6 +1428,8 @@
 
 	pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
 					 union_desc->bMasterInterface0);
+	if (!pcu->ctrl_intf)
+		return -EINVAL;
 
 	alt = pcu->ctrl_intf->cur_altsetting;
 	pcu->ep_ctrl = &alt->endpoint[0].desc;
@@ -1435,6 +1437,8 @@
 
 	pcu->data_intf = usb_ifnum_to_if(pcu->udev,
 					 union_desc->bSlaveInterface0);
+	if (!pcu->data_intf)
+		return -EINVAL;
 
 	alt = pcu->data_intf->cur_altsetting;
 	if (alt->desc.bNumEndpoints != 2) {
diff -ur a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
--- a/drivers/input/misc/max8997_haptic.c	2017-03-23 14:39:33.000000000 +0100
+++ b/drivers/input/misc/max8997_haptic.c	2017-03-14 02:14:41.000000000 +0100
@@ -246,12 +246,14 @@
 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
 	const struct max8997_platform_data *pdata =
 					dev_get_platdata(iodev->dev);
-	const struct max8997_haptic_platform_data *haptic_pdata =
-					pdata->haptic_pdata;
+	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
 	struct max8997_haptic *chip;
 	struct input_dev *input_dev;
 	int error;
 
+	if (pdata)
+		haptic_pdata = pdata->haptic_pdata;
+
 	if (!haptic_pdata) {
 		dev_err(&pdev->dev, "no haptic platform data\n");
 		return -EINVAL;
diff -ur a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
--- a/drivers/input/misc/powermate.c	2017-03-23 14:39:30.000000000 +0100
+++ b/drivers/input/misc/powermate.c	2017-03-14 02:14:37.000000000 +0100
@@ -307,6 +307,9 @@
 	int error = -ENOMEM;
 
 	interface = intf->cur_altsetting;
+	if (interface->desc.bNumEndpoints < 1)
+		return -EINVAL;
+
 	endpoint = &interface->endpoint[0].desc;
 	if (!usb_endpoint_is_int_in(endpoint))
 		return -EIO;
diff -ur a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
--- a/drivers/input/mouse/elantech.c	2017-03-23 14:39:42.000000000 +0100
+++ b/drivers/input/mouse/elantech.c	2017-03-14 02:14:54.000000000 +0100
@@ -314,7 +314,7 @@
 					 unsigned int x2, unsigned int y2)
 {
 	elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
-	elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
+	elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
 }
 
 /*
@@ -1081,7 +1081,7 @@
 			input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
 					     ETP_WMAX_V2, 0, 0);
 		}
-		input_mt_init_slots(dev, 2, 0);
+		input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
 		input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
 		input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
 		break;
@@ -1241,10 +1241,11 @@
 		return true;
 
 	/*
-	 * Some models have a revision higher then 20. Meaning param[2] may
-	 * be 10 or 20, skip the rates check for these.
+	 * Some hw_version >= 4 models have a revision higher than 20. Meaning
+	 * that param[2] may be 10 or 20, skip the rates check for these.
 	 */
-	if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+	if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
+	    param[2] < 40)
 		return true;
 
 	for (i = 0; i < ARRAY_SIZE(rates); i++)
@@ -1356,6 +1357,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
 		},
 	},
+	{
+		/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+		},
+	},
 #endif
 	{ }
 };
diff -ur a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
--- a/drivers/input/serio/i8042-x86ia64io.h	2017-03-23 14:39:46.000000000 +0100
+++ b/drivers/input/serio/i8042-x86ia64io.h	2017-03-14 02:15:01.000000000 +0100
@@ -257,6 +257,13 @@
 		},
 	},
 	{
+		/* Fujitsu Lifebook U745 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+		},
+	},
+	{
 		/* Fujitsu T70H */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff -ur a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
--- a/drivers/input/tablet/gtco.c	2017-03-23 14:39:41.000000000 +0100
+++ b/drivers/input/tablet/gtco.c	2017-03-14 02:14:52.000000000 +0100
@@ -854,6 +854,14 @@
 		goto err_free_buf;
 	}
 
+	/* Sanity check that a device has an endpoint */
+	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+		dev_err(&usbinterface->dev,
+			"Invalid number of endpoints\n");
+		error = -EINVAL;
+		goto err_free_urb;
+	}
+
 	/*
 	 * The endpoint is always altsetting 0, we know this since we know
 	 * this device only has one interrupt endpoint
@@ -875,7 +883,7 @@
 	 * HID report descriptor
 	 */
 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
-				     HID_DEVICE_TYPE, &hid_desc) != 0){
+				     HID_DEVICE_TYPE, &hid_desc) != 0) {
 		dev_err(&usbinterface->dev,
 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
 		error = -EIO;
diff -ur a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
--- a/drivers/input/touchscreen/ads7846.c	2017-03-23 14:39:36.000000000 +0100
+++ b/drivers/input/touchscreen/ads7846.c	2017-03-14 02:14:46.000000000 +0100
@@ -695,18 +695,22 @@
 
 static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
 {
+	int value;
 	struct spi_transfer *t =
 		list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
 
 	if (ts->model == 7845) {
-		return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3;
+		value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1]));
 	} else {
 		/*
 		 * adjust:  on-wire is a must-ignore bit, a BE12 value, then
 		 * padding; built from two 8 bit values written msb-first.
 		 */
-		return be16_to_cpup((__be16 *)t->rx_buf) >> 3;
+		value = be16_to_cpup((__be16 *)t->rx_buf);
 	}
+
+	/* enforce ADC output is 12 bits width */
+	return (value >> 3) & 0xfff;
 }
 
 static void ads7846_update_value(struct spi_message *m, int val)
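The ads7846 hunk above makes both model branches share one exit path that shifts away the leading must-ignore bit and then masks the result to the converter's 12-bit width, so stray high bits in the padding can no longer leak into the reported value. A minimal sketch of that extraction with a made-up raw word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t raw = 0xffff;			/* hypothetical garbage sample */
	int value = (raw >> 3) & 0xfff;		/* enforce 12-bit ADC width */

	printf("value = %d (max 4095)\n", value);
	return 0;
}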
diff -ur a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
--- a/drivers/input/touchscreen/usbtouchscreen.c	2017-03-23 14:39:36.000000000 +0100
+++ b/drivers/input/touchscreen/usbtouchscreen.c	2017-03-14 02:14:46.000000000 +0100
@@ -618,6 +618,9 @@
 		goto err_out;
 	}
 
+	/* TSC-25 data sheet specifies a delay after the RESET command */
+	msleep(150);
+
 	/* set coordinate output rate */
 	buf[0] = buf[1] = 0xFF;
 	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff -ur a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
--- a/drivers/iommu/amd_iommu.c	2017-03-23 14:38:35.000000000 +0100
+++ b/drivers/iommu/amd_iommu.c	2017-03-14 02:13:17.000000000 +0100
@@ -287,14 +287,27 @@
 
 	/*
 	 * If it's a multifunction device that does not support our
-	 * required ACS flags, add to the same group as function 0.
+	 * required ACS flags, add to the same group as the lowest-numbered
+	 * function that also does not support the required ACS flags.
 	 */
 	if (dma_pdev->multifunction &&
-	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-		swap_pci_ref(&dma_pdev,
-			     pci_get_slot(dma_pdev->bus,
-					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-					  0)));
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+		u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+		for (i = 0; i < 8; i++) {
+			struct pci_dev *tmp;
+
+			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+			if (!tmp)
+				continue;
+
+			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+				swap_pci_ref(&dma_pdev, tmp);
+				break;
+			}
+			pci_dev_put(tmp);
+		}
+	}
 
 	/*
 	 * Devices on the root bus go through the iommu.  If that's not us,
@@ -2104,8 +2117,8 @@
 static void clear_dte_entry(u16 devid)
 {
 	/* remove entry from the device table seen by the hardware */
-	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
-	amd_iommu_dev_table[devid].data[1] = 0;
+	amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
+	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
 
 	amd_iommu_apply_erratum_63(devid);
 }
diff -ur a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
--- a/drivers/iommu/amd_iommu_types.h	2017-03-23 14:38:33.000000000 +0100
+++ b/drivers/iommu/amd_iommu_types.h	2017-03-14 02:13:15.000000000 +0100
@@ -281,6 +281,7 @@
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
+#define DTE_FLAG_MASK	(0x3ffULL << 32)
 #define DTE_FLAG_IOTLB	(0x01UL << 32)
 #define DTE_FLAG_GV	(0x01ULL << 55)
 #define DTE_GLX_SHIFT	(56)
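DTE_FLAG_MASK added above covers bits 32-41 of the second device-table-entry word, so the clear_dte_entry() hunk in amd_iommu.c now preserves flags such as DTE_FLAG_IOTLB instead of zeroing the whole word. A small standalone sketch of what the mask keeps (macros rewritten with ULL for a portable userspace build):

#include <stdint.h>
#include <stdio.h>

#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_FLAG_IOTLB	(0x01ULL << 32)

int main(void)
{
	/* hypothetical DTE word: one flag bit plus unrelated stale bits */
	uint64_t data1 = DTE_FLAG_IOTLB | 0xdeadbeefULL | (0x7ULL << 48);

	data1 &= DTE_FLAG_MASK;		/* the old code did: data1 = 0 */
	printf("preserved flags: %#llx\n", (unsigned long long)data1);
	return 0;
}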
diff -ur a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
--- a/drivers/iommu/dmar.c	2017-03-23 14:38:32.000000000 +0100
+++ b/drivers/iommu/dmar.c	2017-03-14 02:13:14.000000000 +0100
@@ -43,14 +43,27 @@
 
 #include "irq_remapping.h"
 
-/* No locks are needed as DMA remapping hardware unit
- * list is constructed at boot time and hotplug of
- * these units are not supported by the architecture.
+/*
+ * Assumptions:
+ * 1) The hotplug framework guarantees that DMAR unit will be hot-added
+ *    before IO devices managed by that unit.
+ * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
+ *    after IO devices managed by that unit.
+ * 3) Hotplug events are rare.
+ *
+ * Locking rules for DMA and interrupt remapping related global data structures:
+ * 1) Use dmar_global_lock in process context
+ * 2) Use RCU in interrupt context
  */
+DECLARE_RWSEM(dmar_global_lock);
 LIST_HEAD(dmar_drhd_units);
 
 struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
+static int dmar_dev_scope_status = 1;
+
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
 
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
@@ -59,69 +72,14 @@
 	 * the very end.
 	 */
 	if (drhd->include_all)
-		list_add_tail(&drhd->list, &dmar_drhd_units);
+		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
 	else
-		list_add(&drhd->list, &dmar_drhd_units);
-}
-
-static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
-					   struct pci_dev **dev, u16 segment)
-{
-	struct pci_bus *bus;
-	struct pci_dev *pdev = NULL;
-	struct acpi_dmar_pci_path *path;
-	int count;
-
-	bus = pci_find_bus(segment, scope->bus);
-	path = (struct acpi_dmar_pci_path *)(scope + 1);
-	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
-		/ sizeof(struct acpi_dmar_pci_path);
-
-	while (count) {
-		if (pdev)
-			pci_dev_put(pdev);
-		/*
-		 * Some BIOSes list non-exist devices in DMAR table, just
-		 * ignore it
-		 */
-		if (!bus) {
-			pr_warn("Device scope bus [%d] not found\n", scope->bus);
-			break;
-		}
-		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
-		if (!pdev) {
-			/* warning will be printed below */
-			break;
-		}
-		path ++;
-		count --;
-		bus = pdev->subordinate;
-	}
-	if (!pdev) {
-		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
-			segment, scope->bus, path->dev, path->fn);
-		*dev = NULL;
-		return 0;
-	}
-	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
-			pdev->subordinate) || (scope->entry_type == \
-			ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
-		pci_dev_put(pdev);
-		pr_warn("Device scope type does not match for %s\n",
-			pci_name(pdev));
-		return -EINVAL;
-	}
-	*dev = pdev;
-	return 0;
+		list_add_rcu(&drhd->list, &dmar_drhd_units);
 }
 
-int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
-				struct pci_dev ***devices, u16 segment)
+void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
 {
 	struct acpi_dmar_device_scope *scope;
-	void * tmp = start;
-	int index;
-	int ret;
 
 	*cnt = 0;
 	while (start < end) {
@@ -136,32 +94,237 @@
 		start += scope->length;
 	}
 	if (*cnt == 0)
-		return 0;
+		return NULL;
 
-	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
-	if (!*devices)
-		return -ENOMEM;
+	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
+}
 
-	start = tmp;
-	index = 0;
-	while (start < end) {
+void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
+{
+	int i;
+	struct device *tmp_dev;
+
+	if (*devices && *cnt) {
+		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
+			put_device(tmp_dev);
+		kfree(*devices);
+	}
+
+	*devices = NULL;
+	*cnt = 0;
+}
+
+/* Optimize out kzalloc()/kfree() for normal cases */
+static char dmar_pci_notify_info_buf[64];
+
+static struct dmar_pci_notify_info *
+dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
+{
+	int level = 0;
+	size_t size;
+	struct pci_dev *tmp;
+	struct dmar_pci_notify_info *info;
+
+	BUG_ON(dev->is_virtfn);
+
+	/* Only generate path[] for device addition event */
+	if (event == BUS_NOTIFY_ADD_DEVICE)
+		for (tmp = dev; tmp; tmp = tmp->bus->self)
+			level++;
+
+	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+	if (size <= sizeof(dmar_pci_notify_info_buf)) {
+		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
+	} else {
+		info = kzalloc(size, GFP_KERNEL);
+		if (!info) {
+			pr_warn("Out of memory when allocating notify_info "
+				"for %s.\n", pci_name(dev));
+			if (dmar_dev_scope_status == 0)
+				dmar_dev_scope_status = -ENOMEM;
+			return NULL;
+		}
+	}
+
+	info->event = event;
+	info->dev = dev;
+	info->seg = pci_domain_nr(dev->bus);
+	info->level = level;
+	if (event == BUS_NOTIFY_ADD_DEVICE) {
+		for (tmp = dev; tmp; tmp = tmp->bus->self) {
+			level--;
+			info->path[level].device = PCI_SLOT(tmp->devfn);
+			info->path[level].function = PCI_FUNC(tmp->devfn);
+			if (pci_is_root_bus(tmp->bus))
+				info->bus = tmp->bus->number;
+		}
+	}
+
+	return info;
+}
+
+static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
+{
+	if ((void *)info != dmar_pci_notify_info_buf)
+		kfree(info);
+}
+
+static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
+				struct acpi_dmar_pci_path *path, int count)
+{
+	int i;
+
+	if (info->bus != bus)
+		return false;
+	if (info->level != count)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		if (path[i].device != info->path[i].device ||
+		    path[i].function != info->path[i].function)
+			return false;
+	}
+
+	return true;
+}
+
+/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
+int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+			  void *start, void*end, u16 segment,
+			  struct dmar_dev_scope *devices,
+			  int devices_cnt)
+{
+	int i, level;
+	struct device *tmp, *dev = &info->dev->dev;
+	struct acpi_dmar_device_scope *scope;
+	struct acpi_dmar_pci_path *path;
+
+	if (segment != info->seg)
+		return 0;
+
+	for (; start < end; start += scope->length) {
 		scope = start;
-		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
-		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
-			ret = dmar_parse_one_dev_scope(scope,
-				&(*devices)[index], segment);
-			if (ret) {
-				kfree(*devices);
-				return ret;
-			}
-			index ++;
+		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
+		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
+			continue;
+
+		path = (struct acpi_dmar_pci_path *)(scope + 1);
+		level = (scope->length - sizeof(*scope)) / sizeof(*path);
+		if (!dmar_match_pci_path(info, scope->bus, path, level))
+			continue;
+
+		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
+		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
+			pr_warn("Device scope type does not match for %s\n",
+				pci_name(info->dev));
+			return -EINVAL;
 		}
-		start += scope->length;
+
+		for_each_dev_scope(devices, devices_cnt, i, tmp)
+			if (tmp == NULL) {
+				devices[i].bus = info->dev->bus->number;
+				devices[i].devfn = info->dev->devfn;
+				rcu_assign_pointer(devices[i].dev,
+						   get_device(dev));
+				return 1;
+			}
+		BUG_ON(i >= devices_cnt);
 	}
 
 	return 0;
 }
 
+int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
+			  struct dmar_dev_scope *devices, int count)
+{
+	int index;
+	struct device *tmp;
+
+	if (info->seg != segment)
+		return 0;
+
+	for_each_active_dev_scope(devices, count, index, tmp)
+		if (tmp == &info->dev->dev) {
+			rcu_assign_pointer(devices[index].dev, NULL);
+			synchronize_rcu();
+			put_device(tmp);
+			return 1;
+		}
+
+	return 0;
+}
+
+static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
+{
+	int ret = 0;
+	struct dmar_drhd_unit *dmaru;
+	struct acpi_dmar_hardware_unit *drhd;
+
+	for_each_drhd_unit(dmaru) {
+		if (dmaru->include_all)
+			continue;
+
+		drhd = container_of(dmaru->hdr,
+				    struct acpi_dmar_hardware_unit, header);
+		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
+				((void *)drhd) + drhd->header.length,
+				dmaru->segment,
+				dmaru->devices, dmaru->devices_cnt);
+		if (ret != 0)
+			break;
+	}
+	if (ret >= 0)
+		ret = dmar_iommu_notify_scope_dev(info);
+	if (ret < 0 && dmar_dev_scope_status == 0)
+		dmar_dev_scope_status = ret;
+
+	return ret;
+}
+
+static void  dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
+{
+	struct dmar_drhd_unit *dmaru;
+
+	for_each_drhd_unit(dmaru)
+		if (dmar_remove_dev_scope(info, dmaru->segment,
+			dmaru->devices, dmaru->devices_cnt))
+			break;
+	dmar_iommu_notify_scope_dev(info);
+}
+
+static int dmar_pci_bus_notifier(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pci_dev *pdev = to_pci_dev(data);
+	struct dmar_pci_notify_info *info;
+
+	/* Only care about add/remove events for physical functions */
+	if (pdev->is_virtfn)
+		return NOTIFY_DONE;
+	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+		return NOTIFY_DONE;
+
+	info = dmar_alloc_pci_notify_info(pdev, action);
+	if (!info)
+		return NOTIFY_DONE;
+
+	down_write(&dmar_global_lock);
+	if (action == BUS_NOTIFY_ADD_DEVICE)
+		dmar_pci_bus_add_dev(info);
+	else if (action == BUS_NOTIFY_DEL_DEVICE)
+		dmar_pci_bus_del_dev(info);
+	up_write(&dmar_global_lock);
+
+	dmar_free_pci_notify_info(info);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block dmar_pci_bus_nb = {
+	.notifier_call = dmar_pci_bus_notifier,
+	.priority = INT_MIN,
+};
+
 /**
  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  * structure which uniquely represent one DMA remapping hardware unit
@@ -183,9 +346,21 @@
 	dmaru->reg_base_addr = drhd->address;
 	dmaru->segment = drhd->segment;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
+	if (!dmaru->include_all) {
+		dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
+					((void *)drhd) + drhd->header.length,
+					&dmaru->devices_cnt);
+		if (dmaru->devices_cnt && dmaru->devices == NULL) {
+			kfree(dmaru);
+			return -ENOMEM;
+		}
+	}
 
 	ret = alloc_iommu(dmaru);
 	if (ret) {
+		if (!dmaru->include_all)
+			dmar_free_dev_scope(&dmaru->devices,
+					    &dmaru->devices_cnt);
 		kfree(dmaru);
 		return ret;
 	}
@@ -193,25 +368,13 @@
 	return 0;
 }
 
-static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
 {
-	struct acpi_dmar_hardware_unit *drhd;
-	int ret = 0;
-
-	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
-
-	if (dmaru->include_all)
-		return 0;
-
-	ret = dmar_parse_dev_scope((void *)(drhd + 1),
-				((void *)drhd) + drhd->header.length,
-				&dmaru->devices_cnt, &dmaru->devices,
-				drhd->segment);
-	if (ret) {
-		list_del(&dmaru->list);
-		kfree(dmaru);
-	}
-	return ret;
+	if (dmaru->devices && dmaru->devices_cnt)
+		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+	if (dmaru->iommu)
+		free_iommu(dmaru->iommu);
+	kfree(dmaru);
 }
 
 #ifdef CONFIG_ACPI_NUMA
@@ -309,6 +472,7 @@
 	struct acpi_table_dmar *dmar;
 	struct acpi_dmar_header *entry_header;
 	int ret = 0;
+	int drhd_count = 0;
 
 	/*
 	 * Do it again, earlier dmar_tbl mapping could be mapped with
@@ -347,6 +511,7 @@
 
 		switch (entry_header->type) {
 		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+			drhd_count++;
 			ret = dmar_parse_one_drhd(entry_header);
 			break;
 		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
@@ -371,17 +536,20 @@
 
 		entry_header = ((void *)entry_header + entry_header->length);
 	}
+	if (drhd_count == 0)
+		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
 	return ret;
 }
 
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
-			  struct pci_dev *dev)
+static int dmar_pci_device_match(struct dmar_dev_scope devices[],
+				 int cnt, struct pci_dev *dev)
 {
 	int index;
+	struct device *tmp;
 
 	while (dev) {
-		for (index = 0; index < cnt; index++)
-			if (dev == devices[index])
+		for_each_active_dev_scope(devices, cnt, index, tmp)
+			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
 				return 1;
 
 		/* Check our parent */
@@ -394,56 +562,63 @@
 struct dmar_drhd_unit *
 dmar_find_matched_drhd_unit(struct pci_dev *dev)
 {
-	struct dmar_drhd_unit *dmaru = NULL;
+	struct dmar_drhd_unit *dmaru;
 	struct acpi_dmar_hardware_unit *drhd;
 
 	dev = pci_physfn(dev);
 
-	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+	rcu_read_lock();
+	for_each_drhd_unit(dmaru) {
 		drhd = container_of(dmaru->hdr,
 				    struct acpi_dmar_hardware_unit,
 				    header);
 
 		if (dmaru->include_all &&
 		    drhd->segment == pci_domain_nr(dev->bus))
-			return dmaru;
+			goto out;
 
 		if (dmar_pci_device_match(dmaru->devices,
 					  dmaru->devices_cnt, dev))
-			return dmaru;
+			goto out;
 	}
+	dmaru = NULL;
+out:
+	rcu_read_unlock();
 
-	return NULL;
+	return dmaru;
 }
 
 int __init dmar_dev_scope_init(void)
 {
-	static int dmar_dev_scope_initialized;
-	struct dmar_drhd_unit *drhd, *drhd_n;
-	int ret = -ENODEV;
-
-	if (dmar_dev_scope_initialized)
-		return dmar_dev_scope_initialized;
+	struct pci_dev *dev = NULL;
+	struct dmar_pci_notify_info *info;
 
-	if (list_empty(&dmar_drhd_units))
-		goto fail;
+	if (dmar_dev_scope_status != 1)
+		return dmar_dev_scope_status;
 
-	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
-		ret = dmar_parse_dev(drhd);
-		if (ret)
-			goto fail;
-	}
+	if (list_empty(&dmar_drhd_units)) {
+		dmar_dev_scope_status = -ENODEV;
+	} else {
+		dmar_dev_scope_status = 0;
 
-	ret = dmar_parse_rmrr_atsr_dev();
-	if (ret)
-		goto fail;
+		for_each_pci_dev(dev) {
+			if (dev->is_virtfn)
+				continue;
+
+			info = dmar_alloc_pci_notify_info(dev,
+					BUS_NOTIFY_ADD_DEVICE);
+			if (!info) {
+				return dmar_dev_scope_status;
+			} else {
+				dmar_pci_bus_add_dev(info);
+				dmar_free_pci_notify_info(info);
+			}
+		}
 
-	dmar_dev_scope_initialized = 1;
-	return 0;
+		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+	}
 
-fail:
-	dmar_dev_scope_initialized = ret;
-	return ret;
+	return dmar_dev_scope_status;
 }
 
 int __init dmar_table_init(void)
@@ -451,24 +626,23 @@
 	static int dmar_table_initialized;
 	int ret;
 
-	if (dmar_table_initialized)
-		return 0;
-
-	dmar_table_initialized = 1;
-
-	ret = parse_dmar_table();
-	if (ret) {
-		if (ret != -ENODEV)
-			pr_info("parse DMAR table failure.\n");
-		return ret;
-	}
+	if (dmar_table_initialized == 0) {
+		ret = parse_dmar_table();
+		if (ret < 0) {
+			if (ret != -ENODEV)
+				pr_info("parse DMAR table failure.\n");
+		} else  if (list_empty(&dmar_drhd_units)) {
+			pr_info("No DMAR devices found\n");
+			ret = -ENODEV;
+		}
 
-	if (list_empty(&dmar_drhd_units)) {
-		pr_info("No DMAR devices found\n");
-		return -ENODEV;
+		if (ret < 0)
+			dmar_table_initialized = ret;
+		else
+			dmar_table_initialized = 1;
 	}
 
-	return 0;
+	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
 }
 
 static void warn_invalid_dmar(u64 addr, const char *message)
@@ -483,7 +657,7 @@
 		dmi_get_system_info(DMI_PRODUCT_VERSION));
 }
 
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
 {
 	struct acpi_table_dmar *dmar;
 	struct acpi_dmar_header *entry_header;
@@ -537,18 +711,11 @@
 {
 	int ret;
 
+	down_write(&dmar_global_lock);
 	ret = dmar_table_detect();
 	if (ret)
 		ret = check_zero_address();
 	{
-		struct acpi_table_dmar *dmar;
-
-		dmar = (struct acpi_table_dmar *) dmar_tbl;
-
-		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
-		    dmar->flags & 0x1)
-			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
 		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
 			iommu_detected = 1;
 			/* Make sure ACS will be enabled */
@@ -560,8 +727,9 @@
 			x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 	}
-	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
+	up_write(&dmar_global_lock);
 
 	return ret ? 1 : -ENODEV;
 }
@@ -641,7 +809,7 @@
 	return err;
 }
 
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 	struct intel_iommu *iommu;
 	u32 ver, sts;
@@ -716,12 +884,19 @@
 	return err;
 }
 
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
 {
-	if (!iommu)
-		return;
+	if (iommu->irq) {
+		free_irq(iommu->irq, iommu);
+		irq_set_handler_data(iommu->irq, NULL);
+		destroy_irq(iommu->irq);
+	}
 
-	free_dmar_iommu(iommu);
+	if (iommu->qi) {
+		free_page((unsigned long)iommu->qi->desc);
+		kfree(iommu->qi->desc_status);
+		kfree(iommu->qi);
+	}
 
 	if (iommu->reg)
 		unmap_iommu(iommu);
@@ -967,7 +1142,7 @@
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts =  readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
 		goto end;
 
@@ -1044,7 +1219,7 @@
 	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
 	if (!desc_page) {
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
@@ -1054,13 +1229,10 @@
 	if (!qi->desc_status) {
 		free_page((unsigned long) qi->desc);
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
-	qi->free_head = qi->free_tail = 0;
-	qi->free_cnt = QI_LENGTH;
-
 	raw_spin_lock_init(&qi->q_lock);
 
 	__dmar_enable_qi(iommu);
@@ -1105,9 +1277,7 @@
 	"Blocked an interrupt request due to source-id verification failure",
 };
 
-#define MAX_FAULT_REASON_IDX 	(ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 {
 	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
 					ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1272,7 +1442,7 @@
 		return 0;
 
 	irq = create_irq();
-	if (!irq) {
+	if (irq <= 0) {
 		pr_err("IOMMU: no free vectors\n");
 		return -EINVAL;
 	}
@@ -1297,15 +1467,14 @@
 int __init enable_drhd_fault_handling(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	/*
 	 * Enable fault control interrupt.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
+	for_each_iommu(iommu, drhd) {
 		u32 fault_status;
-		ret = dmar_set_interrupt(iommu);
+		int ret = dmar_set_interrupt(iommu);
 
 		if (ret) {
 			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1360,4 +1529,27 @@
 		return 0;
 	return dmar->flags & 0x1;
 }
+
+static int __init dmar_free_unused_resources(void)
+{
+	struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+	/* DMAR units are in use */
+	if (irq_remapping_enabled || intel_iommu_enabled)
+		return 0;
+
+	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
+		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+
+	down_write(&dmar_global_lock);
+	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+		list_del(&dmaru->list);
+		dmar_free_drhd(dmaru);
+	}
+	up_write(&dmar_global_lock);
+
+	return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
 IOMMU_INIT_POST(detect_intel_iommu);
diff -ur a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
--- a/drivers/iommu/intel-iommu.c	2017-03-23 14:38:35.000000000 +0100
+++ b/drivers/iommu/intel-iommu.c	2017-03-14 02:13:18.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
  
 #include <linux/init.h>
 #include <linux/bitmap.h>
@@ -11,6 +14,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/memory.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
@@ -20,6 +24,7 @@
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
 #include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -33,6 +38,19 @@
 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
 
+#ifdef CONFIG_SYNO_SKIP_IOMMU
+typedef struct _tag_SYNO_PCIE_DEV_SKIP_DMAR {
+	const char *szName;     
+	const unsigned short iVendor;  
+	const unsigned short iDevice;  
+} SYNO_PCIE_DEV_SKIP_DMAR;
+
+SYNO_PCIE_DEV_SKIP_DMAR gSkipDmarDev[] = {
+        {"Tehuti 10G SFP+", 0x1fc9, 0x4022},
+        {NULL, 0, 0}
+};
+#endif  
+
 #define IOAPIC_RANGE_START	(0xfee00000)
 #define IOAPIC_RANGE_END	(0xfeefffff)
 #define IOVA_START_ADDR		(0x1000)
@@ -40,6 +58,7 @@
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
 #define MAX_AGAW_WIDTH 64
+#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
 
 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -64,12 +83,12 @@
 
 static inline int agaw_to_width(int agaw)
 {
-	return 30 + agaw * LEVEL_STRIDE;
+	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
 }
 
 static inline int width_to_agaw(int width)
 {
-	return (width - 30) / LEVEL_STRIDE;
+	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
 }
 
 static inline unsigned int level_to_offset_bits(int level)
@@ -99,7 +118,7 @@
 
 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 {
-	return  1 << ((lvl - 1) * LEVEL_STRIDE);
+	return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
 }
 
 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
@@ -213,26 +232,6 @@
 	pte->val = 0;
 }
 
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
-	pte->val = (pte->val & ~3) | (prot & 3);
-}
-
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
@@ -243,11 +242,6 @@
 #endif
 }
 
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
-	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
 static inline bool dma_pte_present(struct dma_pte *pte)
 {
 	return (pte->val & 3) != 0;
@@ -255,7 +249,7 @@
 
 static inline bool dma_pte_superpage(struct dma_pte *pte)
 {
-	return (pte->val & (1 << 7));
+	return (pte->val & DMA_PTE_LARGE_PAGE);
 }
 
 static inline int first_pte_in_page(struct dma_pte *pte)
@@ -304,23 +298,46 @@
 struct device_domain_info {
 	struct list_head link;	 
 	struct list_head global;  
-	int segment;		 
 	u8 bus;			 
 	u8 devfn;		 
-	struct pci_dev *dev;  
+	struct device *dev;  
 	struct intel_iommu *iommu;  
 	struct dmar_domain *domain;  
 };
 
+struct dmar_rmrr_unit {
+	struct list_head list;		 
+	struct acpi_dmar_header *hdr;	 
+	u64	base_address;		 
+	u64	end_address;		 
+	struct dmar_dev_scope *devices;	 
+	int	devices_cnt;		 
+};
+
+struct dmar_atsr_unit {
+	struct list_head list;		 
+	struct acpi_dmar_header *hdr;	 
+	struct dmar_dev_scope *devices;	 
+	int devices_cnt;		 
+	u8 include_all:1;		 
+};
+
+static LIST_HEAD(dmar_atsr_units);
+static LIST_HEAD(dmar_rmrr_units);
+
+#define for_each_rmrr_units(rmrr) \
+	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
 static void flush_unmaps_timeout(unsigned long data);
 
-DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
+static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
 
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
 	int next;
 	struct iova *iova[HIGH_WATER_MARK];
 	struct dmar_domain *domain[HIGH_WATER_MARK];
+	struct page *freelist[HIGH_WATER_MARK];
 };
 
 static struct deferred_flush_tables *deferred_flush;
@@ -333,7 +350,12 @@
 static int timer_on;
 static long list_size;
 
+static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+				       struct device *dev);
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+					   struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -486,18 +508,30 @@
 
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
-	int i;
-
-	i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	int i, found = 0;
 
-	domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
+	domain->iommu_coherency = 1;
 
 	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+		found = 1;
 		if (!ecap_coherent(g_iommus[i]->ecap)) {
 			domain->iommu_coherency = 0;
 			break;
 		}
 	}
+	if (found)
+		return;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!ecap_coherent(iommu->ecap)) {
+			domain->iommu_coherency = 0;
+			break;
+		}
+	}
+	rcu_read_unlock();
 }
 
 static void domain_update_iommu_snooping(struct dmar_domain *domain)
@@ -525,12 +559,15 @@
 		return;
 	}
 
+	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd) {
 		mask &= cap_super_page_val(iommu->cap);
 		if (!mask) {
 			break;
 		}
 	}
+	rcu_read_unlock();
+
 	domain->iommu_superpage = fls(mask);
 }
 
@@ -541,34 +578,68 @@
 	domain_update_iommu_superpage(domain);
 }
 
-static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
+static int iommu_dummy(struct device *dev)
+{
+	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
+static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
+	struct intel_iommu *iommu;
+	struct device *tmp;
+	struct pci_dev *ptmp, *pdev = NULL;
+	u16 segment;
 	int i;
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-		if (segment != drhd->segment)
+	if (iommu_dummy(dev))
+		return NULL;
+
+	if (dev_is_pci(dev)) {
+		pdev = to_pci_dev(dev);
+		segment = pci_domain_nr(pdev->bus);
+#ifdef MY_ABC_HERE
+	}
+#else
+	} else if (ACPI_COMPANION(dev))
+		dev = &ACPI_COMPANION(dev)->dev;
+#endif /* MY_ABC_HERE */
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (pdev && segment != drhd->segment)
 			continue;
 
-		for (i = 0; i < drhd->devices_cnt; i++) {
-			if (drhd->devices[i] &&
-			    drhd->devices[i]->bus->number == bus &&
-			    drhd->devices[i]->devfn == devfn)
-				return drhd->iommu;
-			if (drhd->devices[i] &&
-			    drhd->devices[i]->subordinate &&
-			    drhd->devices[i]->subordinate->number <= bus &&
-			    drhd->devices[i]->subordinate->busn_res.end >= bus)
-				return drhd->iommu;
+		for_each_active_dev_scope(drhd->devices,
+					  drhd->devices_cnt, i, tmp) {
+			if (tmp == dev) {
+				*bus = drhd->devices[i].bus;
+				*devfn = drhd->devices[i].devfn;
+				goto out;
+			}
+
+			if (!pdev || !dev_is_pci(tmp))
+				continue;
+
+			ptmp = to_pci_dev(tmp);
+			if (ptmp->subordinate &&
+			    ptmp->subordinate->number <= pdev->bus->number &&
+			    ptmp->subordinate->busn_res.end >= pdev->bus->number)
+				goto got_pdev;
 		}
 
-		if (drhd->include_all)
-			return drhd->iommu;
+		if (pdev && drhd->include_all) {
+		got_pdev:
+			*bus = pdev->bus->number;
+			*devfn = pdev->devfn;
+			goto out;
+		}
 	}
+	iommu = NULL;
+ out:
+	rcu_read_unlock();
 
-	return NULL;
+	return iommu;
 }
 
 static void domain_flush_cache(struct dmar_domain *domain,
@@ -667,7 +738,7 @@
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn, int target_level)
+				      unsigned long pfn, int *target_level)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
@@ -682,14 +753,14 @@
 
 	parent = domain->pgd;
 
-	while (level > 0) {
+	while (1) {
 		void *tmp_page;
 
 		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
-		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
+		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
 			break;
-		if (level == target_level)
+		if (level == *target_level)
 			break;
 
 		if (!dma_pte_present(pte)) {
@@ -702,18 +773,22 @@
 
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
-			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+			if (cmpxchg64(&pte->val, 0ULL, pteval))
 				 
 				free_pgtable_page(tmp_page);
-			} else {
-				dma_pte_addr(pte);
+			else
 				domain_flush_cache(domain, pte, sizeof(*pte));
-			}
 		}
+		if (level == 1)
+			break;
+
 		parent = phys_to_virt(dma_pte_addr(pte));
 		level--;
 	}
 
+	if (!*target_level)
+		*target_level = level;
+
 	return pte;
 }
 
@@ -737,7 +812,7 @@
 			break;
 		}
 
-		if (pte->val & DMA_PTE_LARGE_PAGE) {
+		if (dma_pte_superpage(pte)) {
 			*large_page = total;
 			return pte;
 		}
@@ -748,14 +823,13 @@
 	return NULL;
 }
 
-static int dma_pte_clear_range(struct dmar_domain *domain,
+static void dma_pte_clear_range(struct dmar_domain *domain,
 				unsigned long start_pfn,
 				unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
-	int order;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -778,9 +852,6 @@
 				   (void *)pte - (void *)first_pte);
 
 	} while (start_pfn && start_pfn <= last_pfn);
-
-	order = (large_page - 1) * 9;
-	return order;
 }
 
 static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -834,6 +905,112 @@
 	}
 }
 
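+/*
+ * Collect the page-table pages below @pte onto @freelist (chained through
+ * page->freelist) so they can be freed after the IOTLB has been flushed.
+ */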
+static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
+					    int level, struct dma_pte *pte,
+					    struct page *freelist)
+{
+	struct page *pg;
+
+	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
+	pg->freelist = freelist;
+	freelist = pg;
+
+	if (level == 1)
+		return freelist;
+
+	pte = page_address(pg);
+	do {
+		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
+			freelist = dma_pte_list_pagetables(domain, level - 1,
+							   pte, freelist);
+		pte++;
+	} while (!first_pte_in_page(pte));
+
+	return freelist;
+}
+
+static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
+					struct dma_pte *pte, unsigned long pfn,
+					unsigned long start_pfn,
+					unsigned long last_pfn,
+					struct page *freelist)
+{
+	struct dma_pte *first_pte = NULL, *last_pte = NULL;
+
+	pfn = max(start_pfn, pfn);
+	pte = &pte[pfn_level_offset(pfn, level)];
+
+	do {
+		unsigned long level_pfn;
+
+		if (!dma_pte_present(pte))
+			goto next;
+
+		level_pfn = pfn & level_mask(level);
+
+		if (start_pfn <= level_pfn &&
+		    last_pfn >= level_pfn + level_size(level) - 1) {
+			 
+			if (level > 1 && !dma_pte_superpage(pte))
+				freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
+
+			dma_clear_pte(pte);
+			if (!first_pte)
+				first_pte = pte;
+			last_pte = pte;
+		} else if (level > 1) {
+			 
+			freelist = dma_pte_clear_level(domain, level - 1,
+						       phys_to_virt(dma_pte_addr(pte)),
+						       level_pfn, start_pfn, last_pfn,
+						       freelist);
+		}
+next:
+		pfn += level_size(level);
+	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
+
+	if (first_pte)
+		domain_flush_cache(domain, first_pte,
+				   (void *)++last_pte - (void *)first_pte);
+
+	return freelist;
+}
+
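+/*
+ * Clear the page tables covering [start_pfn, last_pfn] and return the list
+ * of page-table pages to free once the caller has flushed the IOTLB.
+ */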
+struct page *domain_unmap(struct dmar_domain *domain,
+			  unsigned long start_pfn,
+			  unsigned long last_pfn)
+{
+	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+	struct page *freelist = NULL;
+
+	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
+
+	freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
+				       domain->pgd, 0, start_pfn, last_pfn, NULL);
+
+	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
+		struct page *pgd_page = virt_to_page(domain->pgd);
+		pgd_page->freelist = freelist;
+		freelist = pgd_page;
+
+		domain->pgd = NULL;
+	}
+
+	return freelist;
+}
+
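+/* Free a list of page-table pages built up by domain_unmap() */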
+void dma_free_pagelist(struct page *freelist)
+{
+	struct page *pg;
+
+	while ((pg = freelist)) {
+		freelist = pg->freelist;
+		free_pgtable_page(page_address(pg));
+	}
+}
+
 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
 	struct root_entry *root;
@@ -971,13 +1148,14 @@
 			(unsigned long long)DMA_TLB_IAIG(val));
 }
 
-static struct device_domain_info *iommu_support_dev_iotlb(
-	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
+static struct device_domain_info *
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+			 u8 bus, u8 devfn)
 {
 	int found = 0;
 	unsigned long flags;
 	struct device_domain_info *info;
-	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
+	struct pci_dev *pdev;
 
 	if (!ecap_dev_iotlb_support(iommu->ecap))
 		return NULL;
@@ -993,34 +1171,35 @@
 		}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
-	if (!found || !info->dev)
+	if (!found || !info->dev || !dev_is_pci(info->dev))
 		return NULL;
 
-	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
-		return NULL;
+	pdev = to_pci_dev(info->dev);
 
-	if (!dmar_find_matched_atsr_unit(info->dev))
+	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
 		return NULL;
 
-	info->iommu = iommu;
+	if (!dmar_find_matched_atsr_unit(pdev))
+		return NULL;
 
 	return info;
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info)
+	if (!info || !dev_is_pci(info->dev))
 		return;
 
-	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
 }
 
 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info->dev || !pci_ats_enabled(info->dev))
+	if (!info->dev || !dev_is_pci(info->dev) ||
+	    !pci_ats_enabled(to_pci_dev(info->dev)))
 		return;
 
-	pci_disable_ats(info->dev);
+	pci_disable_ats(to_pci_dev(info->dev));
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1032,29 +1211,37 @@
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->dev || !pci_ats_enabled(info->dev))
+		struct pci_dev *pdev;
+		if (!info->dev || !dev_is_pci(info->dev))
+			continue;
+
+		pdev = to_pci_dev(info->dev);
+		if (!pci_ats_enabled(pdev))
 			continue;
 
 		sid = info->bus << 8 | info->devfn;
-		qdep = pci_ats_queue_depth(info->dev);
+		qdep = pci_ats_queue_depth(pdev);
 		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages, int map)
+				  unsigned long pfn, unsigned int pages, int ih, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
 
 	BUG_ON(pages == 0);
 
+	if (ih)
+		ih = 1 << 6;
+	/* fall back to domain-selective flush if PSI can't cover the range */
 	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
 		iommu->flush.flush_iotlb(iommu, did, 0, 0,
 						DMA_TLB_DSI_FLUSH);
 	else
-		iommu->flush.flush_iotlb(iommu, did, addr, mask,
+		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
 						DMA_TLB_PSI_FLUSH);
 
 	if (!cap_caching_mode(iommu->cap) || !map)
@@ -1115,21 +1302,25 @@
 	unsigned long nlongs;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
-			ndomains);
+	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
+		 iommu->seq_id, ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
 
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
 	if (!iommu->domain_ids) {
-		printk(KERN_ERR "Allocating domain id array failed\n");
+		pr_err("IOMMU%d: allocating domain id array failed\n",
+		       iommu->seq_id);
 		return -ENOMEM;
 	}
 	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
 			GFP_KERNEL);
 	if (!iommu->domains) {
-		printk(KERN_ERR "Allocating domain array failed\n");
+		pr_err("IOMMU%d: allocating domain array failed\n",
+		       iommu->seq_id);
+		kfree(iommu->domain_ids);
+		iommu->domain_ids = NULL;
 		return -ENOMEM;
 	}
 
@@ -1138,59 +1329,46 @@
 	return 0;
 }
 
-static void domain_exit(struct dmar_domain *domain);
-static void vm_domain_exit(struct dmar_domain *domain);
-
-void free_dmar_iommu(struct intel_iommu *iommu)
+static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
-	int i;
+	int i, count;
 	unsigned long flags;
 
 	if ((iommu->domains) && (iommu->domain_ids)) {
 		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
+			/* domain id 0 is reserved if caching mode is set */
+			if (cap_caching_mode(iommu->cap) && i == 0)
+				continue;
+
 			domain = iommu->domains[i];
 			clear_bit(i, iommu->domain_ids);
 
 			spin_lock_irqsave(&domain->iommu_lock, flags);
-			if (--domain->iommu_count == 0) {
-				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-					vm_domain_exit(domain);
-				else
-					domain_exit(domain);
-			}
+			count = --domain->iommu_count;
 			spin_unlock_irqrestore(&domain->iommu_lock, flags);
+			if (count == 0)
+				domain_exit(domain);
 		}
 	}
 
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
 
-	if (iommu->irq) {
-		irq_set_handler_data(iommu->irq, NULL);
-		 
-		free_irq(iommu->irq, iommu);
-		destroy_irq(iommu->irq);
-	}
-
 	kfree(iommu->domains);
 	kfree(iommu->domain_ids);
+	iommu->domains = NULL;
+	iommu->domain_ids = NULL;
 
 	g_iommus[iommu->seq_id] = NULL;
 
-	for (i = 0; i < g_num_of_iommus; i++) {
-		if (g_iommus[i])
-			break;
-	}
-
-	if (i == g_num_of_iommus)
-		kfree(g_iommus);
-
 	free_context_table(iommu);
 }
 
-static struct dmar_domain *alloc_domain(void)
+static struct dmar_domain *alloc_domain(bool vm)
 {
+	/* domain ids for VM domains; never programmed into context entries */
+	static atomic_t vm_domid = ATOMIC_INIT(0);
 	struct dmar_domain *domain;
 
 	domain = alloc_domain_mem();
@@ -1198,8 +1376,15 @@
 		return NULL;
 
 	domain->nid = -1;
+	domain->iommu_count = 0;
 	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
 	domain->flags = 0;
+	spin_lock_init(&domain->iommu_lock);
+	INIT_LIST_HEAD(&domain->devices);
+	if (vm) {
+		domain->id = atomic_inc_return(&vm_domid);
+		domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+	}
 
 	return domain;
 }
@@ -1223,6 +1408,7 @@
 	}
 
 	domain->id = num;
+	domain->iommu_count++;
 	set_bit(num, iommu->domain_ids);
 	set_bit(iommu->seq_id, domain->iommu_bmp);
 	iommu->domains[num] = domain;
@@ -1236,22 +1422,16 @@
 {
 	unsigned long flags;
 	int num, ndomains;
-	int found = 0;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ndomains = cap_ndoms(iommu->cap);
 	for_each_set_bit(num, iommu->domain_ids, ndomains) {
 		if (iommu->domains[num] == domain) {
-			found = 1;
+			clear_bit(num, iommu->domain_ids);
+			iommu->domains[num] = NULL;
 			break;
 		}
 	}
-
-	if (found) {
-		clear_bit(num, iommu->domain_ids);
-		clear_bit(iommu->seq_id, domain->iommu_bmp);
-		iommu->domains[num] = NULL;
-	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -1321,8 +1501,6 @@
 	unsigned long sagaw;
 
 	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-	spin_lock_init(&domain->iommu_lock);
-
 	domain_reserve_special_ranges(domain);
 
 	iommu = domain_get_iommu(domain);
@@ -1340,7 +1518,6 @@
 			return -ENODEV;
 	}
 	domain->agaw = agaw;
-	INIT_LIST_HEAD(&domain->devices);
 
 	if (ecap_coherent(iommu->ecap))
 		domain->iommu_coherency = 1;
@@ -1352,8 +1529,11 @@
 	else
 		domain->iommu_snooping = 0;
 
-	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
-	domain->iommu_count = 1;
+	if (intel_iommu_superpage)
+		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+	else
+		domain->iommu_superpage = 0;
+
 	domain->nid = iommu->node;
 
 	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
@@ -1367,6 +1547,7 @@
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
+	struct page *freelist = NULL;
 
 	if (!domain)
 		return;
@@ -1375,26 +1556,29 @@
 		flush_unmaps_timeout(0);
 
 	domain_remove_dev_info(domain);
-	 
-	put_iova_domain(&domain->iovad);
 
-	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+	put_iova_domain(&domain->iovad);
 
-	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
+	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd)
-		if (test_bit(iommu->seq_id, domain->iommu_bmp))
+		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+		    test_bit(iommu->seq_id, domain->iommu_bmp))
 			iommu_detach_domain(domain, iommu);
+	rcu_read_unlock();
+
+	dma_free_pagelist(freelist);
 
 	free_domain_mem(domain);
 }
 
-static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
-				 u8 bus, u8 devfn, int translation)
+static int domain_context_mapping_one(struct dmar_domain *domain,
+				      struct intel_iommu *iommu,
+				      u8 bus, u8 devfn, int translation)
 {
 	struct context_entry *context;
 	unsigned long flags;
-	struct intel_iommu *iommu;
 	struct dma_pte *pgd;
 	unsigned long num;
 	unsigned long ndomains;
@@ -1409,10 +1593,6 @@
 	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
 	       translation != CONTEXT_TT_MULTI_LEVEL);
 
-	iommu = device_to_iommu(segment, bus, devfn);
-	if (!iommu)
-		return -ENODEV;
-
 	context = device_to_context_entry(iommu, bus, devfn);
 	if (!context)
 		return -ENOMEM;
@@ -1465,7 +1645,7 @@
 	context_set_domain_id(context, id);
 
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
-		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+		info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
 		translation = info ? CONTEXT_TT_DEV_IOTLB :
 				     CONTEXT_TT_MULTI_LEVEL;
 	}
@@ -1516,28 +1696,33 @@
 {
 	struct domain_context_mapping_data *data = opaque;
 
-	return domain_context_mapping_one(data->domain, pci_domain_nr(pdev->bus),
+	return domain_context_mapping_one(data->domain, data->iommu,
 					  PCI_BUS_NUM(alias), alias & 0xff,
 					  data->translation);
 }
 
 static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
-			int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev,
+		       int translation)
 {
 	struct intel_iommu *iommu;
+	u8 bus, devfn;
 	struct domain_context_mapping_data data;
 
-	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-				pdev->devfn);
+	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
 
+	if (!dev_is_pci(dev))
+		return domain_context_mapping_one(domain, iommu, bus, devfn,
+						  translation);
+
 	data.domain = domain;
 	data.iommu = iommu;
 	data.translation = translation;
 
-	return pci_for_each_dma_alias(pdev, &domain_context_mapping_cb, &data);
+	return pci_for_each_dma_alias(to_pci_dev(dev),
+				      &domain_context_mapping_cb, &data);
 }
 
 static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -1548,16 +1733,20 @@
 	return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
 }
 
-static int domain_context_mapped(struct pci_dev *pdev)
+static int domain_context_mapped(struct device *dev)
 {
 	struct intel_iommu *iommu;
+	u8 bus, devfn;
 
-	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-				pdev->devfn);
+	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
 
-	return !pci_for_each_dma_alias(pdev, domain_context_mapped_cb, iommu);
+	if (!dev_is_pci(dev))
+		return device_context_mapped(iommu, bus, devfn);
+
+	return !pci_for_each_dma_alias(to_pci_dev(dev),
+				       domain_context_mapped_cb, iommu);
 }
 
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
@@ -1627,7 +1816,7 @@
 		if (!pte) {
 			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
 
-			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
+			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
 			if (!pte)
 				return -ENOMEM;
 			 
@@ -1712,38 +1901,45 @@
 	list_del(&info->link);
 	list_del(&info->global);
 	if (info->dev)
-		info->dev->dev.archdata.iommu = NULL;
+		info->dev->archdata.iommu = NULL;
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
 {
-	struct device_domain_info *info;
-	unsigned long flags;
-	struct intel_iommu *iommu;
+	struct device_domain_info *info, *tmp;
+	unsigned long flags, flags2;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	while (!list_empty(&domain->devices)) {
-		info = list_entry(domain->devices.next,
-			struct device_domain_info, link);
+	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
 		unlink_domain_info(info);
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
 		iommu_disable_dev_iotlb(info);
-		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
-		iommu_detach_dev(iommu, info->bus, info->devfn);
-		free_devinfo_mem(info);
+		iommu_detach_dev(info->iommu, info->bus, info->devfn);
+
+		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+			iommu_detach_dependent_devices(info->iommu, info->dev);
+			/* drop this iommu from iommu_bmp; update count and caps */
+			spin_lock_irqsave(&domain->iommu_lock, flags2);
+			if (test_and_clear_bit(info->iommu->seq_id,
+					       domain->iommu_bmp)) {
+				domain->iommu_count--;
+				domain_update_iommu_cap(domain);
+			}
+			spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+		}
 
+		free_devinfo_mem(info);
 		spin_lock_irqsave(&device_domain_lock, flags);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static struct dmar_domain *
-find_domain(struct pci_dev *pdev)
+static struct dmar_domain *find_domain(struct device *dev)
 {
 	struct device_domain_info *info;
 
-	info = pdev->dev.archdata.iommu;
+	info = dev->archdata.iommu;
 	if (info)
 		return info->domain;
 	return NULL;
@@ -1764,7 +1960,7 @@
 
 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
 						int bus, int devfn,
-						struct pci_dev *pdev,
+						struct device *dev,
 						struct dmar_domain *domain)
 {
 	struct dmar_domain *found = NULL;
@@ -1777,16 +1973,15 @@
 
 	info->bus = bus;
 	info->devfn = devfn;
-	info->dev = pdev;
-	info->segment = iommu->segment;
+	info->dev = dev;
 	info->domain = domain;
 	info->iommu = iommu;
-	if (!pdev)
+	if (!dev)
 		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	if (pdev)
-		found = find_domain(pdev);
+	if (dev)
+		found = find_domain(dev);
 	else {
 		struct device_domain_info *info2;
 		info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
@@ -1802,8 +1997,8 @@
 
 	list_add(&info->link, &domain->devices);
 	list_add(&info->global, &device_domain_list);
-	if (pdev)
-		pdev->dev.archdata.iommu = info;
+	if (dev)
+		dev->archdata.iommu = info;
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	return domain;
@@ -1815,44 +2010,46 @@
 	return 0;
 }
 
-static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
+static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 {
 	struct dmar_domain *domain, *tmp;
 	struct intel_iommu *iommu;
 	struct device_domain_info *info;
 	u16 dma_alias;
 	unsigned long flags;
+	u8 bus, devfn;
 
-	domain = find_domain(pdev);
+	domain = find_domain(dev);
 	if (domain)
 		return domain;
 
-	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-				pdev->devfn);
+	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return NULL;
 
-	pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
 
-	spin_lock_irqsave(&device_domain_lock, flags);
-	info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
-						    PCI_BUS_NUM(dma_alias),
-						    dma_alias & 0xff);
-	if (info) {
-		iommu = info->iommu;
-		domain = info->domain;
-	}
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+		pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
 
-	if (info)
-		goto found_domain;
+		spin_lock_irqsave(&device_domain_lock, flags);
+		info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
+						      PCI_BUS_NUM(dma_alias),
+						      dma_alias & 0xff);
+		if (info) {
+			iommu = info->iommu;
+			domain = info->domain;
+		}
+		spin_unlock_irqrestore(&device_domain_lock, flags);
+
+		if (info)
+			goto found_domain;
+	}
 
-	domain = alloc_domain();
+	domain = alloc_domain(false);
 	if (!domain)
 		return NULL;
 
-	domain->flags = false;
-
 	if (iommu_attach_domain(domain, iommu)) {
 		free_domain_mem(domain);
 		return NULL;
@@ -1863,19 +2060,21 @@
 		return NULL;
 	}
 
-	tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-				   dma_alias & 0xff, NULL, domain);
+	if (dev_is_pci(dev)) {
+		tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+					   dma_alias & 0xff, NULL, domain);
 
-	if (!tmp || tmp != domain) {
-		domain_exit(domain);
-		domain = tmp;
-	}
+		if (!tmp || tmp != domain) {
+			domain_exit(domain);
+			domain = tmp;
+		}
 
-	if (!domain)
-		return NULL;
+		if (!domain)
+			return NULL;
+	}
 
 found_domain:
-	tmp = dmar_insert_dev_info(iommu, pdev->bus->number, pdev->devfn, pdev, domain);
+	tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
 
 	if (!tmp || tmp != domain) {
 		domain_exit(domain);
@@ -1889,6 +2088,9 @@
 #define IDENTMAP_ALL		1
 #define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4
+#ifdef CONFIG_SYNO_SKIP_IOMMU
+#define IDENTMAP_SYNO_SKIP	8
+#endif /* CONFIG_SYNO_SKIP_IOMMU */
 
 static int iommu_domain_identity_map(struct dmar_domain *domain,
 				     unsigned long long start,
@@ -1913,26 +2115,26 @@
 				  DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
-static int iommu_prepare_identity_map(struct pci_dev *pdev,
+static int iommu_prepare_identity_map(struct device *dev,
 				      unsigned long long start,
 				      unsigned long long end)
 {
 	struct dmar_domain *domain;
 	int ret;
 
-	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 	if (!domain)
 		return -ENOMEM;
 
 	if (domain == si_domain && hw_pass_through) {
 		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
-		       pci_name(pdev), start, end);
+		       dev_name(dev), start, end);
 		return 0;
 	}
 
 	printk(KERN_INFO
 	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-	       pci_name(pdev), start, end);
+	       dev_name(dev), start, end);
 	
 	if (end < start) {
 		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
@@ -1959,7 +2161,7 @@
 	if (ret)
 		goto error;
 
-	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
 	if (ret)
 		goto error;
 
@@ -1971,12 +2173,12 @@
 }
 
 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
-	struct pci_dev *pdev)
+					 struct device *dev)
 {
-	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+	if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
 		return 0;
-	return iommu_prepare_identity_map(pdev, rmrr->base_address,
-		rmrr->end_address);
+	return iommu_prepare_identity_map(dev, rmrr->base_address,
+					  rmrr->end_address);
 }
 
 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
@@ -1990,12 +2192,13 @@
 		return;
 
 	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
-	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
+	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
 
 	if (ret)
 		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
 		       "floppy might not work\n");
 
+	pci_dev_put(pdev);
 }
 #else
 static inline void iommu_prepare_isa(void)
@@ -2012,11 +2215,11 @@
 	struct intel_iommu *iommu;
 	int nid, ret = 0;
 
-	si_domain = alloc_domain();
+	si_domain = alloc_domain(false);
 	if (!si_domain)
 		return -EFAULT;
 
-	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
+	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
 
 	for_each_active_iommu(iommu, drhd) {
 		ret = iommu_attach_domain(si_domain, iommu);
@@ -2031,7 +2234,8 @@
 		return -EFAULT;
 	}
 
-	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+	pr_debug("IOMMU: identity mapping domain is domain %d\n",
+		 si_domain->id);
 
 	if (hw)
 		return 0;
@@ -2051,16 +2255,14 @@
 	return 0;
 }
 
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-					  struct pci_dev *pdev);
-static int identity_mapping(struct pci_dev *pdev)
+static int identity_mapping(struct device *dev)
 {
 	struct device_domain_info *info;
 
 	if (likely(!iommu_identity_mapping))
 		return 0;
 
-	info = pdev->dev.archdata.iommu;
+	info = dev->archdata.iommu;
 	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
 		return (info->domain == si_domain);
 
@@ -2068,118 +2270,166 @@
 }
 
 static int domain_add_dev_info(struct dmar_domain *domain,
-			       struct pci_dev *pdev,
-			       int translation)
+			       struct device *dev, int translation)
 {
-	struct device_domain_info *info;
-	unsigned long flags;
+	struct dmar_domain *ndomain;
+	struct intel_iommu *iommu;
+	u8 bus, devfn;
 	int ret;
 
-	info = alloc_devinfo_mem();
-	if (!info)
-		return -ENOMEM;
-
-	info->segment = pci_domain_nr(pdev->bus);
-	info->bus = pdev->bus->number;
-	info->devfn = pdev->devfn;
-	info->dev = pdev;
-	info->domain = domain;
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu)
+		return -ENODEV;
 
-	spin_lock_irqsave(&device_domain_lock, flags);
-	list_add(&info->link, &domain->devices);
-	list_add(&info->global, &device_domain_list);
-	pdev->dev.archdata.iommu = info;
-	spin_unlock_irqrestore(&device_domain_lock, flags);
+	ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+	if (ndomain != domain)
+		return -EBUSY;
 
-	ret = domain_context_mapping(domain, pdev, translation);
+	ret = domain_context_mapping(domain, dev, translation);
 	if (ret) {
-		spin_lock_irqsave(&device_domain_lock, flags);
-		unlink_domain_info(info);
-		spin_unlock_irqrestore(&device_domain_lock, flags);
-		free_devinfo_mem(info);
+		domain_remove_one_dev_info(domain, dev);
 		return ret;
 	}
 
 	return 0;
 }
 
-static bool device_has_rmrr(struct pci_dev *dev)
+static bool device_has_rmrr(struct device *dev)
 {
 	struct dmar_rmrr_unit *rmrr;
+	struct device *tmp;
 	int i;
 
+	rcu_read_lock();
 	for_each_rmrr_units(rmrr) {
-		for (i = 0; i < rmrr->devices_cnt; i++) {
-			 
-			if (rmrr->devices[i] == dev)
+		/* return true if this RMRR's device scope contains @dev */
+		for_each_active_dev_scope(rmrr->devices,
+					  rmrr->devices_cnt, i, tmp)
+			if (tmp == dev) {
+				rcu_read_unlock();
 				return true;
-		}
+			}
 	}
+	rcu_read_unlock();
 	return false;
 }
 
-static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+static int iommu_should_identity_map(struct device *dev, int startup)
 {
+#ifdef CONFIG_SYNO_SKIP_IOMMU
+	int i = 0;
+#endif /* CONFIG_SYNO_SKIP_IOMMU */
 
-	if (device_has_rmrr(pdev) &&
-	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
-		return 0;
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
 
-	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
-		return 1;
+		if (device_has_rmrr(dev) &&
+		    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
+			return 0;
 
-	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
-		return 1;
+		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+			return 1;
 
-	if (!(iommu_identity_mapping & IDENTMAP_ALL))
-		return 0;
+		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+			return 1;
+
+#ifdef CONFIG_SYNO_SKIP_IOMMU
+		if (iommu_identity_mapping & IDENTMAP_SYNO_SKIP) {
+			for (i = 0; gSkipDmarDev[i].szName != NULL; i++) {
+				if (gSkipDmarDev[i].iVendor == pdev->vendor &&
+				    gSkipDmarDev[i].iDevice == pdev->device)
+					return 1;
+			}
+		}
+#endif /* CONFIG_SYNO_SKIP_IOMMU */
 
-	if (!pci_is_pcie(pdev)) {
-		if (!pci_is_root_bus(pdev->bus))
+		if (!(iommu_identity_mapping & IDENTMAP_ALL))
 			return 0;
-		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+
+		if (!pci_is_pcie(pdev)) {
+			if (!pci_is_root_bus(pdev->bus))
+				return 0;
+			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+				return 0;
+		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
 			return 0;
-	} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
-		return 0;
+	} else {
+		if (device_has_rmrr(dev))
+			return 0;
+	}
 
 	if (!startup) {
 		 
-		u64 dma_mask = pdev->dma_mask;
+		u64 dma_mask = *dev->dma_mask;
 
-		if (pdev->dev.coherent_dma_mask &&
-		    pdev->dev.coherent_dma_mask < dma_mask)
-			dma_mask = pdev->dev.coherent_dma_mask;
+		if (dev->coherent_dma_mask &&
+		    dev->coherent_dma_mask < dma_mask)
+			dma_mask = dev->coherent_dma_mask;
 
-		return dma_mask >= dma_get_required_mask(&pdev->dev);
+		return dma_mask >= dma_get_required_mask(dev);
 	}
 
 	return 1;
 }
 
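+/* Put @dev into the static identity (si) domain if it qualifies */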
+static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
+{
+	int ret;
+
+	if (!iommu_should_identity_map(dev, 1))
+		return 0;
+
+	ret = domain_add_dev_info(si_domain, dev,
+				  hw ? CONTEXT_TT_PASS_THROUGH :
+				       CONTEXT_TT_MULTI_LEVEL);
+	if (!ret)
+		pr_info("IOMMU: %s identity mapping for device %s\n",
+			hw ? "hardware" : "software", dev_name(dev));
+	else if (ret == -ENODEV)
+		/* device not associated with an iommu */
+		ret = 0;
+
+	return ret;
+}
+
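+/*
+ * Identity-map every eligible PCI device and every ACPI namespace device
+ * listed in the DRHD device scopes.
+ */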
 static int __init iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
-	int ret;
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	struct device *dev;
+	int i;
+	int ret = 0;
 
 	ret = si_domain_init(hw);
 	if (ret)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
-		if (iommu_should_identity_map(pdev, 1)) {
-			ret = domain_add_dev_info(si_domain, pdev,
-					     hw ? CONTEXT_TT_PASS_THROUGH :
-						  CONTEXT_TT_MULTI_LEVEL);
-			if (ret) {
-				 
-				if (ret == -ENODEV)
-					continue;
-				return ret;
+		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
+		if (ret)
+			return ret;
+	}
+
+	for_each_active_iommu(iommu, drhd)
+		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
+			struct acpi_device_physical_node *pn;
+			struct acpi_device *adev;
+
+			if (dev->bus != &acpi_bus_type)
+				continue;
+
+			adev = to_acpi_device(dev);
+			mutex_lock(&adev->physical_node_lock);
+			list_for_each_entry(pn, &adev->physical_node_list, node) {
+				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
+				if (ret)
+					break;
 			}
-			pr_info("IOMMU: %s identity mapping for device %s\n",
-				hw ? "hardware" : "software", pci_name(pdev));
+			mutex_unlock(&adev->physical_node_lock);
+			if (ret)
+				return ret;
 		}
-	}
 
 	return 0;
 }
@@ -2188,7 +2438,7 @@
 {
 	struct dmar_drhd_unit *drhd;
 	struct dmar_rmrr_unit *rmrr;
-	struct pci_dev *pdev;
+	struct device *dev;
 	struct intel_iommu *iommu;
 	int i, ret;
 
@@ -2214,35 +2464,27 @@
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
 		ret = -ENOMEM;
-		goto error;
+		goto free_g_iommus;
 	}
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
+	for_each_active_iommu(iommu, drhd) {
 		g_iommus[iommu->seq_id] = iommu;
 
 		ret = iommu_init_domains(iommu);
 		if (ret)
-			goto error;
+			goto free_iommu;
 
 		ret = iommu_alloc_root_entry(iommu);
 		if (ret) {
 			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
-			goto error;
+			goto free_iommu;
 		}
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 	}
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
+		/* queued invalidation may already be enabled, e.g. by irq remapping */
 		if (iommu->qi)
 			continue;
 
@@ -2251,12 +2493,7 @@
 		dmar_disable_qi(iommu);
 	}
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
 		if (dmar_enable_qi(iommu)) {
 			 
 			iommu->flush.flush_context = __iommu_flush_context;
@@ -2278,12 +2515,15 @@
 	if (iommu_pass_through)
 		iommu_identity_mapping |= IDENTMAP_ALL;
 
-#ifdef CONFIG_SYNO_IOMMU_PASSTHROUGH
+#ifdef MY_ABC_HERE
 	printk(KERN_INFO "IOMMU passthrough mode = %d\n", iommu_pass_through);
 #endif  
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
 	iommu_identity_mapping |= IDENTMAP_GFX;
 #endif
+#ifdef CONFIG_SYNO_SKIP_IOMMU
+	iommu_identity_mapping |= IDENTMAP_SYNO_SKIP;
+#endif /* CONFIG_SYNO_SKIP_IOMMU */
 
 	check_tylersburg_isoch();
 
@@ -2291,18 +2531,16 @@
 		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
 		if (ret) {
 			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
-			goto error;
+			goto free_iommu;
 		}
 	}
 	 
 	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
 	for_each_rmrr_units(rmrr) {
-		for (i = 0; i < rmrr->devices_cnt; i++) {
-			pdev = rmrr->devices[i];
-			 
-			if (!pdev)
-				continue;
-			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+		 
+		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+					  i, dev) {
+			ret = iommu_prepare_rmrr_dev(rmrr, dev);
 			if (ret)
 				printk(KERN_ERR
 				       "IOMMU: mapping reserved region failed\n");
@@ -2311,20 +2549,19 @@
 
 	iommu_prepare_isa();
 
-	for_each_drhd_unit(drhd) {
+	for_each_iommu(iommu, drhd) {
 		if (drhd->ignored) {
 			 
 			if (force_on)
-				iommu_disable_protect_mem_regions(drhd->iommu);
+				iommu_disable_protect_mem_regions(iommu);
 			continue;
 		}
-		iommu = drhd->iommu;
 
 		iommu_flush_write_buffer(iommu);
 
 		ret = dmar_set_interrupt(iommu);
 		if (ret)
-			goto error;
+			goto free_iommu;
 
 		iommu_set_root_entry(iommu);
 
@@ -2333,20 +2570,20 @@
 
 		ret = iommu_enable_translation(iommu);
 		if (ret)
-			goto error;
+			goto free_iommu;
 
 		iommu_disable_protect_mem_regions(iommu);
 	}
 
 	return 0;
-error:
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-		iommu = drhd->iommu;
-		free_iommu(iommu);
-	}
+
+free_iommu:
+	for_each_active_iommu(iommu, drhd)
+		free_dmar_iommu(iommu);
+	kfree(deferred_flush);
+free_g_iommus:
 	kfree(g_iommus);
+error:
 	return ret;
 }
 
@@ -2354,7 +2591,6 @@
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -2369,33 +2605,30 @@
 	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
 	if (unlikely(!iova)) {
 		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
-		       nrpages, pci_name(pdev));
+		       nrpages, dev_name(dev));
 		return NULL;
 	}
 
 	return iova;
 }
 
-static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 {
 	struct dmar_domain *domain;
 	int ret;
 
-	domain = get_domain_for_dev(pdev,
-			DEFAULT_DOMAIN_ADDRESS_WIDTH);
+	domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 	if (!domain) {
-		printk(KERN_ERR
-			"Allocating domain for %s failed", pci_name(pdev));
+		printk(KERN_ERR "Allocating domain for %s failed\n",
+		       dev_name(dev));
 		return NULL;
 	}
 
-	if (unlikely(!domain_context_mapped(pdev))) {
-		ret = domain_context_mapping(domain, pdev,
-					     CONTEXT_TT_MULTI_LEVEL);
+	if (unlikely(!domain_context_mapped(dev))) {
+		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
 		if (ret) {
-			printk(KERN_ERR
-				"Domain context map for %s failed",
-				pci_name(pdev));
+			printk(KERN_ERR "Domain context map for %s failed\n",
+			       dev_name(dev));
 			return NULL;
 		}
 	}
@@ -2403,59 +2636,49 @@
 	return domain;
 }
 
-static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
 {
 	struct device_domain_info *info;
 
-	info = dev->dev.archdata.iommu;
+	info = dev->archdata.iommu;
 	if (likely(info))
 		return info->domain;
 
 	return __get_valid_domain_for_dev(dev);
 }
 
-static int iommu_dummy(struct pci_dev *pdev)
-{
-	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
 static int iommu_no_mapping(struct device *dev)
 {
-	struct pci_dev *pdev;
 	int found;
 
-	if (unlikely(dev->bus != &pci_bus_type))
-		return 1;
-
-	pdev = to_pci_dev(dev);
-	if (iommu_dummy(pdev))
+	if (iommu_dummy(dev))
 		return 1;
 
 	if (!iommu_identity_mapping)
 		return 0;
 
-	found = identity_mapping(pdev);
+	found = identity_mapping(dev);
 	if (found) {
-		if (iommu_should_identity_map(pdev, 0))
+		if (iommu_should_identity_map(dev, 0))
 			return 1;
 		else {
 			 
-			domain_remove_one_dev_info(si_domain, pdev);
+			domain_remove_one_dev_info(si_domain, dev);
 			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
-			       pci_name(pdev));
+			       dev_name(dev));
 			return 0;
 		}
 	} else {
 		 
-		if (iommu_should_identity_map(pdev, 0)) {
+		if (iommu_should_identity_map(dev, 0)) {
 			int ret;
-			ret = domain_add_dev_info(si_domain, pdev,
+			ret = domain_add_dev_info(si_domain, dev,
 						  hw_pass_through ?
 						  CONTEXT_TT_PASS_THROUGH :
 						  CONTEXT_TT_MULTI_LEVEL);
 			if (!ret) {
 				printk(KERN_INFO "64bit %s uses identity mapping\n",
-				       pci_name(pdev));
+				       dev_name(dev));
 				return 1;
 			}
 		}
@@ -2464,10 +2687,9 @@
 	return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 				     size_t size, int dir, u64 dma_mask)
 {
-	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
 	phys_addr_t start_paddr;
 	struct iova *iova;
@@ -2478,17 +2700,17 @@
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(hwdev))
+	if (iommu_no_mapping(dev))
 		return paddr;
 
-	domain = get_valid_domain_for_dev(pdev);
+	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return 0;
 
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
+	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2504,7 +2726,7 @@
 		goto error;
 
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
+		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -2516,7 +2738,7 @@
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
-		pci_name(pdev), size, (unsigned long long)paddr, dir);
+		dev_name(dev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
@@ -2526,7 +2748,7 @@
 				 struct dma_attrs *attrs)
 {
 	return __intel_map_single(dev, page_to_phys(page) + offset, size,
-				  dir, to_pci_dev(dev)->dma_mask);
+				  dir, *dev->dma_mask);
 }
 
 static void flush_unmaps(void)
@@ -2553,13 +2775,16 @@
 
 			if (cap_caching_mode(iommu->cap))
 				iommu_flush_iotlb_psi(iommu, domain->id,
-				iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+					iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
+					!deferred_flush[i].freelist[j], 0);
 			else {
 				mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
 				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
 						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
 			}
 			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
+			if (deferred_flush[i].freelist[j])
+				dma_free_pagelist(deferred_flush[i].freelist[j]);
 		}
 		deferred_flush[i].next = 0;
 	}
@@ -2576,7 +2801,7 @@
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void add_unmap(struct dmar_domain *dom, struct iova *iova)
+static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
 {
 	unsigned long flags;
 	int next, iommu_id;
@@ -2592,6 +2817,7 @@
 	next = deferred_flush[iommu_id].next;
 	deferred_flush[iommu_id].domain[next] = dom;
 	deferred_flush[iommu_id].iova[next] = iova;
+	deferred_flush[iommu_id].freelist[next] = freelist;
 	deferred_flush[iommu_id].next++;
 
 	if (!timer_on) {
@@ -2606,16 +2832,16 @@
 			     size_t size, enum dma_data_direction dir,
 			     struct dma_attrs *attrs)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
 	unsigned long start_pfn, last_pfn;
 	struct iova *iova;
 	struct intel_iommu *iommu;
+	struct page *freelist;
 
 	if (iommu_no_mapping(dev))
 		return;
 
-	domain = find_domain(pdev);
+	domain = find_domain(dev);
 	BUG_ON(!domain);
 
 	iommu = domain_get_iommu(domain);
@@ -2629,82 +2855,97 @@
 	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
 
 	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
-		 pci_name(pdev), start_pfn, last_pfn);
+		 dev_name(dev), start_pfn, last_pfn);
 
-	dma_pte_clear_range(domain, start_pfn, last_pfn);
-
-	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
 	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-				      last_pfn - start_pfn + 1, 0);
+				      last_pfn - start_pfn + 1, !freelist, 0);
 		 
 		__free_iova(&domain->iovad, iova);
+		dma_free_pagelist(freelist);
 	} else {
-		add_unmap(domain, iova);
+		add_unmap(domain, iova, freelist);
 		 
 	}
 }
 
-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
 	int order;
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	if (!iommu_no_mapping(hwdev))
+	if (!iommu_no_mapping(dev))
 		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
-		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
 			flags |= GFP_DMA;
 		else
 			flags |= GFP_DMA32;
 	}
 
-	vaddr = (void *)__get_free_pages(flags, order);
-	if (!vaddr)
+	if (flags & __GFP_WAIT) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
 		return NULL;
-	memset(vaddr, 0, size);
+	memset(page_address(page), 0, size);
 
-	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
 					 DMA_BIDIRECTIONAL,
-					 hwdev->coherent_dma_mask);
+					 dev->coherent_dma_mask);
 	if (*dma_handle)
-		return vaddr;
-	free_pages((unsigned long)vaddr, order);
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
+
 	return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
+	struct page *page = virt_to_page(vaddr);
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
-	free_pages((unsigned long)vaddr, order);
+	intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
 {
-	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
 	unsigned long start_pfn, last_pfn;
 	struct iova *iova;
 	struct intel_iommu *iommu;
+	struct page *freelist;
 
-	if (iommu_no_mapping(hwdev))
+	if (iommu_no_mapping(dev))
 		return;
 
-	domain = find_domain(pdev);
+	domain = find_domain(dev);
 	BUG_ON(!domain);
 
 	iommu = domain_get_iommu(domain);
@@ -2717,17 +2958,16 @@
 	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
 	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
 
-	dma_pte_clear_range(domain, start_pfn, last_pfn);
-
-	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
 	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-				      last_pfn - start_pfn + 1, 0);
+				      last_pfn - start_pfn + 1, !freelist, 0);
 		 
 		__free_iova(&domain->iovad, iova);
+		dma_free_pagelist(freelist);
 	} else {
-		add_unmap(domain, iova);
+		add_unmap(domain, iova, freelist);
 		 
 	}
 }
@@ -2746,11 +2986,10 @@
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	int i;
-	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
 	size_t size = 0;
 	int prot = 0;
@@ -2761,10 +3000,10 @@
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(hwdev))
-		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
+	if (iommu_no_mapping(dev))
+		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
 
-	domain = get_valid_domain_for_dev(pdev);
+	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return 0;
 
@@ -2773,8 +3012,8 @@
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
-				pdev->dma_mask);
+	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+				*dev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
@@ -2802,7 +3041,7 @@
 	}
 
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
+		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -2928,32 +3167,40 @@
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
 
+#ifdef CONFIG_SYNO_SKIP_IOMMU
+static void quirk_skip_iommu(struct pci_dev *pdev)
+{
+	printk(KERN_INFO "DMAR: Disabling IOMMU for %X:%X\n", pdev->vendor, pdev->device);
+	pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+}
+DECLARE_PCI_FIXUP_HEADER(0x1fc9, 0x4022, quirk_skip_iommu);
+#endif /* CONFIG_SYNO_SKIP_IOMMU */
+
 static void __init init_no_remapping_devices(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct device *dev;
+	int i;
 
 	for_each_drhd_unit(drhd) {
 		if (!drhd->include_all) {
-			int i;
-			for (i = 0; i < drhd->devices_cnt; i++)
-				if (drhd->devices[i] != NULL)
-					break;
+			for_each_active_dev_scope(drhd->devices,
+						  drhd->devices_cnt, i, dev)
+				break;
 			 
 			if (i == drhd->devices_cnt)
 				drhd->ignored = 1;
 		}
 	}
 
-	for_each_drhd_unit(drhd) {
-		int i;
-		if (drhd->ignored || drhd->include_all)
+	for_each_active_drhd_unit(drhd) {
+		if (drhd->include_all)
 			continue;
 
-		for (i = 0; i < drhd->devices_cnt; i++)
-			if (drhd->devices[i] &&
-			    !IS_GFX_DEVICE(drhd->devices[i]))
+		for_each_active_dev_scope(drhd->devices,
+					  drhd->devices_cnt, i, dev)
+			if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
 				break;
-
 		if (i < drhd->devices_cnt)
 			continue;
 
@@ -2961,11 +3208,9 @@
 			intel_iommu_gfx_mapped = 1;
 		} else {
 			drhd->ignored = 1;
-			for (i = 0; i < drhd->devices_cnt; i++) {
-				if (!drhd->devices[i])
-					continue;
-				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
-			}
+			for_each_active_dev_scope(drhd->devices,
+						  drhd->devices_cnt, i, dev)
+				dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
 		}
 	}
 }
@@ -3105,13 +3350,6 @@
 static inline void init_iommu_pm_ops(void) {}
 #endif	 
 
-LIST_HEAD(dmar_rmrr_units);
-
-static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
-{
-	list_add(&rmrr->list, &dmar_rmrr_units);
-}
-
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
@@ -3125,30 +3363,18 @@
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
-
-	dmar_register_rmrr_unit(rmrru);
-	return 0;
-}
-
-static int __init
-rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
-{
-	struct acpi_dmar_reserved_memory *rmrr;
-	int ret;
-
-	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
-	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
-		((void *)rmrr) + rmrr->header.length,
-		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
-	if (ret || (rmrru->devices_cnt == 0)) {
-		list_del(&rmrru->list);
+	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
+				((void *)rmrr) + rmrr->header.length,
+				&rmrru->devices_cnt);
+	if (rmrru->devices_cnt && rmrru->devices == NULL) {
 		kfree(rmrru);
+		return -ENOMEM;
 	}
-	return ret;
-}
 
-static LIST_HEAD(dmar_atsr_units);
+	list_add(&rmrru->list, &dmar_rmrr_units);
+
+	return 0;
+}
 
 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
 {
@@ -3162,115 +3388,160 @@
 
 	atsru->hdr = hdr;
 	atsru->include_all = atsr->flags & 0x1;
+	if (!atsru->include_all) {
+		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
+				(void *)atsr + atsr->header.length,
+				&atsru->devices_cnt);
+		if (atsru->devices_cnt && atsru->devices == NULL) {
+			kfree(atsru);
+			return -ENOMEM;
+		}
+	}
 
-	list_add(&atsru->list, &dmar_atsr_units);
+	list_add_rcu(&atsru->list, &dmar_atsr_units);
 
 	return 0;
 }
 
-static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
 {
-	int rc;
-	struct acpi_dmar_atsr *atsr;
+	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
+	kfree(atsru);
+}
 
-	if (atsru->include_all)
-		return 0;
+static void intel_iommu_free_dmars(void)
+{
+	struct dmar_rmrr_unit *rmrru, *rmrr_n;
+	struct dmar_atsr_unit *atsru, *atsr_n;
 
-	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-	rc = dmar_parse_dev_scope((void *)(atsr + 1),
-				(void *)atsr + atsr->header.length,
-				&atsru->devices_cnt, &atsru->devices,
-				atsr->segment);
-	if (rc || !atsru->devices_cnt) {
-		list_del(&atsru->list);
-		kfree(atsru);
+	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+		list_del(&rmrru->list);
+		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+		kfree(rmrru);
 	}
 
-	return rc;
+	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
+		list_del(&atsru->list);
+		intel_iommu_free_atsr(atsru);
+	}
 }
 
 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
 {
-	int i;
+	int i, ret = 1;
 	struct pci_bus *bus;
+	struct pci_dev *bridge = NULL;
+	struct device *tmp;
 	struct acpi_dmar_atsr *atsr;
 	struct dmar_atsr_unit *atsru;
 
 	dev = pci_physfn(dev);
-
-	list_for_each_entry(atsru, &dmar_atsr_units, list) {
-		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-		if (atsr->segment == pci_domain_nr(dev->bus))
-			goto found;
-	}
-
-	return 0;
-
-found:
 	for (bus = dev->bus; bus; bus = bus->parent) {
-		struct pci_dev *bridge = bus->self;
-
+		bridge = bus->self;
 		if (!bridge || !pci_is_pcie(bridge) ||
 		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
 			return 0;
-
-		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
-			for (i = 0; i < atsru->devices_cnt; i++)
-				if (atsru->devices[i] == bridge)
-					return 1;
+		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
 			break;
-		}
 	}
+	if (!bridge)
+		return 0;
 
-	if (atsru->include_all)
-		return 1;
+	rcu_read_lock();
+	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+		if (atsr->segment != pci_domain_nr(dev->bus))
+			continue;
 
-	return 0;
+		for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
+			if (tmp == &bridge->dev)
+				goto out;
+
+		if (atsru->include_all)
+			goto out;
+	}
+	ret = 0;
+out:
+	rcu_read_unlock();
+
+	return ret;
 }
 
-int __init dmar_parse_rmrr_atsr_dev(void)
+int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 {
-	struct dmar_rmrr_unit *rmrr, *rmrr_n;
-	struct dmar_atsr_unit *atsr, *atsr_n;
 	int ret = 0;
+	struct dmar_rmrr_unit *rmrru;
+	struct dmar_atsr_unit *atsru;
+	struct acpi_dmar_atsr *atsr;
+	struct acpi_dmar_reserved_memory *rmrr;
 
-	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
-		ret = rmrr_parse_dev(rmrr);
-		if (ret)
-			return ret;
+	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
+		return 0;
+
+	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
+		rmrr = container_of(rmrru->hdr,
+				    struct acpi_dmar_reserved_memory, header);
+		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
+				((void *)rmrr) + rmrr->header.length,
+				rmrr->segment, rmrru->devices,
+				rmrru->devices_cnt);
+			if (ret < 0)
+				return ret;
+		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+			dmar_remove_dev_scope(info, rmrr->segment,
+				rmrru->devices, rmrru->devices_cnt);
+		}
 	}
 
-	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
-		ret = atsr_parse_dev(atsr);
-		if (ret)
-			return ret;
+	list_for_each_entry(atsru, &dmar_atsr_units, list) {
+		if (atsru->include_all)
+			continue;
+
+		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
+					(void *)atsr + atsr->header.length,
+					atsr->segment, atsru->devices,
+					atsru->devices_cnt);
+			if (ret > 0)
+				break;
+			else if (ret < 0)
+				return ret;
+		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+			if (dmar_remove_dev_scope(info, atsr->segment,
+					atsru->devices, atsru->devices_cnt))
+				break;
+		}
 	}
 
-	return ret;
+	return 0;
 }
 
 static int device_notifier(struct notifier_block *nb,
 				  unsigned long action, void *data)
 {
 	struct device *dev = data;
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
 
-	if (iommu_no_mapping(dev))
+	if (iommu_dummy(dev))
 		return 0;
 
-	domain = find_domain(pdev);
-	if (!domain)
+	if (action != BUS_NOTIFY_UNBOUND_DRIVER &&
+	    action != BUS_NOTIFY_DEL_DEVICE)
 		return 0;
 
-	if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
-		domain_remove_one_dev_info(domain, pdev);
+	domain = find_domain(dev);
+	if (!domain)
+		return 0;
 
-		if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
-		    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
-		    list_empty(&domain->devices))
-			domain_exit(domain);
-	}
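+	/*
+	 * The teardown below walks global DMAR state, so hold
+	 * dmar_global_lock for reading across it.
+	 */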
+	down_read(&dmar_global_lock);
+	domain_remove_one_dev_info(domain, dev);
+	if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+	    list_empty(&domain->devices))
+		domain_exit(domain);
+	up_read(&dmar_global_lock);
 
 	return 0;
 }
@@ -3279,44 +3550,108 @@
 	.notifier_call = device_notifier,
 };
 
+static int intel_iommu_memory_notifier(struct notifier_block *nb,
+				       unsigned long val, void *v)
+{
+	struct memory_notify *mhp = v;
+	unsigned long long start, end;
+	unsigned long start_vpfn, last_vpfn;
+
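+	/*
+	 * Keep the static identity (si) domain in step with memory hotplug:
+	 * newly onlined ranges get an identity mapping, ranges going offline
+	 * have their mapping and IOVAs torn down and the IOTLB flushed.
+	 */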
+	switch (val) {
+	case MEM_GOING_ONLINE:
+		start = mhp->start_pfn << PAGE_SHIFT;
+		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
+		if (iommu_domain_identity_map(si_domain, start, end)) {
+			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+				start, end);
+			return NOTIFY_BAD;
+		}
+		break;
+
+	case MEM_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
+		while (start_vpfn <= last_vpfn) {
+			struct iova *iova;
+			struct dmar_drhd_unit *drhd;
+			struct intel_iommu *iommu;
+			struct page *freelist;
+
+			iova = find_iova(&si_domain->iovad, start_vpfn);
+			if (iova == NULL) {
+				pr_debug("dmar: failed to get IOVA for PFN %lx\n",
+					 start_vpfn);
+				break;
+			}
+
+			iova = split_and_remove_iova(&si_domain->iovad, iova,
+						     start_vpfn, last_vpfn);
+			if (iova == NULL) {
+				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+					start_vpfn, last_vpfn);
+				return NOTIFY_BAD;
+			}
+
+			freelist = domain_unmap(si_domain, iova->pfn_lo,
+					       iova->pfn_hi);
+
+			rcu_read_lock();
+			for_each_active_iommu(iommu, drhd)
+				iommu_flush_iotlb_psi(iommu, si_domain->id,
+					iova->pfn_lo,
+					iova->pfn_hi - iova->pfn_lo + 1,
+					!freelist, 0);
+			rcu_read_unlock();
+			dma_free_pagelist(freelist);
+
+			start_vpfn = iova->pfn_hi + 1;
+			free_iova_mem(iova);
+		}
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_memory_nb = {
+	.notifier_call = intel_iommu_memory_notifier,
+	.priority = 0
+};
+
 int __init intel_iommu_init(void)
 {
-	int ret = 0;
+	int ret = -ENODEV;
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	force_on = tboot_force_iommu();
 
+	if (iommu_init_mempool()) {
+		if (force_on)
+			panic("tboot: Failed to initialize iommu memory\n");
+		return -ENOMEM;
+	}
+
+	down_write(&dmar_global_lock);
 	if (dmar_table_init()) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMAR table\n");
-		return 	-ENODEV;
+		goto out_free_dmar;
 	}
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu;
-
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
+	for_each_active_iommu(iommu, drhd)
 		if (iommu->gcmd & DMA_GCMD_TE)
 			iommu_disable_translation(iommu);
-	}
 
 	if (dmar_dev_scope_init() < 0) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMAR device scope\n");
-		return 	-ENODEV;
+		goto out_free_dmar;
 	}
 
 	if (no_iommu || dmar_disabled)
-		return -ENODEV;
-
-	if (iommu_init_mempool()) {
-		if (force_on)
-			panic("tboot: Failed to initialize iommu memory\n");
-		return 	-ENODEV;
-	}
+		goto out_free_dmar;
 
 	if (list_empty(&dmar_rmrr_units))
 		printk(KERN_INFO "DMAR: No RMRR found\n");
@@ -3327,7 +3662,7 @@
 	if (dmar_init_reserved_ranges()) {
 		if (force_on)
 			panic("tboot: Failed to reserve iommu ranges\n");
-		return 	-ENODEV;
+		goto out_free_reserved_range;
 	}
 
 	init_no_remapping_devices();
@@ -3337,10 +3672,9 @@
 		if (force_on)
 			panic("tboot: Failed to initialize DMARs\n");
 		printk(KERN_ERR "IOMMU: dmar init failed\n");
-		put_iova_domain(&reserved_iova_list);
-		iommu_exit_mempool();
-		return ret;
+		goto out_free_reserved_range;
 	}
+	up_write(&dmar_global_lock);
 	printk(KERN_INFO
 	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
 
@@ -3353,12 +3687,21 @@
 	init_iommu_pm_ops();
 
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
-
 	bus_register_notifier(&pci_bus_type, &device_nb);
+	if (si_domain && !hw_pass_through)
+		register_memory_notifier(&intel_iommu_memory_nb);
 
 	intel_iommu_enabled = 1;
 
 	return 0;
+
+out_free_reserved_range:
+	put_iova_domain(&reserved_iova_list);
+out_free_dmar:
+	intel_iommu_free_dmars();
+	up_write(&dmar_global_lock);
+	iommu_exit_mempool();
+	return ret;
 }
 
 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
@@ -3370,40 +3713,37 @@
 }
 
 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-					   struct pci_dev *pdev)
+					   struct device *dev)
 {
-	if (!iommu || !pdev)
+	if (!iommu || !dev || !dev_is_pci(dev))
 		return;
 
-	pci_for_each_dma_alias(pdev, &iommu_detach_dev_cb, iommu);
+	pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
 }
 
 static void domain_remove_one_dev_info(struct dmar_domain *domain,
-					  struct pci_dev *pdev)
+				       struct device *dev)
 {
-	struct device_domain_info *info;
+	struct device_domain_info *info, *tmp;
 	struct intel_iommu *iommu;
 	unsigned long flags;
 	int found = 0;
-	struct list_head *entry, *tmp;
+	u8 bus, devfn;
 
-	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-				pdev->devfn);
+	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_safe(entry, tmp, &domain->devices) {
-		info = list_entry(entry, struct device_domain_info, link);
-		if (info->segment == pci_domain_nr(pdev->bus) &&
-		    info->bus == pdev->bus->number &&
-		    info->devfn == pdev->devfn) {
+	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
+		if (info->iommu == iommu && info->bus == bus &&
+		    info->devfn == devfn) {
 			unlink_domain_info(info);
 			spin_unlock_irqrestore(&device_domain_lock, flags);
 
 			iommu_disable_dev_iotlb(info);
 			iommu_detach_dev(iommu, info->bus, info->devfn);
-			iommu_detach_dependent_devices(iommu, pdev);
+			iommu_detach_dependent_devices(iommu, dev);
 			free_devinfo_mem(info);
 
 			spin_lock_irqsave(&device_domain_lock, flags);
@@ -3414,8 +3754,7 @@
 				continue;
 		}
 
-		if (iommu == device_to_iommu(info->segment, info->bus,
-					    info->devfn))
+		if (info->iommu == iommu)
 			found = 1;
 	}
 
@@ -3439,77 +3778,21 @@
 	}
 }
 
-static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
-{
-	struct device_domain_info *info;
-	struct intel_iommu *iommu;
-	unsigned long flags1, flags2;
-
-	spin_lock_irqsave(&device_domain_lock, flags1);
-	while (!list_empty(&domain->devices)) {
-		info = list_entry(domain->devices.next,
-			struct device_domain_info, link);
-		unlink_domain_info(info);
-		spin_unlock_irqrestore(&device_domain_lock, flags1);
-
-		iommu_disable_dev_iotlb(info);
-		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
-		iommu_detach_dev(iommu, info->bus, info->devfn);
-		iommu_detach_dependent_devices(iommu, info->dev);
-
-		spin_lock_irqsave(&domain->iommu_lock, flags2);
-		if (test_and_clear_bit(iommu->seq_id,
-				       domain->iommu_bmp)) {
-			domain->iommu_count--;
-			domain_update_iommu_cap(domain);
-		}
-		spin_unlock_irqrestore(&domain->iommu_lock, flags2);
-
-		free_devinfo_mem(info);
-		spin_lock_irqsave(&device_domain_lock, flags1);
-	}
-	spin_unlock_irqrestore(&device_domain_lock, flags1);
-}
-
-static unsigned long vm_domid;
-
-static struct dmar_domain *iommu_alloc_vm_domain(void)
-{
-	struct dmar_domain *domain;
-
-	domain = alloc_domain_mem();
-	if (!domain)
-		return NULL;
-
-	domain->id = vm_domid++;
-	domain->nid = -1;
-	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
-	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
-
-	return domain;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
 	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-	spin_lock_init(&domain->iommu_lock);
-
 	domain_reserve_special_ranges(domain);
 
 	domain->gaw = guest_width;
 	adjust_width = guestwidth_to_adjustwidth(guest_width);
 	domain->agaw = width_to_agaw(adjust_width);
 
-	INIT_LIST_HEAD(&domain->devices);
-
-	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
 	domain->iommu_snooping = 0;
 	domain->iommu_superpage = 0;
 	domain->max_addr = 0;
-	domain->nid = -1;
 
 	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
@@ -3518,55 +3801,11 @@
 	return 0;
 }
 
-static void iommu_free_vm_domain(struct dmar_domain *domain)
-{
-	unsigned long flags;
-	struct dmar_drhd_unit *drhd;
-	struct intel_iommu *iommu;
-	unsigned long i;
-	unsigned long ndomains;
-
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-		iommu = drhd->iommu;
-
-		ndomains = cap_ndoms(iommu->cap);
-		for_each_set_bit(i, iommu->domain_ids, ndomains) {
-			if (iommu->domains[i] == domain) {
-				spin_lock_irqsave(&iommu->lock, flags);
-				clear_bit(i, iommu->domain_ids);
-				iommu->domains[i] = NULL;
-				spin_unlock_irqrestore(&iommu->lock, flags);
-				break;
-			}
-		}
-	}
-}
-
-static void vm_domain_exit(struct dmar_domain *domain)
-{
-	 
-	if (!domain)
-		return;
-
-	vm_domain_remove_all_dev_info(domain);
-	 
-	put_iova_domain(&domain->iovad);
-
-	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
-	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
-
-	iommu_free_vm_domain(domain);
-	free_domain_mem(domain);
-}
-
 static int intel_iommu_domain_init(struct iommu_domain *domain)
 {
 	struct dmar_domain *dmar_domain;
 
-	dmar_domain = iommu_alloc_vm_domain();
+	dmar_domain = alloc_domain(true);
 	if (!dmar_domain) {
 		printk(KERN_ERR
 			"intel_iommu_domain_init: dmar_domain == NULL\n");
@@ -3575,7 +3814,7 @@
 	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		printk(KERN_ERR
 			"intel_iommu_domain_init() failed\n");
-		vm_domain_exit(dmar_domain);
+		domain_exit(dmar_domain);
 		return -ENOMEM;
 	}
 	domain_update_iommu_cap(dmar_domain);
@@ -3593,32 +3832,31 @@
 	struct dmar_domain *dmar_domain = domain->priv;
 
 	domain->priv = NULL;
-	vm_domain_exit(dmar_domain);
+	domain_exit(dmar_domain);
 }
 
 static int intel_iommu_attach_device(struct iommu_domain *domain,
 				     struct device *dev)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct intel_iommu *iommu;
 	int addr_width;
+	u8 bus, devfn;
 
-	if (unlikely(domain_context_mapped(pdev))) {
+	if (unlikely(domain_context_mapped(dev))) {
 		struct dmar_domain *old_domain;
 
-		old_domain = find_domain(pdev);
+		old_domain = find_domain(dev);
 		if (old_domain) {
 			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
 			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
-				domain_remove_one_dev_info(old_domain, pdev);
+				domain_remove_one_dev_info(old_domain, dev);
 			else
 				domain_remove_dev_info(old_domain);
 		}
 	}
 
-	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
-				pdev->devfn);
+	iommu = device_to_iommu(dev, &bus, &devfn);
 	if (!iommu)
 		return -ENODEV;
 
@@ -3646,16 +3884,15 @@
 		dmar_domain->agaw--;
 	}
 
-	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
 				      struct device *dev)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	struct pci_dev *pdev = to_pci_dev(dev);
 
-	domain_remove_one_dev_info(dmar_domain, pdev);
+	domain_remove_one_dev_info(dmar_domain, dev);
 }
 
 static int intel_iommu_map(struct iommu_domain *domain,
@@ -3695,18 +3932,46 @@
 }
 
 static size_t intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, size_t size)
+				unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	int order;
+	struct page *freelist = NULL;
+	struct intel_iommu *iommu;
+	unsigned long start_pfn, last_pfn;
+	unsigned int npages;
+	int iommu_id, num, ndomains, level = 0;
+
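+	/*
+	 * Find the page-table level backing this IOVA; if it is covered by a
+	 * large page, widen the requested size so the whole page is unmapped.
+	 */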
+	if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
+		BUG();
+
+	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
+		size = VTD_PAGE_SIZE << level_to_offset_bits(level);
+
+	start_pfn = iova >> VTD_PAGE_SHIFT;
+	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
 
-	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
-			    (iova + size - 1) >> VTD_PAGE_SHIFT);
+	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
+
+	npages = last_pfn - start_pfn + 1;
+
+	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
+		iommu = g_iommus[iommu_id];
+
+		ndomains = cap_ndoms(iommu->cap);
+		for_each_set_bit(num, iommu->domain_ids, ndomains) {
+			if (iommu->domains[num] == dmar_domain)
+				iommu_flush_iotlb_psi(iommu, num, start_pfn,
+						      npages, !freelist, 0);
+		}
+	}
+
+	dma_free_pagelist(freelist);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return PAGE_SIZE << order;
+	return size;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,9 +3979,10 @@
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	struct dma_pte *pte;
+	int level = 0;
 	u64 phys = 0;
 
-	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
+	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
 	if (pte)
 		phys = dma_pte_addr(pte);
 
@@ -3739,10 +4005,9 @@
 static int intel_iommu_add_device(struct device *dev)
 {
 	struct iommu_group *group;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	
-	if (!device_to_iommu(pci_domain_nr(pdev->bus),
-			     pdev->bus->number, pdev->devfn))
+	u8 bus, devfn;
+
+	if (!device_to_iommu(dev, &bus, &devfn))
 		return -ENODEV;
 
 	group = iommu_group_get_for_dev(dev);
diff -ur a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
--- a/drivers/iommu/intel_irq_remapping.c	2017-03-23 14:38:32.000000000 +0100
+++ b/drivers/iommu/intel_irq_remapping.c	2017-03-14 02:13:14.000000000 +0100
@@ -38,15 +38,28 @@
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 static int ir_ioapic_num, ir_hpet_num;
 
+/*
+ * Lock ordering:
+ * ->dmar_global_lock
+ *	->irq_2_ir_lock
+ *		->qi->q_lock
+ *	->iommu->register_lock
+ * Note:
+ * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
+ * in a single-threaded environment with interrupts disabled, so no need to take
+ * the dmar_global_lock.
+ */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
+static int __init parse_ioapics_under_ir(void);
+
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
 	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
@@ -69,19 +82,13 @@
 	struct ir_table *table = iommu->ir_table;
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
-	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
-	int i;
+	int index;
 
 	if (!count || !irq_iommu)
 		return -1;
 
-	/*
-	 * start the IRTE search from index 0.
-	 */
-	index = start_index = 0;
-
 	if (count > 1) {
 		count = __roundup_pow_of_two(count);
 		mask = ilog2(count);
@@ -96,32 +103,17 @@
 	}
 
 	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-	do {
-		for (i = index; i < index + count; i++)
-			if  (table->base[i].present)
-				break;
-		/* empty index found */
-		if (i == index + count)
-			break;
-
-		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-		if (index == start_index) {
-			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-			printk(KERN_ERR "can't allocate an IRTE\n");
-			return -1;
-		}
-	} while (1);
-
-	for (i = index; i < index + count; i++)
-		table->base[i].present = 1;
-
-	cfg->remapped = 1;
-	irq_iommu->iommu = iommu;
-	irq_iommu->irte_index =  index;
-	irq_iommu->sub_handle = 0;
-	irq_iommu->irte_mask = mask;
-
+	index = bitmap_find_free_region(table->bitmap,
+					INTR_REMAP_TABLE_ENTRIES, mask);
+	if (index < 0) {
+		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+	} else {
+		cfg->remapped = 1;
+		irq_iommu->iommu = iommu;
+		irq_iommu->irte_index =  index;
+		irq_iommu->sub_handle = 0;
+		irq_iommu->irte_mask = mask;
+	}
 	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
@@ -254,6 +246,8 @@
 		set_64bit(&entry->low, 0);
 		set_64bit(&entry->high, 0);
 	}
+	bitmap_release_region(iommu->ir_table->bitmap, index,
+			      irq_iommu->irte_mask);
 
 	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
@@ -324,19 +318,21 @@
 	if (!irte)
 		return -1;
 
+	down_read(&dmar_global_lock);
 	for (i = 0; i < MAX_IO_APICS; i++) {
 		if (ir_ioapic[i].id == apic) {
 			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
 			break;
 		}
 	}
+	up_read(&dmar_global_lock);
 
 	if (sid == 0) {
 		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
 		return -1;
 	}
 
-	set_irte_sid(irte, 1, 0, sid);
+	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
 
 	return 0;
 }
@@ -349,12 +345,14 @@
 	if (!irte)
 		return -1;
 
+	down_read(&dmar_global_lock);
 	for (i = 0; i < MAX_HPET_TBS; i++) {
 		if (ir_hpet[i].id == id) {
 			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
 			break;
 		}
 	}
+	up_read(&dmar_global_lock);
 
 	if (sid == 0) {
 		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
@@ -475,6 +473,7 @@
 {
 	struct ir_table *ir_table;
 	struct page *pages;
+	unsigned long *bitmap;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
 					     GFP_ATOMIC);
@@ -486,13 +485,23 @@
 				 INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
-		printk(KERN_ERR "failed to allocate pages of order %d\n",
-		       INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate pages of order %d\n",
+		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
 		kfree(iommu->ir_table);
 		return -ENOMEM;
 	}
 
+	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+			 sizeof(long), GFP_ATOMIC);
+	if (bitmap == NULL) {
+		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
+		kfree(ir_table);
+		return -ENOMEM;
+	}
+
 	ir_table->base = page_address(pages);
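+	/* one bit per IRTE; alloc_irte()/free_irte() use it to hand out and reclaim entries */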
+	ir_table->bitmap = bitmap;
 
 	iommu_set_irq_remapping(iommu, mode);
 	return 0;
@@ -517,7 +526,7 @@
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts = readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
 		goto end;
 
@@ -543,6 +552,7 @@
 static int __init intel_irq_remapping_supported(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	if (disable_irq_remap)
 		return 0;
@@ -561,12 +571,9 @@
 	if (!dmar_ir_support())
 		return 0;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (!ecap_ir_support(iommu->ecap))
 			return 0;
-	}
 
 	return 1;
 }
@@ -574,6 +581,7 @@
 static int __init intel_enable_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	bool x2apic_present;
 	int setup = 0;
 	int eim = 0;
@@ -586,6 +594,8 @@
 	}
 
 	if (x2apic_present) {
+		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
 		eim = !dmar_x2apic_optout();
 		if (!eim)
 			printk(KERN_WARNING
@@ -594,9 +604,7 @@
 				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
 	}
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		/*
 		 * If the queued invalidation is already initialized,
 		 * shouldn't disable it.
@@ -621,9 +629,7 @@
 	/*
 	 * check for the Interrupt-remapping support
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
@@ -637,10 +643,8 @@
 	/*
 	 * Enable queued invalidation for all the DRHD's.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
-		ret = dmar_enable_qi(iommu);
+	for_each_iommu(iommu, drhd) {
+		int ret = dmar_enable_qi(iommu);
 
 		if (ret) {
 			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -653,9 +657,7 @@
 	/*
 	 * Setup Interrupt-remapping for all the DRHD's now.
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
@@ -710,12 +712,12 @@
 		 * Access PCI directly due to the PCI
 		 * subsystem isn't initialized yet.
 		 */
-		bus = read_pci_config_byte(bus, path->dev, path->fn,
+		bus = read_pci_config_byte(bus, path->device, path->function,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
 	ir_hpet[ir_hpet_num].bus   = bus;
-	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
 	ir_hpet[ir_hpet_num].iommu = iommu;
 	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
 	ir_hpet_num++;
@@ -738,13 +740,13 @@
 		 * Access PCI directly due to the PCI
 		 * subsystem isn't initialized yet.
 		 */
-		bus = read_pci_config_byte(bus, path->dev, path->fn,
+		bus = read_pci_config_byte(bus, path->device, path->function,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
 
 	ir_ioapic[ir_ioapic_num].bus   = bus;
-	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
 	ir_ioapic[ir_ioapic_num].iommu = iommu;
 	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
 	ir_ioapic_num++;
@@ -797,22 +799,20 @@
  * Finds the association between IOAPICs and their Interrupt-remapping
  * hardware unit.
  */
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	int ir_supported = 0;
 	int ioapic_idx;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (ecap_ir_support(iommu->ecap)) {
 			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
 				return -1;
 
 			ir_supported = 1;
 		}
-	}
 
 	if (!ir_supported)
 		return 0;
@@ -830,12 +830,18 @@
 	return 1;
 }
 
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
 {
+	int ret;
+
 	if (!irq_remapping_enabled)
 		return 0;
 
-	return dmar_dev_scope_init();
+	down_write(&dmar_global_lock);
+	ret = dmar_dev_scope_init();
+	up_write(&dmar_global_lock);
+
+	return ret;
 }
 rootfs_initcall(ir_dev_scope_init);
 
@@ -916,23 +922,27 @@
 				    struct io_apic_irq_attr *attr)
 {
 	int ioapic_id = mpc_ioapic_id(attr->ioapic);
-	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
+	struct intel_iommu *iommu;
 	struct IR_IO_APIC_route_entry *entry;
 	struct irte irte;
 	int index;
 
+	down_read(&dmar_global_lock);
+	iommu = map_ioapic_to_ir(ioapic_id);
 	if (!iommu) {
 		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
-		return -ENODEV;
-	}
-
-	entry = (struct IR_IO_APIC_route_entry *)route_entry;
-
-	index = alloc_irte(iommu, irq, 1);
-	if (index < 0) {
-		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
-		return -ENOMEM;
+		index = -ENODEV;
+	} else {
+		index = alloc_irte(iommu, irq, 1);
+		if (index < 0) {
+			pr_warn("Failed to allocate IRTE for ioapic %d\n",
+				ioapic_id);
+			index = -ENOMEM;
+		}
 	}
+	up_read(&dmar_global_lock);
+	if (index < 0)
+		return index;
 
 	prepare_irte(&irte, vector, destination);
 
@@ -951,6 +961,7 @@
 		irte.avail, irte.vector, irte.dest_id,
 		irte.sid, irte.sq, irte.svt);
 
+	entry = (struct IR_IO_APIC_route_entry *)route_entry;
 	memset(entry, 0, sizeof(*entry));
 
 	entry->index2	= (index >> 15) & 0x1;
@@ -1081,20 +1092,23 @@
 	struct intel_iommu *iommu;
 	int index;
 
+	down_read(&dmar_global_lock);
 	iommu = map_dev_to_ir(dev);
 	if (!iommu) {
 		printk(KERN_ERR
 		       "Unable to map PCI %s to iommu\n", pci_name(dev));
-		return -ENOENT;
+		index = -ENOENT;
+	} else {
+		index = alloc_irte(iommu, irq, nvec);
+		if (index < 0) {
+			printk(KERN_ERR
+			       "Unable to allocate %d IRTE for PCI %s\n",
+			       nvec, pci_name(dev));
+			index = -ENOSPC;
+		}
 	}
+	up_read(&dmar_global_lock);
 
-	index = alloc_irte(iommu, irq, nvec);
-	if (index < 0) {
-		printk(KERN_ERR
-		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
-		       pci_name(dev));
-		return -ENOSPC;
-	}
 	return index;
 }
 
@@ -1102,33 +1116,40 @@
 			       int index, int sub_handle)
 {
 	struct intel_iommu *iommu;
+	int ret = -ENOENT;
 
+	down_read(&dmar_global_lock);
 	iommu = map_dev_to_ir(pdev);
-	if (!iommu)
-		return -ENOENT;
-	/*
-	 * setup the mapping between the irq and the IRTE
-	 * base index, the sub_handle pointing to the
-	 * appropriate interrupt remap table entry.
-	 */
-	set_irte_irq(irq, iommu, index, sub_handle);
+	if (iommu) {
+		/*
+		 * setup the mapping between the irq and the IRTE
+		 * base index, the sub_handle pointing to the
+		 * appropriate interrupt remap table entry.
+		 */
+		set_irte_irq(irq, iommu, index, sub_handle);
+		ret = 0;
+	}
+	up_read(&dmar_global_lock);
 
-	return 0;
+	return ret;
 }
 
 static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-	struct intel_iommu *iommu = map_hpet_to_ir(id);
+	int ret = -1;
+	struct intel_iommu *iommu;
 	int index;
 
-	if (!iommu)
-		return -1;
-
-	index = alloc_irte(iommu, irq, 1);
-	if (index < 0)
-		return -1;
+	down_read(&dmar_global_lock);
+	iommu = map_hpet_to_ir(id);
+	if (iommu) {
+		index = alloc_irte(iommu, irq, 1);
+		if (index >= 0)
+			ret = 0;
+	}
+	up_read(&dmar_global_lock);
 
-	return 0;
+	return ret;
 }
 
 struct irq_remap_ops intel_irq_remap_ops = {
diff -ur a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
--- a/drivers/iommu/iommu.c	2017-03-23 14:38:33.000000000 +0100
+++ b/drivers/iommu/iommu.c	2017-03-14 02:13:15.000000000 +0100
@@ -30,6 +30,7 @@
 #include <linux/notifier.h>
 #include <linux/err.h>
 #include <linux/pci.h>
+#include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
 static struct ida iommu_group_ida;
@@ -364,6 +365,8 @@
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+
+	trace_add_device_to_group(group->id, dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -400,6 +403,8 @@
 	sysfs_remove_link(group->devices_kobj, device->name);
 	sysfs_remove_link(&dev->kobj, "iommu_group");
 
+	trace_remove_device_from_group(group->id, dev);
+
 	kfree(device->name);
 	kfree(device);
 	dev->iommu_group = NULL;
@@ -862,10 +867,14 @@
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
+	int ret;
 	if (unlikely(domain->ops->attach_dev == NULL))
 		return -ENODEV;
 
-	return domain->ops->attach_dev(domain, dev);
+	ret = domain->ops->attach_dev(domain, dev);
+	if (!ret)
+		trace_attach_device_to_domain(dev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
@@ -875,6 +884,7 @@
 		return;
 
 	domain->ops->detach_dev(domain, dev);
+	trace_detach_device_from_domain(dev);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
@@ -936,6 +946,38 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+			   unsigned long addr_merge, size_t size)
+{
+	unsigned int pgsize_idx;
+	size_t pgsize;
+
+	/* Max page size that still fits into 'size' */
+	pgsize_idx = __fls(size);
+
+	/* need to consider alignment requirements ? */
+	if (likely(addr_merge)) {
+		/* Max page size allowed by address */
+		unsigned int align_pgsize_idx = __ffs(addr_merge);
+		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+	}
+
+	/* build a mask of acceptable page sizes */
+	pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+	/* throw away page sizes not supported by the hardware */
+	pgsize &= domain->ops->pgsize_bitmap;
+
+	/* make sure we're still sane */
+	BUG_ON(!pgsize);
+
+	/* pick the biggest page */
+	pgsize_idx = __fls(pgsize);
+	pgsize = 1UL << pgsize_idx;
+
+	return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
@@ -957,45 +999,18 @@
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-			"0x%x\n", iova, (unsigned long)paddr,
-			(unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-				(unsigned long)paddr, (unsigned long)size);
+	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		unsigned long pgsize, addr_merge = iova | paddr;
-		unsigned int pgsize_idx;
-
-		/* Max page size that still fits into 'size' */
-		pgsize_idx = __fls(size);
-
-		/* need to consider alignment requirements ? */
-		if (likely(addr_merge)) {
-			/* Max page size allowed by both iova and paddr */
-			unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-		}
-
-		/* build a mask of acceptable page sizes */
-		pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-		/* throw away page sizes not supported by the hardware */
-		pgsize &= domain->ops->pgsize_bitmap;
-
-		/* make sure we're still sane */
-		BUG_ON(!pgsize);
-
-		/* pick the biggest page */
-		pgsize_idx = __fls(pgsize);
-		pgsize = 1UL << pgsize_idx;
+		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-					(unsigned long)paddr, pgsize);
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+			 iova, &paddr, pgsize);
 
 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
 		if (ret)
@@ -1009,6 +1024,8 @@
 	/* unroll mapping in case something went wrong */
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
+	else
+		trace_map(iova, paddr, size);
 
 	return ret;
 }
@@ -1032,32 +1049,32 @@
 	 * by the hardware
 	 */
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
-					iova, (unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+		       iova, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
-							(unsigned long)size);
+	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
 
 	/*
 	 * Keep iterating until we either unmap 'size' bytes (or more)
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t left = size - unmapped;
+		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-		unmapped_page = domain->ops->unmap(domain, iova, left);
+		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
 		if (!unmapped_page)
 			break;
 
-		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
-					(unsigned long)unmapped_page);
+		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+			 iova, unmapped_page);
 
 		iova += unmapped_page;
 		unmapped += unmapped_page;
 	}
 
+	trace_unmap(iova, 0, size);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
Only in b/drivers/iommu: iommu-traces.c.
diff -ur a/drivers/iommu/iova.c b/drivers/iommu/iova.c
--- a/drivers/iommu/iova.c	2017-03-23 14:38:31.000000000 +0100
+++ b/drivers/iommu/iova.c	2017-03-14 02:13:13.000000000 +0100
@@ -341,19 +341,30 @@
 	return 0;
 }
 
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct iova *iova;
+
+	iova = alloc_iova_mem();
+	if (iova) {
+		iova->pfn_lo = pfn_lo;
+		iova->pfn_hi = pfn_hi;
+	}
+
+	return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
 	struct iova *iova;
 
-	iova = alloc_iova_mem();
-	if (!iova)
-		return iova;
+	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+	if (iova)
+		iova_insert_rbtree(&iovad->rbroot, iova);
 
-	iova->pfn_hi = pfn_hi;
-	iova->pfn_lo = pfn_lo;
-	iova_insert_rbtree(&iovad->rbroot, iova);
 	return iova;
 }
 
@@ -432,3 +443,44 @@
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
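+/*
+ * Shrink an existing iova node to [pfn_lo, pfn_hi]: the parts of the original
+ * range outside that window are put back into the rbtree as new nodes, the
+ * node itself is taken out of the tree and returned to the caller.
+ */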
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+		      unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	unsigned long flags;
+	struct iova *prev = NULL, *next = NULL;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (iova->pfn_lo < pfn_lo) {
+		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+		if (prev == NULL)
+			goto error;
+	}
+	if (iova->pfn_hi > pfn_hi) {
+		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+		if (next == NULL)
+			goto error;
+	}
+
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+
+	if (prev) {
+		iova_insert_rbtree(&iovad->rbroot, prev);
+		iova->pfn_lo = pfn_lo;
+	}
+	if (next) {
+		iova_insert_rbtree(&iovad->rbroot, next);
+		iova->pfn_hi = pfn_hi;
+	}
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	return iova;
+
+error:
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	if (prev)
+		free_iova_mem(prev);
+	return NULL;
+}
diff -ur a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
--- a/drivers/iommu/irq_remapping.c	2017-03-23 14:38:31.000000000 +0100
+++ b/drivers/iommu/irq_remapping.c	2017-03-14 02:13:12.000000000 +0100
@@ -148,7 +148,7 @@
 		return do_setup_msix_irqs(dev, nvec);
 }
 
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
 {
 	/*
 	 * Intr-remapping uses pin number as the virtual vector
@@ -293,8 +293,8 @@
 					     vector, attr);
 }
 
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
-			      bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+				     const struct cpumask *mask, bool force)
 {
 	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
 	    !remap_ops->set_affinity)
diff -ur a/drivers/iommu/Makefile b/drivers/iommu/Makefile
--- a/drivers/iommu/Makefile	2016-10-20 04:32:04.000000000 +0200
+++ b/drivers/iommu/Makefile	2016-08-15 05:51:19.000000000 +0200
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff -ur a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
--- a/drivers/isdn/gigaset/ser-gigaset.c	2017-03-23 14:42:43.000000000 +0100
+++ b/drivers/isdn/gigaset/ser-gigaset.c	2017-03-14 02:18:39.000000000 +0100
@@ -522,9 +522,18 @@
 	cs->hw.ser->tty = tty;
 	atomic_set(&cs->hw.ser->refcnt, 1);
 	init_completion(&cs->hw.ser->dead_cmp);
-
 	tty->disc_data = cs;
 
+	/* Set the amount of data we're willing to receive per call
+	 * from the hardware driver to half of the input buffer size
+	 * to leave some reserve.
+	 * Note: We don't do flow control towards the hardware driver.
+	 * If more data is received than will fit into the input buffer,
+	 * it will be dropped and an error will be logged. This should
+	 * never happen as the device is slow and the buffer size ample.
+	 */
+	tty->receive_room = RBUFSIZE/2;
+
 	/* OK.. Initialization of the datastructures and the HW is done.. Now
 	 * startup system and notify the LL that we are ready to run
 	 */
diff -ur a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
--- a/drivers/isdn/i4l/isdn_ppp.c	2017-03-23 14:42:49.000000000 +0100
+++ b/drivers/isdn/i4l/isdn_ppp.c	2017-03-14 02:18:47.000000000 +0100
@@ -299,6 +299,8 @@
 	is->compflags = 0;
 
 	is->reset = isdn_ppp_ccp_reset_alloc(is);
+	if (!is->reset)
+		return -ENOMEM;
 
 	is->lp = NULL;
 	is->mp_seqno = 0;       /* MP sequence number */
@@ -318,6 +320,10 @@
 	 * VJ header compression init
 	 */
 	is->slcomp = slhc_init(16, 16);	/* not necessary for 2. link in bundle */
+	if (IS_ERR(is->slcomp)) {
+		isdn_ppp_ccp_reset_free(is);
+		return PTR_ERR(is->slcomp);
+	}
 #endif
 #ifdef CONFIG_IPPP_FILTER
 	is->pass_filter = NULL;
@@ -566,10 +572,8 @@
 			is->maxcid = val;
 #ifdef CONFIG_ISDN_PPP_VJ
 			sltmp = slhc_init(16, val);
-			if (!sltmp) {
-				printk(KERN_ERR "ippp, can't realloc slhc struct\n");
-				return -ENOMEM;
-			}
+			if (IS_ERR(sltmp))
+				return PTR_ERR(sltmp);
 			if (is->slcomp)
 				slhc_free(is->slcomp);
 			is->slcomp = sltmp;
diff -ur a/drivers/leds/leds-lp3943.c b/drivers/leds/leds-lp3943.c
--- a/drivers/leds/leds-lp3943.c	2017-03-23 14:46:58.000000000 +0100
+++ b/drivers/leds/leds-lp3943.c	2017-03-14 02:23:07.000000000 +0100
@@ -387,7 +387,17 @@
 	if (!mode) {
 		goto END;
 	}
-	*mode = brightness == 0 ? LP3943_LED_OFF : nodeMode;
+	switch (brightness) {
+	case 0:
+		*mode = LP3943_LED_OFF;
+		break;
+	case 255:
+		*mode = LP3943_LED_ON;
+		break;
+	default:
+		*mode = nodeMode;
+		break;
+	}
 
 END:
 	return;
diff -ur a/drivers/lguest/core.c b/drivers/lguest/core.c
--- a/drivers/lguest/core.c	2017-03-23 14:40:24.000000000 +0100
+++ b/drivers/lguest/core.c	2017-03-14 02:15:46.000000000 +0100
@@ -176,7 +176,7 @@
 bool lguest_address_ok(const struct lguest *lg,
 		       unsigned long addr, unsigned long len)
 {
-	return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
+	return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
 }
 
 /*
diff -ur a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
--- a/drivers/macintosh/windfarm_core.c	2017-03-23 14:41:15.000000000 +0100
+++ b/drivers/macintosh/windfarm_core.c	2017-03-14 02:16:47.000000000 +0100
@@ -431,7 +431,7 @@
 {
 	mutex_lock(&wf_lock);
 	blocking_notifier_chain_unregister(&wf_client_list, nb);
-	wf_client_count++;
+	wf_client_count--;
 	if (wf_client_count == 0)
 		wf_stop_thread();
 	mutex_unlock(&wf_lock);
diff -ur a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
--- a/drivers/md/bcache/super.c	2017-03-23 14:39:03.000000000 +0100
+++ b/drivers/md/bcache/super.c	2017-03-14 02:13:56.000000000 +0100
@@ -1334,6 +1334,9 @@
 	struct btree *b;
 	unsigned i;
 
+	if (!c)
+		closure_return(cl);
+
 	bch_cache_accounting_destroy(&c->accounting);
 
 	kobject_put(&c->internal);
@@ -2031,8 +2034,10 @@
 	closure_debug_init();
 
 	bcache_major = register_blkdev(0, "bcache");
-	if (bcache_major < 0)
+	if (bcache_major < 0) {
+		unregister_reboot_notifier(&reboot);
 		return bcache_major;
+	}
 
 	if (!(bcache_wq = create_workqueue("bcache")) ||
 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
diff -ur a/drivers/md/bitmap.c b/drivers/md/bitmap.c
--- a/drivers/md/bitmap.c	2017-03-23 14:38:55.000000000 +0100
+++ b/drivers/md/bitmap.c	2017-03-14 02:13:45.000000000 +0100
@@ -564,6 +564,8 @@
 	if (err)
 		return err;
 
+	err = -EINVAL;
+
 	sb = kmap_atomic(sb_page);
 
 	chunksize = le32_to_cpu(sb->chunksize);
diff -ur a/drivers/md/dm.c b/drivers/md/dm.c
--- a/drivers/md/dm.c	2017-03-23 14:38:57.000000000 +0100
+++ b/drivers/md/dm.c	2017-03-14 02:13:47.000000000 +0100
@@ -48,6 +48,11 @@
 
 static DEFINE_IDR(_minor_idr);
 
+#ifdef MY_ABC_HERE
+extern sector_t (*funcSYNOLvLgSectorCount)(void *private, sector_t sector);
+sector_t SynoLvLgSectorCount(void *, sector_t);
+#endif  
+
 static DEFINE_SPINLOCK(_minor_lock);
  
 struct dm_io {
@@ -249,6 +254,10 @@
 			goto bad;
 	}
 
+#ifdef MY_ABC_HERE
+	funcSYNOLvLgSectorCount = SynoLvLgSectorCount;
+#endif  
+
 	return 0;
 
       bad:
@@ -819,6 +828,20 @@
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
+#ifdef MY_ABC_HERE
+sector_t SynoLvLgSectorCount(void *private, sector_t sector)
+{
+	struct dm_target *ti = (struct dm_target *)private;
+
+	if (ti && ti->type->lg_sector_get) {
+		return ti->type->lg_sector_get(sector, ti);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(SynoLvLgSectorCount);
+#endif  
+
 static void __map_bio(struct dm_target_io *tio)
 {
 	int r;
diff -ur a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
--- a/drivers/md/dm-cache-policy-cleaner.c	2017-03-23 14:38:54.000000000 +0100
+++ b/drivers/md/dm-cache-policy-cleaner.c	2017-03-14 02:13:44.000000000 +0100
@@ -434,7 +434,7 @@
 static struct dm_cache_policy_type wb_policy_type = {
 	.name = "cleaner",
 	.version = {1, 0, 0},
-	.hint_size = 0,
+	.hint_size = 4,
 	.owner = THIS_MODULE,
 	.create = wb_create
 };
diff -ur a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
--- a/drivers/md/dm-exception-store.h	2017-03-23 14:38:57.000000000 +0100
+++ b/drivers/md/dm-exception-store.h	2017-03-14 02:13:49.000000000 +0100
@@ -70,7 +70,7 @@
 	 * Update the metadata with this exception.
 	 */
 	void (*commit_exception) (struct dm_exception_store *store,
-				  struct dm_exception *e,
+				  struct dm_exception *e, int valid,
 				  void (*callback) (void *, int success),
 				  void *callback_context);
 
diff -ur a/drivers/md/dm-io.c b/drivers/md/dm-io.c
--- a/drivers/md/dm-io.c	2017-03-23 14:38:52.000000000 +0100
+++ b/drivers/md/dm-io.c	2017-03-14 02:13:41.000000000 +0100
@@ -25,6 +25,8 @@
 	struct bio_set *bios;
 };
 
+#define sinfo(fmt, args...)
+
 struct io {
 	unsigned long error_bits;
 	atomic_t count;
@@ -35,10 +37,17 @@
 	void *vma_invalidate_address;
 	unsigned long vma_invalidate_size;
 #ifdef MY_ABC_HERE
-	int is_return_err;
+	unsigned long bi_flags;
 #endif  
 } __attribute__((aligned(DM_IO_MAX_REGIONS)));
 
+#ifdef MY_ABC_HERE
+static int has_correction_flag(unsigned long bi_flags)
+{
+	return 0;
+}
+#endif  
+
 static struct kmem_cache *_dm_io_cache;
 
 struct dm_io_client *dm_io_client_create(void)
@@ -95,7 +104,11 @@
 	*region = val & (DM_IO_MAX_REGIONS - 1);
 }
 
+#ifdef MY_ABC_HERE
+static void dec_count_common(struct io *io, unsigned int region, int error, unsigned long bi_flags)
+#else
 static void dec_count(struct io *io, unsigned int region, int error)
+#endif  
 {
 	if (error)
 		set_bit(region, &io->error_bits);
@@ -119,6 +132,18 @@
 	}
 }
 
+#ifdef MY_ABC_HERE
+static void dec_count_syno(struct io *io, unsigned int region, int error, unsigned long bi_flags)
+{
+	dec_count_common(io, region, error, bi_flags);
+}
+
+static void dec_count(struct io *io, unsigned int region, int error)
+{
+	dec_count_common(io, region, error, 0);
+}
+#endif
+
 static void endio(struct bio *bio, int error)
 {
 	struct io *io;
@@ -268,11 +293,7 @@
 		bio->bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
 		bio->bi_end_io = endio;
-#ifdef MY_ABC_HERE
-		if (1 == io->is_return_err) {
-			set_bit(BIO_MD_RETURN_ERROR, &bio->bi_flags);
-		}
-#endif  
+
 		store_io_and_region_in_bio(bio, io, region);
 
 		if (rw & REQ_DISCARD) {
@@ -302,6 +323,12 @@
 		}
 
 		atomic_inc(&io->count);
+#ifdef MY_ABC_HERE
+		bio->bi_flags |= io->bi_flags;
+		if (has_correction_flag(io->bi_flags)) {
+			sinfo("bio start=%llu size=%llu", (u64)bio->bi_sector, (u64)to_sector(bio->bi_size));
+		}
+#endif  
 		submit_bio(rw, bio);
 	} while (remaining);
 }
@@ -330,7 +357,7 @@
 #ifdef MY_ABC_HERE
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 		   struct dm_io_region *where, int rw, struct dpages *dp,
-		   unsigned long *error_bits, int is_return_err)
+		   unsigned long *error_bits, unsigned long bi_flags)
 #else
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 		   struct dm_io_region *where, int rw, struct dpages *dp,
@@ -355,7 +382,15 @@
 	io->vma_invalidate_address = dp->vma_invalidate_address;
 	io->vma_invalidate_size = dp->vma_invalidate_size;
 #ifdef MY_ABC_HERE
-	io->is_return_err = is_return_err;
+	if (has_correction_flag(bi_flags)) {
+		sinfo("add io flags start");
+	}
+
+	io->bi_flags = bi_flags;
+
+	if (has_correction_flag(bi_flags)) {
+		sinfo("add io flags finish");
+	}
 #endif  
 
 	dispatch_io(rw, num_regions, where, dp, io, 1);
@@ -371,7 +406,7 @@
 #ifdef MY_ABC_HERE
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
 		    struct dm_io_region *where, int rw, struct dpages *dp,
-		    io_notify_fn fn, void *context, int is_return_err)
+		    io_notify_fn fn, void *context, unsigned long bi_flags)
 #else
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
 		    struct dm_io_region *where, int rw, struct dpages *dp,
@@ -394,7 +429,11 @@
 	io->callback = fn;
 	io->context = context;
 #ifdef MY_ABC_HERE
-	io->is_return_err = is_return_err;
+	io->bi_flags = bi_flags;
+
+	if (has_correction_flag(bi_flags)) {
+		sinfo("set bi_flags=%lx", bi_flags);
+	}
 #endif  
 
 	io->vma_invalidate_address = dp->vma_invalidate_address;
@@ -440,12 +479,17 @@
 	return 0;
 }
 #ifdef MY_ABC_HERE
+ 
 int syno_dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct dm_io_region *where, unsigned long *sync_error_bits)
+	  struct dm_io_region *where, unsigned long *sync_error_bits, unsigned long bi_flags)
 {
 	int r;
 	struct dpages dp;
-	int is_return_err = 1;
+	if (has_correction_flag(bi_flags)) {
+		sinfo("set extra bi_flags=%lx", bi_flags);
+	}
+
+	bi_flags |= 1 << BIO_MD_RETURN_ERROR;
 
 	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
 	if (r)
@@ -453,9 +497,9 @@
 
 	if (!io_req->notify.fn)
 		return sync_io(io_req->client, num_regions, where,
-			       io_req->bi_rw, &dp, sync_error_bits, is_return_err);
+			       io_req->bi_rw, &dp, sync_error_bits, bi_flags);
 	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
-			&dp, io_req->notify.fn, io_req->notify.context, is_return_err);
+			&dp, io_req->notify.fn, io_req->notify.context, bi_flags);
 }
 EXPORT_SYMBOL(syno_dm_io);
 #endif  
diff -ur a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
--- a/drivers/md/dm-linear.c	2017-03-23 14:38:54.000000000 +0100
+++ b/drivers/md/dm-linear.c	2017-03-14 02:13:43.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
  
 #include "dm.h"
 #include <linux/module.h>
@@ -138,6 +141,27 @@
 	return fn(ti, lc->dev, lc->start, ti->len, data);
 }
 
+#ifdef MY_ABC_HERE
+extern void SYNOLvInfoSet(struct block_device *bdev, void *private, const char *lv_name);
+static void linear_lv_info_set(struct dm_target *ti)
+{
+	struct linear_c *lc = ti->private;
+	struct block_device *bdev = lc->dev->bdev;
+	struct mapped_device *md = dm_table_get_md(ti->table);
+
+	SYNOLvInfoSet(bdev, (void *)ti, dm_device_name(md));
+}
+
+static sector_t linear_lg_sector_get(sector_t sector, struct dm_target *ti)
+{
+	struct linear_c *lc = ti->private;
+	sector_t lg_sector;
+
+	lg_sector = sector - lc->start + ti->begin;
+	return lg_sector;
+}
+#endif  
+
 static struct target_type linear_target = {
 	.name   = "linear",
 	.version = {1, 2, 1},
@@ -148,6 +172,10 @@
 	.status = linear_status,
 	.ioctl  = linear_ioctl,
 	.merge  = linear_merge,
+#ifdef MY_ABC_HERE
+	.lvinfoset = linear_lv_info_set,
+	.lg_sector_get = linear_lg_sector_get,
+#endif  
 	.iterate_devices = linear_iterate_devices,
 };
 
diff -ur a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
--- a/drivers/md/dm-mpath.c	2017-03-23 14:38:58.000000000 +0100
+++ b/drivers/md/dm-mpath.c	2017-03-14 02:13:48.000000000 +0100
@@ -1607,11 +1607,8 @@
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
-		int err = scsi_verify_blk_ioctl(NULL, cmd);
-		if (err)
-			r = err;
-	}
+	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+		r = scsi_verify_blk_ioctl(NULL, cmd);
 
 	if (r == -ENOTCONN && !fatal_signal_pending(current))
 		queue_work(kmultipathd, &m->process_queued_ios);
diff -ur a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
--- a/drivers/md/dm-raid.c	2017-03-23 14:38:56.000000000 +0100
+++ b/drivers/md/dm-raid.c	2017-03-14 02:13:46.000000000 +0100
@@ -325,8 +325,7 @@
 		 */
 		if (min_region_size > (1 << 13)) {
 			/* If not a power of 2, make it the next power of 2 */
-			if (min_region_size & (min_region_size - 1))
-				region_size = 1 << fls(region_size);
+			region_size = roundup_pow_of_two(min_region_size);
 			DMINFO("Choosing default region size of %lu sectors",
 			       region_size);
 		} else {
diff -ur a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
--- a/drivers/md/dm-snap.c	2017-03-23 14:38:54.000000000 +0100
+++ b/drivers/md/dm-snap.c	2017-03-14 02:13:43.000000000 +0100
@@ -1388,8 +1388,9 @@
 	dm_table_event(s->ti->table);
 }
 
-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
+static void pending_complete(void *context, int success)
 {
+	struct dm_snap_pending_exception *pe = context;
 	struct dm_exception *e;
 	struct dm_snapshot *s = pe->snap;
 	struct bio *origin_bios = NULL;
@@ -1459,24 +1460,13 @@
 	free_pending_exception(pe);
 }
 
-static void commit_callback(void *context, int success)
-{
-	struct dm_snap_pending_exception *pe = context;
-
-	pending_complete(pe, success);
-}
-
 static void complete_exception(struct dm_snap_pending_exception *pe)
 {
 	struct dm_snapshot *s = pe->snap;
 
-	if (unlikely(pe->copy_error))
-		pending_complete(pe, 0);
-
-	else
-		/* Update the metadata if we are persistent */
-		s->store->type->commit_exception(s->store, &pe->e,
-						 commit_callback, pe);
+	/* Update the metadata if we are persistent */
+	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
+					 pending_complete, pe);
 }
 
 /*
diff -ur a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
--- a/drivers/md/dm-snap-persistent.c	2017-03-23 14:38:54.000000000 +0100
+++ b/drivers/md/dm-snap-persistent.c	2017-03-14 02:13:43.000000000 +0100
@@ -646,7 +646,7 @@
 }
 
 static void persistent_commit_exception(struct dm_exception_store *store,
-					struct dm_exception *e,
+					struct dm_exception *e, int valid,
 					void (*callback) (void *, int success),
 					void *callback_context)
 {
@@ -655,6 +655,9 @@
 	struct core_exception ce;
 	struct commit_callback *cb;
 
+	if (!valid)
+		ps->valid = 0;
+
 	ce.old_chunk = e->old_chunk;
 	ce.new_chunk = e->new_chunk;
 	write_exception(ps, ps->current_committed++, &ce);
diff -ur a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
--- a/drivers/md/dm-snap-transient.c	2017-03-23 14:38:53.000000000 +0100
+++ b/drivers/md/dm-snap-transient.c	2017-03-14 02:13:43.000000000 +0100
@@ -52,12 +52,12 @@
 }
 
 static void transient_commit_exception(struct dm_exception_store *store,
-				       struct dm_exception *e,
+				       struct dm_exception *e, int valid,
 				       void (*callback) (void *, int success),
 				       void *callback_context)
 {
 	/* Just succeed */
-	callback(callback_context, 1);
+	callback(callback_context, valid);
 }
 
 static void transient_usage(struct dm_exception_store *store,
diff -ur a/drivers/md/dm-table.c b/drivers/md/dm-table.c
--- a/drivers/md/dm-table.c	2017-03-23 14:38:55.000000000 +0100
+++ b/drivers/md/dm-table.c	2017-03-14 02:13:44.000000000 +0100
@@ -710,6 +710,12 @@
 		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
 		       dm_device_name(t->md), type);
 
+#ifdef MY_ABC_HERE
+	if (tgt->type->lvinfoset){
+		tgt->type->lvinfoset(tgt);
+	}
+#endif  
+
 	return 0;
 
  bad:
diff -ur a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
--- a/drivers/md/dm-thin.c	2017-03-23 14:38:58.000000000 +0100
+++ b/drivers/md/dm-thin.c	2017-03-14 02:13:49.000000000 +0100
@@ -2109,7 +2109,7 @@
 						metadata_low_callback,
 						pool);
 	if (r)
-		goto out_free_pt;
+		goto out_flags_changed;
 
 	pt->callbacks.congested_fn = pool_is_congested;
 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -2281,7 +2281,7 @@
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
-	cancel_delayed_work(&pool->waker);
+	cancel_delayed_work_sync(&pool->waker);
 	flush_workqueue(pool->wq);
 	(void) commit_or_fallback(pool);
 }
diff -ur a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
--- a/drivers/md/dm-thin-metadata.c	2017-03-23 14:38:57.000000000 +0100
+++ b/drivers/md/dm-thin-metadata.c	2017-03-14 02:13:47.000000000 +0100
@@ -1191,6 +1191,12 @@
 	dm_block_t held_root;
 
 	/*
+	 * We commit to ensure the btree roots which we increment in a
+	 * moment are up to date.
+	 */
+	__commit_transaction(pmd);
+
+	/*
 	 * Copy the superblock.
 	 */
 	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
@@ -1281,8 +1287,8 @@
 		return r;
 
 	disk_super = dm_block_data(copy);
-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
 	dm_sm_dec_block(pmd->metadata_sm, held_root);
 
 	return dm_tm_unlock(pmd->tm, copy);
diff -ur a/drivers/md/libmd-report.c b/drivers/md/libmd-report.c
--- a/drivers/md/libmd-report.c	2017-03-23 14:38:52.000000000 +0100
+++ b/drivers/md/libmd-report.c	2017-03-14 02:13:42.000000000 +0100
@@ -52,4 +52,38 @@
 }
 EXPORT_SYMBOL(SynoReportCorrectBadSector);
 EXPORT_SYMBOL(funcSYNOSendRaidEvent);
+
+#ifdef MY_ABC_HERE
+sector_t (*funcSYNOLvLgSectorCount)(void *, sector_t) = NULL;
+int (*funcSYNOSendAutoRemapRaidEvent)(unsigned int, unsigned long long, unsigned int) = NULL;
+int (*funcSYNOSendAutoRemapLVEvent)(const char*, unsigned long long, unsigned int) = NULL;
+void SynoAutoRemapReport(struct mddev *mddev, sector_t sector, struct block_device *bdev)
+{
+	int index = SynoSCSIGetDeviceIndex(bdev);
+
+	if (NULL == mddev->syno_private) {
+		if (NULL == funcSYNOSendAutoRemapRaidEvent) {
+			printk("Can't reference function 'SYNOSendAutoRemapRaidEvent'\n");
+		} else {
+			printk("report md[%d] auto-remapped sector:[%llu]\n",
+				mddev->md_minor, (unsigned long long)sector);
+			funcSYNOSendAutoRemapRaidEvent(mddev->md_minor, sector, (unsigned int)index);
+		}
+	} else {
+		if (NULL == funcSYNOLvLgSectorCount || NULL == funcSYNOSendAutoRemapLVEvent) {
+			printk("Can't reference function 'funcSYNOLvLgSectorCount' or 'SYNOSendAutoRemapLVEvent'\n");
+		} else {
+			sector_t lv_sector = funcSYNOLvLgSectorCount(mddev->syno_private, sector);
+			printk("report lv:[%s] [%d]th auto-remapped sector:[%llu]\n",
+				mddev->lv_name, index, (unsigned long long)lv_sector);
+			funcSYNOSendAutoRemapLVEvent(mddev->lv_name, lv_sector, (unsigned int)index);
+		}
+	}
+}
+
+EXPORT_SYMBOL(SynoAutoRemapReport);
+EXPORT_SYMBOL(funcSYNOLvLgSectorCount);
+EXPORT_SYMBOL(funcSYNOSendAutoRemapRaidEvent);
+EXPORT_SYMBOL(funcSYNOSendAutoRemapLVEvent);
+#endif  
 #endif  
diff -ur a/drivers/md/linear.c b/drivers/md/linear.c
--- a/drivers/md/linear.c	2017-03-23 14:38:55.000000000 +0100
+++ b/drivers/md/linear.c	2017-03-14 02:13:45.000000000 +0100
@@ -314,8 +314,14 @@
 			syno_md_error(mddev, rdev);
 		} else {
 #ifdef MY_ABC_HERE
+#ifdef MY_ABC_HERE
+			if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+				SynoReportBadSector(bio->bi_sector, bio->bi_rw, mddev->md_minor, bio->bi_bdev, __FUNCTION__);
+			}
+#else  
 			SynoReportBadSector(bio->bi_sector, bio->bi_rw, mddev->md_minor, bio->bi_bdev, __FUNCTION__);
 #endif  
+#endif  
 			md_error(mddev, rdev);
 		}
 #else  
diff -ur a/drivers/md/md.c b/drivers/md/md.c
--- a/drivers/md/md.c	2017-03-23 14:39:07.000000000 +0100
+++ b/drivers/md/md.c	2017-03-14 02:13:58.000000000 +0100
@@ -346,14 +346,13 @@
 	mddev->suspended = 1;
 	synchronize_rcu();
 #ifdef MY_ABC_HERE
-	sector_t ori_suspend_lo, ori_suspend_hi;
-	ori_suspend_lo = mddev->suspend_lo;
-	ori_suspend_hi = mddev->suspend_hi;
+	DEFINE_WAIT(wait_noio);
+	const sector_t ori_suspend_lo = mddev->suspend_lo, ori_suspend_hi = mddev->suspend_hi;
+
 	mddev->suspend_lo = 0;
 	mddev->suspend_hi = 0;
 	mddev->pers->quiesce(mddev, 2);
 
-	DEFINE_WAIT(wait_noio);
 	while (atomic_read(&mddev->active_io) > 0) {
 		prepare_to_wait(&mddev->sb_wait, &wait_noio, TASK_UNINTERRUPTIBLE);
 		if (mddev->flags & MD_UPDATE_SB_FLAGS) {
@@ -954,6 +953,96 @@
 }
 #endif  
 
+#ifdef MY_ABC_HERE
+static inline void
+RaidMemberAutoRemapSet(struct mddev *mddev)
+{
+	struct md_rdev *rdev;
+	char b[BDEVNAME_SIZE];
+
+#ifdef MY_ABC_HERE
+	if (MD_AUTO_REMAP_MODE_ISMAXDEGRADE == mddev->auto_remap && mddev->pers->syno_set_rdev_auto_remap) {
+		mddev->pers->syno_set_rdev_auto_remap(mddev);
+	} else {
+		rdev_for_each(rdev, mddev) {
+			bdevname(rdev->bdev, b);
+			RaidRemapModeSet(rdev->bdev, mddev->auto_remap);
+			printk("md: %s: set %s to auto_remap [%d]\n", mdname(mddev), b, mddev->auto_remap);
+		}
+	}
+#else  
+	rdev_for_each(rdev, mddev) {
+		bdevname(rdev->bdev, b);
+		RaidRemapModeSet(rdev->bdev, mddev->auto_remap);
+		printk("md: %s: set %s to auto_remap [%d]\n", mdname(mddev), b, mddev->auto_remap);
+	}
+#endif  
+}
+#endif  
+
+#ifdef MY_ABC_HERE
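+/*
+ * Synchronous superblock I/O helper: a read that only completed because the
+ * drive auto-remapped the sector is treated as a failure, so the superblock
+ * contents are not silently trusted.
+ */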
+static int sync_sb_page_io(struct block_device *bdev, sector_t sector, int size,
+		struct page *page, int rw)
+{
+	struct bio *bio = bio_alloc(GFP_NOIO, 1);
+	struct completion event;
+	int ret;
+
+	rw |= REQ_SYNC;
+
+	bio->bi_bdev = bdev;
+	bio->bi_sector = sector;
+	bio_add_page(bio, page, size, 0);
+	init_completion(&event);
+	bio->bi_private = &event;
+	bio->bi_end_io = bi_complete;
+	submit_bio(rw, bio);
+	wait_for_completion(&event);
+
+	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	if (ret && bio_flagged(bio, BIO_AUTO_REMAP)) {
+		clear_bit(BIO_AUTO_REMAP, &bio->bi_flags);
+		ret = 0;
+	}
+
+	bio_put(bio);
+	return ret;
+}
+
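+/*
+ * Resolve and apply the requested auto_remap mode. ISMAXDEGRADE falls back
+ * to FORCE_ON when the array is maximally degraded but the personality has
+ * no per-rdev hook, and to FORCE_OFF when it is not degraded that far.
+ * Returns the previous mode, or -1 if the array is not running.
+ */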
+static int SynoRaidAutoRemapAdjust(struct mddev *mddev, int specify_setting)
+{
+	int old_setting = -1;
+
+	if (NULL == mddev || NULL == mddev->pers) {
+		goto END;
+	}
+
+	old_setting = mddev->auto_remap;
+
+	if (MD_AUTO_REMAP_MODE_ISMAXDEGRADE == specify_setting) {
+		if (mddev->pers->ismaxdegrade && mddev->pers->ismaxdegrade(mddev)) {
+			if (!mddev->pers->syno_set_rdev_auto_remap) {
+				specify_setting = MD_AUTO_REMAP_MODE_FORCE_ON;
+			}
+		} else {
+			specify_setting = MD_AUTO_REMAP_MODE_FORCE_OFF;
+		}
+	}
+
+	mddev->auto_remap = specify_setting;
+
+	if (old_setting != mddev->auto_remap || MD_AUTO_REMAP_MODE_ISMAXDEGRADE == mddev->auto_remap) {
+		RaidMemberAutoRemapSet(mddev);
+	} else {  
+		printk("md: %s: current auto_remap = %d\n", mdname(mddev), mddev->auto_remap);
+	}
+
+END:
+	 
+	return old_setting;
+}
+#endif  
+
 static int read_disk_sb(struct md_rdev * rdev, int size)
 {
 	char b[BDEVNAME_SIZE];
@@ -967,8 +1056,13 @@
 	if (rdev->sb_loaded)
 		return 0;
 
+#ifdef MY_ABC_HERE
+	if (!sync_sb_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
+		goto fail;
+#else  
 	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
 		goto fail;
+#endif  
 
 #ifdef MY_ABC_HERE
 	sb = (mdp_super_t*)page_address(rdev->sb_page);
@@ -2241,6 +2335,9 @@
 
 static void kick_rdev_from_array(struct md_rdev * rdev)
 {
+#ifdef MY_ABC_HERE
+	RaidRemapModeSet(rdev->bdev, 0);
+#endif  
 	unbind_rdev_from_array(rdev);
 	export_rdev(rdev);
 }
@@ -3198,6 +3295,9 @@
 	if (err)
 		goto abort_free;
 
+#ifdef MY_ABC_HERE
+	RaidRemapModeSet(rdev->bdev, 0);
+#endif  
 	kobject_init(&rdev->kobj, &rdev_ktype);
 
 	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
@@ -4474,6 +4574,58 @@
 
 #ifdef MY_ABC_HERE
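+/*
+ * sysfs "auto_remap": "0" forces auto-remap off on all members, "1" forces
+ * it on, and "2" (when available) only enables it while the array is
+ * maximally degraded.
+ */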
 static ssize_t
+auto_remap_show(struct mddev *mddev, char *page)
+{
+	return sprintf(page, "%d\n", mddev->auto_remap);
+}
+
+static ssize_t
+auto_remap_store(struct mddev *mddev, const char *page, size_t len)
+{
+	int auto_remap_mode = -1;
+
+	if (!mddev->pers) {
+		len = -EINVAL;
+		goto END;
+	}
+#ifdef MY_ABC_HERE
+	if (mddev->nodev_and_crashed) {
+		 
+		goto END;
+	}
+#endif  
+
+#ifdef MY_ABC_HERE
+	if (cmd_match(page, "2")) {  
+		auto_remap_mode = MD_AUTO_REMAP_MODE_ISMAXDEGRADE;
+	} else
+#endif  
+	if (cmd_match(page, "1")) {
+		auto_remap_mode = MD_AUTO_REMAP_MODE_FORCE_ON;
+	} else if (cmd_match(page, "0")) {
+		auto_remap_mode = MD_AUTO_REMAP_MODE_FORCE_OFF;
+	} else {
+		printk("md: %s: auto_remap: invalid input\n", mdname(mddev));
+		len = -EINVAL;
+		goto END;
+	}
+
+#ifdef MY_ABC_HERE
+	SynoRaidAutoRemapAdjust(mddev, auto_remap_mode);
+#else  
+	mddev->auto_remap = auto_remap_mode;
+	RaidMemberAutoRemapSet(mddev);
+#endif  
+
+END:
+	return len;
+}
+
+static struct md_sysfs_entry md_auto_remap =
+__ATTR(auto_remap, S_IRUGO|S_IWUSR, auto_remap_show, auto_remap_store);
+#endif  
+
+#ifdef MY_ABC_HERE
+static ssize_t
 md_active_show(struct mddev *mddev, char *page)
 {
 	return sprintf(page, "%d\n", mddev->blActive);
@@ -4567,6 +4719,9 @@
 	&md_array_size.attr,
 	&max_corr_read_errors.attr,
 #ifdef MY_ABC_HERE
+	&md_auto_remap.attr,
+#endif  
+#ifdef MY_ABC_HERE
 	&md_active.attr,
 #endif  
 	NULL,
@@ -4817,6 +4972,34 @@
 }
 
 #ifdef MY_ABC_HERE
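+/*
+ * Record an LV's private handle and name on the backing md device; they are
+ * used later by SynoAutoRemapReport() to report sectors relative to the LV.
+ */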
+void SYNOLvInfoSet(struct block_device *bdev, void *private, const char *name)
+{
+	struct mddev *mddev = NULL;
+	char *szDiskName = NULL;
+
+	if (!bdev || !private || !name) {
+		printk("%s:%s(%d) invalid parameters\n", __FILE__, __FUNCTION__, __LINE__);
+		return;
+	}
+
+	szDiskName = bdev->bd_disk->disk_name;
+	if (NULL == strstr(szDiskName, "md")) {
+		printk("%s:%s(%d) This is not an md device:[%s]\n",
+			__FILE__, __FUNCTION__, __LINE__, szDiskName);
+		return;
+	}
+
+	mddev = bdev->bd_disk->private_data;
+	if (mddev) {
+		mddev->syno_private = private;
+		snprintf(mddev->lv_name, sizeof(mddev->lv_name), "%s", name);
+	}
+}
+EXPORT_SYMBOL(SYNOLvInfoSet);
+#endif  
+
+#ifdef MY_ABC_HERE
 static int start_dirty_degraded = 1;
 #else  
 static int start_dirty_degraded;
@@ -5176,6 +5359,8 @@
 static void __md_stop(struct mddev *mddev)
 {
 	mddev->ready = 0;
+	 
+	flush_workqueue(md_misc_wq);
 	mddev->pers->stop(mddev);
 	if (mddev->pers->sync_request && mddev->to_remove == NULL)
 		mddev->to_remove = &md_redundancy_group;
@@ -5266,6 +5451,10 @@
 
 		if (mddev->ro)
 			mddev->ro = 0;
+#ifdef MY_ABC_HERE
+		mddev->auto_remap = MD_AUTO_REMAP_MODE_FORCE_OFF;
+		RaidMemberAutoRemapSet(mddev);
+#endif  
 	} else
 		mutex_unlock(&mddev->open_mutex);
 	 
@@ -5501,7 +5690,7 @@
 	char *ptr, *buf = NULL;
 	int err = -ENOMEM;
 
-	file = kmalloc(sizeof(*file), GFP_NOIO);
+	file = kzalloc(sizeof(*file), GFP_NOIO);
 
 	if (!file)
 		goto out;
@@ -6040,7 +6229,7 @@
 	    mddev->ctime         != info->ctime         ||
 	    mddev->level         != info->level         ||
  
-	    !mddev->persistent	 != info->not_persistent||
+	    mddev->persistent	 != !info->not_persistent ||
 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
 	     
 	    ((state^info->state) & 0xfffffe00)
@@ -7113,6 +7302,9 @@
 	struct md_rdev *rdev;
 	char *desc, *action = NULL;
 	struct blk_plug plug;
+#ifdef MY_ABC_HERE
+	int old_auto_remap_setting = -1;
+#endif  
 
 #ifdef MY_ABC_HERE
 	 
@@ -7128,6 +7320,10 @@
 		return;
 	}
 
+#ifdef MY_ABC_HERE
+	old_auto_remap_setting = SynoRaidAutoRemapAdjust(mddev, MD_AUTO_REMAP_MODE_ISMAXDEGRADE);
+#endif  
+
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
 			desc = "data-check";
@@ -7221,6 +7417,16 @@
 	}
 
 #ifdef MY_ABC_HERE
+	if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6) {
+		printk(KERN_WARNING "md: %s: flushing inflight I/O\n", mdname(mddev));
+		mddev_lock(mddev);
+		mddev_suspend(mddev);
+		mddev_resume(mddev);
+		mddev_unlock(mddev);
+	}
+#endif  
+
+#ifdef MY_ABC_HERE
 	printk(KERN_WARNING "md: %s of RAID array %s\n", desc, mdname(mddev));
 #else  
 	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
@@ -7420,6 +7626,10 @@
 	wake_up(&resync_wait);
 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
+#ifdef MY_ABC_HERE
+	 
+	SynoRaidAutoRemapAdjust(mddev, old_auto_remap_setting);
+#endif  
 	return;
 
  interrupted:
diff -ur a/drivers/md/md.h b/drivers/md/md.h
--- a/drivers/md/md.h	2017-03-23 14:38:57.000000000 +0100
+++ b/drivers/md/md.h	2017-03-14 02:13:48.000000000 +0100
@@ -142,6 +142,31 @@
 				int is_new);
 extern void md_ack_all_badblocks(struct badblocks *bb);
 
+#ifdef CONFIG_SYNO_MD_DATA_CORRECTION
+struct md_self_heal_record {
+	struct list_head record_list;
+	void             *private;
+	struct bio       *bio;
+	struct mddev     *mddev;
+	u32              u32_last_hash;
+	int              retry_cnt;
+	int              max_retry_cnt;
+	int              is_hashed;  
+	int              request_cnt;  
+	sector_t         sector_start;
+	sector_t         sector_leng;
+};
+
+u32 syno_self_heal_hash_bio_page(struct bio *bio);
+int syno_self_heal_is_valid_md_stat(struct mddev *mddev);
+int syno_self_heal_record_hash_value(struct md_self_heal_record *heal_record, struct bio *bio);
+void syno_self_heal_del_all_record(struct mddev *mddev);
+void syno_self_heal_find_and_del_record(struct mddev *mddev, struct bio *bio);
+void syno_self_heal_modify_bio_info(struct md_self_heal_record *heal_record, struct bio *bio);
+struct md_self_heal_record* syno_self_heal_init_record(struct mddev *mddev, struct bio *bio, int max_retry_cnt);
+struct md_self_heal_record* syno_self_heal_find_record(struct mddev *mddev, struct bio *bio);
+#endif  
+
 struct mddev {
 	void				*private;
 	struct md_personality		*pers;
@@ -296,6 +321,16 @@
 #ifdef MY_ABC_HERE
     unsigned char           nodev_and_crashed;      
 #endif  
+#ifdef MY_ABC_HERE
+#define MD_AUTO_REMAP_MODE_FORCE_OFF 0
+#define MD_AUTO_REMAP_MODE_FORCE_ON 1
+#define MD_AUTO_REMAP_MODE_ISMAXDEGRADE 2
+	unsigned char			auto_remap;
+#endif  
+#ifdef MY_ABC_HERE
+	void                            *syno_private;     
+	char                            lv_name[16];
+#endif  
 
 #ifdef MY_ABC_HERE
 	mempool_t	*syno_mdio_mempool;
@@ -352,6 +387,10 @@
 	 
 	void (*quiesce) (struct mddev *mddev, int state);
 	 
+#ifdef MY_ABC_HERE
+	unsigned char (*ismaxdegrade) (struct mddev *mddev);
+	void (*syno_set_rdev_auto_remap) (struct mddev *mddev);
+#endif  
 	void *(*takeover) (struct mddev *mddev);
 };
 
@@ -469,6 +508,13 @@
 extern void restore_bitmap_write_access(struct file *file);
 
 #ifdef MY_ABC_HERE
+void SynoAutoRemapReport(struct mddev *mddev, sector_t sector, struct block_device *bdev);
+#endif  
+#ifdef MY_ABC_HERE
+void RaidRemapModeSet(struct block_device *, unsigned char);
+#endif  
+
+#ifdef MY_ABC_HERE
 void SYNORaidRdevUnplug(struct mddev *mddev, struct md_rdev *rdev);
 #endif  
 extern void mddev_init(struct mddev *mddev);
diff -ur a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
--- a/drivers/md/persistent-data/dm-btree.c	2017-03-23 14:38:59.000000000 +0100
+++ b/drivers/md/persistent-data/dm-btree.c	2017-03-14 02:13:51.000000000 +0100
@@ -235,12 +235,22 @@
 	return f->level < (info->levels - 1);
 }
 
+static void unlock_all_frames(struct del_stack *s)
+{
+	struct frame *f;
+
+	while (unprocessed_frames(s)) {
+		f = s->spine + s->top--;
+		dm_tm_unlock(s->tm, f->b);
+	}
+}
+
 int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 {
 	int r;
 	struct del_stack *s;
 
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	s = kmalloc(sizeof(*s), GFP_NOIO);
 	if (!s)
 		return -ENOMEM;
 	s->tm = info->tm;
@@ -290,9 +300,13 @@
 			f->current_child = f->nr_children;
 		}
 	}
-
 out:
+	if (r) {
+		/* cleanup all frames of del_stack */
+		unlock_all_frames(s);
+	}
 	kfree(s);
+
 	return r;
 }
 EXPORT_SYMBOL_GPL(dm_btree_del);
@@ -455,8 +469,10 @@
 
 	r = insert_at(sizeof(__le64), pn, parent_index + 1,
 		      le64_to_cpu(rn->keys[0]), &location);
-	if (r)
+	if (r) {
+		unlock_block(s->info, right);
 		return r;
+	}
 
 	if (key < le64_to_cpu(rn->keys[0])) {
 		unlock_block(s->info, right);
@@ -507,7 +523,7 @@
 
 	r = new_block(s->info, &right);
 	if (r < 0) {
-		/* FIXME: put left */
+		unlock_block(s->info, left);
 		return r;
 	}
 
@@ -651,12 +667,7 @@
 	struct btree_node *n;
 	struct dm_btree_value_type le64_type;
 
-	le64_type.context = NULL;
-	le64_type.size = sizeof(__le64);
-	le64_type.inc = NULL;
-	le64_type.dec = NULL;
-	le64_type.equal = NULL;
-
+	init_le64_type(info->tm, &le64_type);
 	init_shadow_spine(&spine, info);
 
 	for (level = 0; level < (info->levels - 1); level++) {
diff -ur a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
--- a/drivers/md/persistent-data/dm-btree-internal.h	2017-03-23 14:38:59.000000000 +0100
+++ b/drivers/md/persistent-data/dm-btree-internal.h	2017-03-14 02:13:51.000000000 +0100
@@ -137,4 +137,10 @@
 
 extern struct dm_block_validator btree_node_validator;
 
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+			   struct dm_btree_value_type *vt);
+
 #endif	/* DM_BTREE_INTERNAL_H */
diff -ur a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
--- a/drivers/md/persistent-data/dm-btree-remove.c	2017-03-23 14:38:59.000000000 +0100
+++ b/drivers/md/persistent-data/dm-btree-remove.c	2017-03-14 02:13:51.000000000 +0100
@@ -301,35 +301,40 @@
 {
 	int s;
 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
-	unsigned target = (nr_left + nr_center + nr_right) / 3;
-	BUG_ON(target > max_entries);
+	unsigned total = nr_left + nr_center + nr_right;
+	unsigned target_right = total / 3;
+	unsigned remainder = (target_right * 3) != total;
+	unsigned target_left = target_right + remainder;
+
+	BUG_ON(target_left > max_entries);
+	BUG_ON(target_right > max_entries);
 
 	if (nr_left < nr_right) {
-		s = nr_left - target;
+		s = nr_left - target_left;
 
 		if (s < 0 && nr_center < -s) {
 			/* not enough in central node */
-			shift(left, center, nr_center);
-			s = nr_center - target;
+			shift(left, center, -nr_center);
+			s += nr_center;
 			shift(left, right, s);
 			nr_right += s;
 		} else
 			shift(left, center, s);
 
-		shift(center, right, target - nr_right);
+		shift(center, right, target_right - nr_right);
 
 	} else {
-		s = target - nr_right;
+		s = target_right - nr_right;
 		if (s > 0 && nr_center < s) {
 			/* not enough in central node */
 			shift(center, right, nr_center);
-			s = target - nr_center;
+			s -= nr_center;
 			shift(left, right, s);
 			nr_left -= s;
 		} else
 			shift(center, right, s);
 
-		shift(left, center, nr_left - target);
+		shift(left, center, nr_left - target_left);
 	}
 
 	*key_ptr(parent, c->index) = center->keys[0];
@@ -544,14 +549,6 @@
 	return r;
 }
 
-static struct dm_btree_value_type le64_type = {
-	.context = NULL,
-	.size = sizeof(__le64),
-	.inc = NULL,
-	.dec = NULL,
-	.equal = NULL
-};
-
 int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
 		    uint64_t *keys, dm_block_t *new_root)
 {
@@ -559,12 +556,14 @@
 	int index = 0, r = 0;
 	struct shadow_spine spine;
 	struct btree_node *n;
+	struct dm_btree_value_type le64_vt;
 
+	init_le64_type(info->tm, &le64_vt);
 	init_shadow_spine(&spine, info);
 	for (level = 0; level < info->levels; level++) {
 		r = remove_raw(&spine, info,
 			       (level == last_level ?
-				&info->value_type : &le64_type),
+				&info->value_type : &le64_vt),
 			       root, keys[level], (unsigned *)&index);
 		if (r < 0)
 			break;
diff -ur a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
--- a/drivers/md/persistent-data/dm-btree-spine.c	2017-03-23 14:38:58.000000000 +0100
+++ b/drivers/md/persistent-data/dm-btree-spine.c	2017-03-14 02:13:50.000000000 +0100
@@ -249,3 +249,40 @@
 {
 	return s->root;
 }
+
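+/*
+ * Value type for the internal (upper) levels of multi-level btrees: entries
+ * are little-endian block numbers whose reference counts are adjusted
+ * through the transaction manager.
+ */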
+static void le64_inc(void *context, const void *value_le)
+{
+	struct dm_transaction_manager *tm = context;
+	__le64 v_le;
+
+	memcpy(&v_le, value_le, sizeof(v_le));
+	dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+	struct dm_transaction_manager *tm = context;
+	__le64 v_le;
+
+	memcpy(&v_le, value_le, sizeof(v_le));
+	dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+	__le64 v1_le, v2_le;
+
+	memcpy(&v1_le, value1_le, sizeof(v1_le));
+	memcpy(&v2_le, value2_le, sizeof(v2_le));
+	return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+		    struct dm_btree_value_type *vt)
+{
+	vt->context = tm;
+	vt->size = sizeof(__le64);
+	vt->inc = le64_inc;
+	vt->dec = le64_dec;
+	vt->equal = le64_equal;
+}
diff -ur a/drivers/md/raid0.c b/drivers/md/raid0.c
--- a/drivers/md/raid0.c	2017-03-23 14:38:57.000000000 +0100
+++ b/drivers/md/raid0.c	2017-03-14 02:13:47.000000000 +0100
@@ -486,8 +486,14 @@
 		} else {
 			 
 #ifdef MY_ABC_HERE
+#ifdef MY_ABC_HERE
+			if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+				SynoReportBadSector(bio->bi_sector, bio->bi_rw, mddev->md_minor, bio->bi_bdev, __FUNCTION__);
+			}
+#else  
 			SynoReportBadSector(bio->bi_sector, bio->bi_rw, mddev->md_minor, bio->bi_bdev, __FUNCTION__);
 #endif  
+#endif  
 			md_error(mddev, rdev);
 		}
 	}
diff -ur a/drivers/md/raid10.c b/drivers/md/raid10.c
--- a/drivers/md/raid10.c	2017-03-23 14:38:59.000000000 +0100
+++ b/drivers/md/raid10.c	2017-03-14 02:13:49.000000000 +0100
@@ -296,6 +296,13 @@
 	 
 	update_head_pos(slot, r10_bio);
 
+#ifdef MY_ABC_HERE
+	if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+		printk("%s:%s(%d) BIO_AUTO_REMAP detected\n", __FILE__, __FUNCTION__, __LINE__);
+		SynoAutoRemapReport(conf->mddev, r10_bio->sector, conf->mirrors[dev].rdev->bdev);
+	}
+#endif
+
 	if (uptodate) {
 		 
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -313,8 +320,15 @@
 
 #ifdef MY_ABC_HERE
 		if (!IsDeviceDisappear(conf->mirrors[dev].rdev->bdev)) {
+#ifdef MY_ABC_HERE
+			if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+				SynoReportBadSector(bio->bi_sector, READ,
+								conf->mddev->md_minor, conf->mirrors[dev].rdev->bdev, __FUNCTION__);
+			}
+#else  
 			SynoReportBadSector(bio->bi_sector, READ,
 								conf->mddev->md_minor, conf->mirrors[dev].rdev->bdev, __FUNCTION__);
+#endif  
 		}
 #endif  
 
@@ -1404,6 +1418,94 @@
 }
 
 #if defined(MY_ABC_HERE)
+#ifdef MY_ABC_HERE
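+/*
+ * For a standard near-layout RAID10, enable auto-remap only on members that
+ * are the sole in-sync survivor of their mirror set, and disable it on all
+ * other members.
+ */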
+static inline void SynoSetRdevAutoRemap(struct mddev *mddev)
+{
+	struct r10conf *conf = mddev->private;
+	struct geom* geo = &conf->geo;
+	struct md_rdev *rdev;
+	char b[BDEVNAME_SIZE];
+	int rdev_idx = 0;
+
+	if (1 >= geo->near_copies || (geo->raid_disks % geo->near_copies)) {
+		printk("md: %s: not a standard RAID 10, does not support auto remap mode\n", mdname(mddev));
+		return;
+	}
+
+	rdev_for_each(rdev, mddev) {
+		RaidRemapModeSet(rdev->bdev, MD_AUTO_REMAP_MODE_FORCE_OFF);
+	}
+
+	do {
+		int num_data_copies = conf->copies;
+		int survival_cnt = 0;
+		int last_survival_rdev = -1;
+
+		while (num_data_copies--) {
+			if (conf->mirrors[rdev_idx].rdev &&
+				!test_bit(Faulty, &conf->mirrors[rdev_idx].rdev->flags) &&
+				test_bit(In_sync, &conf->mirrors[rdev_idx].rdev->flags)) {
+				survival_cnt++;
+				last_survival_rdev = rdev_idx;
+			}
+			rdev_idx = (rdev_idx + 1) % geo->raid_disks;
+		}
+
+		if (1 == survival_cnt) {
+			RaidRemapModeSet(conf->mirrors[last_survival_rdev].rdev->bdev, MD_AUTO_REMAP_MODE_FORCE_ON);
+		}
+	} while (0 != rdev_idx);
+
+	rdev_for_each(rdev, mddev) {
+		if (rdev && rdev->bdev && rdev->bdev->bd_part) {
+			bdevname(rdev->bdev, b);
+			printk("md: %s: set %s to auto_remap [%d]\n", mdname(mddev), b, rdev->bdev->bd_part->auto_remap);
+		}
+	}
+}
+
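+/*
+ * Return 1 when some mirror set is down to at most one in-sync member,
+ * i.e. the array cannot tolerate another failure in that set.
+ */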
+static inline unsigned char SynoIsRaidReachMaxDegrade(struct mddev *mddev)
+{
+	struct r10conf *conf = mddev->private;
+	int first = 0;
+	int first_orig = first;
+	int blStandardRaid10 = 0;
+	int ret = 0;
+	struct geom* geo = &conf->geo;
+
+	if (1 < geo->near_copies && !(geo->raid_disks % geo->near_copies)) {
+		blStandardRaid10 = 1;
+	}
+
+	do {
+		int n = conf->copies;
+		int cnt = 0;
+
+		while (n--) {
+			if (conf->mirrors[first].rdev &&
+				!test_bit(Faulty, &conf->mirrors[first].rdev->flags) &&
+				test_bit(In_sync, &conf->mirrors[first].rdev->flags)) {
+				cnt++;
+			}
+			first = (first+1) % geo->raid_disks;
+		}
+
+		if (cnt <= 1) {
+			ret = 1;
+			goto END;
+		}
+
+		if (!blStandardRaid10) {
+			first_orig = (first_orig+1) % geo->raid_disks;
+			first = first_orig;
+		}
+	} while (first != 0);
+
+END:
+	return ret;
+}
+#endif  
+
 static int
 blRaid10Enough(struct r10conf *conf,
 			   struct md_rdev *rdev)
@@ -1806,6 +1908,12 @@
 	} else
 		d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
 
+#ifdef MY_ABC_HERE
+	if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+		printk("%s:%s(%d) BIO_AUTO_REMAP detected\n", __FILE__, __FUNCTION__, __LINE__);
+		SynoAutoRemapReport(conf->mddev, r10_bio->sector, conf->mirrors[d].rdev->bdev);
+	}
+#endif
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
 	else
@@ -2417,7 +2525,7 @@
 				   choose_data_offset(r10_bio, rdev) +
 				   (sector - r10_bio->sector));
 		wbio->bi_bdev = rdev->bdev;
-		if (submit_bio_wait(WRITE, wbio) == 0)
+		if (submit_bio_wait(WRITE, wbio) < 0)
 			 
 			ok = rdev_set_badblocks(rdev, sector,
 						sectors, 0)
@@ -3337,6 +3445,7 @@
 			 
 			conf->prev.stride = conf->dev_sectors;
 	}
+	conf->reshape_safe = conf->reshape_progress;
 	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
 
@@ -3549,7 +3658,6 @@
 		}
 		conf->offset_diff = min_offset_diff;
 
-		conf->reshape_safe = conf->reshape_progress;
 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -3837,6 +3945,7 @@
 		conf->reshape_progress = size;
 	} else
 		conf->reshape_progress = 0;
+	conf->reshape_safe = conf->reshape_progress;
 	spin_unlock_irq(&conf->device_lock);
 
 	if (mddev->delta_disks && mddev->bitmap) {
@@ -3900,6 +4009,7 @@
 		rdev->new_data_offset = rdev->data_offset;
 	smp_wmb();
 	conf->reshape_progress = MaxSector;
+	conf->reshape_safe = MaxSector;
 	mddev->reshape_position = MaxSector;
 	spin_unlock_irq(&conf->device_lock);
 	return ret;
@@ -4166,6 +4276,7 @@
 	md_finish_reshape(conf->mddev);
 	smp_wmb();
 	conf->reshape_progress = MaxSector;
+	conf->reshape_safe = MaxSector;
 	spin_unlock_irq(&conf->device_lock);
 
 	if (conf->mddev->queue) {
@@ -4336,6 +4447,10 @@
 	.quiesce	= raid10_quiesce,
 	.size		= raid10_size,
 	.resize		= raid10_resize,
+#ifdef MY_ABC_HERE
+	.ismaxdegrade = SynoIsRaidReachMaxDegrade,
+	.syno_set_rdev_auto_remap = SynoSetRdevAutoRemap,
+#endif  
 	.takeover	= raid10_takeover,
 	.check_reshape	= raid10_check_reshape,
 	.start_reshape	= raid10_start_reshape,
diff -ur a/drivers/md/raid1.c b/drivers/md/raid1.c
--- a/drivers/md/raid1.c	2017-03-23 14:38:57.000000000 +0100
+++ b/drivers/md/raid1.c	2017-03-14 02:13:46.000000000 +0100
@@ -45,6 +45,17 @@
 }
 #endif  
 
+#ifdef MY_ABC_HERE
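+/* RAID1 is considered maximally degraded once only one member is left in sync. */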
+static inline unsigned char SynoIsRaidReachMaxDegrade(struct mddev *mddev)
+{
+	struct r1conf *conf = mddev->private;
+	if (mddev->degraded >= conf->raid_disks - 1) {
+		return true;
+	}
+	return false;
+}
+#endif  
+
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
@@ -270,38 +281,36 @@
 	 
 	update_head_pos(mirror, r1_bio);
 
-	if (uptodate)
 #ifdef MY_ABC_HERE
-	{
-		if (r1_bio->read_failed) {
-			if (mirror == r1_bio->orig_disk_idx) {
-				SynoReportCorrectBadSector(bio->bi_sector, conf->mddev->md_minor,
-										   conf->mirrors[mirror].rdev->bdev, __FUNCTION__);
-			}
-			r1_bio->read_failed = 0;
-			r1_bio->orig_disk_idx = -1;
-		}
-		set_bit(R1BIO_Uptodate, &r1_bio->state);
+	if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+		printk("%s:%s(%d) BIO_AUTO_REMAP detected\n", __FILE__, __FUNCTION__, __LINE__);
+		SynoAutoRemapReport(conf->mddev, r1_bio->sector, conf->mirrors[mirror].rdev->bdev);
 	}
-#else  
-		set_bit(R1BIO_Uptodate, &r1_bio->state);
 #endif  
+
+	if (uptodate)
+		set_bit(R1BIO_Uptodate, &r1_bio->state);
 	else {
 		 
 		unsigned long flags;
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (r1_bio->mddev->degraded == conf->raid_disks ||
 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
-		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+		     test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
 			uptodate = 1;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 
 #ifdef MY_ABC_HERE
 		if (!IsDeviceDisappear(conf->mirrors[mirror].rdev->bdev)) {
+#ifdef MY_ABC_HERE
+			if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+				SynoReportBadSector(bio->bi_sector, READ,
+								conf->mddev->md_minor, conf->mirrors[mirror].rdev->bdev, __FUNCTION__);
+			}
+#else  
 			SynoReportBadSector(bio->bi_sector, READ,
 								conf->mddev->md_minor, conf->mirrors[mirror].rdev->bdev, __FUNCTION__);
-			r1_bio->read_failed = 1;
-			r1_bio->orig_disk_idx = mirror;
+#endif  
 
 			if (uptodate) {
 				 
@@ -924,10 +933,6 @@
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
 	r1_bio->sector = bio->bi_sector;
-#ifdef MY_ABC_HERE
-	r1_bio->read_failed = 0;
-	r1_bio->orig_disk_idx = -1;
-#endif  
 
 	bio->bi_phys_segments = 0;
 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
@@ -1287,6 +1292,7 @@
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;
 
 #ifdef MY_ABC_HERE
 	 
@@ -1300,14 +1306,13 @@
 	}
 #endif  
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	 
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1367,6 +1372,7 @@
 	}
 #endif  
 
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1393,7 +1399,6 @@
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
@@ -1533,6 +1538,12 @@
 	update_head_pos(r1_bio->read_disk, r1_bio);
 
 #ifdef MY_ABC_HERE
+#ifdef MY_ABC_HERE
+	if (bio_flagged(bio, BIO_AUTO_REMAP)) {
+		printk("%s:%s(%d) BIO_AUTO_REMAP detected\n", __FILE__, __FUNCTION__, __LINE__);
+		SynoAutoRemapReport(conf->mddev, r1_bio->sector, conf->mirrors[mirror].rdev->bdev);
+	}
+#endif  
 	if (uptodate) {
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 	} else {
@@ -1959,6 +1970,9 @@
 					       (unsigned long long)(sect +
 					           rdev->data_offset),
 					       bdevname(rdev->bdev, b));
+#ifdef MY_ABC_HERE
+					SynoReportCorrectBadSector(sect + rdev->data_offset, mddev->md_minor, rdev->bdev, __FUNCTION__);
+#endif  
 				}
 			}
 		}
@@ -2017,7 +2031,7 @@
 		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
 		wbio->bi_sector += rdev->data_offset;
 		wbio->bi_bdev = rdev->bdev;
-		if (submit_bio_wait(WRITE, wbio) == 0)
+		if (submit_bio_wait(WRITE, wbio) < 0)
 			 
 			ok = rdev_set_badblocks(rdev, sector,
 						sectors, 0)
@@ -2948,6 +2962,9 @@
 	.check_reshape	= raid1_reshape,
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
+#ifdef MY_ABC_HERE
+	.ismaxdegrade = SynoIsRaidReachMaxDegrade,
+#endif  
 };
 
 static int __init raid_init(void)
diff -ur a/drivers/md/raid1.h b/drivers/md/raid1.h
--- a/drivers/md/raid1.h	2017-03-23 14:38:56.000000000 +0100
+++ b/drivers/md/raid1.h	2017-03-14 02:13:47.000000000 +0100
@@ -1,6 +1,3 @@
-#ifndef MY_ABC_HERE
-#define MY_ABC_HERE
-#endif
 #ifndef _RAID1_H
 #define _RAID1_H
 
@@ -64,10 +61,6 @@
 	int			read_disk;
 
 	struct list_head	retry_list;
-#ifdef MY_ABC_HERE
-	unsigned int read_failed;
-	int orig_disk_idx;
-#endif  
 	 
 	struct bio_vec		*behind_bvecs;
 	int			behind_page_count;
diff -ur a/drivers/md/raid5.c b/drivers/md/raid5.c
--- a/drivers/md/raid5.c	2017-03-23 14:39:00.000000000 +0100
+++ b/drivers/md/raid5.c	2017-03-14 02:13:51.000000000 +0100
@@ -171,6 +171,20 @@
 	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 }
 
+#ifdef MY_ABC_HERE
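+/*
+ * While a reshape is being finished, a temporary proxy thread may take over
+ * stripe handling (see raid5d_proxy); wake it instead of the main md thread
+ * whenever it is enabled for a RAID5/6 array.
+ */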
+static void raid5_wakeup_main_thread(struct mddev *mddev)
+{
+	int old_level = mddev->level;
+	struct r5conf *conf = mddev->private;
+
+	if ((old_level == 5 || old_level == 6) && conf && atomic_read(&conf->proxy_enable) && conf->proxy_thread) {
+		md_wakeup_thread(conf->proxy_thread);
+	} else {
+		md_wakeup_thread(mddev->thread);
+	}
+}
+
+#endif  
 static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
 {
 	struct r5conf *conf = sh->raid_conf;
@@ -192,7 +206,11 @@
 	}
 
 	if (conf->worker_cnt_per_group == 0) {
+#ifdef MY_ABC_HERE
+		raid5_wakeup_main_thread(conf->mddev);
+#else  
 		md_wakeup_thread(conf->mddev->thread);
+#endif  
 		return;
 	}
 
@@ -235,19 +253,34 @@
 				return;
 			}
 		}
+#ifdef MY_ABC_HERE
+		raid5_wakeup_main_thread(conf->mddev);
+#else  
 		md_wakeup_thread(conf->mddev->thread);
+#endif  
 	} else {
 		BUG_ON(stripe_operations_active(sh));
 		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 			if (atomic_dec_return(&conf->preread_active_stripes)
 			    < IO_THRESHOLD)
+#ifdef MY_ABC_HERE
+				raid5_wakeup_main_thread(conf->mddev);
+#else  
 				md_wakeup_thread(conf->mddev->thread);
+#endif  
 		atomic_dec(&conf->active_stripes);
 		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+#ifdef MY_DEF_HERE
+			SYNORaid5CheckPage(sh);
+#endif  
 			list_add_tail(&sh->lru, &conf->inactive_list);
 			wake_up(&conf->wait_for_stripe);
 			if (conf->retry_read_aligned)
+#ifdef MY_ABC_HERE
+				raid5_wakeup_main_thread(conf->mddev);
+#else  
 				md_wakeup_thread(conf->mddev->thread);
+#endif  
 		}
 	}
 }
@@ -304,7 +337,11 @@
 		goto slow_path;
 	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
 	if (wakeup)
+#ifdef MY_ABC_HERE
+		raid5_wakeup_main_thread(conf->mddev);
+#else  
 		md_wakeup_thread(conf->mddev->thread);
+#endif  
 	return;
 slow_path:
 	local_irq_save(flags);
@@ -357,6 +394,9 @@
 	int num = sh->raid_conf->pool_size;
 
 	for (i = 0; i < num ; i++) {
+#ifdef MY_ABC_HERE
+		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
+#endif  
 		p = sh->dev[i].page;
 		if (!p)
 			continue;
@@ -377,6 +417,9 @@
 			return 1;
 		}
 		sh->dev[i].page = page;
+#ifdef MY_ABC_HERE
+		sh->dev[i].orig_page = page;
+#endif  
 	}
 	return 0;
 }
@@ -687,6 +730,11 @@
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_FLUSH;
 
+#ifdef MY_ABC_HERE
+			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
+				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+			sh->dev[i].vec.bv_page = sh->dev[i].page;
+#endif  
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
@@ -728,6 +776,11 @@
 			else
 				rbi->bi_sector = (sh->sector
 						  + rrdev->data_offset);
+#ifdef MY_ABC_HERE
+			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
+				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+			sh->dev[i].rvec.bv_page = sh->dev[i].page;
+#endif  
 			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
@@ -753,8 +806,13 @@
 }
 
 static struct dma_async_tx_descriptor *
+#ifdef MY_ABC_HERE
+async_copy_data(int frombio, struct bio *bio, struct page **page,
+	sector_t sector, struct dma_async_tx_descriptor *tx, struct stripe_head *sh)
+#else  
 async_copy_data(int frombio, struct bio *bio, struct page *page,
 	sector_t sector, struct dma_async_tx_descriptor *tx)
+#endif  
 {
 	struct bio_vec *bvl;
 	struct page *bio_page;
@@ -791,12 +849,26 @@
 		if (clen > 0) {
 			b_offset += bvl->bv_offset;
 			bio_page = bvl->bv_page;
+#ifdef MY_ABC_HERE
+			if (frombio) {
+				if (sh->raid_conf->skip_copy &&
+					sh->raid_conf->mddev->degraded == 0 &&
+					!test_bit(MD_RECOVERY_RUNNING, &sh->raid_conf->mddev->recovery) &&
+					b_offset == 0 && page_offset == 0 &&
+					clen == STRIPE_SIZE)
+					*page = bio_page;
+				else
+					tx = async_memcpy(*page, bio_page, page_offset, b_offset, clen, &submit);
+			} else
+				tx = async_memcpy(bio_page, *page, b_offset, page_offset, clen, &submit);
+#else  
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
 						  b_offset, clen, &submit);
 			else
 				tx = async_memcpy(bio_page, page, b_offset,
 						  page_offset, clen, &submit);
+#endif  
 		}
 		 
 		submit.depend_tx = tx;
@@ -865,8 +937,13 @@
 			spin_unlock_irq(&sh->stripe_lock);
 			while (rbi && rbi->bi_sector <
 				dev->sector + STRIPE_SECTORS) {
+#ifdef MY_ABC_HERE
+				tx = async_copy_data(0, rbi, &dev->page,
+					dev->sector, tx, sh);
+#else  
 				tx = async_copy_data(0, rbi, dev->page,
 					dev->sector, tx);
+#endif  
 				rbi = r5_next_bio(rbi, dev->sector);
 			}
 		}
@@ -885,6 +962,9 @@
 		return;
 
 	tgt = &sh->dev[target];
+#ifdef MY_ABC_HERE
+	if (!test_bit(R5_SkipCopy, &tgt->flags))
+#endif  
 	set_bit(R5_UPTODATE, &tgt->flags);
 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 	clear_bit(R5_Wantcompute, &tgt->flags);
@@ -1222,11 +1302,30 @@
 }
 #endif  
 
+#ifdef MY_DEF_HERE
+static void restore_r5page(struct page* to, struct page* from)
+{
+	char* pa_from = page_address(from);
+	char* pa_to = page_address(to);
+	memcpy(pa_to, pa_from, STRIPE_SIZE);
+}
+
+static int get_writebio_value(struct r5conf *conf)
+{
+	return (0 == conf->mddev->degraded ? conf->writebio : 0);
+}
+#endif  
+
 static struct dma_async_tx_descriptor *
 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
 	int disks = sh->disks;
 	int i;
+#ifdef MY_DEF_HERE
+	int writebio = get_writebio_value(sh->raid_conf);
+	int bi_vec_idx;
+	sector_t cur_bi_sector;
+#endif  
 
 	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
@@ -1243,6 +1342,9 @@
 			dev->towrite = NULL;
 			BUG_ON(dev->written);
 			wbi = dev->written = chosen;
+#ifdef MY_ABC_HERE
+			WARN_ON(dev->page != dev->orig_page);
+#endif  
 			spin_unlock_irq(&sh->stripe_lock);
 
 			while (wbi && wbi->bi_sector <
@@ -1254,8 +1356,20 @@
 				if (wbi->bi_rw & REQ_DISCARD)
 					set_bit(R5_Discard, &dev->flags);
 				else
+#ifdef MY_ABC_HERE
+				{
+					tx = async_copy_data(1, wbi, &dev->page,
+							dev->sector, tx, sh);
+					if (dev->page != dev->orig_page) {
+						set_bit(R5_SkipCopy, &dev->flags);
+						clear_bit(R5_UPTODATE, &dev->flags);
+						clear_bit(R5_OVERWRITE, &dev->flags);
+					}
+				}
+#else  
 					tx = async_copy_data(1, wbi, dev->page,
 						dev->sector, tx);
+#endif  
 				wbi = r5_next_bio(wbi, dev->sector);
 			}
 		}
@@ -1286,7 +1400,11 @@
 		struct r5dev *dev = &sh->dev[i];
 
 		if (dev->written || i == pd_idx || i == qd_idx) {
+#ifdef MY_ABC_HERE
+			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
+#else  
 			if (!discard)
+#endif  
 				set_bit(R5_UPTODATE, &dev->flags);
 			if (fua)
 				set_bit(R5_WantFUA, &dev->flags);
@@ -1591,6 +1709,11 @@
 {
 	struct kmem_cache *sc;
 	int devs = max(conf->raid_disks, conf->previous_raid_disks);
+#ifdef CONFIG_SYNO_MD_DATA_CORRECTION
+	int syno_self_heal_sh_num = conf->syno_self_heal_sh_size;
+	struct kmem_cache *syno_self_heal_sc;
+	char syno_self_heal_cache_name[32];
+#endif  
 
 	if (conf->mddev->gendisk)
 		sprintf(conf->cache_name[0],
@@ -1611,6 +1734,26 @@
 	while (num--)
 		if (!grow_one_stripe(conf))
 			return 1;
+
+#ifdef CONFIG_SYNO_MD_DATA_CORRECTION
+	snprintf(syno_self_heal_cache_name, sizeof(syno_self_heal_cache_name), "%s-raid%d-self-heal-sh-v%d", mdname(conf->mddev), conf->level, conf->active_name);
+	syno_self_heal_sc = kmem_cache_create(syno_self_heal_cache_name,
+			sizeof(struct syno_self_heal_stripe_head) + (devs - 1) * sizeof(struct r5dev),
+			0, 0, NULL);
+	if (!syno_self_heal_sc) {
+		printk(KERN_ERR "md/raid:%s: %s(%d): Failed to allocate cache for syno_self_heal_sc\n", mdname(conf->mddev), __func__, __LINE__);
+		return 1;
+	}
+
+	conf->syno_self_heal_slab_sh_cache = syno_self_heal_sc;
+	while (syno_self_heal_sh_num--) {
+		if (!syno_raid5_self_heal_grow_one_stripe(conf)) {
+			printk(KERN_ERR "md/raid:%s: %s(%d): Failed to grow self heal stripe\n", mdname(conf->mddev), __func__, __LINE__);
+			return 1;
+		}
+	}
+#endif  
+
 	return 0;
 }
 
@@ -1667,7 +1810,7 @@
 		kmem_cache_destroy(sc);
 		return -ENOMEM;
 	}
-	 
+
 	list_for_each_entry(nsh, &newstripes, lru) {
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
@@ -1677,7 +1820,11 @@
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
 		for(i=0; i<conf->pool_size; i++)
+#ifdef MY_ABC_HERE
+			nsh->dev[i].page = nsh->dev[i].orig_page = osh->dev[i].page;
+#else  
 			nsh->dev[i].page = osh->dev[i].page;
+#endif  
 		for( ; i<newsize; i++)
 			nsh->dev[i].page = NULL;
 		kmem_cache_free(conf->slab_cache, osh);
@@ -1720,6 +1867,9 @@
 			if (nsh->dev[i].page == NULL) {
 				struct page *p = alloc_page(GFP_NOIO);
 				nsh->dev[i].page = p;
+#ifdef MY_ABC_HERE
+				nsh->dev[i].orig_page = p;
+#endif  
 				if (!p)
 					err = -ENOMEM;
 			}
@@ -1728,7 +1878,8 @@
 	 
 	conf->slab_cache = sc;
 	conf->active_name = 1-conf->active_name;
-	conf->pool_size = newsize;
+	if (!err)
+		conf->pool_size = newsize;
 	return err;
 }
 
@@ -1758,6 +1909,253 @@
 	conf->slab_cache = NULL;
 }
 
+#ifdef MY_ABC_HERE
+ 
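+/*
+ * For the given chunk number, compute which members hold P (and Q for
+ * RAID6) and which member carries the first data chunk of the stripe,
+ * according to the array's layout algorithm.
+ */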
+static int syno_raid5_parity_disk_get(const struct r5conf* conf, sector_t chunk_number, int *pd_idx, int *qd_idx, int *st_idx)
+{
+	int raid_disks = conf->raid_disks;
+	int data_disks = raid_disks - conf->max_degraded;
+
+	switch (conf->level) {
+	case 4:
+		*pd_idx = data_disks;
+		*qd_idx = -1;
+		*st_idx = 0;
+		break;
+	case 5:
+		*qd_idx = -1;
+		switch (conf->algorithm) {
+		case ALGORITHM_LEFT_ASYMMETRIC:
+			*pd_idx = data_disks - sector_mod(chunk_number, raid_disks);
+			*st_idx = (0 == *pd_idx? 1: 0);
+			break;
+		case ALGORITHM_RIGHT_ASYMMETRIC:
+			*pd_idx = sector_mod(chunk_number, raid_disks);
+			*st_idx = (0 == *pd_idx? 1: 0);
+			break;
+		case ALGORITHM_LEFT_SYMMETRIC:
+			*pd_idx = data_disks - sector_mod(chunk_number, raid_disks);
+			*st_idx = (*pd_idx + 1) % raid_disks;
+			break;
+		case ALGORITHM_RIGHT_SYMMETRIC:
+			*pd_idx = sector_mod(chunk_number, raid_disks);
+			*st_idx = (*pd_idx + 1) % raid_disks;
+			break;
+		case ALGORITHM_PARITY_0:
+			*pd_idx = 0;
+			*st_idx = 1;
+			break;
+		case ALGORITHM_PARITY_N:
+			*pd_idx = data_disks;
+			*st_idx = 0;
+			break;
+		default:
+			BUG();
+		}
+		break;
+	case 6:
+		switch(conf->algorithm) {
+		case ALGORITHM_LEFT_ASYMMETRIC:
+			*pd_idx = raid_disks - 1 - sector_mod(chunk_number, raid_disks);
+			*qd_idx = (*pd_idx + 1) % raid_disks;
+			*st_idx = (1 >= *qd_idx? *qd_idx + 1: 0);
+			break;
+		case ALGORITHM_RIGHT_ASYMMETRIC:
+			*pd_idx = sector_mod(chunk_number, raid_disks);
+			*qd_idx = (*pd_idx + 1) % raid_disks;
+			*st_idx = (1 >= *qd_idx? *qd_idx + 1: 0);
+			break;
+		case ALGORITHM_LEFT_SYMMETRIC:
+			*pd_idx = raid_disks - 1 - sector_mod(chunk_number, raid_disks);
+			*qd_idx = (*pd_idx + 1) % raid_disks;
+			*st_idx = (*qd_idx + 1) % raid_disks;
+			break;
+		case ALGORITHM_RIGHT_SYMMETRIC:
+			*pd_idx = sector_mod(chunk_number, raid_disks);
+			*qd_idx = (*pd_idx + 1) % raid_disks;
+			*st_idx = (*qd_idx + 1) % raid_disks;
+			break;
+
+		case ALGORITHM_PARITY_0:
+			*pd_idx = 0;
+			*qd_idx = 1;
+			*st_idx = 2;
+			break;
+		case ALGORITHM_PARITY_N:
+			*pd_idx = data_disks;
+			*qd_idx = data_disks + 1;
+			*st_idx = 0;
+			break;
+
+		case ALGORITHM_ROTATING_ZERO_RESTART:
+			 
+			*pd_idx = sector_mod(chunk_number, raid_disks);
+			*qd_idx = (*pd_idx + 1) % raid_disks;
+			*st_idx = (1 >= *qd_idx? *qd_idx + 1: 0);
+			break;
+
+		case ALGORITHM_ROTATING_N_RESTART:
+			 
+			chunk_number += 1;
+			*pd_idx = raid_disks - 1 - sector_mod(chunk_number, raid_disks);
+			*qd_idx = (*pd_idx + 1) % raid_disks;
+			*st_idx = (1 >= *qd_idx? *qd_idx + 1: 0);
+			break;
+
+		case ALGORITHM_ROTATING_N_CONTINUE:
+			 
+			*pd_idx = raid_disks - 1 - sector_mod(chunk_number, raid_disks);
+			*qd_idx = (*pd_idx + raid_disks - 1) % raid_disks;
+			*st_idx = (*pd_idx + 1) % raid_disks;
+			break;
+
+		case ALGORITHM_LEFT_ASYMMETRIC_6:
+			 
+			*pd_idx = data_disks - sector_mod(chunk_number, raid_disks - 1);
+			*qd_idx = raid_disks - 1;
+			*st_idx = (0 == *pd_idx? 1: 0);
+			break;
+
+		case ALGORITHM_RIGHT_ASYMMETRIC_6:
+			*pd_idx = sector_mod(chunk_number, raid_disks-1);
+			*qd_idx = raid_disks - 1;
+			*st_idx = (0 == *pd_idx? 1: 0);
+			break;
+
+		case ALGORITHM_LEFT_SYMMETRIC_6:
+			*pd_idx = data_disks - sector_mod(chunk_number, raid_disks - 1);
+			*qd_idx = raid_disks - 1;
+			*st_idx = (*pd_idx + 1) % (raid_disks - 1);
+			break;
+
+		case ALGORITHM_RIGHT_SYMMETRIC_6:
+			*pd_idx = sector_mod(chunk_number, raid_disks - 1);
+			*qd_idx = raid_disks - 1;
+			*st_idx = (*pd_idx + 1) % (raid_disks - 1);
+			break;
+
+		case ALGORITHM_PARITY_0_6:
+			*pd_idx = 0;
+			*qd_idx = raid_disks - 1;
+			*st_idx = 1;
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return 0;
+}
+
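+/*
+ * Collect the data members whose contents are suspect for this stripe: the
+ * disk that reported the error plus any members still being rebuilt,
+ * excluding the parity disks.
+ */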
+static int syno_raid5_data_corrupt_disk_get(const struct r5conf* conf, const int pd_idx, const int qd_idx, int bad_disk, int* bad_disks, int max_bad_disk)
+{
+	int d = 0;
+	int num_repair = 0;
+	int repair_disk[2] = {-1, -1};
+	int num_bad_disk = 0;
+
+	if (pd_idx != bad_disk && qd_idx != bad_disk) {
+		bad_disks[num_bad_disk++] = bad_disk;
+	}
+
+	for (d = 0; d < conf->raid_disks; d++) {
+		if (conf->disks[d].rdev) {
+			if (!test_bit(In_sync, &conf->disks[d].rdev->flags)) {
+				BUG_ON(num_repair >= conf->max_degraded);
+				repair_disk[num_repair++] = d;
+			}
+		} else {
+			BUG_ON(num_repair >= conf->max_degraded);
+			repair_disk[num_repair++] = d;
+		}
+	}
+	BUG_ON(conf->max_degraded != num_repair);
+
+	for (d = 0; d < num_repair && num_bad_disk < max_bad_disk; d++) {
+		if (pd_idx != repair_disk[d] && qd_idx != repair_disk[d]) {
+			bad_disks[num_bad_disk++] = repair_disk[d];
+		}
+	}
+
+	return num_bad_disk;
+}
+
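+/*
+ * Number of data chunks that precede 'disk' within the stripe, counting
+ * from the first data disk and skipping the parity members.
+ */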
+static int syno_raid5_disk_ahead_get(const int raid_disks, const int st_idx, int pd_idx, int qd_idx, int disk)
+{
+	int disk_ahead = -1;
+
+	if (0 > st_idx || 0 > pd_idx || 0 > disk) {
+		goto END;
+	}
+
+	if (disk < st_idx) {
+		disk += raid_disks;
+	}
+	disk_ahead = disk - st_idx;
+
+	if (pd_idx < st_idx) {
+		pd_idx += raid_disks;
+	}
+	disk_ahead -= (pd_idx < disk? 1: 0);
+
+	if (-1 != qd_idx) {
+		if (qd_idx < st_idx) {
+			qd_idx += raid_disks;
+		}
+		disk_ahead -= (qd_idx < disk? 1: 0);
+	}
+
+END:
+	return disk_ahead;
+}
+
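+/*
+ * Translate an auto-remapped member-disk sector back into array-relative
+ * sectors for every affected data chunk and report each of them via
+ * SynoAutoRemapReport().
+ */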
+static int syno_raid5_autoremap_report_sectors(const struct r5conf* conf, sector_t bad_sector, int bad_disk)
+{
+	sector_t sector = bad_sector;
+	sector_t chunk_offset = sector_mod(sector, conf->chunk_sectors);
+	sector_t chunk_number = sector;
+	sector_t raid_sector = 0;
+
+	int ret = -1;
+	int raid_disks = conf->raid_disks;
+	int data_disks = raid_disks - conf->max_degraded;
+	int pd_idx = -1, qd_idx = -1, st_idx = -1;
+	int d = 0;
+	int bad_disks[3] = {0};  
+	int num_bad_disk = 0;
+
+	int disk_ahead = 0;
+
+	struct md_rdev *rdev = conf->disks[bad_disk].rdev;
+
+	if (0 > syno_raid5_parity_disk_get(conf, chunk_number, &pd_idx, &qd_idx, &st_idx)) {
+		printk("syno_raid5_parity_disk_get failed\n");
+		goto END;
+	}
+
+	num_bad_disk = syno_raid5_data_corrupt_disk_get(conf, pd_idx, qd_idx, bad_disk, bad_disks, 3);
+
+	for (d = 0; d < num_bad_disk; d++) {
+		if (0 > (disk_ahead = syno_raid5_disk_ahead_get(raid_disks, st_idx, pd_idx, qd_idx, bad_disks[d]))) {
+			printk("syno_raid5_disk_ahead_get failed\n");
+			goto END;
+		}
+		raid_sector = (chunk_number * data_disks + disk_ahead) * conf->chunk_sectors + chunk_offset;
+		SynoAutoRemapReport(conf->mddev, raid_sector, rdev->bdev);
+	}
+
+	ret = 0;
+END:
+	return ret;
+}
+
+#endif  
+
 static void raid5_end_read_request(struct bio * bi, int error)
 {
 	struct stripe_head *sh = bi->bi_private;
@@ -1767,6 +2165,9 @@
 	char b[BDEVNAME_SIZE];
 	struct md_rdev *rdev = NULL;
 	sector_t s;
+#ifdef MY_ABC_HERE
+	char blIsRemapping = 0;
+#endif  
 
 	for (i=0 ; i<disks; i++)
 		if (bi == &sh->dev[i].req)
@@ -1790,6 +2191,15 @@
 	else
 		s = sh->sector + rdev->data_offset;
 
+#ifdef MY_ABC_HERE
+	if (bio_flagged(bi, BIO_AUTO_REMAP)) {
+		blIsRemapping = 1;
+		clear_bit(BIO_AUTO_REMAP, &bi->bi_flags);
+		printk("%s:%s(%d) BIO_AUTO_REMAP detected, sector:[%llu], sh count:[%d], disk:[%d]\n",
+			__FILE__, __FUNCTION__, __LINE__,
+			(unsigned long long)sh->sector, atomic_read(&sh->count), i);
+		syno_raid5_autoremap_report_sectors(conf, sh->sector, i);
+	}
+#endif  
+
 	if (uptodate) {
 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
@@ -1820,6 +2230,16 @@
 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
 		atomic_inc(&rdev->read_errors);
 
+#ifdef MY_ABC_HERE
+		if (conf->mddev->auto_remap &&
+			0 == IsDeviceDisappear(rdev->bdev) &&
+			!test_bit(R5_ReWrite, &sh->dev[i].flags) &&
+			test_bit(STRIPE_SYNCING, &sh->state)) {
+			 
+			retry = 1;
+		}
+#endif  
+
 		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
 			printk_ratelimited(
 				KERN_WARNING
@@ -1828,7 +2248,11 @@
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
+#ifdef MY_ABC_HERE
+		else if ((conf->mddev->degraded >= conf->max_degraded) && !conf->mddev->auto_remap) {
+#else  
 		else if (conf->mddev->degraded >= conf->max_degraded) {
+#endif  
 			set_bad = 1;
 #ifdef MY_ABC_HERE
 			if (!test_bit(DiskError, &rdev->flags)) {
@@ -1878,7 +2302,13 @@
 
 #ifdef MY_ABC_HERE
 		if (0 == IsDeviceDisappear(rdev->bdev)) {
+#ifdef MY_ABC_HERE
+			if (1 == blIsRemapping) {
+				SynoReportBadSector(bi->bi_sector, READ, conf->mddev->md_minor, rdev->bdev, __FUNCTION__);
+			}
+#else  
 			SynoReportBadSector(bi->bi_sector, READ, conf->mddev->md_minor, rdev->bdev, __FUNCTION__);
+#endif  
 		}
 #endif  
 		if (retry)
@@ -2007,17 +2437,25 @@
 
 	bio_init(&dev->req);
 	dev->req.bi_io_vec = &dev->vec;
+	dev->req.bi_private = sh;
+#ifdef MY_ABC_HERE
+	dev->req.bi_max_vecs = 1;
+#else  
 	dev->req.bi_vcnt++;
 	dev->req.bi_max_vecs++;
-	dev->req.bi_private = sh;
 	dev->vec.bv_page = dev->page;
+#endif  
 
 	bio_init(&dev->rreq);
 	dev->rreq.bi_io_vec = &dev->rvec;
+	dev->rreq.bi_private = sh;
+#ifdef MY_ABC_HERE
+	dev->rreq.bi_max_vecs = 1;
+#else  
 	dev->rreq.bi_vcnt++;
 	dev->rreq.bi_max_vecs++;
-	dev->rreq.bi_private = sh;
 	dev->rvec.bv_page = dev->page;
+#endif  
 
 	dev->flags = 0;
 	dev->sector = compute_blocknr(sh, i, previous);
@@ -2685,6 +3123,12 @@
 		 
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
+#ifdef MY_ABC_HERE
+		if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
+			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+			sh->dev[i].page = sh->dev[i].orig_page;
+		}
+#endif  
 		if (bi) bitmap_end = 1;
 		while (bi && bi->bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
@@ -2728,7 +3172,11 @@
 
 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
 		if (atomic_dec_and_test(&conf->pending_full_writes))
+#ifdef MY_ABC_HERE
+			raid5_wakeup_main_thread(conf->mddev);
+#else  
 			md_wakeup_thread(conf->mddev->thread);
+#endif  
 }
 
 static void
@@ -2883,12 +3331,23 @@
 			dev = &sh->dev[i];
 			if (!test_bit(R5_LOCKED, &dev->flags) &&
 			    (test_bit(R5_UPTODATE, &dev->flags) ||
+#ifdef MY_ABC_HERE
+			     test_bit(R5_Discard, &dev->flags) ||
+			     test_bit(R5_SkipCopy, &dev->flags))) {
+#else  
 			     test_bit(R5_Discard, &dev->flags))) {
+#endif  
 				 
 				struct bio *wbi, *wbi2;
 				pr_debug("Return write for disc %d\n", i);
 				if (test_and_clear_bit(R5_Discard, &dev->flags))
 					clear_bit(R5_UPTODATE, &dev->flags);
+#ifdef MY_ABC_HERE
+				if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
+					WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
+					dev->page = dev->orig_page;
+				}
+#endif  
 				wbi = dev->written;
 				dev->written = NULL;
 				while (wbi && wbi->bi_sector <
@@ -2907,6 +3366,12 @@
 						0);
 			} else if (test_bit(R5_Discard, &dev->flags))
 				discard_pending = 1;
+#ifdef MY_ABC_HERE
+			if (!test_bit(R5_LOCKED, &dev->flags)) {
+				WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
+				WARN_ON(dev->page != dev->orig_page);
+			}
+#endif  
 		}
 	if (!discard_pending &&
 	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
@@ -2929,7 +3394,11 @@
 
 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
 		if (atomic_dec_and_test(&conf->pending_full_writes))
+#ifdef MY_ABC_HERE
+			raid5_wakeup_main_thread(conf->mddev);
+#else  
 			md_wakeup_thread(conf->mddev->thread);
+#endif  
 }
 
 static void handle_stripe_dirtying(struct r5conf *conf,
@@ -3337,6 +3806,41 @@
 
 #ifdef MY_ABC_HERE
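+/*
+ * In auto-remap mode a read error hit during resync is retried instead of
+ * failing the member: the block is first rewritten (letting the drive
+ * remap it) and then read back to verify.
+ */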
  
+void syno_read_err_retry5(struct r5conf *conf, struct stripe_head *sh,
+						  struct stripe_head_state *s, struct r5dev *dev, int idr)
+{
+	char b[BDEVNAME_SIZE];
+	struct md_rdev *rdev;
+
+	rcu_read_lock();
+	rdev = rcu_dereference(conf->disks[idr].rdev);
+	if(rdev) {
+		bdevname(rdev->bdev, b);
+	} else {
+		strlcpy(b, " ", BDEVNAME_SIZE);
+	}
+	rcu_read_unlock();
+
+	if (!test_bit(R5_ReWrite, &dev->flags)) {
+		printk("%s[%s]: set rewrite, md%d, %s, sector %llu\n",
+			   __FILE__, __FUNCTION__,
+			   conf->mddev->md_minor, b, (unsigned long long)sh->sector);
+		set_bit(R5_Wantwrite, &dev->flags);
+		set_bit(R5_ReWrite, &dev->flags);
+		set_bit(R5_LOCKED, &dev->flags);
+	} else {
+		printk("%s[%s]: set reread, md%d, %s, sector %llu\n",
+			   __FILE__, __FUNCTION__,
+			   conf->mddev->md_minor, b, (unsigned long long)sh->sector);
+		 
+		set_bit(R5_Wantread, &dev->flags);
+		set_bit(R5_LOCKED, &dev->flags);
+	}
+}
+#endif  
+
+#ifdef MY_ABC_HERE
+ 
 static void syno_handle_raid6_sync_error(struct r5conf *conf, struct stripe_head *sh, int disks)
 {
 	int i;
@@ -3381,12 +3885,20 @@
 }
 #endif  
 
+#ifdef MY_ABC_HERE
+ 
+static int analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+#else  
 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+#endif  
 {
 	struct r5conf *conf = sh->raid_conf;
 	int disks = sh->disks;
 	struct r5dev *dev;
 #ifdef MY_ABC_HERE
+	int isSyncError = 0;
+#endif  
+#ifdef MY_ABC_HERE
 	unsigned char isBadSH = 0;
 #endif  
 	int i;
@@ -3541,9 +4053,22 @@
 		if (test_bit(R5_ReadError, &dev->flags))
 			clear_bit(R5_Insync, &dev->flags);
 		if (!test_bit(R5_Insync, &dev->flags)) {
+#ifdef MY_ABC_HERE
+			if (s->syncing && conf->mddev->auto_remap &&
+				rdev && test_bit(In_sync, &rdev->flags) &&
+				test_bit(R5_ReadError, &dev->flags)) {
+				 
+				isSyncError = 1;
+			} else {
+				if (s->failed < 2)
+					s->failed_num[s->failed] = i;
+				s->failed++;
+			}
+#else  
 			if (s->failed < 2)
 				s->failed_num[s->failed] = i;
 			s->failed++;
+#endif  
 			if (rdev && !test_bit(Faulty, &rdev->flags))
 				do_recovery = 1;
 		}
@@ -3558,6 +4083,10 @@
 			s->replacing = 1;
 	}
 	rcu_read_unlock();
+
+#ifdef MY_ABC_HERE
+	return isSyncError;
+#endif  
 }
 
 static void handle_stripe(struct stripe_head *sh)
@@ -3568,6 +4097,9 @@
 	int prexor;
 	int disks = sh->disks;
 	struct r5dev *pdev, *qdev;
+#ifdef MY_ABC_HERE
+	int isSyncError = 0;
+#endif  
 
 	clear_bit(STRIPE_HANDLE, &sh->state);
 	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
@@ -3595,7 +4127,11 @@
 	       atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
 	       sh->check_state, sh->reconstruct_state);
 
+#ifdef MY_ABC_HERE
+	isSyncError = analyse_stripe(sh, &s);
+#else  
 	analyse_stripe(sh, &s);
+#endif  
 
 	if (s.handle_bad_blocks) {
 		set_bit(STRIPE_HANDLE, &sh->state);
@@ -3707,6 +4243,19 @@
 	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
 		handle_stripe_dirtying(conf, sh, &s, disks);
 
+#ifdef MY_ABC_HERE
+	if (s.failed == 1 && isSyncError == 1) {
+		struct r5dev *dev = NULL;
+		for (i=disks; i--;) {
+			dev = &sh->dev[i];
+			if (test_bit(R5_ReadError, &dev->flags)) {
+				syno_read_err_retry5(conf, sh, &s, dev, i);
+				s.locked++;
+			}
+		}
+	}
+#endif  
+
 	if (sh->check_state ||
 	    (s.syncing && s.locked == 0 &&
 	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
@@ -3866,7 +4415,11 @@
 		atomic_dec(&conf->preread_active_stripes);
 		if (atomic_read(&conf->preread_active_stripes) <
 		    IO_THRESHOLD)
+#ifdef MY_ABC_HERE
+			raid5_wakeup_main_thread(conf->mddev);
+#else  
 			md_wakeup_thread(conf->mddev->thread);
+#endif  
 	}
 
 	return_io(s.return_bi);
@@ -3973,7 +4526,11 @@
 	conf->retry_read_aligned_list = bi;
 
 	spin_unlock_irqrestore(&conf->device_lock, flags);
+#ifdef MY_ABC_HERE
+	raid5_wakeup_main_thread(conf->mddev);
+#else  
 	md_wakeup_thread(conf->mddev->thread);
+#endif  
 }
 
 static struct bio *remove_bio_from_retry(struct r5conf *conf)
@@ -4002,6 +4559,9 @@
 	struct mddev *mddev;
 	struct r5conf *conf;
 	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
+#ifdef MY_ABC_HERE
+	int auto_remap = test_and_clear_bit(BIO_AUTO_REMAP, &bi->bi_flags);
+#endif
 	struct md_rdev *rdev;
 
 	bio_put(bi);
@@ -4013,6 +4573,13 @@
 
 	rdev_dec_pending(rdev, conf->mddev);
 
+#ifdef MY_ABC_HERE
+	if (auto_remap) {
+		printk("%s:%s(%d) BIO_AUTO_REMAP detected\n", __FILE__, __FUNCTION__, __LINE__);
+		SynoAutoRemapReport(conf->mddev, raid_bi->bi_sector, rdev->bdev);
+	}
+#endif  
+
 	if (!error && uptodate) {
 		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
 					 raid_bi, 0);
@@ -4484,7 +5051,11 @@
 			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
 			    !add_stripe_bio(sh, bi, dd_idx, rw)) {
 				 
+#ifdef MY_ABC_HERE
+				raid5_wakeup_main_thread(mddev);
+#else  
 				md_wakeup_thread(mddev->thread);
+#endif  
 				release_stripe(sh);
 				schedule();
 				goto retry;
@@ -4602,7 +5173,11 @@
 		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+#ifdef MY_ABC_HERE
+		raid5_wakeup_main_thread(mddev);
+#else  
 		md_wakeup_thread(mddev->thread);
+#endif  
 		wait_event(mddev->sb_wait, mddev->flags == 0 ||
 			   kthread_should_stop());
 		spin_lock_irq(&conf->device_lock);
@@ -4682,7 +5257,11 @@
 		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+#ifdef MY_ABC_HERE
+		raid5_wakeup_main_thread(mddev);
+#else  
 		md_wakeup_thread(mddev->thread);
+#endif  
 		wait_event(mddev->sb_wait,
 			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
 			   || kthread_should_stop());
@@ -4950,6 +5529,57 @@
 	pr_debug("--- raid5d inactive\n");
 }
 
+#ifdef MY_ABC_HERE
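+/*
+ * Reduced raid5d-style loop run from a temporary "proxy" thread while the
+ * main md thread is busy finishing a reshape, so stripe handling does not
+ * stall behind it.
+ */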
+static void raid5d_proxy(struct md_thread *thread)
+{
+	struct mddev *mddev = thread->mddev;
+	struct r5conf *conf = mddev->private;
+	int handled;
+	struct blk_plug plug;
+
+	blk_start_plug(&plug);
+	handled = 0;
+	spin_lock_irq(&conf->device_lock);
+	while (atomic_read(&conf->proxy_enable)) {
+		struct bio *bio;
+		int batch_size, released;
+
+		released = release_stripe_list(conf);
+
+		if (!list_empty(&conf->bitmap_list)) {
+			 
+			conf->seq_flush++;
+			spin_unlock_irq(&conf->device_lock);
+			bitmap_unplug(mddev->bitmap);
+			spin_lock_irq(&conf->device_lock);
+			conf->seq_write = conf->seq_flush;
+			activate_bit_delay(conf);
+		}
+		raid5_activate_delayed(conf);
+
+		while ((bio = remove_bio_from_retry(conf))) {
+			int ok;
+			spin_unlock_irq(&conf->device_lock);
+			ok = retry_aligned_read(conf, bio);
+			spin_lock_irq(&conf->device_lock);
+			if (!ok)
+				break;
+			handled++;
+		}
+
+		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+		if (!batch_size && !released)
+			break;
+		handled += batch_size;
+	}
+
+	spin_unlock_irq(&conf->device_lock);
+
+	blk_finish_plug(&plug);
+}
+
+#endif  
 static ssize_t
 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 {
@@ -5076,6 +5706,7 @@
 			 raid5_store_rmw_level);
 
 #endif  
+
 static ssize_t
 raid5_show_preread_threshold(struct mddev *mddev, char *page)
 {
@@ -5110,6 +5741,51 @@
 					raid5_show_preread_threshold,
 					raid5_store_preread_threshold);
 
+#ifdef MY_ABC_HERE
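+/*
+ * sysfs "skip_copy": when set, full-stripe writes reference the bio pages
+ * directly instead of copying them into the stripe cache; this requires
+ * stable pages, hence BDI_CAP_STABLE_WRITES is toggled alongside it.
+ */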
+static ssize_t
+raid5_show_skip_copy(struct mddev *mddev, char *page)
+{
+	struct r5conf *conf = mddev->private;
+	if (conf)
+		return sprintf(page, "%d\n", conf->skip_copy);
+	else
+		return 0;
+}
+
+static ssize_t
+raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
+{
+	struct r5conf *conf = mddev->private;
+	unsigned long new;
+	if (len >= PAGE_SIZE)
+		return -EINVAL;
+	if (!conf)
+		return -ENODEV;
+
+	if (kstrtoul(page, 10, &new))
+		return -EINVAL;
+	new = !!new;
+	if (new == conf->skip_copy)
+		return len;
+
+	mddev_suspend(mddev);
+	conf->skip_copy = new;
+	if (new)
+		mddev->queue->backing_dev_info.capabilities |=
+			BDI_CAP_STABLE_WRITES;
+	else
+		mddev->queue->backing_dev_info.capabilities &=
+			~BDI_CAP_STABLE_WRITES;
+	mddev_resume(mddev);
+	return len;
+}
+
+static struct md_sysfs_entry
+raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
+		raid5_show_skip_copy,
+		raid5_store_skip_copy);
+#endif  
+
 static ssize_t
 stripe_cache_active_show(struct mddev *mddev, char *page)
 {
@@ -5199,6 +5875,9 @@
 	&raid5_preread_bypass_threshold.attr,
 	&raid5_group_thread_cnt.attr,
 #ifdef MY_ABC_HERE
+	&raid5_skip_copy.attr,
+#endif  
+#ifdef MY_ABC_HERE
 	&raid5_rmw_level.attr,
 #endif  
 #ifdef MY_ABC_HERE
@@ -5452,7 +6131,15 @@
 	atomic_set(&conf->active_stripes, 0);
 	atomic_set(&conf->preread_active_stripes, 0);
 	atomic_set(&conf->active_aligned_reads, 0);
+#ifdef MY_ABC_HERE
+	atomic_set(&conf->proxy_enable, 0);
+	conf->proxy_thread = NULL;
+#endif  
 	conf->bypass_threshold = BYPASS_THRESHOLD;
+#ifdef MY_ABC_HERE
+	conf->skip_copy = 1;
+	mddev->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+#endif  
 	conf->recovery_disabled = mddev->recovery_disabled - 1;
 
 	conf->raid_disks = mddev->raid_disks;
@@ -5865,8 +6552,8 @@
 		}
 
 		if (discard_supported &&
-		   mddev->queue->limits.max_discard_sectors >= stripe &&
-		   mddev->queue->limits.discard_granularity >= stripe)
+		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
+		    mddev->queue->limits.discard_granularity >= stripe)
 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
@@ -6350,9 +7037,24 @@
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 
 		if (mddev->delta_disks > 0) {
+#ifdef MY_ABC_HERE
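+			/*
+			 * Temporarily run a proxy daemon, presumably so stripe
+			 * handling can continue while this context blocks in
+			 * set_capacity()/revalidate_disk(); it is unregistered
+			 * again right below.
+			 */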
+			conf->proxy_thread = md_register_thread(raid5d_proxy, mddev, "proxy");
+			if (conf->proxy_thread) {
+				atomic_set(&conf->proxy_enable, 1);
+				raid5_wakeup_main_thread(conf->mddev);
+			} else {
+				pr_err("Failed to start proxy, just pray\n");
+			}
+#endif  
 			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 			set_capacity(mddev->gendisk, mddev->array_sectors);
 			revalidate_disk(mddev->gendisk);
+#ifdef MY_ABC_HERE
+			if (atomic_read(&conf->proxy_enable)) {
+				atomic_set(&conf->proxy_enable, 0);
+				md_unregister_thread(&conf->proxy_thread);
+			}
+#endif  
 		} else {
 			int d;
 			spin_lock_irq(&conf->device_lock);
@@ -6527,7 +7229,11 @@
 			mddev->chunk_sectors = new_chunk;
 		}
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+#ifdef MY_ABC_HERE
+		raid5_wakeup_main_thread(mddev);
+#else  
 		md_wakeup_thread(mddev->thread);
+#endif  
 	}
 	return check_reshape(mddev);
 }
@@ -6660,6 +7366,9 @@
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid6_takeover,
+#ifdef MY_ABC_HERE
+	.ismaxdegrade = SynoIsRaidReachMaxDegrade,
+#endif  
 };
 static struct md_personality raid5_personality =
 {
@@ -6687,6 +7396,9 @@
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid5_takeover,
+#ifdef MY_ABC_HERE
+	.ismaxdegrade = SynoIsRaidReachMaxDegrade,
+#endif  
 };
 
 static struct md_personality raid4_personality =
@@ -6715,6 +7427,9 @@
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid4_takeover,
+#ifdef MY_ABC_HERE
+	.ismaxdegrade = SynoIsRaidReachMaxDegrade,
+#endif  
 };
 
 static int __init raid5_init(void)
diff -ur a/drivers/md/raid5.h b/drivers/md/raid5.h
--- a/drivers/md/raid5.h	2017-03-23 14:38:58.000000000 +0100
+++ b/drivers/md/raid5.h	2017-03-14 02:13:49.000000000 +0100
@@ -56,6 +56,9 @@
 		struct bio	req, rreq;
 		struct bio_vec	vec, rvec;
 		struct page	*page;
+#ifdef MY_ABC_HERE
+		struct page	*orig_page;
+#endif  
 		struct bio	*toread, *read, *towrite, *written;
 		sector_t	sector;			 
 		unsigned long	flags;
@@ -104,6 +107,9 @@
 	R5_NeedReplace,	 
 	R5_WantReplace,  
 	R5_Discard,	 
+#ifdef MY_ABC_HERE
+	R5_SkipCopy,	 
+#endif  
 };
 
 enum {
@@ -209,6 +215,9 @@
 	int			bypass_count;  
 	int			bypass_threshold;  
 #ifdef MY_ABC_HERE
+	int			skip_copy;
+#endif  
+#ifdef MY_ABC_HERE
 	int         stripe_cache_memory_usage;
 #endif  
 	struct list_head	*last_hold;  
@@ -234,6 +243,11 @@
 	struct notifier_block	cpu_notify;
 #endif
 
+#ifdef MY_ABC_HERE
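+	/* temporary daemon driven by raid5d_proxy() while a reshape finishes */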
+	atomic_t            proxy_enable;
+	struct md_thread   *proxy_thread;
+#endif  
+	 
 	atomic_t		active_stripes;
 	struct list_head	inactive_list;
 	struct llist_head	released_stripes;
@@ -291,4 +305,9 @@
 extern int md_raid5_congested(struct mddev *mddev, int bits);
 extern void md_raid5_kick_device(struct r5conf *conf);
 extern int raid5_set_cache_size(struct mddev *mddev, int size);
+
+#ifdef MY_ABC_HERE
+#define sector_mod(a,b) sector_div(a,b)
+#endif  
+
 #endif
diff -ur a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
--- a/drivers/media/dvb-core/dvb_frontend.c	2017-03-23 14:55:55.000000000 +0100
+++ b/drivers/media/dvb-core/dvb_frontend.c	2017-03-14 02:33:02.000000000 +0100
@@ -2194,9 +2194,9 @@
 		dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
 				 __func__, c->delivery_system, fe->ops.info.type);
 
-		/* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
-		 * do it, it is done for it. */
-		info->caps |= FE_CAN_INVERSION_AUTO;
+		/* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
+		if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
+			info->caps |= FE_CAN_INVERSION_AUTO;
 		err = 0;
 		break;
 	}
diff -ur a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
--- a/drivers/media/dvb-frontends/af9013.c	2017-03-23 14:55:04.000000000 +0100
+++ b/drivers/media/dvb-frontends/af9013.c	2017-03-14 02:32:12.000000000 +0100
@@ -606,6 +606,10 @@
 			}
 		}
 
+		/* Return an error if can't find bandwidth or the right clock */
+		if (i == ARRAY_SIZE(coeff_lut))
+			return -EINVAL;
+
 		ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
 			sizeof(coeff_lut[i].val));
 	}
diff -ur a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
--- a/drivers/media/dvb-frontends/cx24116.c	2017-03-23 14:55:12.000000000 +0100
+++ b/drivers/media/dvb-frontends/cx24116.c	2017-03-14 02:32:20.000000000 +0100
@@ -963,6 +963,10 @@
 	struct cx24116_state *state = fe->demodulator_priv;
 	int i, ret;
 
+	/* Validate length */
+	if (d->msg_len > sizeof(d->msg))
+		return -EINVAL;
+
 	/* Dump DiSEqC message */
 	if (debug) {
 		printk(KERN_INFO "cx24116: %s(", __func__);
@@ -974,10 +978,6 @@
 		printk(") toneburst=%d\n", toneburst);
 	}
 
-	/* Validate length */
-	if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
-		return -EINVAL;
-
 	/* DiSEqC message */
 	for (i = 0; i < d->msg_len; i++)
 		state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
diff -ur a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
--- a/drivers/media/dvb-frontends/s5h1420.c	2017-03-23 14:55:04.000000000 +0100
+++ b/drivers/media/dvb-frontends/s5h1420.c	2017-03-14 02:32:12.000000000 +0100
@@ -178,7 +178,7 @@
 	int result = 0;
 
 	dprintk("enter %s\n", __func__);
-	if (cmd->msg_len > 8)
+	if (cmd->msg_len > sizeof(cmd->msg))
 		return -EINVAL;
 
 	/* setup for DISEQC */
diff -ur a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
--- a/drivers/media/dvb-frontends/tda1004x.c	2017-03-23 14:55:09.000000000 +0100
+++ b/drivers/media/dvb-frontends/tda1004x.c	2017-03-14 02:32:16.000000000 +0100
@@ -902,9 +902,18 @@
 {
 	struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
 	struct tda1004x_state* state = fe->demodulator_priv;
+	int status;
 
 	dprintk("%s\n", __func__);
 
+	status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
+	if (status == -1)
+		return -EIO;
+
+	/* Only update the properties cache if device is locked */
+	if (!(status & 8))
+		return 0;
+
 	// inversion status
 	fe_params->inversion = INVERSION_OFF;
 	if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
diff -ur a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
--- a/drivers/media/pci/bt8xx/bttv-driver.c	2017-03-23 14:56:59.000000000 +0100
+++ b/drivers/media/pci/bt8xx/bttv-driver.c	2017-03-14 02:34:00.000000000 +0100
@@ -2374,6 +2374,19 @@
 	return 0;
 }
 
+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
+					unsigned int *width_mask,
+					unsigned int *width_bias)
+{
+	if (fmt->flags & FORMAT_FLAGS_PLANAR) {
+		*width_mask = ~15; /* width must be a multiple of 16 pixels */
+		*width_bias = 8;   /* nearest */
+	} else {
+		*width_mask = ~3; /* width must be a multiple of 4 pixels */
+		*width_bias = 2;  /* nearest */
+	}
+}
+
 static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
 						struct v4l2_format *f)
 {
@@ -2383,6 +2396,7 @@
 	enum v4l2_field field;
 	__s32 width, height;
 	__s32 height2;
+	unsigned int width_mask, width_bias;
 	int rc;
 
 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -2415,9 +2429,9 @@
 	width = f->fmt.pix.width;
 	height = f->fmt.pix.height;
 
+	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
 	rc = limit_scaled_size_lock(fh, &width, &height, field,
-			       /* width_mask: 4 pixels */ ~3,
-			       /* width_bias: nearest */ 2,
+			       width_mask, width_bias,
 			       /* adjust_size */ 1,
 			       /* adjust_crop */ 0);
 	if (0 != rc)
@@ -2450,6 +2464,7 @@
 	struct bttv_fh *fh = priv;
 	struct bttv *btv = fh->btv;
 	__s32 width, height;
+	unsigned int width_mask, width_bias;
 	enum v4l2_field field;
 
 	retval = bttv_switch_type(fh, f->type);
@@ -2464,9 +2479,10 @@
 	height = f->fmt.pix.height;
 	field = f->fmt.pix.field;
 
+	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
 	retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
-			       /* width_mask: 4 pixels */ ~3,
-			       /* width_bias: nearest */ 2,
+			       width_mask, width_bias,
 			       /* adjust_size */ 1,
 			       /* adjust_crop */ 1);
 	if (0 != retval)
@@ -2474,8 +2490,6 @@
 
 	f->fmt.pix.field = field;
 
-	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-
 	/* update our state informations */
 	fh->fmt              = fmt;
 	fh->cap.field        = f->fmt.pix.field;
diff -ur a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
--- a/drivers/media/pci/saa7134/saa7134-alsa.c	2017-03-23 14:56:59.000000000 +0100
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c	2017-03-14 02:34:02.000000000 +0100
@@ -1136,6 +1136,8 @@
 
 static int alsa_device_exit(struct saa7134_dev *dev)
 {
+	if (!snd_saa7134_cards[dev->nr])
+		return 1;
 
 	snd_card_free(snd_saa7134_cards[dev->nr]);
 	snd_saa7134_cards[dev->nr] = NULL;
@@ -1185,7 +1187,8 @@
 	int idx;
 
 	for (idx = 0; idx < SNDRV_CARDS; idx++) {
-		snd_card_free(snd_saa7134_cards[idx]);
+		if (snd_saa7134_cards[idx])
+			snd_card_free(snd_saa7134_cards[idx]);
 	}
 
 	saa7134_dmasound_init = NULL;
diff -ur a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
--- a/drivers/media/rc/rc-main.c	2017-03-23 14:55:27.000000000 +0100
+++ b/drivers/media/rc/rc-main.c	2017-03-14 02:32:33.000000000 +0100
@@ -942,9 +942,6 @@
 {
 	struct rc_dev *dev = to_rc_dev(device);
 
-	if (!dev || !dev->input_dev)
-		return -ENODEV;
-
 	if (dev->rc_map.name)
 		ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
 	if (dev->driver_name)
diff -ur a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
--- a/drivers/media/usb/gspca/ov534.c	2017-03-23 14:56:15.000000000 +0100
+++ b/drivers/media/usb/gspca/ov534.c	2017-03-14 02:33:21.000000000 +0100
@@ -1488,8 +1488,13 @@
 	struct v4l2_fract *tpf = &cp->timeperframe;
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	/* Set requested framerate */
-	sd->frame_rate = tpf->denominator / tpf->numerator;
+	if (tpf->numerator == 0 || tpf->denominator == 0)
+		/* Set default framerate */
+		sd->frame_rate = 30;
+	else
+		/* Set requested framerate */
+		sd->frame_rate = tpf->denominator / tpf->numerator;
+
 	if (gspca_dev->streaming)
 		set_frame_rate(gspca_dev);
 
diff -ur a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
--- a/drivers/media/usb/gspca/topro.c	2017-03-23 14:56:28.000000000 +0100
+++ b/drivers/media/usb/gspca/topro.c	2017-03-14 02:33:31.000000000 +0100
@@ -4791,7 +4791,11 @@
 	struct v4l2_fract *tpf = &cp->timeperframe;
 	int fr, i;
 
-	sd->framerate = tpf->denominator / tpf->numerator;
+	if (tpf->numerator == 0 || tpf->denominator == 0)
+		sd->framerate = 30;
+	else
+		sd->framerate = tpf->denominator / tpf->numerator;
+
 	if (gspca_dev->streaming)
 		setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
 
diff -ur a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
--- a/drivers/media/usb/pwc/pwc-if.c	2017-03-23 14:55:57.000000000 +0100
+++ b/drivers/media/usb/pwc/pwc-if.c	2017-03-14 02:33:03.000000000 +0100
@@ -91,6 +91,7 @@
 	{ USB_DEVICE(0x0471, 0x0312) },
 	{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
 	{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
+	{ USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
 	{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
 	{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
 	{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
@@ -799,6 +800,11 @@
 			name = "Philips SPC 900NC webcam";
 			type_id = 740;
 			break;
+		case 0x032C:
+			PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
+			name = "Philips SPC 880NC webcam";
+			type_id = 740;
+			break;
 		default:
 			return -ENODEV;
 			break;
diff -ur a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
--- a/drivers/media/usb/usbvision/usbvision-video.c	2017-03-23 14:55:59.000000000 +0100
+++ b/drivers/media/usb/usbvision/usbvision-video.c	2017-03-14 02:33:05.000000000 +0100
@@ -1451,6 +1451,7 @@
 
 	usbvision_remove_sysfs(usbvision->vdev);
 	usbvision_unregister_video(usbvision);
+	kfree(usbvision->alt_max_pkt_size);
 
 	usb_free_urb(usbvision->ctrl_urb);
 
@@ -1511,7 +1512,7 @@
 	const struct usb_host_interface *interface;
 	struct usb_usbvision *usbvision = NULL;
 	const struct usb_endpoint_descriptor *endpoint;
-	int model, i;
+	int model, i, ret;
 
 	PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u",
 				dev->descriptor.idVendor,
@@ -1520,33 +1521,51 @@
 	model = devid->driver_info;
 	if (model < 0 || model >= usbvision_device_data_size) {
 		PDEBUG(DBG_PROBE, "model out of bounds %d", model);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_usb;
 	}
 	printk(KERN_INFO "%s: %s found\n", __func__,
 				usbvision_device_data[model].model_string);
 
 	if (usbvision_device_data[model].interface >= 0)
 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
-	else
+	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
 		interface = &dev->actconfig->interface[ifnum]->altsetting[0];
+	else {
+		dev_err(&intf->dev, "interface %d is invalid, max is %d\n",
+		    ifnum, dev->actconfig->desc.bNumInterfaces - 1);
+		ret = -ENODEV;
+		goto err_usb;
+	}
+
+	if (interface->desc.bNumEndpoints < 2) {
+		dev_err(&intf->dev, "interface %d has %d endpoints, but must"
+		    " have minimum 2\n", ifnum, interface->desc.bNumEndpoints);
+		ret = -ENODEV;
+		goto err_usb;
+	}
 	endpoint = &interface->endpoint[1].desc;
+
 	if (!usb_endpoint_xfer_isoc(endpoint)) {
 		dev_err(&intf->dev, "%s: interface %d. has non-ISO endpoint!\n",
 		    __func__, ifnum);
 		dev_err(&intf->dev, "%s: Endpoint attributes %d",
 		    __func__, endpoint->bmAttributes);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_usb;
 	}
 	if (usb_endpoint_dir_out(endpoint)) {
 		dev_err(&intf->dev, "%s: interface %d. has ISO OUT endpoint!\n",
 		    __func__, ifnum);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_usb;
 	}
 
 	usbvision = usbvision_alloc(dev, intf);
 	if (usbvision == NULL) {
 		dev_err(&intf->dev, "%s: couldn't allocate USBVision struct\n", __func__);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_usb;
 	}
 
 	if (dev->descriptor.bNumConfigurations > 1)
@@ -1565,7 +1584,8 @@
 	usbvision->alt_max_pkt_size = kmalloc(32 * usbvision->num_alt, GFP_KERNEL);
 	if (usbvision->alt_max_pkt_size == NULL) {
 		dev_err(&intf->dev, "usbvision: out of memory!\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_pkt;
 	}
 
 	for (i = 0; i < usbvision->num_alt; i++) {
@@ -1599,6 +1619,12 @@
 
 	PDEBUG(DBG_PROBE, "success");
 	return 0;
+
+err_pkt:
+	usbvision_release(usbvision);
+err_usb:
+	usb_put_dev(dev);
+	return ret;
 }
 
 /*
diff -ur a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c	2017-03-23 14:55:51.000000000 +0100
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c	2017-03-14 02:32:58.000000000 +0100
@@ -263,7 +263,7 @@
 
 struct v4l2_standard32 {
 	__u32		     index;
-	__u32		     id[2]; /* __u64 would get the alignment wrong */
+	compat_u64	     id;
 	__u8		     name[24];
 	struct v4l2_fract    frameperiod; /* Frames, not fields */
 	__u32		     framelines;
@@ -283,7 +283,7 @@
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
 		put_user(kp->index, &up->index) ||
-		copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
+		put_user(kp->id, &up->id) ||
 		copy_to_user(up->name, kp->name, 24) ||
 		copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
 		put_user(kp->framelines, &up->framelines) ||
@@ -391,7 +391,8 @@
 		get_user(kp->index, &up->index) ||
 		get_user(kp->type, &up->type) ||
 		get_user(kp->flags, &up->flags) ||
-		get_user(kp->memory, &up->memory))
+		get_user(kp->memory, &up->memory) ||
+		get_user(kp->length, &up->length))
 			return -EFAULT;
 
 	if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -403,9 +404,6 @@
 			return -EFAULT;
 
 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-		if (get_user(kp->length, &up->length))
-			return -EFAULT;
-
 		num_planes = kp->length;
 		if (num_planes == 0) {
 			kp->m.planes = NULL;
@@ -438,16 +436,14 @@
 	} else {
 		switch (kp->memory) {
 		case V4L2_MEMORY_MMAP:
-			if (get_user(kp->length, &up->length) ||
-				get_user(kp->m.offset, &up->m.offset))
+			if (get_user(kp->m.offset, &up->m.offset))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_USERPTR:
 			{
 			compat_long_t tmp;
 
-			if (get_user(kp->length, &up->length) ||
-			    get_user(tmp, &up->m.userptr))
+			if (get_user(tmp, &up->m.userptr))
 				return -EFAULT;
 
 			kp->m.userptr = (unsigned long)compat_ptr(tmp);
@@ -489,7 +485,8 @@
 		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
 		put_user(kp->sequence, &up->sequence) ||
 		put_user(kp->reserved2, &up->reserved2) ||
-		put_user(kp->reserved, &up->reserved))
+		put_user(kp->reserved, &up->reserved) ||
+		put_user(kp->length, &up->length))
 			return -EFAULT;
 
 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
@@ -512,13 +509,11 @@
 	} else {
 		switch (kp->memory) {
 		case V4L2_MEMORY_MMAP:
-			if (put_user(kp->length, &up->length) ||
-				put_user(kp->m.offset, &up->m.offset))
+			if (put_user(kp->m.offset, &up->m.offset))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_USERPTR:
-			if (put_user(kp->length, &up->length) ||
-				put_user(kp->m.userptr, &up->m.userptr))
+			if (put_user(kp->m.userptr, &up->m.userptr))
 				return -EFAULT;
 			break;
 		case V4L2_MEMORY_OVERLAY:
@@ -575,10 +570,10 @@
 	__u32	     type;		/*  Type of input */
 	__u32	     audioset;		/*  Associated audios (bitfield) */
 	__u32        tuner;             /*  Associated tuner */
-	v4l2_std_id  std;
+	compat_u64   std;
 	__u32	     status;
 	__u32	     reserved[4];
-} __attribute__ ((packed));
+};
 
 /* The 64-bit v4l2_input struct has extra padding at the end of the struct.
    Otherwise it is identical to the 32-bit version. */
@@ -718,6 +713,7 @@
 struct v4l2_event32 {
 	__u32				type;
 	union {
+		compat_s64		value64;
 		__u8			data[64];
 	} u;
 	__u32				pending;
diff -ur a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c	2017-03-23 14:55:49.000000000 +0100
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c	2017-03-14 02:32:57.000000000 +0100
@@ -116,7 +116,8 @@
 	if (!sgt || buf->db_attach)
 		return;
 
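+	/*
+	 * dma_sync_sg_* must be called with the same nents that were passed
+	 * to dma_map_sg() (orig_nents), not the possibly smaller count that
+	 * dma_map_sg() returned after merging entries.
+	 */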
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
+			       buf->dma_dir);
 }
 
 static void vb2_dc_finish(void *buf_priv)
@@ -128,7 +129,7 @@
 	if (!sgt || buf->db_attach)
 		return;
 
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
 }
 
 /*********************************************/
diff -ur a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
--- a/drivers/mfd/lpc_ich.c	2017-03-23 14:41:21.000000000 +0100
+++ b/drivers/mfd/lpc_ich.c	2017-03-14 02:16:58.000000000 +0100
@@ -740,7 +740,7 @@
 static u32 c206_writable_pin[] = {0, 5, 16, 20, 21, 22, 34, 38, 48, 52, 54, 69, 70, 71};
 static u32 c226_writable_pin[] = {5, 16, 18, 19, 20, 21, 23, 32, 33, 34, 35, 36, 37, 45};
 static u32 avoton_writable_pin[] = {10, 15, 16, 17, 49, 50, 53, 54};
-static u32 broadwell_writable_pin[] = {3, 4, 28, 45, 70, 71};
+static u32 broadwell_writable_pin[] = {3, 4, 5, 28, 45, 70, 71};
 
 #ifdef MY_DEF_HERE
  
diff -ur a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
--- a/drivers/mfd/omap-usb-tll.c	2017-03-23 14:41:27.000000000 +0100
+++ b/drivers/mfd/omap-usb-tll.c	2017-03-14 02:17:07.000000000 +0100
@@ -269,6 +269,8 @@
 
 		if (IS_ERR(tll->ch_clk[i]))
 			dev_dbg(dev, "can't get clock : %s\n", clkname);
+		else
+			clk_prepare(tll->ch_clk[i]);
 	}
 
 	pm_runtime_put_sync(dev);
@@ -301,9 +303,12 @@
 	tll_dev = NULL;
 	spin_unlock(&tll_lock);
 
-	for (i = 0; i < tll->nch; i++)
-		if (!IS_ERR(tll->ch_clk[i]))
+	for (i = 0; i < tll->nch; i++) {
+		if (!IS_ERR(tll->ch_clk[i])) {
+			clk_unprepare(tll->ch_clk[i]);
 			clk_put(tll->ch_clk[i]);
+		}
+	}
 
 	pm_runtime_disable(&pdev->dev);
 	return 0;
diff -ur a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
--- a/drivers/mfd/sm501.c	2017-03-23 14:41:30.000000000 +0100
+++ b/drivers/mfd/sm501.c	2017-03-14 02:17:10.000000000 +0100
@@ -1229,7 +1229,7 @@
 	return ptr - buff;
 }
 
-static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL);
+static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
 
 /* sm501_init_reg
  *
diff -ur a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
--- a/drivers/misc/ad525x_dpot.c	2017-03-23 14:46:36.000000000 +0100
+++ b/drivers/misc/ad525x_dpot.c	2017-03-14 02:22:37.000000000 +0100
@@ -216,7 +216,7 @@
 			 */
 			value = swab16(value);
 
-			if (dpot->uid == DPOT_UID(AD5271_ID))
+			if (dpot->uid == DPOT_UID(AD5274_ID))
 				value = value >> 2;
 		return value;
 	default:
diff -ur a/drivers/misc/Kconfig b/drivers/misc/Kconfig
--- a/drivers/misc/Kconfig	2016-10-20 04:32:03.000000000 +0200
+++ b/drivers/misc/Kconfig	2016-07-29 05:48:09.000000000 +0200
@@ -451,7 +451,7 @@
 	  still useful.
 
 config BMP085
-	bool
+	tristate
 	depends on SYSFS
 
 config BMP085_I2C
diff -ur a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
--- a/drivers/mmc/card/block.c	2017-03-23 14:57:48.000000000 +0100
+++ b/drivers/mmc/card/block.c	2017-03-14 02:34:45.000000000 +0100
@@ -59,8 +59,7 @@
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
 
-#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
-				  (req->cmd_flags & REQ_META)) && \
+#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
 				  (rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER	0x01
 #define PACKED_CMD_WR	0x02
@@ -202,6 +201,8 @@
 
 	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 
+	mmc_blk_put(md);
+
 	return ret;
 }
 
@@ -908,6 +909,18 @@
 	md->reset_done &= ~type;
 }
 
+int mmc_access_rpmb(struct mmc_queue *mq)
+{
+	struct mmc_blk_data *md = mq->data;
+	/*
+	 * If this is an RPMB partition access, return true
+	 */
+	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+		return true;
+
+	return false;
+}
+
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1286,13 +1299,9 @@
 
 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
-	 * REQ_META accesses, and are supported only on MMCs.
-	 *
-	 * XXX: this really needs a good explanation of why REQ_META
-	 * is treated special.
+	 * are supported only on MMCs.
 	 */
-	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
-			  (req->cmd_flags & REQ_META)) &&
+	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
 		(rq_data_dir(req) == WRITE) &&
 		(md->flags & MMC_BLK_REL_WR);
 
@@ -1821,9 +1830,11 @@
 			break;
 		case MMC_BLK_CMD_ERR:
 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
-			if (!mmc_blk_reset(md, card->host, type))
-				break;
-			goto cmd_abort;
+			if (mmc_blk_reset(md, card->host, type))
+				goto cmd_abort;
+			if (!ret)
+				goto start_new_req;
+			break;
 		case MMC_BLK_RETRY:
 			if (retry++ < 5)
 				break;
@@ -2311,11 +2322,12 @@
 		  MMC_QUIRK_BLK_NO_CMD23),
 
 	/*
-	 * Some Micron MMC cards needs longer data read timeout than
-	 * indicated in CSD.
+	 * Some MMC cards need longer data read timeout than indicated in CSD.
 	 */
 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
 		  MMC_QUIRK_LONG_READ_TIME),
+	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
 
 	/*
 	 * On these Samsung MoviNAND parts, performing secure erase or
diff -ur a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
--- a/drivers/mmc/card/queue.c	2017-03-23 14:57:45.000000000 +0100
+++ b/drivers/mmc/card/queue.c	2017-03-14 02:34:43.000000000 +0100
@@ -37,7 +37,7 @@
 		return BLKPREP_KILL;
 	}
 
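+	/*
+	 * RPMB partitions are driven through the ioctl interface, so refuse
+	 * normal block-layer read/write requests aimed at them.
+	 */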
-	if (mq && mmc_card_removed(mq->card))
+	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
 		return BLKPREP_KILL;
 
 	req->cmd_flags |= REQ_DONTPREP;
diff -ur a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
--- a/drivers/mmc/card/queue.h	2017-03-23 14:57:45.000000000 +0100
+++ b/drivers/mmc/card/queue.h	2017-03-14 02:34:42.000000000 +0100
@@ -73,4 +73,6 @@
 extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
 extern void mmc_packed_clean(struct mmc_queue *);
 
+extern int mmc_access_rpmb(struct mmc_queue *);
+
 #endif
diff -ur a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
--- a/drivers/mmc/core/core.c	2017-03-23 14:57:47.000000000 +0100
+++ b/drivers/mmc/core/core.c	2017-03-14 02:34:44.000000000 +0100
@@ -327,8 +327,10 @@
  */
 static void mmc_wait_data_done(struct mmc_request *mrq)
 {
-	mrq->host->context_info.is_done_rcv = true;
-	wake_up_interruptible(&mrq->host->context_info.wait);
+	struct mmc_context_info *context_info = &mrq->host->context_info;
+
+	context_info->is_done_rcv = true;
+	wake_up_interruptible(&context_info->wait);
 }
 
 static void mmc_wait_done(struct mmc_request *mrq)
@@ -799,11 +801,11 @@
 	/*
 	 * Some cards require longer data read timeout than indicated in CSD.
 	 * Address this by setting the read timeout to a "reasonably high"
-	 * value. For the cards tested, 300ms has proven enough. If necessary,
+	 * value. For the cards tested, 600ms has proven enough. If necessary,
 	 * this value can be increased if other problematic cards require this.
 	 */
 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
-		data->timeout_ns = 300000000;
+		data->timeout_ns = 600000000;
 		data->timeout_clks = 0;
 	}
 
diff -ur a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
--- a/drivers/mmc/core/mmc.c	2017-03-23 14:57:45.000000000 +0100
+++ b/drivers/mmc/core/mmc.c	2017-03-14 02:34:42.000000000 +0100
@@ -266,6 +266,9 @@
 	card->ext_csd.card_type = card_type;
 }
 
+/* Minimum partition switch timeout in milliseconds */
+#define MMC_MIN_PART_SWITCH_TIME	300
+
 /*
  * Decode extended CSD.
  */
@@ -330,6 +333,10 @@
 
 		/* EXT_CSD value is in units of 10ms, but we store in ms */
 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
+		/* Some eMMC set the value too low so set a minimum */
+		if (card->ext_csd.part_time &&
+		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
 
 		/* Sleep / awake timeout in 100ns units */
 		if (sa_shift > 0 && sa_shift <= 0x17)
diff -ur a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
--- a/drivers/mmc/host/atmel-mci.c	2017-03-23 14:57:50.000000000 +0100
+++ b/drivers/mmc/host/atmel-mci.c	2017-03-14 02:34:47.000000000 +0100
@@ -1295,7 +1295,7 @@
 
 	if (ios->clock) {
 		unsigned int clock_min = ~0U;
-		u32 clkdiv;
+		int clkdiv;
 
 		spin_lock_bh(&host->lock);
 		if (!host->mode_reg) {
@@ -1320,7 +1320,12 @@
 		/* Calculate clock divider */
 		if (host->caps.has_odd_clk_div) {
 			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
-			if (clkdiv > 511) {
+			if (clkdiv < 0) {
+				dev_warn(&mmc->class_dev,
+					 "clock %u too fast; using %lu\n",
+					 clock_min, host->bus_hz / 2);
+				clkdiv = 0;
+			} else if (clkdiv > 511) {
 				dev_warn(&mmc->class_dev,
 				         "clock %u too slow; using %lu\n",
 				         clock_min, host->bus_hz / (511 + 2));
diff -ur a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
--- a/drivers/mmc/host/mmci.c	2017-03-23 14:57:50.000000000 +0100
+++ b/drivers/mmc/host/mmci.c	2017-03-14 02:34:46.000000000 +0100
@@ -1740,7 +1740,7 @@
 	{
 		.id     = 0x00280180,
 		.mask   = 0x00ffffff,
-		.data	= &variant_u300,
+		.data	= &variant_nomadik,
 	},
 	{
 		.id     = 0x00480180,
diff -ur a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
--- a/drivers/mmc/host/sdhci-esdhc.h	2017-03-23 14:57:52.000000000 +0100
+++ b/drivers/mmc/host/sdhci-esdhc.h	2017-03-14 02:34:48.000000000 +0100
@@ -40,7 +40,7 @@
 #define ESDHC_DMA_SYSCTL	0x40c
 #define ESDHC_DMA_SNOOP		0x00000040
 
-#define ESDHC_HOST_CONTROL_RES	0x05
+#define ESDHC_HOST_CONTROL_RES	0x01
 
 static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
 {
diff -ur a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
--- a/drivers/mmc/host/sdhci-pxav3.c	2017-03-23 14:57:50.000000000 +0100
+++ b/drivers/mmc/host/sdhci-pxav3.c	2017-03-14 02:34:47.000000000 +0100
@@ -255,6 +255,7 @@
 		mmc_of_parse(host->mmc);
 		sdhci_get_of_property(pdev);
 		pdata = pxav3_get_mmc_pdata(dev);
+		pdev->dev.platform_data = pdata;
 	} else if (pdata) {
 		/* on-chip device */
 		if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff -ur a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
--- a/drivers/mtd/maps/dc21285.c	2017-03-23 14:38:47.000000000 +0100
+++ b/drivers/mtd/maps/dc21285.c	2017-03-14 02:13:34.000000000 +0100
@@ -37,9 +37,9 @@
 	 * we want to write a bit pattern XXX1 to Xilinx to enable
 	 * the write gate, which will be open for about the next 2ms.
 	 */
-	spin_lock_irqsave(&nw_gpio_lock, flags);
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
 	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
 
 	/*
 	 * let the ISA bus to catch on...
diff -ur a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
--- a/drivers/mtd/mtd_blkdevs.c	2017-03-23 14:38:35.000000000 +0100
+++ b/drivers/mtd/mtd_blkdevs.c	2017-03-14 02:13:18.000000000 +0100
@@ -198,6 +198,7 @@
 		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
 
 	mutex_lock(&dev->lock);
+	mutex_lock(&mtd_table_mutex);
 
 	if (dev->open)
 		goto unlock;
@@ -221,6 +222,7 @@
 
 unlock:
 	dev->open++;
+	mutex_unlock(&mtd_table_mutex);
 	mutex_unlock(&dev->lock);
 	blktrans_dev_put(dev);
 	return ret;
@@ -231,6 +233,7 @@
 error_put:
 	module_put(dev->tr->owner);
 	kref_put(&dev->ref, blktrans_dev_release);
+	mutex_unlock(&mtd_table_mutex);
 	mutex_unlock(&dev->lock);
 	blktrans_dev_put(dev);
 	return ret;
@@ -244,6 +247,7 @@
 		return;
 
 	mutex_lock(&dev->lock);
+	mutex_lock(&mtd_table_mutex);
 
 	if (--dev->open)
 		goto unlock;
@@ -257,6 +261,7 @@
 		__put_mtd_device(dev->mtd);
 	}
 unlock:
+	mutex_unlock(&mtd_table_mutex);
 	mutex_unlock(&dev->lock);
 	blktrans_dev_put(dev);
 }
diff -ur a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
--- a/drivers/mtd/mtdpart.c	2017-03-23 14:38:36.000000000 +0100
+++ b/drivers/mtd/mtdpart.c	2017-03-14 02:13:20.000000000 +0100
@@ -730,8 +730,10 @@
 
 	for (i = 0; i < nbparts; i++) {
 		slave = allocate_partition(master, parts + i, i, cur_offset);
-		if (IS_ERR(slave))
+		if (IS_ERR(slave)) {
+			del_mtd_partitions(master);
 			return PTR_ERR(slave);
+		}
 
 		mutex_lock(&mtd_partitions_mutex);
 		list_add(&slave->list, &mtd_partitions);
diff -ur a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
--- a/drivers/mtd/onenand/onenand_base.c	2017-03-23 14:38:52.000000000 +0100
+++ b/drivers/mtd/onenand/onenand_base.c	2017-03-14 02:13:38.000000000 +0100
@@ -2607,6 +2607,7 @@
  */
 static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
+	struct onenand_chip *this = mtd->priv;
 	int ret;
 
 	ret = onenand_block_isbad(mtd, ofs);
@@ -2618,7 +2619,7 @@
 	}
 
 	onenand_get_device(mtd, FL_WRITING);
-	ret = mtd_block_markbad(mtd, ofs);
+	ret = this->block_markbad(mtd, ofs);
 	onenand_release_device(mtd);
 	return ret;
 }
diff -ur a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
--- a/drivers/mtd/ubi/io.c	2017-03-23 14:38:40.000000000 +0100
+++ b/drivers/mtd/ubi/io.c	2017-03-14 02:13:26.000000000 +0100
@@ -931,6 +931,11 @@
 		goto bad;
 	}
 
+	if (data_size > ubi->leb_size) {
+		ubi_err("bad data_size");
+		goto bad;
+	}
+
 	if (vol_type == UBI_VID_STATIC) {
 		/*
 		 * Although from high-level point of view static volumes may
diff -ur a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
--- a/drivers/mtd/ubi/misc.c	2017-03-23 14:38:37.000000000 +0100
+++ b/drivers/mtd/ubi/misc.c	2017-03-14 02:13:22.000000000 +0100
@@ -74,6 +74,8 @@
 	for (i = 0; i < vol->used_ebs; i++) {
 		int size;
 
+		cond_resched();
+
 		if (i == vol->used_ebs - 1)
 			size = vol->last_eb_bytes;
 		else
diff -ur a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
--- a/drivers/mtd/ubi/upd.c	2017-03-23 14:38:39.000000000 +0100
+++ b/drivers/mtd/ubi/upd.c	2017-03-14 02:13:24.000000000 +0100
@@ -193,7 +193,7 @@
 	vol->changing_leb = 1;
 	vol->ch_lnum = req->lnum;
 
-	vol->upd_buf = vmalloc(req->bytes);
+	vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
 	if (!vol->upd_buf)
 		return -ENOMEM;
 
diff -ur a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
--- a/drivers/mtd/ubi/vtbl.c	2017-03-23 14:38:39.000000000 +0100
+++ b/drivers/mtd/ubi/vtbl.c	2017-03-14 02:13:24.000000000 +0100
@@ -651,6 +651,7 @@
 		if (ubi->corr_peb_count)
 			ubi_err("%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
+		return -ENOSPC;
 	}
 	ubi->rsvd_pebs += reserved_pebs;
 	ubi->avail_pebs -= reserved_pebs;
diff -ur a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
--- a/drivers/mtd/ubi/wl.c	2017-03-23 14:38:41.000000000 +0100
+++ b/drivers/mtd/ubi/wl.c	2017-03-14 02:13:26.000000000 +0100
@@ -1978,6 +1978,7 @@
 		if (ubi->corr_peb_count)
 			ubi_err("%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
+		err = -ENOSPC;
 		goto out_free;
 	}
 	ubi->avail_pebs -= reserved_pebs;
diff -ur a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
--- a/drivers/net/bonding/bond_main.c	2017-03-23 14:51:41.000000000 +0100
+++ b/drivers/net/bonding/bond_main.c	2017-03-14 02:28:55.000000000 +0100
@@ -697,6 +697,23 @@
 	}
 }
 
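+/*
+ * Find a slave, other than the one becoming active, that currently carries
+ * the bond's MAC address, so fail_over_mac can swap addresses with it even
+ * when no explicit old_active was passed in.
+ */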
+static struct slave *bond_get_old_active(struct bonding *bond,
+					 struct slave *new_active)
+{
+	struct slave *slave;
+	int i;
+
+	bond_for_each_slave(bond, slave, i) {
+		if (slave == new_active)
+			continue;
+
+		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+			return slave;
+	}
+
+	return NULL;
+}
+
 static void bond_do_fail_over_mac(struct bonding *bond,
 				  struct slave *new_active,
 				  struct slave *old_active)
@@ -729,6 +746,9 @@
 		write_unlock_bh(&bond->curr_slave_lock);
 		read_unlock(&bond->lock);
 
+		if (!old_active)
+			old_active = bond_get_old_active(bond, new_active);
+
 		if (old_active) {
 			memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
 			memcpy(saddr.sa_data, old_active->dev->dev_addr,
@@ -1864,6 +1884,7 @@
 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
 		pr_info("%s: destroying bond %s.\n",
 			bond_dev->name, bond_dev->name);
+		bond_remove_proc_entry(bond);
 		unregister_netdevice(bond_dev);
 	}
 	return ret;
diff -ur a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
--- a/drivers/net/can/sja1000/sja1000.c	2017-03-23 14:51:33.000000000 +0100
+++ b/drivers/net/can/sja1000/sja1000.c	2017-03-14 02:28:48.000000000 +0100
@@ -184,6 +184,12 @@
 	priv->write_reg(priv, SJA1000_RXERR, 0x0);
 	priv->read_reg(priv, SJA1000_ECC);
 
+	/* clear interrupt flags */
+	priv->read_reg(priv, SJA1000_IR);
+
 	/* leave reset mode */
 	set_normal_mode(dev);
 }
diff -ur a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
--- a/drivers/net/can/usb/ems_usb.c	2017-03-23 14:51:34.000000000 +0100
+++ b/drivers/net/can/usb/ems_usb.c	2017-03-14 02:28:49.000000000 +0100
@@ -118,6 +118,9 @@
  */
 #define EMS_USB_ARM7_CLOCK 8000000
 
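+/*
+ * Watermarks on the free tx slots reported by the device: stop the netif
+ * queue when fewer than LOW slots remain and wake it again above HIGH.
+ */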
+#define CPC_TX_QUEUE_TRIGGER_LOW	25
+#define CPC_TX_QUEUE_TRIGGER_HIGH	35
+
 /*
  * CAN-Message representation in a CPC_MSG. Message object type is
  * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -279,6 +282,11 @@
 	switch (urb->status) {
 	case 0:
 		dev->free_slots = dev->intr_in_buffer[1];
+		if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
+			if (netif_queue_stopped(netdev)) {
+				netif_wake_queue(netdev);
+			}
+		}
 		break;
 
 	case -ECONNRESET: /* unlink */
@@ -530,8 +538,6 @@
 	/* Release context */
 	context->echo_index = MAX_TX_URBS;
 
-	if (netif_queue_stopped(netdev))
-		netif_wake_queue(netdev);
 }
 
 /*
@@ -591,7 +597,7 @@
 	int err, i;
 
 	dev->intr_in_buffer[0] = 0;
-	dev->free_slots = 15; /* initial size */
+	dev->free_slots = 50; /* initial size */
 
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
@@ -840,7 +846,7 @@
 
 		/* Slow down tx path */
 		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
-		    dev->free_slots < 5) {
+		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
 			netif_stop_queue(netdev);
 		}
 	}
diff -ur a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c	2017-03-23 14:51:01.000000000 +0100
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c	2017-03-14 02:28:13.000000000 +0100
@@ -1012,13 +1012,12 @@
 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
 		8 * 4;
 
-	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
-				&ring_header->dma);
+	ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
+						&ring_header->dma, GFP_KERNEL);
 	if (unlikely(!ring_header->desc)) {
-		dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
+		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
 		goto err_nomem;
 	}
-	memset(ring_header->desc, 0, ring_header->size);
 	/* init TPD ring */
 
 	tpd_ring[0].dma = roundup(ring_header->dma, 8);
diff -ur a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
--- a/drivers/net/ethernet/atheros/atlx/atl2.c	2017-03-23 14:50:59.000000000 +0100
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c	2017-03-14 02:28:12.000000000 +0100
@@ -1412,7 +1412,7 @@
 
 	err = -EIO;
 
-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
 	/* Init PHY as early as possible due to power saving issue  */
diff -ur a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
--- a/drivers/net/ethernet/broadcom/tg3.c	2017-03-23 14:51:28.000000000 +0100
+++ b/drivers/net/ethernet/broadcom/tg3.c	2017-03-14 02:28:36.000000000 +0100
@@ -10508,7 +10508,7 @@
 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
 				sizeof(temperature));
 	spin_unlock_bh(&tp->lock);
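+	/* hwmon sysfs reports temperature in millidegrees Celsius */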
-	return sprintf(buf, "%u\n", temperature);
+	return sprintf(buf, "%u\n", temperature * 1000);
 }
 
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
diff -ur a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
--- a/drivers/net/ethernet/jme.c	2017-03-23 14:50:11.000000000 +0100
+++ b/drivers/net/ethernet/jme.c	2017-03-14 02:27:25.000000000 +0100
@@ -3289,13 +3289,14 @@
 		jme_reset_phy_processor(jme);
 	jme_phy_calibration(jme);
 	jme_phy_setEA(jme);
-	jme_start_irq(jme);
 	netif_device_attach(netdev);
 
 	atomic_inc(&jme->link_changing);
 
 	jme_reset_link(jme);
 
+	jme_start_irq(jme);
+
 	return 0;
 }
 
diff -ur a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
--- a/drivers/net/ethernet/marvell/mvneta.c	2017-03-23 14:51:25.000000000 +0100
+++ b/drivers/net/ethernet/marvell/mvneta.c	2017-03-14 02:28:39.000000000 +0100
@@ -905,7 +905,7 @@
 	/* Set CPU queue access map - all CPUs have access to all RX
 	 * queues and to all TX queues
 	 */
-	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+	for_each_present_cpu(cpu)
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
diff -ur a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c	2017-03-23 14:50:46.000000000 +0100
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c	2017-03-14 02:28:00.000000000 +0100
@@ -1834,7 +1834,7 @@
 			spin_lock_init(&s_state->lock);
 		}
 
-		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
 		INIT_WORK(&priv->mfunc.master.comm_work,
 			  mlx4_master_comm_channel);
diff -ur a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c	2017-03-23 14:50:44.000000000 +0100
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c	2017-03-14 02:27:58.000000000 +0100
@@ -182,7 +182,7 @@
 		return;
 	}
 
-	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
 	s_eqe->slave_id = slave;
 	/* ensure all information is written before setting the ownersip bit */
 	wmb();
diff -ur a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c	2017-03-23 14:51:01.000000000 +0100
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c	2017-03-14 02:28:13.000000000 +0100
@@ -1619,7 +1619,18 @@
 		return;
 	}
 	skb_reserve(new_skb, NET_IP_ALIGN);
+
+	pci_dma_sync_single_for_cpu(qdev->pdev,
+				    dma_unmap_addr(sbq_desc, mapaddr),
+				    dma_unmap_len(sbq_desc, maplen),
+				    PCI_DMA_FROMDEVICE);
+
 	memcpy(skb_put(new_skb, length), skb->data, length);
+
+	pci_dma_sync_single_for_device(qdev->pdev,
+				       dma_unmap_addr(sbq_desc, mapaddr),
+				       dma_unmap_len(sbq_desc, maplen),
+				       PCI_DMA_FROMDEVICE);
 	skb = new_skb;
 
 	/* Frame error, so drop the packet. */
diff -ur a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
--- a/drivers/net/ethernet/renesas/sh_eth.c	2017-03-23 14:50:48.000000000 +0100
+++ b/drivers/net/ethernet/renesas/sh_eth.c	2017-03-14 02:28:01.000000000 +0100
@@ -1160,7 +1160,8 @@
 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
 
 	/* Mark the last entry as wrapping the ring. */
-	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
+	if (rxdesc)
+		rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
 
 	memset(mdp->tx_ring, 0, tx_ringsize);
 
@@ -1420,6 +1421,7 @@
 		desc_status >>= 16;
 #endif
 
+		skb = mdp->rx_skbuff[entry];
 		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
 				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
 			ndev->stats.rx_errors++;
@@ -1435,12 +1437,11 @@
 				ndev->stats.rx_missed_errors++;
 			if (desc_status & RD_RFS10)
 				ndev->stats.rx_over_errors++;
-		} else {
+		} else	if (skb) {
 			if (!mdp->cd->hw_swap)
 				sh_eth_soft_swap(
 					phys_to_virt(ALIGN(rxdesc->addr, 4)),
 					pkt_len + 2);
-			skb = mdp->rx_skbuff[entry];
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
diff -ur a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c	2017-03-23 14:51:18.000000000 +0100
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c	2017-03-14 02:28:32.000000000 +0100
@@ -731,10 +731,13 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
+	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
 
-		info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
 					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_SOFTWARE |
 					SOF_TIMESTAMPING_RAW_HARDWARE;
 
 		if (priv->ptp_clock)
diff -ur a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
--- a/drivers/net/irda/irtty-sir.c	2017-03-23 14:49:47.000000000 +0100
+++ b/drivers/net/irda/irtty-sir.c	2017-03-14 02:26:54.000000000 +0100
@@ -429,16 +429,6 @@
 
 	/* Module stuff handled via irda_ldisc.owner - Jean II */
 
-	/* First make sure we're not already connected. */
-	if (tty->disc_data != NULL) {
-		priv = tty->disc_data;
-		if (priv && priv->magic == IRTTY_MAGIC) {
-			ret = -EEXIST;
-			goto out;
-		}
-		tty->disc_data = NULL;		/* ### */
-	}
-
 	/* stop the underlying  driver */
 	irtty_stop_receiver(tty, TRUE);
 	if (tty->ops->stop)
diff -ur a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
--- a/drivers/net/phy/broadcom.c	2017-03-23 14:49:57.000000000 +0100
+++ b/drivers/net/phy/broadcom.c	2017-03-14 02:27:06.000000000 +0100
@@ -840,7 +840,7 @@
 	{ PHY_ID_BCM5421, 0xfffffff0 },
 	{ PHY_ID_BCM5461, 0xfffffff0 },
 	{ PHY_ID_BCM5464, 0xfffffff0 },
-	{ PHY_ID_BCM5482, 0xfffffff0 },
+	{ PHY_ID_BCM5481, 0xfffffff0 },
 	{ PHY_ID_BCM5482, 0xfffffff0 },
 	{ PHY_ID_BCM50610, 0xfffffff0 },
 	{ PHY_ID_BCM50610M, 0xfffffff0 },
diff -ur a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
--- a/drivers/net/phy/dp83640.c	2017-03-23 14:49:58.000000000 +0100
+++ b/drivers/net/phy/dp83640.c	2017-03-14 02:27:08.000000000 +0100
@@ -45,7 +45,7 @@
 #define PSF_TX		0x1000
 #define EXT_EVENT	1
 #define CAL_EVENT	7
-#define CAL_TRIGGER	7
+#define CAL_TRIGGER	1
 #define PER_TRIGGER	6
 
 #define MII_DP83640_MICR 0x11
diff -ur a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
--- a/drivers/net/phy/phy.c	2017-03-23 14:49:57.000000000 +0100
+++ b/drivers/net/phy/phy.c	2017-03-14 02:27:08.000000000 +0100
@@ -1018,12 +1018,14 @@
 
 	/* According to 802.3az,the EEE is supported only in full duplex-mode.
 	 * Also EEE feature is active when core is operating with MII, GMII
-	 * or RGMII.
+	 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
+	 * should return an error if they do not support EEE.
 	 */
 	if ((phydev->duplex == DUPLEX_FULL) &&
 	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
 	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
+	     (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+	      phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID))) {
 		int eee_lp, eee_cap, eee_adv;
 		u32 lp, cap, adv;
 		int status;
diff -ur a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
--- a/drivers/net/ppp/ppp_generic.c	2017-03-23 14:50:08.000000000 +0100
+++ b/drivers/net/ppp/ppp_generic.c	2017-03-14 02:27:21.000000000 +0100
@@ -716,10 +716,8 @@
 			val &= 0xffff;
 		}
 		vj = slhc_init(val2+1, val+1);
-		if (!vj) {
-			netdev_err(ppp->dev,
-				   "PPP: no memory (VJ compressor)\n");
-			err = -ENOMEM;
+		if (IS_ERR(vj)) {
+			err = PTR_ERR(vj);
 			break;
 		}
 		ppp_lock(ppp);
@@ -2222,7 +2220,7 @@
 
 	pch->ppp = NULL;
 	pch->chan = chan;
-	pch->chan_net = net;
+	pch->chan_net = get_net(net);
 	chan->ppp = pch;
 	init_ppp_file(&pch->file, CHANNEL);
 	pch->file.hdrlen = chan->hdrlen;
@@ -2318,6 +2316,8 @@
 	spin_lock_bh(&pn->all_channels_lock);
 	list_del(&pch->list);
 	spin_unlock_bh(&pn->all_channels_lock);
+	put_net(pch->chan_net);
+	pch->chan_net = NULL;
 
 	pch->file.dead = 1;
 	wake_up_interruptible(&pch->file.rwait);
diff -ur a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
--- a/drivers/net/ppp/pppoe.c	2017-03-23 14:50:06.000000000 +0100
+++ b/drivers/net/ppp/pppoe.c	2017-03-14 02:27:19.000000000 +0100
@@ -313,7 +313,6 @@
 			if (po->pppoe_dev == dev &&
 			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
 				pppox_unbind_sock(sk);
-				sk->sk_state = PPPOX_ZOMBIE;
 				sk->sk_state_change(sk);
 				po->pppoe_dev = NULL;
 				dev_put(dev);
@@ -570,7 +569,7 @@
 
 	po = pppox_sk(sk);
 
-	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+	if (po->pppoe_dev) {
 		dev_put(po->pppoe_dev);
 		po->pppoe_dev = NULL;
 	}
diff -ur a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
--- a/drivers/net/ppp/pptp.c	2017-03-23 14:50:05.000000000 +0100
+++ b/drivers/net/ppp/pptp.c	2017-03-14 02:27:19.000000000 +0100
@@ -419,6 +419,9 @@
 	struct pptp_opt *opt = &po->proto.pptp;
 	int error = 0;
 
+	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	opt->src_addr = sp->sa_addr.pptp;
@@ -440,6 +443,9 @@
 	struct flowi4 fl4;
 	int error = 0;
 
+	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+		return -EINVAL;
+
 	if (sp->sa_protocol != PX_PROTO_PPTP)
 		return -EINVAL;
 
diff -ur a/drivers/net/rionet.c b/drivers/net/rionet.c
--- a/drivers/net/rionet.c	2017-03-23 14:49:46.000000000 +0100
+++ b/drivers/net/rionet.c	2017-03-14 02:26:53.000000000 +0100
@@ -269,7 +269,7 @@
 	struct net_device *ndev = dev_id;
 	struct rionet_private *rnet = netdev_priv(ndev);
 
-	spin_lock(&rnet->lock);
+	spin_lock(&rnet->tx_lock);
 
 	if (netif_msg_intr(rnet))
 		printk(KERN_INFO
@@ -288,7 +288,7 @@
 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
 		netif_wake_queue(ndev);
 
-	spin_unlock(&rnet->lock);
+	spin_unlock(&rnet->tx_lock);
 }
 
 static int rionet_open(struct net_device *ndev)
diff -ur a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
--- a/drivers/net/slip/slhc.c	2017-03-23 14:50:06.000000000 +0100
+++ b/drivers/net/slip/slhc.c	2017-03-14 02:27:19.000000000 +0100
@@ -84,8 +84,9 @@
 static unsigned char * put16(unsigned char *cp, unsigned short x);
 static unsigned short pull16(unsigned char **cpp);
 
-/* Initialize compression data structure
+/* Allocate compression data structure
  *	slots must be in range 0 to 255 (zero meaning no compression)
+ * Returns pointer to structure or ERR_PTR() on error.
  */
 struct slcompress *
 slhc_init(int rslots, int tslots)
@@ -94,11 +95,14 @@
 	register struct cstate *ts;
 	struct slcompress *comp;
 
+	if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
+		return ERR_PTR(-EINVAL);
+
 	comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
 	if (! comp)
 		goto out_fail;
 
-	if ( rslots > 0  &&  rslots < 256 ) {
+	if (rslots > 0) {
 		size_t rsize = rslots * sizeof(struct cstate);
 		comp->rstate = kzalloc(rsize, GFP_KERNEL);
 		if (! comp->rstate)
@@ -106,7 +110,7 @@
 		comp->rslot_limit = rslots - 1;
 	}
 
-	if ( tslots > 0  &&  tslots < 256 ) {
+	if (tslots > 0) {
 		size_t tsize = tslots * sizeof(struct cstate);
 		comp->tstate = kzalloc(tsize, GFP_KERNEL);
 		if (! comp->tstate)
@@ -141,7 +145,7 @@
 out_free:
 	kfree(comp);
 out_fail:
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 /* Free a compression data structure */
diff -ur a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
--- a/drivers/net/slip/slip.c	2017-03-23 14:50:07.000000000 +0100
+++ b/drivers/net/slip/slip.c	2017-03-14 02:27:20.000000000 +0100
@@ -163,7 +163,7 @@
 	if (cbuff == NULL)
 		goto err_exit;
 	slcomp = slhc_init(16, 16);
-	if (slcomp == NULL)
+	if (IS_ERR(slcomp))
 		goto err_exit;
 #endif
 	spin_lock_bh(&sl->lock);
diff -ur a/drivers/net/team/team.c b/drivers/net/team/team.c
--- a/drivers/net/team/team.c	2017-03-23 14:49:49.000000000 +0100
+++ b/drivers/net/team/team.c	2017-03-14 02:26:56.000000000 +0100
@@ -1628,10 +1628,10 @@
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(port, &team->port_list, list)
+	mutex_lock(&team->lock);
+	list_for_each_entry(port, &team->port_list, list)
 		vlan_vid_del(port->dev, proto, vid);
-	rcu_read_unlock();
+	mutex_unlock(&team->lock);
 
 	return 0;
 }
diff -ur a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
--- a/drivers/net/usb/asix_devices.c	2017-03-23 14:50:05.000000000 +0100
+++ b/drivers/net/usb/asix_devices.c	2017-03-14 02:27:18.000000000 +0100
@@ -466,19 +466,7 @@
 		return ret;
 	}
 
-	ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
-	if (ret < 0)
-		return ret;
-
-	msleep(150);
-
-	ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
-	if (ret < 0)
-		return ret;
-
-	msleep(150);
-
-	ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+	ax88772_reset(dev);
 
 	/* Read PHYID register *AFTER* the PHY was reset properly */
 	phyid = asix_get_phyid(dev);
@@ -888,7 +876,7 @@
 	.unbind = ax88772_unbind,
 	.status = asix_status,
 	.link_reset = ax88772_link_reset,
-	.reset = ax88772_reset,
+	.reset = ax88772_link_reset,
 	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
 	.rx_fixup = asix_rx_fixup_common,
 	.tx_fixup = asix_tx_fixup,
diff -ur a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
--- a/drivers/net/usb/cdc_ncm.c	2017-03-23 14:50:03.000000000 +0100
+++ b/drivers/net/usb/cdc_ncm.c	2017-03-14 02:27:15.000000000 +0100
@@ -464,7 +464,11 @@
 
 	iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
 
-	/* reset data interface */
+	/* Reset data interface. Some devices will not reset properly
+	 * unless they are configured first.  Toggle the altsetting to
+	 * force a reset
+	 */
+	usb_set_interface(dev->udev, iface_no, data_altsetting);
 	temp = usb_set_interface(dev->udev, iface_no, 0);
 	if (temp)
 		goto error2;
diff -ur a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
--- a/drivers/net/usb/qmi_wwan.c	2017-03-23 14:50:04.000000000 +0100
+++ b/drivers/net/usb/qmi_wwan.c	2017-03-14 02:27:17.000000000 +0100
@@ -724,6 +724,7 @@
 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
@@ -742,6 +743,7 @@
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
+	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
 	{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},	/* Olivetti Olicard 100 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},	/* Olivetti Olicard 120 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc002, 4)},	/* Olivetti Olicard 140 */
diff -ur a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
--- a/drivers/net/usb/usbnet.c	2017-03-23 14:50:04.000000000 +0100
+++ b/drivers/net/usb/usbnet.c	2017-03-14 02:27:17.000000000 +0100
@@ -750,7 +750,7 @@
 {
 	struct usbnet		*dev = netdev_priv(net);
 	struct driver_info	*info = dev->driver_info;
-	int			retval, pm;
+	int			retval, pm, mpn;
 
 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
 	netif_stop_queue (net);
@@ -781,6 +781,8 @@
 
 	usbnet_purge_paused_rxq(dev);
 
+	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+
 	/* deferred work (task, timer, softirq) must also stop.
 	 * can't flush_scheduled_work() until we drop rtnl (later),
 	 * else workers could deadlock; so make workers a NOP.
@@ -791,8 +793,7 @@
 	if (!pm)
 		usb_autopm_put_interface(dev->intf);
 
-	if (info->manage_power &&
-	    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+	if (info->manage_power && mpn)
 		info->manage_power(dev, 0);
 	else
 		usb_autopm_put_interface(dev->intf);
@@ -1617,6 +1618,13 @@
 	if (info->unbind)
 		info->unbind (dev, udev);
 out1:
+	/* subdrivers must undo all they did in bind() if they
+	 * fail it, but we may fail later and a deferred kevent
+	 * may trigger an error resubmitting itself and, worse,
+	 * schedule a timer. So we kill it all just in case.
+	 */
+	cancel_work_sync(&dev->kevent);
+	del_timer_sync(&dev->delay);
 	free_netdev(net);
 out:
 	return status;
diff -ur a/drivers/net/veth.c b/drivers/net/veth.c
--- a/drivers/net/veth.c	2017-03-23 14:49:46.000000000 +0100
+++ b/drivers/net/veth.c	2017-03-14 02:26:53.000000000 +0100
@@ -116,12 +116,6 @@
 		kfree_skb(skb);
 		goto drop;
 	}
-	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
-	 * will cause bad checksum on forwarded packets
-	 */
-	if (skb->ip_summed == CHECKSUM_NONE &&
-	    rcv->features & NETIF_F_RXCSUM)
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
 		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
diff -ur a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
--- a/drivers/net/virtio_net.c	2017-03-23 14:49:47.000000000 +0100
+++ b/drivers/net/virtio_net.c	2017-03-14 02:26:54.000000000 +0100
@@ -1544,9 +1544,9 @@
 	/* Do we support "hardware" checksums? */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
 		/* This opens up the world of extra features. */
-		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 		if (csum)
-			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 
 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
diff -ur a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
--- a/drivers/net/wan/farsync.c	2017-03-23 14:49:56.000000000 +0100
+++ b/drivers/net/wan/farsync.c	2017-03-14 02:27:04.000000000 +0100
@@ -2544,7 +2544,7 @@
                 dev->mem_start   = card->phys_mem
                                  + BUF_OFFSET ( txBuffer[i][0][0]);
                 dev->mem_end     = card->phys_mem
-                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
+                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
                 dev->base_addr   = card->pci_conf;
                 dev->irq         = card->irq;
 
diff -ur a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
--- a/drivers/net/wan/x25_asy.c	2017-03-23 14:49:53.000000000 +0100
+++ b/drivers/net/wan/x25_asy.c	2017-03-14 02:27:02.000000000 +0100
@@ -537,16 +537,12 @@
 
 static int x25_asy_open_tty(struct tty_struct *tty)
 {
-	struct x25_asy *sl = tty->disc_data;
+	struct x25_asy *sl;
 	int err;
 
 	if (tty->ops->write == NULL)
 		return -EOPNOTSUPP;
 
-	/* First make sure we're not already connected. */
-	if (sl && sl->magic == X25_ASY_MAGIC)
-		return -EEXIST;
-
 	/* OK.  Find a free X.25 channel to use. */
 	sl = x25_asy_alloc();
 	if (sl == NULL)
diff -ur a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
--- a/drivers/net/wireless/ath/ath9k/eeprom.c	2017-03-23 14:52:39.000000000 +0100
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c	2017-03-14 02:30:00.000000000 +0100
@@ -408,10 +408,9 @@
 
 	if (match) {
 		if (AR_SREV_9287(ah)) {
-			/* FIXME: array overrun? */
 			for (i = 0; i < numXpdGains; i++) {
 				minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
-				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
+				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
 						data_9287[idxL].pwrPdg[i],
 						data_9287[idxL].vpdPdg[i],
@@ -421,7 +420,7 @@
 		} else if (eeprom_4k) {
 			for (i = 0; i < numXpdGains; i++) {
 				minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
-				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
+				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
 						data_4k[idxL].pwrPdg[i],
 						data_4k[idxL].vpdPdg[i],
@@ -431,7 +430,7 @@
 		} else {
 			for (i = 0; i < numXpdGains; i++) {
 				minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
-				maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
+				maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
 						data_def[idxL].pwrPdg[i],
 						data_def[idxL].vpdPdg[i],
diff -ur a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
--- a/drivers/net/wireless/ath/ath9k/init.c	2017-03-23 14:52:45.000000000 +0100
+++ b/drivers/net/wireless/ath/ath9k/init.c	2017-03-14 02:30:05.000000000 +0100
@@ -818,6 +818,7 @@
 	hw->max_rate_tries = 10;
 	hw->sta_data_size = sizeof(struct ath_node);
 	hw->vif_data_size = sizeof(struct ath_vif);
+	hw->extra_tx_headroom = 4;
 
 	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
 	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
diff -ur a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
--- a/drivers/net/wireless/ath/ath9k/main.c	2017-03-23 14:52:45.000000000 +0100
+++ b/drivers/net/wireless/ath/ath9k/main.c	2017-03-14 02:30:05.000000000 +0100
@@ -195,11 +195,13 @@
 	ath9k_debug_samp_bb_mac(sc);
 	ath9k_hw_disable_interrupts(ah);
 
-	if (!ath_drain_all_txq(sc))
-		ret = false;
-
-	if (!ath_stoprecv(sc))
-		ret = false;
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		ret &= ath_stoprecv(sc);
+		ret &= ath_drain_all_txq(sc);
+	} else {
+		ret &= ath_drain_all_txq(sc);
+		ret &= ath_stoprecv(sc);
+	}
 
 	return ret;
 }
diff -ur a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c	2017-03-23 14:51:45.000000000 +0100
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c	2017-03-14 02:29:01.000000000 +0100
@@ -1019,7 +1019,7 @@
 			u8 *pn = seq.ccmp.pn;
 
 			ieee80211_get_key_rx_seq(key, i, &seq);
-			aes_sc->pn = cpu_to_le64(
+			aes_sc[i].pn = cpu_to_le64(
 					(u64)pn[5] |
 					((u64)pn[4] << 8) |
 					((u64)pn[3] << 16) |
diff -ur a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c	2017-03-23 14:51:48.000000000 +0100
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c	2017-03-14 02:29:03.000000000 +0100
@@ -295,12 +295,12 @@
 			u8 *pn = seq.ccmp.pn;
 
 			ieee80211_get_key_rx_seq(key, i, &seq);
-			aes_sc->pn = cpu_to_le64((u64)pn[5] |
-						 ((u64)pn[4] << 8) |
-						 ((u64)pn[3] << 16) |
-						 ((u64)pn[2] << 24) |
-						 ((u64)pn[1] << 32) |
-						 ((u64)pn[0] << 40));
+			aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+						   ((u64)pn[4] << 8) |
+						   ((u64)pn[3] << 16) |
+						   ((u64)pn[2] << 24) |
+						   ((u64)pn[1] << 32) |
+						   ((u64)pn[0] << 40));
 		}
 		data->use_rsc_tsc = true;
 		break;
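Annotation: both iwlwifi hunks fix the same slip: the loop iterates i over the key's receive-sequence slots but writes through the unindexed pointer, so every iteration updates element 0 of aes_sc and the remaining entries stay stale. A tiny standalone illustration (struct name and values are made up):

    #include <stdio.h>

    struct aes_ctr_sc {			/* stand-in for the firmware's counter struct */
    	unsigned long long pn;
    };

    int main(void)
    {
    	struct aes_ctr_sc sc[4] = { { 0 } };

    	/* Buggy loop: the base pointer never moves, so only sc[0] is written. */
    	for (int i = 0; i < 4; i++)
    		sc->pn = 0x100 + i;
    	printf("buggy: sc[0]=%#llx sc[3]=%#llx\n", sc[0].pn, sc[3].pn);

    	/* Fixed loop: index each element, as the patches do with aes_sc[i].pn. */
    	for (int i = 0; i < 4; i++)
    		sc[i].pn = 0x100 + i;
    	printf("fixed: sc[0]=%#llx sc[3]=%#llx\n", sc[0].pn, sc[3].pn);
    	return 0;
    }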
diff -ur a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
--- a/drivers/net/wireless/mwifiex/debugfs.c	2017-03-23 14:51:52.000000000 +0100
+++ b/drivers/net/wireless/mwifiex/debugfs.c	2017-03-14 02:29:07.000000000 +0100
@@ -633,7 +633,7 @@
 		(struct mwifiex_private *) file->private_data;
 	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 	char *buf = (char *) addr;
-	int pos = 0, ret = 0, i;
+	int pos, ret, i;
 	u8 value[MAX_EEPROM_DATA];
 
 	if (!buf)
@@ -641,7 +641,7 @@
 
 	if (saved_offset == -1) {
 		/* No command has been given */
-		pos += snprintf(buf, PAGE_SIZE, "0");
+		pos = snprintf(buf, PAGE_SIZE, "0");
 		goto done;
 	}
 
@@ -650,17 +650,17 @@
 				  (u16) saved_bytes, value);
 	if (ret) {
 		ret = -EINVAL;
-		goto done;
+		goto out_free;
 	}
 
-	pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
+	pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
 
 	for (i = 0; i < saved_bytes; i++)
-		pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
-
-	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
 
 done:
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+out_free:
 	free_page(addr);
 	return ret;
 }
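Annotation: the mwifiex hunks switch the accumulation from snprintf() to scnprintf() and track the write position in `pos` instead of strlen(buf). snprintf() returns the length the output would have had, not what actually fit, so summing its return values (and always passing the full PAGE_SIZE) can make `pos` larger than the data really present in the page before it is handed to simple_read_from_buffer(). A userspace sketch of the two return-value conventions; scnprintf_demo() is a stand-in for the kernel helper:

    #include <stdio.h>

    /* Stand-in for the kernel's scnprintf(): returns the number of bytes
     * actually stored in buf (excluding the NUL), never more than size - 1. */
    static int scnprintf_demo(char *buf, size_t size, const char *s)
    {
    	if (size == 0)
    		return 0;
    	int n = snprintf(buf, size, "%s", s);
    	if (n < 0)
    		return 0;
    	return (size_t)n < size ? n : (int)(size - 1);
    }

    int main(void)
    {
    	char buf[8];

    	/* snprintf() reports the would-be length: 10, although only 7
    	 * characters plus a NUL fit. Accumulating this into 'pos' overstates
    	 * how much of the buffer is valid. */
    	int would_be = snprintf(buf, sizeof(buf), "%s", "0123456789");

    	/* The clamped variant reports what was actually stored: 7. */
    	int stored = scnprintf_demo(buf, sizeof(buf), "0123456789");

    	printf("snprintf: %d, clamped: %d\n", would_be, stored);
    	return 0;
    }

The reworked exit labels (`done` vs `out_free`) also let the -EINVAL path free the page without ever passing the unset `pos` to simple_read_from_buffer().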
diff -ur a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
--- a/drivers/net/wireless/rt2x00/rt2800usb.c	2017-03-23 14:53:13.000000000 +0100
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c	2017-03-14 02:30:33.000000000 +0100
@@ -1019,6 +1019,7 @@
 	{ USB_DEVICE(0x07d1, 0x3c16) },
 	{ USB_DEVICE(0x07d1, 0x3c17) },
 	{ USB_DEVICE(0x2001, 0x3c1b) },
+	{ USB_DEVICE(0x2001, 0x3c25) },
 	/* Draytek */
 	{ USB_DEVICE(0x07fa, 0x7712) },
 	/* DVICO */
diff -ur a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c	2017-03-23 14:52:37.000000000 +0100
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c	2017-03-14 02:29:57.000000000 +0100
@@ -313,6 +313,7 @@
 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+	{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
 	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
diff -ur a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
--- a/drivers/net/wireless/rtlwifi/usb.c	2017-03-23 14:52:24.000000000 +0100
+++ b/drivers/net/wireless/rtlwifi/usb.c	2017-03-14 02:29:42.000000000 +0100
@@ -119,7 +119,7 @@
 
 	do {
 		status = usb_control_msg(udev, pipe, request, reqtype, value,
-					 index, pdata, len, 0); /*max. timeout*/
+					 index, pdata, len, 1000);
 		if (status < 0) {
 			/* firmware download is checksummed, don't retry */
 			if ((value >= FW_8192C_START_ADDRESS &&
diff -ur a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
--- a/drivers/net/wireless/ti/wlcore/io.h	2017-03-23 14:51:59.000000000 +0100
+++ b/drivers/net/wireless/ti/wlcore/io.h	2017-03-14 02:29:13.000000000 +0100
@@ -203,19 +203,23 @@
 
 static inline void wl1271_power_off(struct wl1271 *wl)
 {
-	int ret;
+	int ret = 0;
 
 	if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
 		return;
 
-	ret = wl->if_ops->power(wl->dev, false);
+	if (wl->if_ops->power)
+		ret = wl->if_ops->power(wl->dev, false);
 	if (!ret)
 		clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 }
 
 static inline int wl1271_power_on(struct wl1271 *wl)
 {
-	int ret = wl->if_ops->power(wl->dev, true);
+	int ret = 0;
+
+	if (wl->if_ops->power)
+		ret = wl->if_ops->power(wl->dev, true);
 	if (ret == 0)
 		set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 
diff -ur a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
--- a/drivers/net/wireless/ti/wlcore/spi.c	2017-03-23 14:51:56.000000000 +0100
+++ b/drivers/net/wireless/ti/wlcore/spi.c	2017-03-14 02:29:11.000000000 +0100
@@ -72,7 +72,9 @@
  */
 #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
 
-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
+/* Maximum number of SPI write chunks */
+#define WSPI_MAX_NUM_OF_CHUNKS \
+	((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
 
 struct wl12xx_spi_glue {
 	struct device *dev;
@@ -270,9 +272,10 @@
 					     void *buf, size_t len, bool fixed)
 {
 	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
-	struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
+	/* SPI write buffers - 2 for each chunk */
+	struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
 	struct spi_message m;
-	u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
+	u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
 	u32 *cmd;
 	u32 chunk_len;
 	int i;
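Annotation: the new WSPI_MAX_NUM_OF_CHUNKS folds the "+ 1" for a partial last chunk into the macro itself: integer division truncates, so a buffer that is not an exact multiple of the chunk size needs one extra transfer for the tail, and the spi_transfer and command arrays must be sized for that worst case. A small standalone check of the arithmetic, with sizes merely in the same ballpark as the driver's:

    #include <stdio.h>

    /* Ceiling division: chunks of 'chunk' bytes needed to cover 'total' bytes. */
    static unsigned int chunks_needed(unsigned int total, unsigned int chunk)
    {
    	return (total + chunk - 1) / chunk;
    }

    int main(void)
    {
    	unsigned int total = 4 * 4096;	/* aggregation buffer, illustrative */
    	unsigned int chunk = 4092;	/* per-transfer chunk, illustrative */

    	printf("truncating quotient: %u\n", total / chunk);		/* 4 */
    	printf("chunks incl. tail:   %u\n", chunks_needed(total, chunk));	/* 5 */
    	printf("macro-style bound:   %u\n", total / chunk + 1);	/* 5 */
    	return 0;
    }

The quotient-plus-one form used by the patch is an upper bound: the transfer array size works out the same as before, while the commands array gains the slot for the tail chunk that the old sizing left out.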
diff -ur a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
--- a/drivers/net/xen-netback/xenbus.c	2017-03-23 14:49:50.000000000 +0100
+++ b/drivers/net/xen-netback/xenbus.c	2017-03-14 02:26:59.000000000 +0100
@@ -33,6 +33,8 @@
 	enum xenbus_state frontend_state;
 	struct xenbus_watch hotplug_status_watch;
 	u8 have_hotplug_status_watch:1;
+
+	const char *hotplug_script;
 };
 
 static int connect_rings(struct backend_info *);
@@ -55,6 +57,7 @@
 		xenvif_free(be->vif);
 		be->vif = NULL;
 	}
+	kfree(be->hotplug_script);
 	kfree(be);
 	dev_set_drvdata(&dev->dev, NULL);
 	return 0;
@@ -71,6 +74,7 @@
 	struct xenbus_transaction xbt;
 	int err;
 	int sg;
+	const char *script;
 	struct backend_info *be = kzalloc(sizeof(struct backend_info),
 					  GFP_KERNEL);
 	if (!be) {
@@ -131,6 +135,15 @@
 		goto fail;
 	}
 
+	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+	if (IS_ERR(script)) {
+		err = PTR_ERR(script);
+		xenbus_dev_fatal(dev, err, "reading script");
+		goto fail;
+	}
+
+	be->hotplug_script = script;
+
 	err = xenbus_switch_state(dev, XenbusStateInitWait);
 	if (err)
 		goto fail;
@@ -160,22 +173,14 @@
 			  struct kobj_uevent_env *env)
 {
 	struct backend_info *be = dev_get_drvdata(&xdev->dev);
-	char *val;
 
-	val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
-	if (IS_ERR(val)) {
-		int err = PTR_ERR(val);
-		xenbus_dev_fatal(xdev, err, "reading script");
-		return err;
-	} else {
-		if (add_uevent_var(env, "script=%s", val)) {
-			kfree(val);
-			return -ENOMEM;
-		}
-		kfree(val);
-	}
+	if (!be)
+		return 0;
+
+	if (add_uevent_var(env, "script=%s", be->hotplug_script))
+		return -ENOMEM;
 
-	if (!be || !be->vif)
+	if (!be->vif)
 		return 0;
 
 	return add_uevent_var(env, "vif=%s", be->vif->dev->name);
diff -ur a/drivers/of/address.c b/drivers/of/address.c
--- a/drivers/of/address.c	2017-03-23 14:41:40.000000000 +0100
+++ b/drivers/of/address.c	2017-03-14 02:17:26.000000000 +0100
@@ -629,10 +629,10 @@
 	struct resource res;
 
 	while (dn) {
-		if (of_address_to_resource(dn, 0, &res))
-			continue;
-		if (res.start == base_address)
+		if (!of_address_to_resource(dn, 0, &res) &&
+		    res.start == base_address)
 			return dn;
+
 		dn = of_find_matching_node(dn, matches);
 	}
 
diff -ur a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
--- a/drivers/parisc/iommu-helpers.h	2017-03-23 14:40:26.000000000 +0100
+++ b/drivers/parisc/iommu-helpers.h	2017-03-14 02:15:48.000000000 +0100
@@ -102,7 +102,11 @@
 	struct scatterlist *contig_sg;	   /* contig chunk head */
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	unsigned int n_mappings = 0;
-	unsigned int max_seg_size = dma_get_max_seg_size(dev);
+	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
+					(unsigned)DMA_CHUNK_SIZE);
+	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
+	if (max_seg_boundary)	/* check if the addition above didn't overflow */
+		max_seg_size = min(max_seg_size, max_seg_boundary);
 
 	while (nents > 0) {
 
@@ -137,14 +141,11 @@
 
 			/*
 			** First make sure current dma stream won't
-			** exceed DMA_CHUNK_SIZE if we coalesce the
+			** exceed max_seg_size if we coalesce the
 			** next entry.
 			*/   
-			if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
-					    IOVP_SIZE) > DMA_CHUNK_SIZE))
-				break;
-
-			if (startsg->length + dma_len > max_seg_size)
+			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
+				     max_seg_size))
 				break;
 
 			/*
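Annotation: the iommu-helpers change folds three limits into one: the coalesced DMA stream may not exceed the device's max segment size, DMA_CHUNK_SIZE, or the device's segment boundary. dma_get_seg_boundary() returns a mask, so "+ 1" can wrap to zero when the mask is all ones, which is why the clamp is applied only when the sum is non-zero. A userspace sketch of that clamping, with stand-in limits (DEMO_CHUNK_SIZE is not the parisc value):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define DEMO_CHUNK_SIZE (1u << 20)	/* illustrative 1 MiB chunk limit */

    static unsigned int clamp_seg_size(unsigned int dev_max, unsigned int boundary_mask)
    {
    	unsigned int max_seg = MIN(dev_max, DEMO_CHUNK_SIZE);
    	unsigned int boundary = boundary_mask + 1;	/* wraps to 0 for an all-ones mask */

    	if (boundary)			/* only clamp if the addition did not overflow */
    		max_seg = MIN(max_seg, boundary);
    	return max_seg;
    }

    int main(void)
    {
    	printf("%u\n", clamp_seg_size(4u << 20, 0xffffu));	/* 65536: boundary wins */
    	printf("%u\n", clamp_seg_size(4u << 20, ~0u));		/* 1048576: boundary ignored */
    	return 0;
    }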
diff -ur a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
--- a/drivers/pci/pcie/aer/aerdrv.c	2017-03-23 14:53:25.000000000 +0100
+++ b/drivers/pci/pcie/aer/aerdrv.c	2017-03-14 02:30:47.000000000 +0100
@@ -262,7 +262,6 @@
 	rpc->rpd = dev;
 	INIT_WORK(&rpc->dpc_handler, aer_isr);
 	mutex_init(&rpc->rpc_mutex);
-	init_waitqueue_head(&rpc->wait_release);
 
 	/* Use PCIe bus function to store rpc into PCIe device */
 	set_service_data(dev, rpc);
@@ -285,8 +284,7 @@
 		if (rpc->isr)
 			free_irq(dev->irq, dev);
 
-		wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+		flush_work(&rpc->dpc_handler);
 		aer_disable_rootport(rpc);
 		kfree(rpc);
 		set_service_data(dev, NULL);
diff -ur a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
--- a/drivers/pci/pcie/aer/aerdrv_core.c	2017-03-23 14:53:26.000000000 +0100
+++ b/drivers/pci/pcie/aer/aerdrv_core.c	2017-03-14 02:30:47.000000000 +0100
@@ -817,8 +817,6 @@
 	while (get_e_source(rpc, &e_src))
 		aer_isr_one_error(p_device, &e_src);
 	mutex_unlock(&rpc->rpc_mutex);
-
-	wake_up(&rpc->wait_release);
 }
 
 /**
diff -ur a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
--- a/drivers/pci/pcie/aer/aerdrv.h	2017-03-23 14:53:25.000000000 +0100
+++ b/drivers/pci/pcie/aer/aerdrv.h	2017-03-14 02:30:46.000000000 +0100
@@ -76,7 +76,6 @@
 					 * recovery on the same
 					 * root port hierarchy
 					 */
-	wait_queue_head_t wait_release;
 };
 
 struct aer_broadcast_data {
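Annotation: the three AER hunks replace an open-coded completion protocol (a waitqueue plus producer/consumer indices, woken at the end of aer_isr()) with a single flush_work() call, which already blocks until the last queued execution of the work item has finished. A kernel-context sketch of the resulting teardown pattern; the names are illustrative, not the AER driver's, and this builds only as part of a kernel module, not standalone:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_service {
    	struct work_struct handler;	/* INIT_WORK(&svc->handler, demo_handler) at setup */
    };

    static void demo_handler(struct work_struct *work)
    {
    	/* drain queued events; no wake_up() or index bookkeeping needed */
    }

    static void demo_remove(struct demo_service *svc)
    {
    	/*
    	 * flush_work() returns only once any pending or running instance of
    	 * demo_handler() has completed, so freeing svc afterwards is safe.
    	 */
    	flush_work(&svc->handler);
    	kfree(svc);
    }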
diff -ur a/drivers/pci/probe.c b/drivers/pci/probe.c
--- a/drivers/pci/probe.c	2017-03-23 14:53:21.000000000 +0100
+++ b/drivers/pci/probe.c	2017-03-14 02:30:41.000000000 +0100
@@ -151,6 +151,9 @@
 	struct pci_bus_region region;
 	bool bar_too_big = false, bar_disabled = false;
 
+	if (dev->non_compliant_bars)
+		return 0;
+
 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 #ifdef MY_DEF_HERE
 	if (PCI_VENDOR_ID_INTEL == dev->vendor && 0x2934 == dev->device) {
@@ -862,6 +865,7 @@
 int pci_setup_device(struct pci_dev *dev)
 {
 	u32 class;
+	u16 cmd;
 	u8 hdr_type;
 	struct pci_slot *slot;
 	int pos = 0;
@@ -904,6 +908,16 @@
 	 
 	class = dev->class >> 8;
 
+	if (dev->non_compliant_bars) {
+		pci_read_config_word(dev, PCI_COMMAND, &cmd);
+		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
+			cmd &= ~PCI_COMMAND_IO;
+			cmd &= ~PCI_COMMAND_MEMORY;
+			pci_write_config_word(dev, PCI_COMMAND, cmd);
+		}
+	}
+
 	switch (dev->hdr_type) {		     
 	case PCI_HEADER_TYPE_NORMAL:		     
 		if (class == PCI_CLASS_BRIDGE_PCI)
diff -ur a/drivers/pci/quirks.c b/drivers/pci/quirks.c
--- a/drivers/pci/quirks.c	2017-03-23 14:53:21.000000000 +0100
+++ b/drivers/pci/quirks.c	2017-03-14 02:30:41.000000000 +0100
@@ -2186,12 +2186,14 @@
 
 static void fixup_ti816x_class(struct pci_dev *dev)
 {
-	 
-	dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
-	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
+	u32 class = dev->class;
+
+	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
+	dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
+		 class, dev->class);
 }
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
-				 PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+			      PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
 
 static void fixup_mpss_256(struct pci_dev *dev)
 {
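Annotation: the quirks.c fix shifts the class constant left by 8 because dev->class stores the full 24-bit class code (base class, sub-class, programming interface), while PCI_CLASS_MULTIMEDIA_VIDEO (0x0400) covers only the upper two bytes. Without the shift the device ends up with base class 0x00 instead of 0x04. A quick check of the layout:

    #include <stdio.h>

    #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400	/* base class 0x04, sub-class 0x00 */

    int main(void)
    {
    	unsigned int wrong = PCI_CLASS_MULTIMEDIA_VIDEO;	/* 0x000400 */
    	unsigned int right = PCI_CLASS_MULTIMEDIA_VIDEO << 8;	/* 0x040000 */

    	/* dev->class >> 16 is how the base class is extracted. */
    	printf("without shift: class=0x%06x base=0x%02x\n", wrong, wrong >> 16);
    	printf("with shift:    class=0x%06x base=0x%02x\n", right, right >> 16);
    	return 0;
    }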
diff -ur a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
--- a/drivers/pci/xen-pcifront.c	2017-03-23 14:53:21.000000000 +0100
+++ b/drivers/pci/xen-pcifront.c	2017-03-14 02:30:41.000000000 +0100
@@ -51,7 +51,7 @@
 };
 
 struct pcifront_sd {
-	int domain;
+	struct pci_sysdata sd;
 	struct pcifront_device *pdev;
 };
 
@@ -65,7 +65,9 @@
 				    unsigned int domain, unsigned int bus,
 				    struct pcifront_device *pdev)
 {
-	sd->domain = domain;
+	/* Because we do not expose that information via XenBus. */
+	sd->sd.node = first_online_node;
+	sd->sd.domain = domain;
 	sd->pdev = pdev;
 }
 
@@ -463,8 +465,8 @@
 	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
 		 domain, bus);
 
-	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+	bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
 	if (!bus_entry || !sd) {
 		err = -ENOMEM;
 		goto err_out;
diff -ur a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
--- a/drivers/pcmcia/topic.h	2017-03-23 14:41:35.000000000 +0100
+++ b/drivers/pcmcia/topic.h	2017-03-14 02:17:18.000000000 +0100
@@ -104,6 +104,9 @@
 #define TOPIC_EXCA_IF_CONTROL		0x3e	/* 8 bit */
 #define TOPIC_EXCA_IFC_33V_ENA		0x01
 
+#define TOPIC_PCI_CFG_PPBCN		0x3e	/* 16-bit */
+#define TOPIC_PCI_CFG_PPBCN_WBEN	0x0400
+
 static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
 {
 	struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
@@ -137,6 +140,7 @@
 static int topic95_override(struct yenta_socket *socket)
 {
 	u8 fctrl;
+	u16 ppbcn;
 
 	/* enable 3.3V support for 16bit cards */
 	fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
@@ -145,6 +149,18 @@
 	/* tell yenta to use exca registers to power 16bit cards */
 	socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
 
+	/* Disable write buffers to prevent lockups under load with numerous
+	   Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the
+	   net.  This is not a power-on default according to the datasheet
+	   but some BIOSes seem to set it. */
+	if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0
+	    && socket->dev->revision <= 7
+	    && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) {
+		ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN;
+		pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn);
+		dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n");
+	}
+
 	return 0;
 }
 
diff -ur a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
--- a/drivers/pinctrl/core.c	2017-03-23 14:40:15.000000000 +0100
+++ b/drivers/pinctrl/core.c	2017-03-14 02:15:36.000000000 +0100
@@ -1075,7 +1075,7 @@
 EXPORT_SYMBOL_GPL(devm_pinctrl_put);
 
 int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-			 bool dup, bool locked)
+			 bool dup)
 {
 	int i, ret;
 	struct pinctrl_maps *maps_node;
@@ -1143,11 +1143,9 @@
 		maps_node->maps = maps;
 	}
 
-	if (!locked)
-		mutex_lock(&pinctrl_maps_mutex);
+	mutex_lock(&pinctrl_maps_mutex);
 	list_add_tail(&maps_node->node, &pinctrl_maps);
-	if (!locked)
-		mutex_unlock(&pinctrl_maps_mutex);
+	mutex_unlock(&pinctrl_maps_mutex);
 
 	return 0;
 }
@@ -1162,7 +1160,7 @@
 int pinctrl_register_mappings(struct pinctrl_map const *maps,
 			      unsigned num_maps)
 {
-	return pinctrl_register_map(maps, num_maps, true, false);
+	return pinctrl_register_map(maps, num_maps, true);
 }
 
 void pinctrl_unregister_map(struct pinctrl_map const *map)
diff -ur a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
--- a/drivers/pinctrl/core.h	2017-03-23 14:40:14.000000000 +0100
+++ b/drivers/pinctrl/core.h	2017-03-14 02:15:36.000000000 +0100
@@ -183,7 +183,7 @@
 }
 
 int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-			 bool dup, bool locked);
+			 bool dup);
 void pinctrl_unregister_map(struct pinctrl_map const *map);
 
 extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
diff -ur a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
--- a/drivers/pinctrl/devicetree.c	2017-03-23 14:40:10.000000000 +0100
+++ b/drivers/pinctrl/devicetree.c	2017-03-14 02:15:31.000000000 +0100
@@ -92,7 +92,7 @@
 	dt_map->num_maps = num_maps;
 	list_add_tail(&dt_map->node, &p->dt_maps);
 
-	return pinctrl_register_map(map, num_maps, false, true);
+	return pinctrl_register_map(map, num_maps, false);
 }
 
 struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
diff -ur a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c	2017-03-23 14:40:17.000000000 +0100
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c	2017-03-14 02:15:38.000000000 +0100
@@ -358,11 +358,11 @@
 	MPP_MODE(64,
 	   MPP_FUNCTION(0x0, "gpio", NULL),
 	   MPP_FUNCTION(0x1, "spi0", "miso"),
-	   MPP_FUNCTION(0x2, "spi0-1", "cs1")),
+	   MPP_FUNCTION(0x2, "spi0", "cs1")),
 	MPP_MODE(65,
 	   MPP_FUNCTION(0x0, "gpio", NULL),
 	   MPP_FUNCTION(0x1, "spi0", "mosi"),
-	   MPP_FUNCTION(0x2, "spi0-1", "cs2")),
+	   MPP_FUNCTION(0x2, "spi0", "cs2")),
 };
 
 static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
diff -ur a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c	2017-03-23 14:40:17.000000000 +0100
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c	2017-03-14 02:15:38.000000000 +0100
@@ -14,10 +14,7 @@
  * available: mv78230, mv78260 and mv78460. From a pin muxing
  * perspective, the mv78230 has 49 MPP pins. The mv78260 and mv78460
  * both have 67 MPP pins (more GPIOs and address lines for the memory
- * bus mainly). The only difference between the mv78260 and the
- * mv78460 in terms of pin muxing is the addition of two functions on
- * pins 43 and 56 to access the VDD of the CPU2 and 3 (mv78260 has two
- * cores, mv78460 has four cores).
+ * bus mainly).
  */
 
 #include <linux/err.h>
@@ -159,20 +156,17 @@
 	MPP_MODE(24,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "sata1", "prsnt",    V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x2, "nf", "bootcs-re",   V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x3, "tdm", "rst",        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x4, "lcd", "hsync",      V_MV78230_PLUS)),
 	MPP_MODE(25,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "sata0", "prsnt",    V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x2, "nf", "bootcs-we",   V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x3, "tdm", "pclk",       V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x4, "lcd", "vsync",      V_MV78230_PLUS)),
 	MPP_MODE(26,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x3, "tdm", "fsync",      V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x4, "lcd", "clk",        V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd",    V_MV78230_PLUS)),
+		 MPP_VAR_FUNCTION(0x4, "lcd", "clk",        V_MV78230_PLUS)),
 	MPP_MODE(27,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "ptp", "trig",       V_MV78230_PLUS),
@@ -187,8 +181,7 @@
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "ptp", "clk",        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x3, "tdm", "int0",       V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk",    V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
+		 MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk",    V_MV78230_PLUS)),
 	MPP_MODE(30,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "sd0", "clk",        V_MV78230_PLUS),
@@ -196,13 +189,11 @@
 	MPP_MODE(31,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "sd0", "cmd",        V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x3, "tdm", "int2",       V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
+		 MPP_VAR_FUNCTION(0x3, "tdm", "int2",       V_MV78230_PLUS)),
 	MPP_MODE(32,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "sd0", "d0",         V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x3, "tdm", "int3",       V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd",    V_MV78230_PLUS)),
+		 MPP_VAR_FUNCTION(0x3, "tdm", "int3",       V_MV78230_PLUS)),
 	MPP_MODE(33,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "sd0", "d1",         V_MV78230_PLUS),
@@ -234,7 +225,6 @@
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "spi", "cs1",        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x2, "uart2", "cts",      V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x3, "vdd", "cpu1-pd",    V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x4, "lcd", "vga-hsync",  V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq0",   V_MV78230_PLUS)),
 	MPP_MODE(41,
@@ -249,15 +239,13 @@
 		 MPP_VAR_FUNCTION(0x1, "uart2", "rxd",      V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x2, "uart0", "cts",      V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x3, "tdm", "int7",       V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x4, "tdm-1", "timer",    V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
+		 MPP_VAR_FUNCTION(0x4, "tdm-1", "timer",    V_MV78230_PLUS)),
 	MPP_MODE(43,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "uart2", "txd",      V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x2, "uart0", "rts",      V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x3, "spi", "cs3",        V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x4, "pcie", "rstout",    V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu2-3-pd",  V_MV78460)),
+		 MPP_VAR_FUNCTION(0x4, "pcie", "rstout",    V_MV78230_PLUS)),
 	MPP_MODE(44,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "uart2", "cts",      V_MV78230_PLUS),
@@ -286,7 +274,7 @@
 		 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq3",   V_MV78230_PLUS)),
 	MPP_MODE(48,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
-		 MPP_VAR_FUNCTION(0x1, "tclk", NULL,        V_MV78230_PLUS),
+		 MPP_VAR_FUNCTION(0x1, "dev", "clkout",     V_MV78230_PLUS),
 		 MPP_VAR_FUNCTION(0x2, "dev", "burst/last", V_MV78230_PLUS)),
 	MPP_MODE(49,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
@@ -308,16 +296,13 @@
 		 MPP_VAR_FUNCTION(0x1, "dev", "ad19",       V_MV78260_PLUS)),
 	MPP_MODE(55,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
-		 MPP_VAR_FUNCTION(0x1, "dev", "ad20",       V_MV78260_PLUS),
-		 MPP_VAR_FUNCTION(0x2, "vdd", "cpu0-pd",    V_MV78260_PLUS)),
+		 MPP_VAR_FUNCTION(0x1, "dev", "ad20",       V_MV78260_PLUS)),
 	MPP_MODE(56,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
-		 MPP_VAR_FUNCTION(0x1, "dev", "ad21",       V_MV78260_PLUS),
-		 MPP_VAR_FUNCTION(0x2, "vdd", "cpu1-pd",    V_MV78260_PLUS)),
+		 MPP_VAR_FUNCTION(0x1, "dev", "ad21",       V_MV78260_PLUS)),
 	MPP_MODE(57,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
-		 MPP_VAR_FUNCTION(0x1, "dev", "ad22",       V_MV78260_PLUS),
-		 MPP_VAR_FUNCTION(0x2, "vdd", "cpu2-3-pd",  V_MV78460)),
+		 MPP_VAR_FUNCTION(0x1, "dev", "ad22",       V_MV78260_PLUS)),
 	MPP_MODE(58,
 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
 		 MPP_VAR_FUNCTION(0x1, "dev", "ad23",       V_MV78260_PLUS)),
diff -ur a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
--- a/drivers/platform/x86/intel_scu_ipcutil.c	2017-03-23 14:57:41.000000000 +0100
+++ b/drivers/platform/x86/intel_scu_ipcutil.c	2017-03-14 02:34:39.000000000 +0100
@@ -49,7 +49,7 @@
 
 static int scu_reg_access(u32 cmd, struct scu_ipc_data  *data)
 {
-	int count = data->count;
+	unsigned int count = data->count;
 
 	if (count == 0 || count == 3 || count > 4)
 		return -EINVAL;
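Annotation: the intel_scu_ipcutil change matters because data->count is supplied by userspace: stored in a signed int, a value above INT_MAX becomes negative and sails past `count == 0 || count == 3 || count > 4`, only to be used as a length further down. As an unsigned quantity the same bit pattern is huge and rejected by `count > 4`. A minimal demonstration of the check alone:

    #include <stdbool.h>
    #include <stdio.h>

    static bool accepted_signed(int count)
    {
    	return !(count == 0 || count == 3 || count > 4);
    }

    static bool accepted_unsigned(unsigned int count)
    {
    	return !(count == 0 || count == 3 || count > 4);
    }

    int main(void)
    {
    	unsigned int from_user = 0xffffffffu;	/* attacker-chosen u32 */

    	printf("signed check accepts it:   %d\n", accepted_signed((int)from_user));	/* 1 */
    	printf("unsigned check accepts it: %d\n", accepted_unsigned(from_user));	/* 0 */
    	return 0;
    }

What exactly the driver does with the bogus length afterwards is not visible in this hunk; the point is only that the comparison itself is the gate.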
diff -ur a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
--- a/drivers/pnp/pnpbios/bioscalls.c	2017-03-23 14:41:46.000000000 +0100
+++ b/drivers/pnp/pnpbios/bioscalls.c	2017-03-14 02:17:33.000000000 +0100
@@ -21,7 +21,7 @@
 
 #include "pnpbios.h"
 
-static struct {
+__visible struct {
 	u16 offset;
 	u16 segment;
 } pnp_bios_callpoint;
@@ -41,6 +41,7 @@
 
 __asm__(".text			\n"
 	__ALIGN_STR "\n"
+	".globl pnp_bios_callfunc\n"
 	"pnp_bios_callfunc:\n"
 	"	pushl %edx	\n"
 	"	pushl %ecx	\n"
@@ -66,9 +67,9 @@
  * after PnP BIOS oopses.
  */
 
-u32 pnp_bios_fault_esp;
-u32 pnp_bios_fault_eip;
-u32 pnp_bios_is_utter_crap = 0;
+__visible u32 pnp_bios_fault_esp;
+__visible u32 pnp_bios_fault_eip;
+__visible u32 pnp_bios_is_utter_crap = 0;
 
 static spinlock_t pnp_bios_lock;
 
diff -ur a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
--- a/drivers/power/wm831x_power.c	2017-03-23 14:39:51.000000000 +0100
+++ b/drivers/power/wm831x_power.c	2017-03-14 02:15:08.000000000 +0100
@@ -566,7 +566,7 @@
 
 	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
 	ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
-				   IRQF_TRIGGER_RISING, "System power low",
+				   IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
 				   power);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
@@ -576,7 +576,7 @@
 
 	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
 	ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
-				   IRQF_TRIGGER_RISING, "Power source",
+				   IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
 				   power);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
@@ -589,7 +589,7 @@
 				 platform_get_irq_byname(pdev,
 							 wm831x_bat_irqs[i]));
 		ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
-					   IRQF_TRIGGER_RISING,
+					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 					   wm831x_bat_irqs[i],
 					   power);
 		if (ret != 0) {
diff -ur a/drivers/regulator/core.c b/drivers/regulator/core.c
--- a/drivers/regulator/core.c	2017-03-23 14:46:36.000000000 +0100
+++ b/drivers/regulator/core.c	2017-03-14 02:22:36.000000000 +0100
@@ -769,7 +769,7 @@
 static void print_constraints(struct regulator_dev *rdev)
 {
 	struct regulation_constraints *constraints = rdev->constraints;
-	char buf[80] = "";
+	char buf[160] = "";
 	int count = 0;
 	int ret;
 
diff -ur a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
--- a/drivers/remoteproc/remoteproc_debugfs.c	2017-03-23 14:39:59.000000000 +0100
+++ b/drivers/remoteproc/remoteproc_debugfs.c	2017-03-14 02:15:19.000000000 +0100
@@ -156,7 +156,7 @@
 	char buf[10];
 	int ret;
 
-	if (count > sizeof(buf))
+	if (count < 1 || count > sizeof(buf))
 		return count;
 
 	ret = copy_from_user(buf, user_buf, count);
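Annotation: the remoteproc hunk adds a lower bound on `count`. A plausible reading (the rest of the handler is not shown here) is that the handler later inspects buf[count - 1], e.g. to strip a trailing newline, which for a zero-length write indexes one byte before the buffer. A standalone sketch of that failure mode, with a hypothetical parse_state() helper:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical debugfs-style write handler core. */
    static int parse_state(const char *user_data, size_t count)
    {
    	char buf[10];

    	if (count < 1 || count > sizeof(buf))	/* reject empty and oversized writes */
    		return -1;

    	memcpy(buf, user_data, count);
    	if (buf[count - 1] == '\n')		/* count == 0 would index buf[-1] here */
    		count--;
    	return (int)count;
    }

    int main(void)
    {
    	printf("%d\n", parse_state("start\n", 6));	/* 5 */
    	printf("%d\n", parse_state("", 0));		/* -1: rejected instead of buf[-1] */
    	return 0;
    }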
diff -ur a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
--- a/drivers/rtc/rtc-vr41xx.c	2017-03-23 14:46:53.000000000 +0100
+++ b/drivers/rtc/rtc-vr41xx.c	2017-03-14 02:22:59.000000000 +0100
@@ -272,12 +272,13 @@
 }
 
 static const struct rtc_class_ops vr41xx_rtc_ops = {
-	.release	= vr41xx_rtc_release,
-	.ioctl		= vr41xx_rtc_ioctl,
-	.read_time	= vr41xx_rtc_read_time,
-	.set_time	= vr41xx_rtc_set_time,
-	.read_alarm	= vr41xx_rtc_read_alarm,
-	.set_alarm	= vr41xx_rtc_set_alarm,
+	.release		= vr41xx_rtc_release,
+	.ioctl			= vr41xx_rtc_ioctl,
+	.read_time		= vr41xx_rtc_read_time,
+	.set_time		= vr41xx_rtc_set_time,
+	.read_alarm		= vr41xx_rtc_read_alarm,
+	.set_alarm		= vr41xx_rtc_set_alarm,
+	.alarm_irq_enable	= vr41xx_rtc_alarm_irq_enable,
 };
 
 static int rtc_probe(struct platform_device *pdev)
diff -ur a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
--- a/drivers/s390/block/dasd_alias.c	2017-03-23 14:58:06.000000000 +0100
+++ b/drivers/s390/block/dasd_alias.c	2017-03-14 02:35:00.000000000 +0100
@@ -262,8 +262,10 @@
 		spin_unlock_irqrestore(&lcu->lock, flags);
 		cancel_work_sync(&lcu->suc_data.worker);
 		spin_lock_irqsave(&lcu->lock, flags);
-		if (device == lcu->suc_data.device)
+		if (device == lcu->suc_data.device) {
+			dasd_put_device(device);
 			lcu->suc_data.device = NULL;
+		}
 	}
 	was_pending = 0;
 	if (device == lcu->ruac_data.device) {
@@ -271,8 +273,10 @@
 		was_pending = 1;
 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
 		spin_lock_irqsave(&lcu->lock, flags);
-		if (device == lcu->ruac_data.device)
+		if (device == lcu->ruac_data.device) {
+			dasd_put_device(device);
 			lcu->ruac_data.device = NULL;
+		}
 	}
 	private->lcu = NULL;
 	spin_unlock_irqrestore(&lcu->lock, flags);
@@ -547,8 +551,10 @@
 	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
 			    " alias data in lcu (rc = %d), retry later", rc);
-		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
+		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
+			dasd_put_device(device);
 	} else {
+		dasd_put_device(device);
 		lcu->ruac_data.device = NULL;
 		lcu->flags &= ~UPDATE_PENDING;
 	}
@@ -591,8 +597,10 @@
 	 */
 	if (!usedev)
 		return -EINVAL;
+	dasd_get_device(usedev);
 	lcu->ruac_data.device = usedev;
-	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
+	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
+		dasd_put_device(usedev);
 	return 0;
 }
 
@@ -720,7 +728,7 @@
 	ASCEBC((char *) &cqr->magic, 4);
 	ccw = cqr->cpaddr;
 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
-	ccw->flags = 0 ;
+	ccw->flags = CCW_FLAG_SLI;
 	ccw->count = 16;
 	ccw->cda = (__u32)(addr_t) cqr->data;
 	((char *)cqr->data)[0] = reason;
@@ -924,6 +932,7 @@
 	/* 3. read new alias configuration */
 	_schedule_lcu_update(lcu, device);
 	lcu->suc_data.device = NULL;
+	dasd_put_device(device);
 	spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
@@ -983,6 +992,8 @@
 	}
 	lcu->suc_data.reason = reason;
 	lcu->suc_data.device = device;
+	dasd_get_device(device);
 	spin_unlock(&lcu->lock);
-	schedule_work(&lcu->suc_data.worker);
+	if (!schedule_work(&lcu->suc_data.worker))
+		dasd_put_device(device);
 };
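Annotation: the dasd_alias.c hunks all enforce one invariant: a device pointer handed to deferred work must carry its own reference. dasd_get_device() is taken before the pointer is published, the worker drops it when done, and if schedule_work()/schedule_delayed_work() reports that the work was already queued, the caller drops the reference itself, since no additional execution will consume it. A kernel-context sketch of the idiom with illustrative names (demo_get/demo_put stand in for dasd_get_device/dasd_put_device; this is not standalone-runnable code):

    #include <linux/workqueue.h>
    #include <linux/kref.h>

    struct demo_device {
    	struct kref ref;
    };

    static void demo_release(struct kref *k) { /* final free of the device */ }
    static void demo_get(struct demo_device *d) { kref_get(&d->ref); }
    static void demo_put(struct demo_device *d) { kref_put(&d->ref, demo_release); }

    static struct demo_device *pending_dev;	/* published pointer, like lcu->ruac_data.device */
    static struct work_struct demo_work;	/* INIT_WORK(&demo_work, demo_worker) at setup */

    static void queue_update(struct demo_device *dev)
    {
    	demo_get(dev);				/* the reference travels with the work item */
    	pending_dev = dev;
    	if (!schedule_work(&demo_work))
    		demo_put(dev);			/* already queued: nobody will consume this ref */
    }

    static void demo_worker(struct work_struct *work)
    {
    	struct demo_device *dev = pending_dev;

    	/* ... perform the update ... */
    	pending_dev = NULL;
    	demo_put(dev);				/* balance the reference taken in queue_update() */
    }

The locking the real driver uses around lcu->ruac_data and lcu->suc_data is omitted; the sketch only shows the get/put pairing around the scheduling calls.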
diff -ur a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
--- a/drivers/scsi/3w-9xxx.c	2017-03-23 14:58:41.000000000 +0100
+++ b/drivers/scsi/3w-9xxx.c	2017-03-14 02:35:28.000000000 +0100
@@ -149,7 +149,6 @@
 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
 
 /* Functions */
 
@@ -226,6 +225,17 @@
 	.llseek		= noop_llseek,
 };
 
+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers.  Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+	return scsi_sg_count(cmd) != 1 ||
+		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
 /* This function will complete an aen request from the isr */
 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 {
@@ -1351,11 +1361,12 @@
 				}
 
 				/* Now complete the io */
+				if (twa_command_mapped(cmd))
+					scsi_dma_unmap(cmd);
+				cmd->scsi_done(cmd);
 				tw_dev->state[request_id] = TW_S_COMPLETED;
 				twa_free_request_id(tw_dev, request_id);
 				tw_dev->posted_request_count--;
-				tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
-				twa_unmap_scsi_data(tw_dev, request_id);
 			}
 
 			/* Check for valid status after each drain */
@@ -1413,26 +1424,6 @@
 	}
 } /* End twa_load_sgl() */
 
-/* This function will perform a pci-dma mapping for a scatter gather list */
-static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
-{
-	int use_sg;
-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
-	use_sg = scsi_dma_map(cmd);
-	if (!use_sg)
-		return 0;
-	else if (use_sg < 0) {
-		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
-		return 0;
-	}
-
-	cmd->SCp.phase = TW_PHASE_SGLIST;
-	cmd->SCp.have_data_in = use_sg;
-
-	return use_sg;
-} /* End twa_map_scsi_sg_data() */
-
 /* This function will poll for a response interrupt of a request */
 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
 {
@@ -1611,9 +1602,12 @@
 		    (tw_dev->state[i] != TW_S_INITIAL) &&
 		    (tw_dev->state[i] != TW_S_COMPLETED)) {
 			if (tw_dev->srb[i]) {
-				tw_dev->srb[i]->result = (DID_RESET << 16);
-				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
-				twa_unmap_scsi_data(tw_dev, i);
+				struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+				cmd->result = (DID_RESET << 16);
+				if (twa_command_mapped(cmd))
+					scsi_dma_unmap(cmd);
+				cmd->scsi_done(cmd);
 			}
 		}
 	}
@@ -1792,21 +1786,20 @@
 	/* Save the scsi command for use by the ISR */
 	tw_dev->srb[request_id] = SCpnt;
 
-	/* Initialize phase to zero */
-	SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
 	switch (retval) {
 	case SCSI_MLQUEUE_HOST_BUSY:
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		twa_free_request_id(tw_dev, request_id);
-		twa_unmap_scsi_data(tw_dev, request_id);
 		break;
 	case 1:
-		tw_dev->state[request_id] = TW_S_COMPLETED;
-		twa_free_request_id(tw_dev, request_id);
-		twa_unmap_scsi_data(tw_dev, request_id);
 		SCpnt->result = (DID_ERROR << 16);
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		done(SCpnt);
+		tw_dev->state[request_id] = TW_S_COMPLETED;
+		twa_free_request_id(tw_dev, request_id);
 		retval = 0;
 	}
 out:
@@ -1864,8 +1857,7 @@
 		/* Map sglist from scsi layer to cmd packet */
 
 		if (scsi_sg_count(srb)) {
-			if ((scsi_sg_count(srb) == 1) &&
-			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+			if (!twa_command_mapped(srb)) {
 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
 					scsi_sg_copy_to_buffer(srb,
@@ -1874,8 +1866,8 @@
 				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
 			} else {
-				sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
-				if (sg_count == 0)
+				sg_count = scsi_dma_map(srb);
+				if (sg_count < 0)
 					goto out;
 
 				scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1938,7 +1930,7 @@
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+	if (!twa_command_mapped(cmd) &&
 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
 		if (scsi_sg_count(cmd) == 1) {
@@ -1990,15 +1982,6 @@
 	return(table[index].text);
 } /* End twa_string_lookup() */
 
-/* This function will perform a pci-dma unmap */
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
-{
-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
-	if (cmd->SCp.phase == TW_PHASE_SGLIST)
-		scsi_dma_unmap(cmd);
-} /* End twa_unmap_scsi_data() */
-
 /* This function gets called when a disk is coming on-line */
 static int twa_slave_configure(struct scsi_device *sdev)
 {
diff -ur a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
--- a/drivers/scsi/3w-9xxx.h	2017-03-23 14:58:46.000000000 +0100
+++ b/drivers/scsi/3w-9xxx.h	2017-03-14 02:35:32.000000000 +0100
@@ -324,11 +324,6 @@
 #define TW_CURRENT_DRIVER_BUILD 0
 #define TW_CURRENT_DRIVER_BRANCH 0
 
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SINGLE  1
-#define TW_PHASE_SGLIST  2
-
 /* Misc defines */
 #define TW_9550SX_DRAIN_COMPLETED	      0xFFFF
 #define TW_SECTOR_SIZE                        512
diff -ur a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
--- a/drivers/scsi/3w-sas.c	2017-03-23 14:58:21.000000000 +0100
+++ b/drivers/scsi/3w-sas.c	2017-03-14 02:35:14.000000000 +0100
@@ -303,26 +303,6 @@
 	return 0;
 } /* End twl_post_command_packet() */
 
-/* This function will perform a pci-dma mapping for a scatter gather list */
-static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
-{
-	int use_sg;
-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
-	use_sg = scsi_dma_map(cmd);
-	if (!use_sg)
-		return 0;
-	else if (use_sg < 0) {
-		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
-		return 0;
-	}
-
-	cmd->SCp.phase = TW_PHASE_SGLIST;
-	cmd->SCp.have_data_in = use_sg;
-
-	return use_sg;
-} /* End twl_map_scsi_sg_data() */
-
 /* This function hands scsi cdb's to the firmware */
 static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
 {
@@ -370,8 +350,8 @@
 	if (!sglistarg) {
 		/* Map sglist from scsi layer to cmd packet */
 		if (scsi_sg_count(srb)) {
-			sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
-			if (sg_count == 0)
+			sg_count = scsi_dma_map(srb);
+			if (sg_count <= 0)
 				goto out;
 
 			scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1116,15 +1096,6 @@
 	return retval;
 } /* End twl_initialize_device_extension() */
 
-/* This function will perform a pci-dma unmap */
-static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
-{
-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
-	if (cmd->SCp.phase == TW_PHASE_SGLIST)
-		scsi_dma_unmap(cmd);
-} /* End twl_unmap_scsi_data() */
-
 /* This function will handle attention interrupts */
 static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
 {
@@ -1265,11 +1236,11 @@
 			}
 
 			/* Now complete the io */
+			scsi_dma_unmap(cmd);
+			cmd->scsi_done(cmd);
 			tw_dev->state[request_id] = TW_S_COMPLETED;
 			twl_free_request_id(tw_dev, request_id);
 			tw_dev->posted_request_count--;
-			tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
-			twl_unmap_scsi_data(tw_dev, request_id);
 		}
 
 		/* Check for another response interrupt */
@@ -1414,10 +1385,12 @@
 		if ((tw_dev->state[i] != TW_S_FINISHED) &&
 		    (tw_dev->state[i] != TW_S_INITIAL) &&
 		    (tw_dev->state[i] != TW_S_COMPLETED)) {
-			if (tw_dev->srb[i]) {
-				tw_dev->srb[i]->result = (DID_RESET << 16);
-				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
-				twl_unmap_scsi_data(tw_dev, i);
+			struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+			if (cmd) {
+				cmd->result = (DID_RESET << 16);
+				scsi_dma_unmap(cmd);
+				cmd->scsi_done(cmd);
 			}
 		}
 	}
@@ -1521,9 +1494,6 @@
 	/* Save the scsi command for use by the ISR */
 	tw_dev->srb[request_id] = SCpnt;
 
-	/* Initialize phase to zero */
-	SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
 	retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
 	if (retval) {
 		tw_dev->state[request_id] = TW_S_COMPLETED;
diff -ur a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
--- a/drivers/scsi/3w-sas.h	2017-03-23 14:58:46.000000000 +0100
+++ b/drivers/scsi/3w-sas.h	2017-03-14 02:35:33.000000000 +0100
@@ -103,10 +103,6 @@
 #define TW_CURRENT_DRIVER_BUILD 0
 #define TW_CURRENT_DRIVER_BRANCH 0
 
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SGLIST  2
-
 /* Misc defines */
 #define TW_SECTOR_SIZE                        512
 #define TW_MAX_UNITS			      32
diff -ur a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
--- a/drivers/scsi/3w-xxxx.c	2017-03-23 14:58:33.000000000 +0100
+++ b/drivers/scsi/3w-xxxx.c	2017-03-14 02:35:22.000000000 +0100
@@ -1283,32 +1283,6 @@
 	return 0;
 } /* End tw_initialize_device_extension() */
 
-static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
-	int use_sg;
-
-	dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
-
-	use_sg = scsi_dma_map(cmd);
-	if (use_sg < 0) {
-		printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
-		return 0;
-	}
-
-	cmd->SCp.phase = TW_PHASE_SGLIST;
-	cmd->SCp.have_data_in = use_sg;
-
-	return use_sg;
-} /* End tw_map_scsi_sg_data() */
-
-static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
-	dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
-
-	if (cmd->SCp.phase == TW_PHASE_SGLIST)
-		scsi_dma_unmap(cmd);
-} /* End tw_unmap_scsi_data() */
-
 /* This function will reset a device extension */
 static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
 {
@@ -1331,8 +1305,8 @@
 			srb = tw_dev->srb[i];
 			if (srb != NULL) {
 				srb->result = (DID_RESET << 16);
-				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
-				tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
+				scsi_dma_unmap(srb);
+				srb->scsi_done(srb);
 			}
 		}
 	}
@@ -1779,8 +1753,8 @@
 	command_packet->byte8.io.lba = lba;
 	command_packet->byte6.block_count = num_sectors;
 
-	use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
-	if (!use_sg)
+	use_sg = scsi_dma_map(srb);
+	if (use_sg <= 0)
 		return 1;
 
 	scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
@@ -1967,9 +1941,6 @@
 	/* Save the scsi command for use by the ISR */
 	tw_dev->srb[request_id] = SCpnt;
 
-	/* Initialize phase to zero */
-	SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
 	switch (*command) {
 		case READ_10:
 		case READ_6:
@@ -2196,12 +2167,11 @@
 
 				/* Now complete the io */
 				if ((error != TW_ISR_DONT_COMPLETE)) {
+					scsi_dma_unmap(tw_dev->srb[request_id]);
+					tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
 					tw_dev->state[request_id] = TW_S_COMPLETED;
 					tw_state_request_finish(tw_dev, request_id);
 					tw_dev->posted_request_count--;
-					tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
-					
-					tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
 				}
 			}
 				
diff -ur a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
--- a/drivers/scsi/3w-xxxx.h	2017-03-23 14:58:43.000000000 +0100
+++ b/drivers/scsi/3w-xxxx.h	2017-03-14 02:35:30.000000000 +0100
@@ -195,11 +195,6 @@
 #define TW_AEN_SMART_FAIL        0x000F
 #define TW_AEN_SBUF_FAIL         0x0024
 
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SINGLE 1
-#define TW_PHASE_SGLIST 2
-
 /* Misc defines */
 #define TW_ALIGNMENT_6000		      64 /* 64 bytes */
 #define TW_ALIGNMENT_7000                     4  /* 4 bytes */
diff -ur a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
--- a/drivers/scsi/aacraid/commsup.c	2017-03-23 14:58:59.000000000 +0100
+++ b/drivers/scsi/aacraid/commsup.c	2017-03-14 02:35:42.000000000 +0100
@@ -83,9 +83,12 @@
 
 void aac_fib_map_free(struct aac_dev *dev)
 {
-	pci_free_consistent(dev->pdev,
-	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
-	  dev->hw_fib_va, dev->hw_fib_pa);
+	if (dev->hw_fib_va && dev->max_fib_size) {
+		pci_free_consistent(dev->pdev,
+		(dev->max_fib_size *
+		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
+		dev->hw_fib_va, dev->hw_fib_pa);
+	}
 	dev->hw_fib_va = NULL;
 	dev->hw_fib_pa = 0;
 }
diff -ur a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
--- a/drivers/scsi/be2iscsi/be_main.c	2017-03-23 14:59:30.000000000 +0100
+++ b/drivers/scsi/be2iscsi/be_main.c	2017-03-14 02:36:02.000000000 +0100
@@ -4034,6 +4034,7 @@
 	scsi_host_put(phba->shost);
 free_kset:
 	iscsi_boot_destroy_kset(phba->boot_kset);
+	phba->boot_kset = NULL;
 	return -ENOMEM;
 }
 
diff -ur a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c	2017-03-23 14:58:55.000000000 +0100
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c	2017-03-14 02:35:39.000000000 +0100
@@ -569,7 +569,7 @@
 			/*
 			 * Command Lock contention
 			 */
-			err = SCSI_DH_RETRY;
+			err = SCSI_DH_IMM_RETRY;
 		break;
 	default:
 		break;
@@ -619,6 +619,8 @@
 		err = mode_select_handle_sense(sdev, h->sense);
 		if (err == SCSI_DH_RETRY && retry_cnt--)
 			goto retry;
+		if (err == SCSI_DH_IMM_RETRY)
+			goto retry;
 	}
 	if (err == SCSI_DH_OK) {
 		h->state = RDAC_STATE_ACTIVE;
Nur in b/drivers/scsi: esas2r.
diff -ur a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
--- a/drivers/scsi/hosts.c	2017-03-23 14:58:38.000000000 +0100
+++ b/drivers/scsi/hosts.c	2017-03-14 02:35:27.000000000 +0100
@@ -303,6 +303,17 @@
 		kfree(queuedata);
 	}
 
+	if (shost->shost_state == SHOST_CREATED) {
+		/*
+		 * Free the shost_dev device name here if scsi_host_alloc()
+		 * and scsi_host_put() have been called but neither
+		 * scsi_host_add() nor scsi_host_remove() has been called.
+		 * This prevents the memory allocated for the shost_dev
+		 * name from being leaked.
+		 */
+		kfree(dev_name(&shost->shost_dev));
+	}
+
 	scsi_destroy_command_freelist(shost);
 	if (shost->bqt)
 		blk_free_tags(shost->bqt);
diff -ur a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
--- a/drivers/scsi/hpsa.c	2017-03-23 14:58:49.000000000 +0100
+++ b/drivers/scsi/hpsa.c	2017-03-14 02:35:33.000000000 +0100
@@ -3893,10 +3893,6 @@
 
 	/* Save the PCI command register */
 	pci_read_config_word(pdev, 4, &command_register);
-	/* Turn the board off.  This is so that later pci_restore_state()
-	 * won't turn the board on before the rest of config space is ready.
-	 */
-	pci_disable_device(pdev);
 	pci_save_state(pdev);
 
 	/* find the first memory BAR, so we can find the cfg table */
@@ -3944,11 +3940,6 @@
 		goto unmap_cfgtable;
 
 	pci_restore_state(pdev);
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		dev_warn(&pdev->dev, "failed to enable device.\n");
-		goto unmap_cfgtable;
-	}
 	pci_write_config_word(pdev, 4, command_register);
 
 	/* Some devices (notably the HP Smart Array 5i Controller)
@@ -4443,6 +4434,23 @@
 	if (!reset_devices)
 		return 0;
 
+	/* The kdump kernel is loading and we don't know what state the
+	 * PCI interface is in. dev->enable_cnt is zero, so we call
+	 * enable+disable, wait a while and switch it on.
+	 */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
+		return -ENODEV;
+	}
+	pci_disable_device(pdev);
+	msleep(260);			/* a randomly chosen number */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "failed to enable device.\n");
+		return -ENODEV;
+	}
+	pci_set_master(pdev);
 	/* Reset the controller with a PCI power-cycle or via doorbell */
 	rc = hpsa_kdump_hard_reset_controller(pdev);
 
@@ -4451,10 +4459,11 @@
 	 * "performant mode".  Or, it might be 640x, which can't reset
 	 * due to concerns about shared bbwc between 6402/6404 pair.
 	 */
-	if (rc == -ENOTSUPP)
-		return rc; /* just try to do the kdump anyhow. */
-	if (rc)
-		return -ENODEV;
+	if (rc) {
+		if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
+			rc = -ENODEV;
+		goto out_disable;
+	}
 
 	/* Now try to get the controller to respond to a no-op */
 	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
@@ -4465,7 +4474,11 @@
 			dev_warn(&pdev->dev, "no-op failed%s\n",
 					(i < 11 ? "; re-trying" : ""));
 	}
-	return 0;
+
+out_disable:
+
+	pci_disable_device(pdev);
+	return rc;
 }
 
 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
@@ -4608,6 +4621,7 @@
 		iounmap(h->transtable);
 	if (h->cfgtable)
 		iounmap(h->cfgtable);
+	pci_disable_device(h->pdev);
 	pci_release_regions(h->pdev);
 	kfree(h);
 }
diff -ur a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
--- a/drivers/scsi/ipr.c	2017-03-23 14:58:46.000000000 +0100
+++ b/drivers/scsi/ipr.c	2017-03-14 02:35:30.000000000 +0100
@@ -554,9 +554,10 @@
 {
 	struct ipr_trace_entry *trace_entry;
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	unsigned int trace_index;
 
-	trace_entry = &ioa_cfg->trace[atomic_add_return
-			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+	trace_entry = &ioa_cfg->trace[trace_index];
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
@@ -1005,10 +1006,15 @@
 
 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
 {
+	unsigned int hrrq;
+
 	if (ioa_cfg->hrrq_num == 1)
-		return 0;
-	else
-		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+		hrrq = 0;
+	else {
+		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+	}
+	return hrrq;
 }
 
 /**
@@ -3901,13 +3907,17 @@
 	struct ipr_sglist *sglist;
 	char fname[100];
 	char *src;
-	int len, result, dnld_size;
+	char *endline;
+	int result, dnld_size;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	len = snprintf(fname, 99, "%s", buf);
-	fname[len-1] = '\0';
+	snprintf(fname, sizeof(fname), "%s", buf);
+
+	endline = strchr(fname, '\n');
+	if (endline)
+		*endline = '\0';
 
 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
@@ -6140,21 +6150,23 @@
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-	unsigned long hrrq_flags;
+	unsigned long lock_flags;
 
 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
 		scsi_dma_unmap(scsi_cmd);
 
-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
 	} else {
-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		spin_lock(&ipr_cmd->hrrq->_lock);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_unlock(&ipr_cmd->hrrq->_lock);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	}
 }
 
diff -ur a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
--- a/drivers/scsi/ipr.h	2017-03-23 14:58:41.000000000 +0100
+++ b/drivers/scsi/ipr.h	2017-03-14 02:35:29.000000000 +0100
@@ -257,7 +257,7 @@
 #define IPR_RUNTIME_RESET				0x40000000
 
 #define IPR_IPL_INIT_MIN_STAGE_TIME			5
-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 15
+#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 30
 #define IPR_IPL_INIT_STAGE_UNKNOWN			0x0
 #define IPR_IPL_INIT_STAGE_TRANSOP			0xB0000000
 #define IPR_IPL_INIT_STAGE_MASK				0xff000000
@@ -1452,6 +1452,7 @@
 
 #define IPR_NUM_TRACE_INDEX_BITS	8
 #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK		(IPR_NUM_TRACE_ENTRIES - 1)
 #define IPR_TRACE_SIZE	(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
 	char trace_start[8];
 #define IPR_TRACE_START_LABEL			"trace"
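Annotation: the ipr trace fix relies on two facts. First, `x & (N - 1)` equals `x % N` only when N is a power of two, which IPR_NUM_TRACE_ENTRIES is by construction (1 << IPR_NUM_TRACE_INDEX_BITS). Second, atomic_add_return() operates on a signed counter: once it wraps past INT_MAX, the C `%` operator yields a negative remainder and hence a negative array index, while the mask always lands in [0, N). Quick standalone check:

    #include <stdio.h>

    #define ENTRIES 256			/* power of two, like 1 << IPR_NUM_TRACE_INDEX_BITS */
    #define MASK (ENTRIES - 1)

    int main(void)
    {
    	int counter = -5;		/* a wrapped-around signed counter */

    	printf("modulo index: %d\n", counter % ENTRIES);	/* -5: invalid array index */
    	printf("masked index: %d\n", counter & MASK);		/* 251: always in range */
    	return 0;
    }

The hrrq_index hunk in ipr.c keeps the modulo but copies the atomic result into an unsigned local first, which addresses the same wrap-around problem for the round-robin queue selection.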
diff -ur a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
--- a/drivers/scsi/iscsi_tcp.c	2017-03-23 14:58:39.000000000 +0100
+++ b/drivers/scsi/iscsi_tcp.c	2017-03-14 02:35:28.000000000 +0100
@@ -82,7 +82,7 @@
 	return 0;
 }
 
-static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
 {
 	struct iscsi_conn *conn;
 	struct iscsi_tcp_conn *tcp_conn;
diff -ur a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
--- a/drivers/scsi/iscsi_tcp.h	2017-03-23 14:58:45.000000000 +0100
+++ b/drivers/scsi/iscsi_tcp.h	2017-03-14 02:35:32.000000000 +0100
@@ -40,7 +40,7 @@
 
 	struct iscsi_sw_tcp_send out;
 	/* old values for socket callbacks */
-	void			(*old_data_ready)(struct sock *, int);
+	void			(*old_data_ready)(struct sock *);
 	void			(*old_state_change)(struct sock *);
 	void			(*old_write_space)(struct sock *);
 
diff -ur a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
--- a/drivers/scsi/Kconfig	2016-10-20 04:32:03.000000000 +0200
+++ b/drivers/scsi/Kconfig	2016-06-23 08:51:53.000000000 +0200
@@ -601,6 +601,7 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called arcmsr (modprobe arcmsr).
 
+source "drivers/scsi/esas2r/Kconfig"
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt2sas/Kconfig"
 source "drivers/scsi/mpt3sas/Kconfig"
diff -ur a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
--- a/drivers/scsi/libfc/fc_fcp.c	2017-03-23 14:59:09.000000000 +0100
+++ b/drivers/scsi/libfc/fc_fcp.c	2017-03-14 02:35:50.000000000 +0100
@@ -1039,11 +1039,26 @@
 		fc_fcp_pkt_hold(fsp);
 		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-		if (!fc_fcp_lock_pkt(fsp)) {
+		spin_lock_bh(&fsp->scsi_pkt_lock);
+		if (!(fsp->state & FC_SRB_COMPL)) {
+			fsp->state |= FC_SRB_COMPL;
+			/*
+			 * TODO: dropping scsi_pkt_lock and then reacquiring
+			 * again around fc_fcp_cleanup_cmd() is required,
+			 * since fc_fcp_cleanup_cmd() calls into
+			 * fc_seq_set_resp() and that function can end up
+			 * sleeping via schedule(). Maybe schedule() and the
+			 * related code should be removed instead of unlocking
+			 * here, to avoid a scheduling-while-atomic bug.
+			 */
+			spin_unlock_bh(&fsp->scsi_pkt_lock);
+
 			fc_fcp_cleanup_cmd(fsp, error);
+
+			spin_lock_bh(&fsp->scsi_pkt_lock);
 			fc_io_compl(fsp);
-			fc_fcp_unlock_pkt(fsp);
 		}
+		spin_unlock_bh(&fsp->scsi_pkt_lock);
 
 		fc_fcp_pkt_release(fsp);
 		spin_lock_irqsave(&si->scsi_queue_lock, flags);
diff -ur a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
--- a/drivers/scsi/lpfc/lpfc_init.c	2017-03-23 15:00:07.000000000 +0100
+++ b/drivers/scsi/lpfc/lpfc_init.c	2017-03-14 02:36:31.000000000 +0100
@@ -2682,7 +2682,7 @@
 	}
 
 	vports = lpfc_create_vport_work_array(phba);
-	if (vports != NULL)
+	if (vports != NULL) {
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			struct Scsi_Host *shost;
 			shost = lpfc_shost_from_vport(vports[i]);
@@ -2699,7 +2699,8 @@
 			}
 			spin_unlock_irq(shost->host_lock);
 		}
-		lpfc_destroy_vport_work_array(phba, vports);
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
 
 	lpfc_unblock_mgmt_io(phba);
 	return 0;
diff -ur a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
--- a/drivers/scsi/lpfc/lpfc_sli.c	2017-03-23 15:00:17.000000000 +0100
+++ b/drivers/scsi/lpfc/lpfc_sli.c	2017-03-14 02:36:40.000000000 +0100
@@ -262,6 +262,16 @@
 		return NULL;
 
 	q->hba_index = idx;
+
+	/*
+	 * insert barrier for instruction interlock : data from the hardware
+	 * must have the valid bit checked before it can be copied and acted
+	 * upon. Given that speculative execution in lpfc_sli4_cq_get() was
+	 * seen acting on content before the valid bit was checked, add a
+	 * barrier here as well. It may not be needed, since "content" is a
+	 * single 32-bit entity here (vs. the multi-word structure for CQEs).
+	 */
+	mb();
 	return eqe;
 }
 
@@ -367,6 +377,17 @@
 
 	cqe = q->qe[q->hba_index].cqe;
 	q->hba_index = idx;
+
+	/*
+	 * insert barrier for instruction interlock : data from the hardware
+	 * must have the valid bit checked before it can be copied and acted
+	 * upon. Speculative instructions were allowing a bcopy at the start
+	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+	 * after our return, to copy data before the valid bit check above
+	 * was done. As such, some of the copied data was stale. The barrier
+	 * ensures the check is before any data is copied.
+	 */
+	mb();
 	return cqe;
 }
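
Both hunks above order the valid-bit check before any use of the entry contents. A userspace analogue of the same ordering, with a C11 acquire load standing in for the kernel's mb() (a sketch of the idea only, not the driver's code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct entry {
	uint32_t payload[15];
	atomic_uint valid;		/* the producer writes this last */
};

/* Only copy the payload once the valid flag has been observed; the
 * acquire load keeps the copy from being reordered before the check. */
static int consume(struct entry *e, uint32_t out[15])
{
	if (!atomic_load_explicit(&e->valid, memory_order_acquire))
		return 0;
	memcpy(out, e->payload, sizeof(e->payload));
	return 1;
}

int main(void)
{
	struct entry e = { .payload = { 42 } };
	uint32_t out[15];

	printf("%d\n", consume(&e, out));	/* 0: not valid yet */
	atomic_store_explicit(&e.valid, 1, memory_order_release);
	printf("%d %u\n", consume(&e, out), out[0]);	/* 1 42 */
	return 0;
}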
 
diff -ur a/drivers/scsi/Makefile b/drivers/scsi/Makefile
--- a/drivers/scsi/Makefile	2016-10-20 04:32:03.000000000 +0200
+++ b/drivers/scsi/Makefile	2016-06-23 08:51:53.000000000 +0200
@@ -141,6 +141,7 @@
 obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
+obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
 obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
diff -ur a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
--- a/drivers/scsi/megaraid/megaraid_sas_base.c	2017-03-23 14:59:58.000000000 +0100
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c	2017-03-14 02:36:26.000000000 +0100
@@ -3597,7 +3597,7 @@
 	}
 
 	instance->max_sectors_per_req = instance->max_num_sge *
-						PAGE_SIZE / 512;
+						SGE_BUFFER_SIZE / 512;
 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
 		instance->max_sectors_per_req = tmp_sectors;
 
@@ -5046,6 +5046,9 @@
 	int i;
 	int error = 0;
 	compat_uptr_t ptr;
+	unsigned long local_raw_ptr;
+	u32 local_sense_off;
+	u32 local_sense_len;
 
 	if (clear_user(ioc, sizeof(*ioc)))
 		return -EFAULT;
@@ -5063,9 +5066,14 @@
 	 * sense_len is not null, so prepare the 64bit value under
 	 * the same condition.
 	 */
-	if (ioc->sense_len) {
+	if (get_user(local_raw_ptr, ioc->frame.raw) ||
+		get_user(local_sense_off, &ioc->sense_off) ||
+		get_user(local_sense_len, &ioc->sense_len))
+		return -EFAULT;
+
+	if (local_sense_len) {
 		void __user **sense_ioc_ptr =
-			(void __user **)(ioc->frame.raw + ioc->sense_off);
+			(void __user **)((u8*)local_raw_ptr + local_sense_off);
 		compat_uptr_t *sense_cioc_ptr =
 			(compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
 		if (get_user(ptr, sense_cioc_ptr) ||
diff -ur a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
--- a/drivers/scsi/megaraid/megaraid_sas.h	2017-03-23 14:59:51.000000000 +0100
+++ b/drivers/scsi/megaraid/megaraid_sas.h	2017-03-14 02:36:22.000000000 +0100
@@ -300,6 +300,7 @@
 	MR_EVT_ARGS_GENERIC,
 };
 
+#define SGE_BUFFER_SIZE	4096
 /*
  * define constants for device list query options
  */
diff -ur a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
--- a/drivers/scsi/mvsas/mv_sas.c	2017-03-23 14:58:59.000000000 +0100
+++ b/drivers/scsi/mvsas/mv_sas.c	2017-03-14 02:35:41.000000000 +0100
@@ -987,6 +987,8 @@
 static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
 			  struct mvs_slot_info *slot, u32 slot_idx)
 {
+	if (!slot)
+		return;
 	if (!slot->task)
 		return;
 	if (!sas_protocol_ata(task->task_proto))
diff -ur a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
--- a/drivers/scsi/qla2xxx/qla_isr.c	2017-03-23 14:59:09.000000000 +0100
+++ b/drivers/scsi/qla2xxx/qla_isr.c	2017-03-14 02:35:47.000000000 +0100
@@ -523,8 +523,9 @@
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-	uint32_t	rscn_entry, host_pid;
+	uint32_t	rscn_entry, host_pid, tmp_pid;
 	unsigned long	flags;
+	fc_port_t	*fcport = NULL;
 
 	/* Setup to process RIO completion. */
 	handle_cnt = 0;
@@ -918,6 +919,20 @@
 		if (qla2x00_is_a_vp_did(vha, rscn_entry))
 			break;
 
+		/*
+		 * Search for the rport related to this RSCN entry and mark it
+		 * as lost.
+		 */
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (atomic_read(&fcport->state) != FCS_ONLINE)
+				continue;
+			tmp_pid = fcport->d_id.b24;
+			if (fcport->d_id.b24 == rscn_entry) {
+				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+				break;
+			}
+		}
+
 		atomic_set(&vha->loop_down_timer, 0);
 		vha->flags.management_server_logged_in = 0;
 
diff -ur a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
--- a/drivers/scsi/scsi.c	2017-03-23 14:58:23.000000000 +0100
+++ b/drivers/scsi/scsi.c	2017-03-14 02:35:16.000000000 +0100
@@ -561,7 +561,9 @@
 			syno_disk_hiternation_cmd_printk(cmd->device, cmd);
 		}
 #endif  
-		cmd->device->idle = jiffies;
+		if (0 == cmd->device->do_standby_syncing) {
+			cmd->device->idle = jiffies;
+		}
 		cmd->device->spindown = 0;
 	}
 
@@ -596,7 +598,9 @@
 				syno_disk_hiternation_cmd_printk(cmd->device, cmd);
 			}
 #endif  
-			cmd->device->idle = jiffies;
+			if (0 == cmd->device->do_standby_syncing) {
+				cmd->device->idle = jiffies;
+			}
 		}
 	} else if(LOG_SENSE != cmd->cmnd[0] &&
 			TEST_UNIT_READY != cmd->cmnd[0] &&
@@ -608,7 +612,9 @@
 			}
 #endif  
 
-		cmd->device->idle = jiffies;
+		if (0 == cmd->device->do_standby_syncing) {
+			cmd->device->idle = jiffies;
+		}
 	}
 #endif  
 
diff -ur a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
--- a/drivers/scsi/scsi_error.c	2017-03-23 14:58:36.000000000 +0100
+++ b/drivers/scsi/scsi_error.c	2017-03-14 02:35:24.000000000 +0100
@@ -1847,8 +1847,17 @@
 	 * We never actually get interrupted because kthread_run
 	 * disables signal delivery for the created thread.
 	 */
-	while (!kthread_should_stop()) {
+	while (true) {
+		/*
+		 * The sequence in kthread_stop() sets the stop flag first
+		 * and then wakes the process.  To avoid missed wakeups, the
+		 * task should always be in a non-running state before the
+		 * stop flag is checked.
+		 */
 		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
+
 		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
 		    shost->host_failed != shost->host_busy) {
 			SCSI_LOG_ERROR_RECOVERY(1,
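
The rewritten loop above goes non-running before testing the stop flag, so the wakeup issued by kthread_stop() cannot slip in between the check and the sleep. The same missed-wakeup discipline in a userspace analogue, using a condition variable in place of the kthread machinery (a sketch only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool should_stop;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!should_stop)		/* flag re-tested after every wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_mutex_lock(&lock);
	should_stop = true;		/* set the flag first ...             */
	pthread_cond_signal(&cond);	/* ... then wake, like kthread_stop() */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("worker stopped");
	return 0;
}
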
diff -ur a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
--- a/drivers/scsi/scsi_lib.c	2017-03-23 14:58:38.000000000 +0100
+++ b/drivers/scsi/scsi_lib.c	2017-03-14 02:35:26.000000000 +0100
@@ -565,6 +565,216 @@
 	return error;
 }
 
+#ifdef MY_ABC_HERE
+extern unsigned char
+blSectorNeedAutoRemap(struct scsi_cmnd *scsi_cmd, sector_t lba);
+
+static void
+syno_scsi_do_remap_done(struct request *req, int uptodate)
+{
+	__blk_put_request(req->q, req);
+}
+
+static unsigned int
+syno_scsi_do_remap(struct scsi_cmnd *scsi_cmd, sector_t badLba)
+{
+	unsigned int iRet = -1, iCheck = 0, i = 0;
+	unsigned int uSectors = 0;
+	struct request_queue *q = NULL;
+	u8 lbal = 0;
+	size_t size = 0;
+	struct scsi_device *device = NULL;
+	struct request *req = NULL;
+
+	if (NULL == scsi_cmd) {
+		printk("%s:%s(%d) Failed to get scsi_cmd\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	device = scsi_cmd->device;
+	if (NULL == device) {
+		printk("%s:%s(%d) Failed to get device\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	q = device->request_queue;
+	if (NULL == q) {
+		printk("%s:%s(%d) Failed to get request_queue\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	uSectors = queue_physical_block_size(q) / queue_logical_block_size(q);
+	lbal = (u8)(badLba & 0xff);
+	lbal = (lbal & (~(uSectors - 1)));  
+	size = SYNO_SCSI_SECT_SIZE * uSectors;
+
+	req = blk_get_request(q, WRITE, GFP_ATOMIC);
+	if (NULL == req) {
+		printk("%s:%s(%d) Failed to get request\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	req->cmd[0] = WRITE_16;
+	req->cmd[1] = 0;
+	 
+	req->cmd[2] = (u8)((badLba & 0xff00000000000000) >> 56);
+	req->cmd[3] = (u8)((badLba & 0xff000000000000) >> 48);
+	req->cmd[4] = (u8)((badLba & 0xff0000000000) >> 40);
+	req->cmd[5] = (u8)((badLba & 0xff00000000) >> 32);
+	req->cmd[6] = (u8)((badLba & 0xff000000) >> 24);
+	req->cmd[7] = (u8)((badLba & 0xff0000) >> 16);
+	req->cmd[8] = (u8)((badLba & 0xff00) >> 8);
+	req->cmd[9] = lbal;
+	 
+	req->cmd[10] = (u8)((uSectors & 0xff000000) >> 24);
+	req->cmd[11] = (u8)((uSectors & 0xff0000) >> 16);
+	req->cmd[12] = (u8)((uSectors & 0xff00) >> 8);
+	req->cmd[13] = (u8)(uSectors & 0xff);
+
+	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->cmd_flags |= REQ_QUIET;
+	req->timeout = 60 * HZ;
+	req->retries = 0;
+
+	iCheck = blk_rq_map_kern(q, req, page_address(ZERO_PAGE(0)), size, GFP_DMA | GFP_KERNEL);
+	if (0 != iCheck) {
+		printk("%s:%s(%d) blk_rq_map_kern return != 0\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	sdev_printk(KERN_INFO, device, "Insert write command :");
+	for (i = 0; i < req->cmd_len; ++i) {
+		printk("%02x ", req->cmd[i]);
+	}
+	printk("\n");
+
+	blk_execute_rq_nowait(q, NULL, req, 1, syno_scsi_do_remap_done);
+
+	iRet = 0;
+	goto OUT;
+ERR:
+	if (NULL != req) {
+		blk_put_request(req);
+	}
+OUT:
+	return iRet;
+}
+
+static int
+syno_scsi_check_ncq_fake_unc(const u8 * sense_buffer, int iSbLen)
+{
+	int iRet = 0;
+	const u8 * desc = NULL;
+	u8 format = 0;
+
+	if (7 > iSbLen) {
+		goto OUT;
+	}
+
+	format = 0x7f & sense_buffer[0];
+	if (0x72 == format || 0x73 == format) {
+		desc = scsi_sense_desc_find(sense_buffer, iSbLen, 0  );
+		if (desc && (0xa == desc[1])) {
+			iRet = SYNO_NCQ_FAKE_UNC & desc[SYNO_DESCRIPTOR_RESERVED_INDEX];
+		}
+	}
+OUT:
+	return iRet;
+}
+
+static unsigned int
+syno_scsi_writes_sector(struct scsi_cmnd *scsi_cmd)
+{
+	unsigned int iRet = -1, iCheck = 0;
+	sector_t badLba = 0;
+	u8 blIsWrite = 0;
+#ifdef MY_ABC_HERE
+	struct bio* b = NULL;
+	unsigned int len = 0;
+	int i = 0;
+#endif  
+
+	if (NULL == scsi_cmd) {
+		printk("%s:%s(%d) Failed to get scsi_cmd\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	if (NULL == scsi_cmd->request) {
+		printk("%s:%s(%d) Failed to get scsi_cmd request\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	if (NULL == scsi_cmd->sense_buffer) {
+		printk("%s:%s(%d) Failed to get scsi_cmd sense_buffer\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	if (NULL == scsi_cmd->cmnd) {
+		printk("%s:%s(%d) Failed to get scsi_cmd cmnd\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	iCheck = scsi_get_sense_info_fld(scsi_cmd->sense_buffer,
+			SCSI_SENSE_BUFFERSIZE,
+			(u64*) &badLba);
+	if (0 == iCheck) {
+		printk("%s:%s(%d) sense info in sense data invalid\n", __FILE__, __FUNCTION__,  __LINE__);
+		goto ERR;
+	}
+
+	if (syno_scsi_check_ncq_fake_unc(scsi_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE)) {
+		scmd_printk(KERN_INFO, scsi_cmd, "UNC ERROR code but NCQ abort, do NOT remap\n");
+		goto ERR;
+	}
+
+	switch (scsi_cmd->cmnd[0]) {
+		case WRITE_6:
+		case WRITE_10:
+		case WRITE_12:
+		case WRITE_16:
+		case WRITE_32:
+			blIsWrite = 1;
+			break;
+		default:
+			if (DMA_TO_DEVICE == scsi_cmd->sc_data_direction) {
+				blIsWrite = 1;
+			}
+	}
+
+	scmd_printk(KERN_INFO, scsi_cmd, "%s unc at %llu\n",
+		(blIsWrite) ? "write" : "read",
+		(unsigned long long)badLba);
+
+	if (!blIsWrite && !blSectorNeedAutoRemap(scsi_cmd, badLba)) {
+		goto ERR;
+	}
+
+#ifdef MY_ABC_HERE
+	 
+	if (!blIsWrite) {
+		for (b = scsi_cmd->request->bio; b; b = b->bi_next) {
+			len = 0;
+			for (i = 0; i < b->bi_vcnt; i++) {
+				len += b->bi_io_vec[i].bv_len;
+			}
+			if (b->bi_sector <= badLba && badLba < b->bi_sector + (len >> 9)) {
+				set_bit(BIO_AUTO_REMAP, &b->bi_flags);
+				printk("%s:%s(%d) set bio BIO_AUTO_REMAP bit on\n",
+					__FILE__, __FUNCTION__, __LINE__);
+			}
+		}
+	}
+#endif  
+
+	syno_scsi_do_remap(scsi_cmd, badLba);
+	iRet = 0;
+ERR:
+	return iRet;
+}
+#endif  
+
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
@@ -814,6 +1024,31 @@
 			 
 			action = ACTION_FAIL;
 			break;
+#ifdef MY_ABC_HERE
+		case MEDIUM_ERROR:
+			switch (sshdr.asc) {
+				case 0x11:
+					switch (sshdr.ascq) {
+						case 0x00:
+						case 0x04:
+						case 0x14:
+							syno_scsi_writes_sector(cmd);
+							description = "Medium with UNC error";
+							action = ACTION_FAIL;
+							break;
+						default:
+							description = "Medium error Unhandled ASCQ code";
+							action = ACTION_FAIL;
+							break;
+					}
+					break;
+				default:
+					description = "Medium error Unhandled ASC code";
+					action = ACTION_FAIL;
+					break;
+			}
+			break;
+#endif  
 		default:
 			description = "Unhandled sense code";
 			action = ACTION_FAIL;
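
The remap path added above hand-builds a WRITE(16) CDB, placing the LBA big-endian in bytes 2-9 and the transfer length in bytes 10-13. A standalone sketch of that byte layout (generic SBC encoding only; it does not reproduce the driver's physical-sector alignment of the low LBA byte):

#include <stdint.h>
#include <stdio.h>

#define WRITE_16_OP	0x8a

static void build_write16(uint8_t cdb[16], uint64_t lba, uint32_t nblocks)
{
	int i;

	cdb[0] = WRITE_16_OP;
	cdb[1] = 0;
	for (i = 0; i < 8; i++)			/* bytes 2..9: LBA, MSB first */
		cdb[2 + i] = (uint8_t)(lba >> (56 - 8 * i));
	for (i = 0; i < 4; i++)			/* bytes 10..13: transfer length */
		cdb[10 + i] = (uint8_t)(nblocks >> (24 - 8 * i));
	cdb[14] = 0;				/* group number */
	cdb[15] = 0;				/* control */
}

int main(void)
{
	uint8_t cdb[16];
	int i;

	build_write16(cdb, 0x123456789aULL, 8);
	for (i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	putchar('\n');
	return 0;
}
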
diff -ur a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
--- a/drivers/scsi/scsi_sysfs.c	2017-03-23 14:58:40.000000000 +0100
+++ b/drivers/scsi/scsi_sysfs.c	2017-03-14 02:35:27.000000000 +0100
@@ -496,6 +496,31 @@
 #endif
 
 #ifdef MY_ABC_HERE
+extern void
+ScsiRemapModeSet(struct scsi_device *sdev, unsigned char blAutoRemap);
+static ssize_t
+sdev_show_auto_remap(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	sdev = to_scsi_device(dev);
+	return snprintf (buf, 20, "%d type 0x%x\n", sdev->auto_remap, sdev->type);
+}
+
+static ssize_t
+sdev_store_auto_remap(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev;
+	int val = 0;
+	sdev = to_scsi_device(dev);
+	sscanf (buf, "%d", &val);
+
+	ScsiRemapModeSet(sdev, val ? 1 : 0);
+	return count;
+}
+static DEVICE_ATTR(auto_remap, S_IRUGO | S_IWUSR, sdev_show_auto_remap, sdev_store_auto_remap);
+#endif  
+
+#ifdef MY_ABC_HERE
 static ssize_t
 syno_disk_serial_show(struct device *device, struct device_attribute *attr, char *buf)
 {
@@ -568,6 +593,45 @@
 }
 
 static DEVICE_ATTR(syno_spindown, S_IRUGO, sdev_show_syno_spindown, NULL);
+
+static ssize_t
+sdev_show_syno_standby_syncing(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	int iRet = -EFAULT;
+
+	if (NULL == (sdev = to_scsi_device(dev))) {
+		goto END;
+	}
+
+	iRet = snprintf (buf, 20, "%u\n", sdev->do_standby_syncing);
+
+END:
+	return iRet;
+}
+
+static ssize_t
+sdev_store_syno_standby_syncing(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev;
+	unsigned long ulstandby_syncing;
+
+	if (NULL == (sdev = to_scsi_device(dev))) {
+		goto END;
+	}
+
+	sscanf(buf, "%lu", &ulstandby_syncing);
+	if (0 < ulstandby_syncing) {
+		sdev->do_standby_syncing = 1;
+	} else {
+		sdev->do_standby_syncing = 0;
+	}
+
+END:
+	return count;
+}
+
+static DEVICE_ATTR(syno_standby_syncing, S_IRUGO | S_IWUSR, sdev_show_syno_standby_syncing, sdev_store_syno_standby_syncing);
 #endif  
 
 #ifdef MY_DEF_HERE
@@ -863,8 +927,12 @@
 	&dev_attr_ioerr_cnt.attr,
 	&dev_attr_modalias.attr,
 #ifdef MY_ABC_HERE
+	&dev_attr_auto_remap.attr,
+#endif  
+#ifdef MY_ABC_HERE
 	&dev_attr_syno_idle_time.attr,
 	&dev_attr_syno_spindown.attr,
+	&dev_attr_syno_standby_syncing.attr,
 #endif  
 #ifdef MY_ABC_HERE
 	&dev_attr_syno_scmd_min_timeout.attr,
@@ -960,7 +1028,7 @@
 		return -EINVAL;
 
 	sdev->queue_ramp_up_period = msecs_to_jiffies(period);
-	return period;
+	return count;
 }
 
 static struct device_attribute sdev_attr_queue_ramp_up_period =
@@ -1178,28 +1246,25 @@
 void scsi_remove_target(struct device *dev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
-	struct scsi_target *starget, *last = NULL;
+	struct scsi_target *starget, *last_target = NULL;
 	unsigned long flags;
 
+restart:
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(starget, &shost->__targets, siblings) {
-		if (starget->state == STARGET_DEL)
+		if (starget->state == STARGET_DEL ||
+		    starget == last_target)
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
-			 
 			kref_get(&starget->reap_ref);
+			last_target = starget;
 			spin_unlock_irqrestore(shost->host_lock, flags);
-			if (last)
-				scsi_target_reap(last);
-			last = starget;
 			__scsi_remove_target(starget);
-			spin_lock_irqsave(shost->host_lock, flags);
+			scsi_target_reap(starget);
+			goto restart;
 		}
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
-
-	if (last)
-		scsi_target_reap(last);
 }
 EXPORT_SYMBOL(scsi_remove_target);
 
diff -ur a/drivers/scsi/sd.c b/drivers/scsi/sd.c
--- a/drivers/scsi/sd.c	2017-03-23 14:58:30.000000000 +0100
+++ b/drivers/scsi/sd.c	2017-03-14 02:35:19.000000000 +0100
@@ -120,6 +120,10 @@
 #include <linux/usb.h>
 #include "../usb/storage/usb.h"
 #ifdef MY_DEF_HERE
+#include <linux/synolib.h>
+static DEFINE_IDA(cache_index_ida);
+#endif  
+#ifdef MY_DEF_HERE
 extern u8 syno_is_synology_pm(const struct ata_port *ap);
 #endif  
 #endif  
@@ -2726,9 +2730,9 @@
 extern int syno_ida_get_new(struct ida *idp, int starting_id, int *id);
 #endif  
 
-#ifdef MY_ABC_HERE
+#if defined(MY_ABC_HERE) || defined(MY_DEF_HERE)
  
-static int syno_sd_format_sas_disk_name(char *prefix, int synoindex, char *buf, int buflen)
+static int syno_sd_format_numeric_disk_name(char *prefix, int synoindex, char *buf, int buflen)
 {
 	 
 	if (buflen <= (strlen(prefix) + (synoindex + 1)/10 + 1)) {
@@ -2742,7 +2746,7 @@
 	return 0;
 }
 #endif  
-
+ 
 static void sd_unlock_native_capacity(struct gendisk *disk)
 {
 	struct scsi_device *sdev = scsi_disk(disk)->device;
@@ -2886,6 +2890,12 @@
 	}
 #endif  
 
+#ifdef MY_ABC_HERE
+	if(strcmp(sdp->host->hostt->name, "TCM_Loopback") == 0){
+		return SYNO_DISK_ISCSI;
+	}
+#endif  
+
 #ifdef MY_DEF_HERE
 	if(strcmp(sdp->host->hostt->name, "Virtio SCSI HBA") == 0){
 #ifdef MY_ABC_HERE
@@ -2947,6 +2957,11 @@
 #endif  
 		}
 #endif  
+#ifdef MY_DEF_HERE
+		if (sdp->host->isCacheSSD) {
+			return SYNO_DISK_CACHE;
+		}
+#endif  
 		 
 		return SYNO_DISK_SATA;
 	}
@@ -2962,6 +2977,11 @@
 #endif  
 	 
 	if (SYNO_PORT_TYPE_SAS == sdp->host->hostt->syno_port_type) {
+#ifdef MY_DEF_HERE
+		if (sdp->host->isCacheSSD) {
+			return SYNO_DISK_CACHE;
+		}
+#endif  
 		return SYNO_DISK_SAS;
 	}
 	return SYNO_DISK_UNKNOWN;
@@ -2984,6 +3004,9 @@
 #endif  
 	int iRetry = 0;
 	u32 want_idx = 0;
+#ifdef MY_DEF_HERE
+	u32 cache_idx = 0;
+#endif  
 #endif  
 #ifdef MY_ABC_HERE
 	u32 synoidx;
@@ -3013,6 +3036,12 @@
 	do {
 		if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
 			goto out_put;
+#ifdef MY_DEF_HERE
+		if (SYNO_DISK_CACHE == sdkp->synodisktype) {
+			if (!ida_pre_get(&cache_index_ida, GFP_KERNEL))
+			goto out_put;
+		}
+#endif  
 
 #ifdef MY_ABC_HERE
 		if (1 == g_is_sas_model) {
@@ -3081,6 +3110,9 @@
 #endif
 				want_idx = CONFIG_SYNO_MAX_INTERNAL_DISK + 1;
 				break;
+#ifdef MY_DEF_HERE
+			case SYNO_DISK_CACHE:
+#endif  
 			case SYNO_DISK_SAS:
 			case SYNO_DISK_SATA:
 			default:
@@ -3114,6 +3146,13 @@
 		}
 
 		error = syno_ida_get_new(&sd_index_ida, want_idx, &index);
+#ifdef MY_DEF_HERE
+		if (SYNO_DISK_CACHE == sdkp->synodisktype) {
+			error = syno_ida_get_new(&cache_index_ida, (want_idx - M2SATA_START_IDX), &cache_idx);
+			sdkp->synoindex = cache_idx;
+		}
+#endif  
+
 #ifdef MY_ABC_HERE
 		if (1 == g_is_sas_model) {
 			sdkp->synoindex = synoidx;
@@ -3159,7 +3198,7 @@
 		case SYNO_DISK_ISCSI:
 #ifdef MY_ABC_HERE
 			if (1 == g_is_sas_model) {
-				error = syno_sd_format_sas_disk_name(CONFIG_SYNO_SAS_ISCSI_DEVICE_PREFIX, synoidx, gd->disk_name, DISK_NAME_LEN);
+				error = syno_sd_format_numeric_disk_name(CONFIG_SYNO_SAS_ISCSI_DEVICE_PREFIX, synoidx, gd->disk_name, DISK_NAME_LEN);
 				printk("got iSCSI disk[%d]\n", synoidx);
 				break;
 			}
@@ -3179,7 +3218,7 @@
 
 		case SYNO_DISK_SAS:
 #ifdef MY_ABC_HERE
-			error = syno_sd_format_sas_disk_name(CONFIG_SYNO_SAS_DEVICE_PREFIX, synoidx, gd->disk_name, DISK_NAME_LEN);
+			error = syno_sd_format_numeric_disk_name(CONFIG_SYNO_SAS_DEVICE_PREFIX, synoidx, gd->disk_name, DISK_NAME_LEN);
 			for (i = 0;i < SCSI_HOST_SEARCH_DEPTH && NULL != searchDev;i++) {
 				if (scsi_is_host_device(searchDev)) {
 					gd->systemDisk = 1;
@@ -3201,11 +3240,16 @@
 #endif  
 			error = sd_format_disk_name(CONFIG_SYNO_SATA_DEVICE_PREFIX, index, gd->disk_name, DISK_NAME_LEN);
 			break;
+#ifdef MY_DEF_HERE
+		case SYNO_DISK_CACHE:
+			error = syno_sd_format_numeric_disk_name(CONFIG_SYNO_CACHE_DEVICE_PREFIX, cache_idx, gd->disk_name, DISK_NAME_LEN);
+			break;
+#endif  
 		case SYNO_DISK_USB:
 		default:
 #ifdef MY_ABC_HERE
 			if (1 == g_is_sas_model) {
-				error = syno_sd_format_sas_disk_name(CONFIG_SYNO_SAS_USB_DEVICE_PREFIX, synoidx, gd->disk_name, DISK_NAME_LEN);
+				error = syno_sd_format_numeric_disk_name(CONFIG_SYNO_SAS_USB_DEVICE_PREFIX, synoidx, gd->disk_name, DISK_NAME_LEN);
 				break;
 			}
 #endif  
@@ -3258,6 +3302,11 @@
  out_free_index:
 	spin_lock(&sd_index_lock);
 	ida_remove(&sd_index_ida, index);
+#ifdef MY_DEF_HERE
+	if (SYNO_DISK_CACHE == sdkp->synodisktype) {
+		ida_remove(&cache_index_ida, cache_idx);
+	}
+#endif  
 #ifdef MY_ABC_HERE
 	if (1 == g_is_sas_model) {
 		switch(sdkp->synodisktype) {
@@ -3314,7 +3363,13 @@
 	struct gendisk *disk = sdkp->disk;
 
 	spin_lock(&sd_index_lock);
+
 	ida_remove(&sd_index_ida, sdkp->index);
+#ifdef MY_DEF_HERE
+	if (SYNO_DISK_CACHE == sdkp->synodisktype) {
+		ida_remove(&cache_index_ida, sdkp->synoindex);
+	}
+#endif  
 #ifdef MY_ABC_HERE
 	if (1 == g_is_sas_model) {
 		switch(sdkp->synodisktype) {
@@ -3400,8 +3455,8 @@
 	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
 	int ret = 0;
 
-	if (!sdkp)
-		return 0;	 
+	if (!sdkp)	 
+		return 0;
 
 	if (sdkp->WCE) {
 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3425,6 +3480,9 @@
 	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
 	int ret = 0;
 
+	if (!sdkp)	 
+		return 0;
+
 	if (!sdkp->device->manage_start_stop)
 		goto done;
 
@@ -3542,7 +3600,7 @@
 EXPORT_SYMBOL(SynoSCSIGetDeviceIndex);
 #endif  
 
-#if defined(MY_ABC_HERE)
+#if defined(MY_ABC_HERE) || defined(MY_ABC_HERE)
  
 static unsigned char
 blIsScsiDevice(int major)
@@ -3610,3 +3668,188 @@
 
 EXPORT_SYMBOL(IsDeviceDisappear);
 #endif  
+
+#ifdef MY_ABC_HERE
+ 
+void
+PartitionRemapModeSet(struct gendisk *gd,
+					  struct hd_struct *phd,
+					  unsigned char blAutoRemap)
+{
+	struct scsi_disk *sdkp;
+	struct scsi_device *sdev;
+
+	if (!gd || !phd) {
+		goto END;
+	}
+
+	phd->auto_remap = blAutoRemap;
+	if (!blAutoRemap) {
+		if (!blIsScsiDevice(gd->major)) {
+			 
+			printk("This is not a kind of scsi disk %d\n", gd->major);
+			goto END;
+		}
+
+		sdkp = container_of(gd->private_data, struct scsi_disk, driver);
+		if (!sdkp) {
+			printk(" sdkp is NULL\n");
+			goto END;
+		}
+
+		sdev = sdkp->device;
+		if(!sdev) {
+			printk(" sdev is NULL\n");
+			goto END;
+		}
+		sdev->auto_remap = 0;
+	}
+END:
+	return;
+}
+
+void
+ScsiRemapModeSet(struct scsi_device *sdev,
+				 unsigned char blAutoRemap)
+{
+	struct scsi_disk *sdkp;
+	struct gendisk *gd;
+	struct hd_struct *phd;
+	int i = 0;
+
+	if (!sdev) {
+		goto END;
+	}
+
+	if (TYPE_DISK != sdev->type) {
+		printk("Only support scsi disk\n");
+		goto END;
+	}
+
+	sdev->auto_remap = blAutoRemap;
+	sdkp = dev_get_drvdata(&sdev->sdev_gendev);
+	if (!sdkp) {
+		goto END;
+	}
+
+	gd = sdkp->disk;
+	if (!gd) {
+		goto END;
+	}
+
+	for (i = 0; i < gd->minors; i++) {
+		phd = disk_get_part(gd, i+1);
+		if (!phd || !phd->nr_sects)
+			continue;
+
+		phd->auto_remap = blAutoRemap;
+	}
+END:
+	return;
+}
+
+void
+RaidRemapModeSet(struct block_device *bdev, unsigned char blAutoRemap)
+{
+	struct gendisk *disk = NULL;
+	struct scsi_disk *sdkp;
+
+	if (!bdev) {
+		WARN_ON(bdev == NULL);
+		return;
+	}
+
+	disk = bdev->bd_disk;
+	if (!disk) {
+		WARN_ON(disk == NULL);
+		return;
+	}
+
+	if (!blIsScsiDevice(disk->major)) {
+		 
+		printk("This is not a kind of scsi disk %d\n", disk->major);
+		return;
+	}
+
+	if (bdev->bd_part) {
+		 
+		bdev->bd_part->auto_remap = blAutoRemap;
+	} else {
+		 
+		sdkp = container_of(disk->private_data, struct scsi_disk, driver);
+		if (!sdkp) {
+			WARN_ON(!sdkp);
+			return;
+		}
+		ScsiRemapModeSet(sdkp->device, blAutoRemap);
+	}
+}
+
+unsigned char
+blSectorNeedAutoRemap(struct scsi_cmnd *scsi_cmd,
+					  sector_t lba)
+{
+	struct scsi_device *sdev;
+	struct scsi_disk *sdkp;
+	struct gendisk *gd;
+	struct hd_struct *phd;
+	char szName[BDEVNAME_SIZE];
+	sector_t start, end;
+	u8 ret = 0;
+	int i = 0;
+
+	if (!scsi_cmd) {
+		WARN_ON(1);
+		goto END;
+	}
+
+	sdev = scsi_cmd->device;
+	if (!sdev) {
+		WARN_ON(1);
+		goto END;
+	}
+
+	if (TYPE_DISK != sdev->type) {
+		printk("Only support scsi disk\n");
+		goto END;
+	}
+
+	if (sdev->auto_remap) {
+		ret = 1;
+		printk("%s auto remap is on\n", dev_name(&sdev->sdev_gendev));
+		goto END;
+	}
+
+	sdkp = dev_get_drvdata(&sdev->sdev_gendev);
+	if (!sdkp) {
+		goto END;
+	}
+
+	gd = sdkp->disk;
+	if (!gd) {
+		goto END;
+	}
+
+	for (i = 0; i < gd->minors; i++) {
+		phd = disk_get_part(gd, i+1);
+		if (!phd || !phd->nr_sects)
+			continue;
+
+		start = phd->start_sect;
+		end = phd->nr_sects + start - 1;
+
+		if (lba >= start && lba <= end) {
+			printk("lba %llu start %llu end %llu\n", (unsigned long long)lba, (unsigned long long)start, (unsigned long long)end);
+			ret = phd->auto_remap;
+			printk("%s auto_remap %u\n", disk_name(gd, i+1, szName), phd->auto_remap);
+		}
+	}
+END:
+	return ret;
+}
+
+EXPORT_SYMBOL(blSectorNeedAutoRemap);
+EXPORT_SYMBOL(RaidRemapModeSet);
+EXPORT_SYMBOL(ScsiRemapModeSet);
+EXPORT_SYMBOL(PartitionRemapModeSet);
+#endif  
diff -ur a/drivers/scsi/sd.h b/drivers/scsi/sd.h
--- a/drivers/scsi/sd.h	2017-03-23 14:58:41.000000000 +0100
+++ b/drivers/scsi/sd.h	2017-03-14 02:35:29.000000000 +0100
@@ -55,6 +55,9 @@
 	SYNO_DISK_SYNOBOOT,
 	SYNO_DISK_ISCSI,
 	SYNO_DISK_SAS,
+#ifdef MY_DEF_HERE
+	SYNO_DISK_CACHE,
+#endif  
 	SYNO_DISK_END,  
 }SYNO_DISK_TYPE;
 #endif  
@@ -74,7 +77,7 @@
 #ifdef MY_ABC_HERE
 	SYNO_DISK_TYPE	synodisktype;
 #endif  
-#ifdef MY_ABC_HERE
+#if defined(MY_ABC_HERE) || defined(MY_DEF_HERE)
 	u32		synoindex;
 #endif  
 	unsigned int	physical_block_size;
diff -ur a/drivers/scsi/ses.c b/drivers/scsi/ses.c
--- a/drivers/scsi/ses.c	2017-03-23 14:58:35.000000000 +0100
+++ b/drivers/scsi/ses.c	2017-03-14 02:35:25.000000000 +0100
@@ -51,6 +51,7 @@
 static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 			 void *buf, int bufflen)
 {
+	int ret;
 	unsigned char cmd[] = {
 		RECEIVE_DIAGNOSTIC,
 		1,		 
@@ -59,9 +60,23 @@
 		bufflen & 0xff,
 		0
 	};
+	unsigned char recv_page_code;
 
-	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+	ret =  scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
 				NULL, SES_TIMEOUT, SES_RETRIES, NULL);
+	if (unlikely(!ret))
+		return ret;
+
+	recv_page_code = ((unsigned char *)buf)[0];
+
+	if (likely(recv_page_code == page_code))
+		return ret;
+
+	sdev_printk(KERN_ERR, sdev,
+		    "Wrong diagnostic page; asked for %d got %u\n",
+		    page_code, recv_page_code);
+
+	return -EINVAL;
 }
 
 static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -100,9 +115,16 @@
 	for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
 		for (j = 0; j < type_ptr[1]; j++) {
 			desc_ptr += 4;
+
 			if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
+#ifdef MY_DEF_HERE
+			    type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE &&
+			    type_ptr[0] != ENCLOSURE_COMPONENT_ENCLOSURE)
+#else  
 			    type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+#endif  
 				continue;
+
 			if (count++ == descriptor) {
 				memcpy(desc_ptr, desc, 4);
 				 
@@ -111,6 +133,9 @@
 				desc_ptr[0] &= 0xf0;
 			}
 		}
+#ifdef MY_DEF_HERE
+		desc_ptr += 4;
+#endif  
 	}
 
 	return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
@@ -131,11 +156,19 @@
 		for (j = 0; j < type_ptr[1]; j++) {
 			desc_ptr += 4;
 			if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
+#ifdef MY_DEF_HERE
+			    type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE &&
+			    type_ptr[0] != ENCLOSURE_COMPONENT_ENCLOSURE)
+#else  
 			    type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+#endif  
 				continue;
 			if (count++ == descriptor)
 				return desc_ptr;
 		}
+#ifdef MY_DEF_HERE
+		desc_ptr += 4;
+#endif  
 	}
 	return NULL;
 }
@@ -211,6 +244,23 @@
 	return ses_set_page2_descriptor(edev, ecomp, desc);
 }
 
+#ifdef MY_DEF_HERE
+static int syno_ses_set_poweroff(struct enclosure_device *edev,
+					struct enclosure_component *ecomp)
+{
+	unsigned char desc[4] = {0};
+	unsigned char *desc_get = NULL;
+
+	desc_get = ses_get_page2_descriptor(edev, ecomp);
+
+	desc[2] = 0x40;
+	 
+	desc[3] = ((desc_get[3] & 0x3) | (63 << 2));
+
+	return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+#endif  
+
 static int ses_set_active(struct enclosure_device *edev,
 			  struct enclosure_component *ecomp,
 			  enum enclosure_component_setting val)
@@ -392,8 +442,14 @@
 					name = desc_ptr;
 				}
 			}
+
 			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+#ifdef MY_DEF_HERE
+			    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
+			    type_ptr[0] == ENCLOSURE_COMPONENT_ENCLOSURE) {
+#else  
 			    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
+#endif  
 
 				if (create)
 					ecomp =	enclosure_component_register(edev,
@@ -410,7 +466,15 @@
 			if (desc_ptr)
 				desc_ptr += len;
 
-			if (addl_desc_ptr)
+			if (addl_desc_ptr &&
+			     
+			    (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
+			      
+			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
 				addl_desc_ptr += addl_desc_ptr[1] + 2;
 
 		}
@@ -535,7 +599,12 @@
 
 	for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) {
 		if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+#ifdef MY_DEF_HERE
+		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
+		    type_ptr[0] == ENCLOSURE_COMPONENT_ENCLOSURE)
+#else  
 		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+#endif  
 			components += type_ptr[1];
 	}
 	ses_dev->page1 = buf;
@@ -617,6 +686,34 @@
 	return err;
 }
 
+#ifdef MY_DEF_HERE
+static void syno_ses_shutdown(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct enclosure_device *edev, *prev = NULL;
+	struct enclosure_component *cdev = NULL;
+	int i = 0;
+
+	if ((0 != memcmp(sdev->model, "RX1216sas", sizeof(sdev->model)) &&
+	     0 != memcmp(sdev->model, "RXD1215sas", sizeof(sdev->model))) ||
+	     SYSTEM_POWER_OFF != system_state) {
+		return;
+	}
+
+	sdev_printk(KERN_ERR, sdev, "ses shutdown \n");
+
+	while (NULL != (edev = enclosure_find(dev, prev))) {
+		prev = edev;
+		for (i = 0; i < edev->components; i++) {
+			cdev = &edev->component[i];
+			if(ENCLOSURE_COMPONENT_ENCLOSURE == cdev->type) {
+				syno_ses_set_poweroff(edev, cdev);
+			}
+		}
+	}
+}
+#endif  
+
 static int ses_remove(struct device *dev)
 {
 	return 0;
@@ -698,6 +795,9 @@
 		.name		= "ses",
 		.probe		= ses_probe,
 		.remove		= ses_remove,
+#ifdef MY_DEF_HERE
+		.shutdown	= syno_ses_shutdown,
+#endif  
 	},
 };
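
The page-2 descriptor walks above rely on each element type contributing one 4-byte overall descriptor followed by its individual 4-byte descriptors, which is what the added desc_ptr += 4 after each inner loop accounts for. A simplified userspace sketch of that stepping (fabricated layout; real SES pages carry more fields and type filtering):

#include <stdint.h>
#include <stdio.h>

/* Fabricated status page: an 8-byte header, then per element type one
 * 4-byte overall descriptor followed by n_elems 4-byte individual
 * descriptors. */
static void walk_status_page(const uint8_t *page, const uint8_t *n_elems,
			     int ntypes)
{
	const uint8_t *p = page + 8;		/* skip the page header */
	int t, j;

	for (t = 0; t < ntypes; t++) {
		for (j = 0; j < n_elems[t]; j++) {
			p += 4;			/* first step skips the overall descriptor */
			printf("type %d elem %d tag 0x%02x\n", t, j, p[0]);
		}
		p += 4;				/* step past this type's block */
	}
}

int main(void)
{
	uint8_t page[8 + 4 * 5] = { 0 };	/* 2 types: (1+2) + (1+1) descriptors */
	uint8_t n_elems[2] = { 2, 1 };
	int i;

	for (i = 0; i < 5; i++)			/* tag each descriptor for the demo */
		page[8 + 4 * i] = 0x10 + i;
	walk_status_page(page, n_elems, 2);
	return 0;
}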
 
diff -ur a/drivers/scsi/sg.c b/drivers/scsi/sg.c
--- a/drivers/scsi/sg.c	2017-03-23 14:58:40.000000000 +0100
+++ b/drivers/scsi/sg.c	2017-03-14 02:35:27.000000000 +0100
@@ -589,7 +589,8 @@
 	else
 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
 	hp->dxfer_len = mxsize;
-	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
+	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
 		hp->dxferp = (char __user *)buf + cmd_size;
 	else
 		hp->dxferp = NULL;
@@ -1210,7 +1211,7 @@
 	}
 
 	sfp->mmap_called = 1;
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
 	return 0;
@@ -1631,6 +1632,9 @@
 			md->from_user = 0;
 	}
 
+	if (unlikely(iov_count > UIO_MAXIOV))
+		return -EINVAL;
+
 	if (iov_count) {
 		int len, size = sizeof(struct sg_iovec) * iov_count;
 		struct iovec *iov;
diff -ur a/drivers/scsi/sr.c b/drivers/scsi/sr.c
--- a/drivers/scsi/sr.c	2017-03-23 14:58:24.000000000 +0100
+++ b/drivers/scsi/sr.c	2017-03-14 02:35:17.000000000 +0100
@@ -141,6 +141,9 @@
 {
 	struct scsi_cd *cd = dev_get_drvdata(dev);
 
+	if (!cd)	/* E.g.: runtime suspend following sr_remove() */
+		return 0;
+
 	if (cd->media_present)
 		return -EBUSY;
 	else
@@ -1001,6 +1004,7 @@
 
 	blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
 	del_gendisk(cd->disk);
+	dev_set_drvdata(dev, NULL);
 
 	mutex_lock(&sr_ref_mutex);
 	kref_put(&cd->kref, sr_kref_release);
diff -ur a/drivers/scsi/st.c b/drivers/scsi/st.c
--- a/drivers/scsi/st.c	2017-03-23 14:58:41.000000000 +0100
+++ b/drivers/scsi/st.c	2017-03-14 02:35:27.000000000 +0100
@@ -1247,9 +1247,9 @@
 	spin_lock(&st_use_lock);
 	STp->in_use = 0;
 	spin_unlock(&st_use_lock);
-	scsi_tape_put(STp);
 	if (resumed)
 		scsi_autopm_put_device(STp->device);
+	scsi_tape_put(STp);
 	return retval;
 
 }
diff -ur a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
--- a/drivers/spi/spi-atmel.c	2017-03-23 14:48:00.000000000 +0100
+++ b/drivers/spi/spi-atmel.c	2017-03-14 02:24:29.000000000 +0100
@@ -594,7 +594,8 @@
 
 	*plen = len;
 
-	if (atmel_spi_dma_slave_config(as, &slave_config, 8))
+	if (atmel_spi_dma_slave_config(as, &slave_config,
+				       xfer->bits_per_word))
 		goto err_exit;
 
 	/* Send both scatterlists */
diff -ur a/drivers/spi/spi.c b/drivers/spi/spi.c
--- a/drivers/spi/spi.c	2017-03-23 14:47:58.000000000 +0100
+++ b/drivers/spi/spi.c	2017-03-14 02:24:26.000000000 +0100
@@ -1026,8 +1026,7 @@
  *
  * The caller is responsible for assigning the bus number and initializing
  * the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() and kfree() to prevent a memory
- * leak.
+ * adding the device) calling spi_master_put() to prevent a memory leak.
  */
 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
 {
@@ -1044,7 +1043,7 @@
 	master->bus_num = -1;
 	master->num_chipselect = 1;
 	master->dev.class = &spi_master_class;
-	master->dev.parent = get_device(dev);
+	master->dev.parent = dev;
 	spi_master_set_devdata(master, &master[1]);
 
 	return master;
diff -ur a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
--- a/drivers/spi/spi-pxa2xx.c	2017-03-23 14:47:59.000000000 +0100
+++ b/drivers/spi/spi-pxa2xx.c	2017-03-14 02:24:28.000000000 +0100
@@ -546,6 +546,10 @@
 	if (!(sccr1_reg & SSCR1_TIE))
 		mask &= ~SSSR_TFS;
 
+	/* Ignore RX timeout interrupt if it is disabled */
+	if (!(sccr1_reg & SSCR1_TINTE))
+		mask &= ~SSSR_TINT;
+
 	if (!(status & mask))
 		return IRQ_NONE;
 
diff -ur a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c	2017-03-23 14:45:17.000000000 +0100
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c	2017-03-14 02:20:58.000000000 +0100
@@ -119,10 +119,21 @@
 	unsigned int bits = data[1];
 
 	if (mask) {
+		unsigned int val;
+
 		s->state &= ~mask;
 		s->state |= (bits & mask);
-
-		outl(s->state, dev->iobase + reg);
+		val = s->state;
+		if (s->n_chan == 16) {
+			/*
+			 * It seems the PCI-7230 needs the 16-bit DO state
+			 * to be shifted left by 16 bits before being written
+			 * to the 32-bit register.  Set the value in both
+			 * halves of the register to be sure.
+			 */
+			val |= val << 16;
+		}
+		outl(val, dev->iobase + reg);
 	}
 
 	/*
diff -ur a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
--- a/drivers/staging/iio/adc/lpc32xx_adc.c	2017-03-23 14:44:44.000000000 +0100
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c	2017-03-14 02:20:32.000000000 +0100
@@ -76,7 +76,7 @@
 
 	if (mask == IIO_CHAN_INFO_RAW) {
 		mutex_lock(&indio_dev->mlock);
-		clk_enable(info->clk);
+		clk_prepare_enable(info->clk);
 		/* Measurement setup */
 		__raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
 			LPC32XX_ADC_SELECT(info->adc_base));
@@ -84,7 +84,7 @@
 		__raw_writel(AD_PDN_CTRL | AD_STROBE,
 			LPC32XX_ADC_CTRL(info->adc_base));
 		wait_for_completion(&info->completion); /* set by ISR */
-		clk_disable(info->clk);
+		clk_disable_unprepare(info->clk);
 		*val = info->value;
 		mutex_unlock(&indio_dev->mlock);
 
diff -ur a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
--- a/drivers/staging/ozwpan/ozusbsvc1.c	2017-03-23 14:43:42.000000000 +0100
+++ b/drivers/staging/ozwpan/ozusbsvc1.c	2017-03-14 02:19:41.000000000 +0100
@@ -314,7 +314,11 @@
 			struct oz_multiple_fixed *body =
 				(struct oz_multiple_fixed *)data_hdr;
 			u8 *data = body->data;
-			int n = (len - sizeof(struct oz_multiple_fixed)+1)
+			unsigned int n;
+			if (!body->unit_size ||
+				len < sizeof(struct oz_multiple_fixed) - 1)
+				break;
+			n = (len - (sizeof(struct oz_multiple_fixed) - 1))
 				/ body->unit_size;
 			while (n--) {
 				oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
@@ -376,10 +380,15 @@
 	case OZ_GET_DESC_RSP: {
 			struct oz_get_desc_rsp *body =
 				(struct oz_get_desc_rsp *)usb_hdr;
-			int data_len = elt->length -
-					sizeof(struct oz_get_desc_rsp) + 1;
-			u16 offs = le16_to_cpu(get_unaligned(&body->offset));
-			u16 total_size =
+			u16 offs, total_size;
+			u8 data_len;
+
+			if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
+				break;
+			data_len = elt->length -
+					(sizeof(struct oz_get_desc_rsp) - 1);
+			offs = le16_to_cpu(get_unaligned(&body->offset));
+			total_size =
 				le16_to_cpu(get_unaligned(&body->total_size));
 			oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
 			oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
diff -ur a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
--- a/drivers/staging/panel/panel.c	2017-03-23 14:44:17.000000000 +0100
+++ b/drivers/staging/panel/panel.c	2017-03-14 02:20:13.000000000 +0100
@@ -275,11 +275,11 @@
  * LCD types
  */
 #define LCD_TYPE_NONE		0
-#define LCD_TYPE_OLD		1
-#define LCD_TYPE_KS0074		2
-#define LCD_TYPE_HANTRONIX	3
-#define LCD_TYPE_NEXCOM		4
-#define LCD_TYPE_CUSTOM		5
+#define LCD_TYPE_CUSTOM		1
+#define LCD_TYPE_OLD		2
+#define LCD_TYPE_KS0074		3
+#define LCD_TYPE_HANTRONIX	4
+#define LCD_TYPE_NEXCOM		5
 
 /*
  * keypad types
@@ -457,8 +457,7 @@
 static int lcd_type = -1;
 module_param(lcd_type, int, 0000);
 MODULE_PARM_DESC(lcd_type,
-		 "LCD type: 0=none, 1=old //, 2=serial ks0074, "
-		 "3=hantronix //, 4=nexcom //, 5=compiled-in");
+		"LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
 
 static int lcd_proto = -1;
 module_param(lcd_proto, int, 0000);
diff -ur a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h	2017-03-23 14:45:00.000000000 +0100
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h	2017-03-14 02:20:43.000000000 +0100
@@ -1432,12 +1432,12 @@
 
 extern const long ieee80211_wlan_frequencies[];
 
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
+static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
 {
 	ieee->scans++;
 }
 
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
+static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
 {
 	return ieee->scans;
 }
diff -ur a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
--- a/drivers/staging/rtl8192e/rtllib.h	2017-03-23 14:44:03.000000000 +0100
+++ b/drivers/staging/rtl8192e/rtllib.h	2017-03-14 02:20:03.000000000 +0100
@@ -2744,7 +2744,6 @@
 extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
 extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
 extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
-extern inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee);
 extern u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee);
 extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
 					  short pwr);
@@ -2926,12 +2925,12 @@
 
 extern const long rtllib_wlan_frequencies[];
 
-extern inline void rtllib_increment_scans(struct rtllib_device *ieee)
+static inline void rtllib_increment_scans(struct rtllib_device *ieee)
 {
 	ieee->scans++;
 }
 
-extern inline int rtllib_get_scans(struct rtllib_device *ieee)
+static inline int rtllib_get_scans(struct rtllib_device *ieee)
 {
 	return ieee->scans;
 }
diff -ur a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
--- a/drivers/staging/rtl8192e/rtllib_softmac.c	2017-03-23 14:44:04.000000000 +0100
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c	2017-03-14 02:20:03.000000000 +0100
@@ -336,7 +336,7 @@
 	}
 }
 
-inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
+static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
 {
 	unsigned int len, rate_len;
 	u8 *tag;
diff -ur a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h	2017-03-23 14:46:27.000000000 +0100
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h	2017-03-14 02:22:24.000000000 +0100
@@ -2230,7 +2230,7 @@
 	return ((struct ieee80211_device *)netdev_priv(dev))->priv;
 }
 
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
 {
 	/* Single white space is for Linksys APs */
 	if (essid_len == 1 && essid[0] == ' ')
@@ -2246,7 +2246,7 @@
 	return 1;
 }
 
-extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
+static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
 {
 	/*
 	 * It is possible for both access points and our device to support
@@ -2272,7 +2272,7 @@
 	return 0;
 }
 
-extern inline int ieee80211_get_hdrlen(u16 fc)
+static inline int ieee80211_get_hdrlen(u16 fc)
 {
 	int hdrlen = IEEE80211_3ADDR_LEN;
 
@@ -2555,12 +2555,12 @@
 
 extern const long ieee80211_wlan_frequencies[];
 
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
+static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
 {
 	ieee->scans++;
 }
 
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
+static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
 {
 	return ieee->scans;
 }
diff -ur a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
--- a/drivers/staging/rtl8712/ieee80211.h	2017-03-23 14:44:22.000000000 +0100
+++ b/drivers/staging/rtl8712/ieee80211.h	2017-03-14 02:20:18.000000000 +0100
@@ -721,7 +721,7 @@
 #define IEEE_G            (1<<2)
 #define IEEE_MODE_MASK    (IEEE_A|IEEE_B|IEEE_G)
 
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
 {
 	/* Single white space is for Linksys APs */
 	if (essid_len == 1 && essid[0] == ' ')
@@ -735,7 +735,7 @@
 	return 1;
 }
 
-extern inline int ieee80211_get_hdrlen(u16 fc)
+static inline int ieee80211_get_hdrlen(u16 fc)
 {
 	int hdrlen = 24;
 
diff -ur a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
--- a/drivers/staging/rtl8712/rtl8712_recv.c	2017-03-23 14:44:17.000000000 +0100
+++ b/drivers/staging/rtl8712/rtl8712_recv.c	2017-03-14 02:20:14.000000000 +0100
@@ -1075,7 +1075,8 @@
 		/* for first fragment packet, driver need allocate 1536 +
 		 * drvinfo_sz + RXDESC_SIZE to defrag packet. */
 		if ((mf == 1) && (frag == 0))
-			alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
+			/*1658+6=1664, 1664 is 128 alignment.*/
+			alloc_sz = max_t(u16, tmp_len, 1658);
 		else
 			alloc_sz = tmp_len;
 		/* 2 is for IP header 4 bytes alignment in QoS packet case.
diff -ur a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
--- a/drivers/staging/rtl8712/usb_intf.c	2017-03-23 14:44:17.000000000 +0100
+++ b/drivers/staging/rtl8712/usb_intf.c	2017-03-14 02:20:14.000000000 +0100
@@ -144,6 +144,7 @@
 	{USB_DEVICE(0x0DF6, 0x0058)},
 	{USB_DEVICE(0x0DF6, 0x0049)},
 	{USB_DEVICE(0x0DF6, 0x004C)},
+	{USB_DEVICE(0x0DF6, 0x006C)},
 	{USB_DEVICE(0x0DF6, 0x0064)},
 	/* Skyworth */
 	{USB_DEVICE(0x14b2, 0x3300)},
diff -ur a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
--- a/drivers/staging/speakup/fakekey.c	2017-03-23 14:45:48.000000000 +0100
+++ b/drivers/staging/speakup/fakekey.c	2017-03-14 02:21:35.000000000 +0100
@@ -81,6 +81,7 @@
 	__this_cpu_write(reporting_keystroke, true);
 	input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
 	input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
+	input_sync(virt_keyboard);
 	__this_cpu_write(reporting_keystroke, false);
 
 	/* reenable preemption */
diff -ur a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
--- a/drivers/staging/speakup/selection.c	2017-03-23 14:45:48.000000000 +0100
+++ b/drivers/staging/speakup/selection.c	2017-03-14 02:21:35.000000000 +0100
@@ -139,7 +139,9 @@
 	struct tty_ldisc *ld;
 	DECLARE_WAITQUEUE(wait, current);
 
-	ld = tty_ldisc_ref_wait(tty);
+	ld = tty_ldisc_ref(tty);
+	if (!ld)
+		goto tty_unref;
 
 	/* FIXME: this is completely unsafe */
 	add_wait_queue(&vc->paste_wait, &wait);
@@ -158,6 +160,7 @@
 	current->state = TASK_RUNNING;
 
 	tty_ldisc_deref(ld);
+tty_unref:
 	tty_kref_put(tty);
 }
 
diff -ur a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
--- a/drivers/staging/usbip/usbip_common.c	2017-03-23 14:43:48.000000000 +0100
+++ b/drivers/staging/usbip/usbip_common.c	2017-03-14 02:19:47.000000000 +0100
@@ -749,6 +749,17 @@
 	if (!(size > 0))
 		return 0;
 
+	if (size > urb->transfer_buffer_length) {
+		 
+		if (ud->side == USBIP_STUB) {
+			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+			return 0;
+		} else {
+			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+			return -EPIPE;
+		}
+	}
+
 	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
 	if (ret != size) {
 		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
diff -ur a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
--- a/drivers/staging/wlags49_h2/wl_internal.h	2017-03-23 14:44:37.000000000 +0100
+++ b/drivers/staging/wlags49_h2/wl_internal.h	2017-03-14 02:20:26.000000000 +0100
@@ -971,7 +971,7 @@
 /* Interrupt enable disable functions                               */
 /********************************************************************/
 
-extern inline void wl_act_int_on(struct wl_private *lp)
+static inline void wl_act_int_on(struct wl_private *lp)
 {
 	/*
 	 * Only do something when the driver is handling
@@ -983,7 +983,7 @@
 	}
 }
 
-extern inline void wl_act_int_off(struct wl_private *lp)
+static inline void wl_act_int_off(struct wl_private *lp)
 {
 	/*
 	 * Only do something when the driver is handling
diff -ur a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
--- a/drivers/target/iscsi/iscsi_target.c	2017-03-23 14:40:02.000000000 +0100
+++ b/drivers/target/iscsi/iscsi_target.c	2017-03-14 02:15:20.000000000 +0100
@@ -518,7 +518,7 @@
 
 static int __init iscsi_target_init_module(void)
 {
-	int ret = 0;
+	int ret = 0, size;
 
 	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
 
@@ -527,6 +527,7 @@
 		pr_err("Unable to allocate memory for iscsit_global\n");
 		return -1;
 	}
+	spin_lock_init(&iscsit_global->ts_bitmap_lock);
 	mutex_init(&auth_id_lock);
 	spin_lock_init(&sess_idr_lock);
 	idr_init(&tiqn_idr);
@@ -536,15 +537,11 @@
 	if (ret < 0)
 		goto out;
 
-	ret = iscsi_thread_set_init();
-	if (ret < 0)
+	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
+	iscsit_global->ts_bitmap = vzalloc(size);
+	if (!iscsit_global->ts_bitmap) {
+		pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
 		goto configfs_out;
-
-	if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
-			TARGET_THREAD_SET_COUNT) {
-		pr_err("iscsi_allocate_thread_sets() returned"
-			" unexpected value!\n");
-		goto ts_out1;
 	}
 
 	lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
@@ -553,7 +550,7 @@
 	if (!lio_cmd_cache) {
 		pr_err("Unable to kmem_cache_create() for"
 				" lio_cmd_cache\n");
-		goto ts_out2;
+		goto bitmap_out;
 	}
 
 	lio_qr_cache = kmem_cache_create("lio_qr_cache",
@@ -608,10 +605,8 @@
 	kmem_cache_destroy(lio_qr_cache);
 cmd_out:
 	kmem_cache_destroy(lio_cmd_cache);
-ts_out2:
-	iscsi_deallocate_thread_sets();
-ts_out1:
-	iscsi_thread_set_free();
+bitmap_out:
+	vfree(iscsit_global->ts_bitmap);
 configfs_out:
 	iscsi_target_deregister_configfs();
 out:
@@ -621,8 +616,6 @@
 
 static void __exit iscsi_target_cleanup_module(void)
 {
-	iscsi_deallocate_thread_sets();
-	iscsi_thread_set_free();
 	iscsit_release_discovery_tpg();
 	iscsit_unregister_transport(&iscsi_target_transport);
 	kmem_cache_destroy(lio_cmd_cache);
@@ -633,6 +626,7 @@
 
 	iscsi_target_deregister_configfs();
 
+	vfree(iscsit_global->ts_bitmap);
 	kfree(iscsit_global);
 }
 
@@ -3590,17 +3584,16 @@
 
 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 {
-	struct iscsi_thread_set *ts = conn->thread_set;
 	int ord, cpu;
 	/*
-	 * thread_id is assigned from iscsit_global->ts_bitmap from
-	 * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+	 * bitmap_id is assigned from iscsit_global->ts_bitmap from
+	 * within iscsit_start_kthreads()
 	 *
-	 * Here we use thread_id to determine which CPU that this
-	 * iSCSI connection's iscsi_thread_set will be scheduled to
+	 * Here we use bitmap_id to determine which CPU that this
+	 * iSCSI connection's RX/TX threads will be scheduled to
 	 * execute upon.
 	 */
-	ord = ts->thread_id % cpumask_weight(cpu_online_mask);
+	ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
 	for_each_online_cpu(cpu) {
 		if (ord-- == 0) {
 			cpumask_set_cpu(cpu, conn->conn_cpumask);
@@ -3792,7 +3785,7 @@
 	switch (state) {
 	case ISTATE_SEND_LOGOUTRSP:
 		if (!iscsit_logout_post_handler(cmd, conn))
-			goto restart;
+			return -ECONNRESET;
 		/* fall through */
 	case ISTATE_SEND_STATUS:
 	case ISTATE_SEND_ASYNCMSG:
@@ -3820,8 +3813,6 @@
 
 err:
 	return -1;
-restart:
-	return -EAGAIN;
 }
 
 static int iscsit_handle_response_queue(struct iscsi_conn *conn)
@@ -3848,21 +3839,13 @@
 int iscsi_target_tx_thread(void *arg)
 {
 	int ret = 0;
-	struct iscsi_conn *conn;
-	struct iscsi_thread_set *ts = arg;
+	struct iscsi_conn *conn = arg;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.
 	 */
 	allow_signal(SIGINT);
 
-restart:
-	conn = iscsi_tx_thread_pre_handler(ts);
-	if (!conn)
-		goto out;
-
-	ret = 0;
-
 	while (!kthread_should_stop()) {
 		/*
 		 * Ensure that both TX and RX per connection kthreads
@@ -3871,11 +3854,9 @@
 		iscsit_thread_check_cpumask(conn, current, 1);
 
 		wait_event_interruptible(conn->queues_wq,
-					 !iscsit_conn_all_queues_empty(conn) ||
-					 ts->status == ISCSI_THREAD_SET_RESET);
+					 !iscsit_conn_all_queues_empty(conn));
 
-		if ((ts->status == ISCSI_THREAD_SET_RESET) ||
-		     signal_pending(current))
+		if (signal_pending(current))
 			goto transport_err;
 
 get_immediate:
@@ -3886,15 +3867,20 @@
 		ret = iscsit_handle_response_queue(conn);
 		if (ret == 1)
 			goto get_immediate;
-		else if (ret == -EAGAIN)
-			goto restart;
+		else if (ret == -ECONNRESET)
+			goto out;
 		else if (ret < 0)
 			goto transport_err;
 	}
 
 transport_err:
-	iscsit_take_action_for_connection_exit(conn);
-	goto restart;
+	/*
+	 * Avoid the normal connection failure code-path if this connection
+	 * is still within LOGIN mode, and iscsi_np process context is
+	 * responsible for cleaning up the early connection failure.
+	 */
+	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+		iscsit_take_action_for_connection_exit(conn);
 out:
 	return 0;
 }
@@ -3974,35 +3960,46 @@
 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
+static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+{
+	bool ret;
+
+	spin_lock_bh(&conn->state_lock);
+	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+	spin_unlock_bh(&conn->state_lock);
+
+	return ret;
+}
+
 int iscsi_target_rx_thread(void *arg)
 {
-	int ret;
+	int ret, rc;
 	u8 buffer[ISCSI_HDR_LEN], opcode;
 	u32 checksum = 0, digest = 0;
-	struct iscsi_conn *conn = NULL;
-	struct iscsi_thread_set *ts = arg;
+	struct iscsi_conn *conn = arg;
 	struct kvec iov;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.
 	 */
 	allow_signal(SIGINT);
-
-restart:
-	conn = iscsi_rx_thread_pre_handler(ts);
-	if (!conn)
-		goto out;
+	/*
+	 * Wait for iscsi_post_login_handler() to complete before allowing
+	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
+	 */
+	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+	if (rc < 0 || iscsi_target_check_conn_state(conn))
+		return 0;
 
 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
 		struct completion comp;
-		int rc;
 
 		init_completion(&comp);
 		rc = wait_for_completion_interruptible(&comp);
 		if (rc < 0)
 			goto transport_err;
 
-		goto out;
+		goto transport_err;
 	}
 
 	while (!kthread_should_stop()) {
@@ -4085,8 +4082,6 @@
 	if (!signal_pending(current))
 		atomic_set(&conn->transport_failed, 1);
 	iscsit_take_action_for_connection_exit(conn);
-	goto restart;
-out:
 	return 0;
 }
 
@@ -4148,7 +4143,24 @@
 	if (conn->conn_transport->transport_type == ISCSI_TCP)
 		complete(&conn->conn_logout_comp);
 
-	iscsi_release_thread_set(conn);
+	if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
+		if (conn->tx_thread &&
+		    cmpxchg(&conn->tx_thread_active, true, false)) {
+			send_sig(SIGINT, conn->tx_thread, 1);
+			kthread_stop(conn->tx_thread);
+		}
+	} else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
+		if (conn->rx_thread &&
+		    cmpxchg(&conn->rx_thread_active, true, false)) {
+			send_sig(SIGINT, conn->rx_thread, 1);
+			kthread_stop(conn->rx_thread);
+		}
+	}
+
+	spin_lock(&iscsit_global->ts_bitmap_lock);
+	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+			      get_order(1));
+	spin_unlock(&iscsit_global->ts_bitmap_lock);
 
 	iscsit_stop_timers_for_cmds(conn);
 	iscsit_stop_nopin_response_timer(conn);
@@ -4427,15 +4439,24 @@
 	struct iscsi_conn *conn)
 {
 	struct iscsi_session *sess = conn->sess;
-
-	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
-	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+	int sleep = 1;
+	/*
+	 * Traditional iscsi/tcp will invoke this logic from TX thread
+	 * context during session logout, so clear tx_thread_active and
+	 * sleep if iscsit_close_connection() has not already occurred.
+	 *
+	 * Since iser-target invokes this logic from its own workqueue,
+	 * always sleep waiting for RX/TX thread shutdown to complete
+	 * within iscsit_close_connection().
+	 */
+	if (conn->conn_transport->transport_type == ISCSI_TCP)
+		sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);
 
 	iscsit_dec_conn_usage_count(conn);
-	iscsit_stop_session(sess, 1, 1);
+	iscsit_stop_session(sess, sleep, sleep);
 	iscsit_dec_session_usage_count(sess);
 	target_put_session(sess->se_sess);
 }
@@ -4443,13 +4464,15 @@
 static void iscsit_logout_post_handler_samecid(
 	struct iscsi_conn *conn)
 {
-	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
-	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+	int sleep = 1;
+
+	if (conn->conn_transport->transport_type == ISCSI_TCP)
+		sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);
 
-	iscsit_cause_connection_reinstatement(conn, 1);
+	iscsit_cause_connection_reinstatement(conn, sleep);
 	iscsit_dec_conn_usage_count(conn);
 }
 
@@ -4663,6 +4686,7 @@
 	struct iscsi_session *sess;
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 	struct se_session *se_sess, *se_sess_tmp;
+	LIST_HEAD(free_list);
 	int session_count = 0;
 
 	spin_lock_bh(&se_tpg->session_lock);
@@ -4684,14 +4708,17 @@
 		}
 		atomic_set(&sess->session_reinstatement, 1);
 		spin_unlock(&sess->conn_lock);
-		spin_unlock_bh(&se_tpg->session_lock);
 
-		iscsit_free_session(sess);
-		spin_lock_bh(&se_tpg->session_lock);
+		list_move_tail(&se_sess->sess_list, &free_list);
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
 
+		iscsit_free_session(sess);
 		session_count++;
 	}
-	spin_unlock_bh(&se_tpg->session_lock);
 
 	pr_debug("Released %d iSCSI Session(s) from Target Portal"
 			" Group: %hu\n", session_count, tpg->tpgt);
diff -ur a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
--- a/drivers/target/iscsi/iscsi_target_configfs.c	2017-03-23 14:39:59.000000000 +0100
+++ b/drivers/target/iscsi/iscsi_target_configfs.c	2017-03-14 02:15:18.000000000 +0100
@@ -1730,7 +1730,8 @@
 }
 
 /*
- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
+ * Called either with spin_lock_irq(struct se_portal_group->session_lock)
+ * held by the caller, or with the lock not held at all; in the latter case
+ * it is taken and released locally.
  *
  * Also, this function calls iscsit_inc_session_usage_count() on the
  * struct iscsi_session in question.
@@ -1738,19 +1739,32 @@
 static int lio_tpg_shutdown_session(struct se_session *se_sess)
 {
 	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+	bool local_lock = false;
+
+	if (!spin_is_locked(&se_tpg->session_lock)) {
+		spin_lock_irq(&se_tpg->session_lock);
+		local_lock = true;
+	}
 
 	spin_lock(&sess->conn_lock);
 	if (atomic_read(&sess->session_fall_back_to_erl0) ||
 	    atomic_read(&sess->session_logout) ||
 	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
 		spin_unlock(&sess->conn_lock);
+		if (local_lock)
+			spin_unlock_irq(&se_tpg->session_lock);
 		return 0;
 	}
 	atomic_set(&sess->session_reinstatement, 1);
 	spin_unlock(&sess->conn_lock);
 
 	iscsit_stop_time2retain_timer(sess);
+	spin_unlock_irq(&se_tpg->session_lock);
+
 	iscsit_stop_session(sess, 1, 1);
+	if (!local_lock)
+		spin_lock_irq(&se_tpg->session_lock);
 
 	return 1;
 }
diff -ur a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
--- a/drivers/target/iscsi/iscsi_target_core.h	2017-03-23 14:39:59.000000000 +0100
+++ b/drivers/target/iscsi/iscsi_target_core.h	2017-03-14 02:15:19.000000000 +0100
@@ -585,6 +585,12 @@
 	struct iscsi_session	*sess;
 	/* Pointer to thread_set in use for this conn's threads */
 	struct iscsi_thread_set	*thread_set;
+	int			bitmap_id;
+	int			rx_thread_active;
+	struct task_struct	*rx_thread;
+	struct completion	rx_login_comp;
+	int			tx_thread_active;
+	struct task_struct	*tx_thread;
 	/* list_head for session connection list */
 	struct list_head	conn_list;
 } ____cacheline_aligned;
@@ -861,10 +867,12 @@
 	/* Unique identifier used for the authentication daemon */
 	u32			auth_id;
 	u32			inactive_ts;
+#define ISCSIT_BITMAP_BITS	262144
 	/* Thread Set bitmap count */
 	int			ts_bitmap_count;
 	/* Thread Set bitmap pointer */
 	unsigned long		*ts_bitmap;
+	spinlock_t		ts_bitmap_lock;
 	/* Used for iSCSI discovery session authentication */
 	struct iscsi_node_acl	discovery_acl;
 	struct iscsi_portal_group	*discovery_tpg;
diff -ur a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
--- a/drivers/target/iscsi/iscsi_target_erl0.c	2017-03-23 14:39:57.000000000 +0100
+++ b/drivers/target/iscsi/iscsi_target_erl0.c	2017-03-14 02:15:16.000000000 +0100
@@ -865,7 +865,10 @@
 	}
 	spin_unlock_bh(&conn->state_lock);
 
-	iscsi_thread_set_force_reinstatement(conn);
+	if (conn->tx_thread && conn->tx_thread_active)
+		send_sig(SIGINT, conn->tx_thread, 1);
+	if (conn->rx_thread && conn->rx_thread_active)
+		send_sig(SIGINT, conn->rx_thread, 1);
 
 sleep:
 	wait_for_completion(&conn->conn_wait_rcfr_comp);
@@ -890,10 +893,10 @@
 		return;
 	}
 
-	if (iscsi_thread_set_force_reinstatement(conn) < 0) {
-		spin_unlock_bh(&conn->state_lock);
-		return;
-	}
+	if (conn->tx_thread && conn->tx_thread_active)
+		send_sig(SIGINT, conn->tx_thread, 1);
+	if (conn->rx_thread && conn->rx_thread_active)
+		send_sig(SIGINT, conn->rx_thread, 1);
 
 	atomic_set(&conn->connection_reinstatement, 1);
 	if (!sleep) {
diff -ur a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
--- a/drivers/target/iscsi/iscsi_target_login.c	2017-03-23 14:39:58.000000000 +0100
+++ b/drivers/target/iscsi/iscsi_target_login.c	2017-03-14 02:15:16.000000000 +0100
@@ -84,6 +84,7 @@
 	init_completion(&conn->conn_logout_comp);
 	init_completion(&conn->rx_half_close_comp);
 	init_completion(&conn->tx_half_close_comp);
+	init_completion(&conn->rx_login_comp);
 	spin_lock_init(&conn->cmd_lock);
 	spin_lock_init(&conn->conn_usage_lock);
 	spin_lock_init(&conn->immed_queue_lock);
@@ -683,7 +684,53 @@
 		iscsit_start_nopin_timer(conn);
 }
 
-static int iscsi_post_login_handler(
+int iscsit_start_kthreads(struct iscsi_conn *conn)
+{
+	int ret = 0;
+
+	spin_lock(&iscsit_global->ts_bitmap_lock);
+	conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+					ISCSIT_BITMAP_BITS, get_order(1));
+	spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+	if (conn->bitmap_id < 0) {
+		pr_err("bitmap_find_free_region() failed for"
+		       " iscsit_start_kthreads()\n");
+		return -ENOMEM;
+	}
+
+	conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
+				      "%s", ISCSI_TX_THREAD_NAME);
+	if (IS_ERR(conn->tx_thread)) {
+		pr_err("Unable to start iscsi_target_tx_thread\n");
+		ret = PTR_ERR(conn->tx_thread);
+		goto out_bitmap;
+	}
+	conn->tx_thread_active = true;
+
+	conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
+				      "%s", ISCSI_RX_THREAD_NAME);
+	if (IS_ERR(conn->rx_thread)) {
+		pr_err("Unable to start iscsi_target_rx_thread\n");
+		ret = PTR_ERR(conn->rx_thread);
+		goto out_tx;
+	}
+	conn->rx_thread_active = true;
+
+	return 0;
+out_tx:
+	send_sig(SIGINT, conn->tx_thread, 1);
+	kthread_stop(conn->tx_thread);
+	conn->tx_thread_active = false;
+out_bitmap:
+	spin_lock(&iscsit_global->ts_bitmap_lock);
+	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+			      get_order(1));
+	spin_unlock(&iscsit_global->ts_bitmap_lock);
+	return ret;
+}
+
+void iscsi_post_login_handler(
 	struct iscsi_np *np,
 	struct iscsi_conn *conn,
 	u8 zero_tsih)
@@ -693,7 +740,6 @@
 	struct se_session *se_sess = sess->se_sess;
 	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
-	struct iscsi_thread_set *ts;
 
 	iscsit_inc_conn_usage_count(conn);
 
@@ -708,7 +754,6 @@
 	/*
 	 * SCSI Initiator -> SCSI Target Port Mapping
 	 */
-	ts = iscsi_get_thread_set();
 	if (!zero_tsih) {
 		iscsi_set_session_parameters(sess->sess_ops,
 				conn->param_list, 0);
@@ -736,8 +781,6 @@
 		spin_unlock_bh(&sess->conn_lock);
 
 		iscsi_post_login_start_timers(conn);
-
-		iscsi_activate_thread_set(conn, ts);
 		/*
 		 * Determine CPU mask to ensure connection's RX and TX kthreads
 		 * are scheduled on the same CPU.
@@ -745,15 +788,20 @@
 		iscsit_thread_get_cpumask(conn);
 		conn->conn_rx_reset_cpumask = 1;
 		conn->conn_tx_reset_cpumask = 1;
-
+		/*
+		 * Wakeup the sleeping iscsi_target_rx_thread() now that
+		 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+		 */
+		complete(&conn->rx_login_comp);
 		iscsit_dec_conn_usage_count(conn);
+
 		if (stop_timer) {
 			spin_lock_bh(&se_tpg->session_lock);
 			iscsit_stop_time2retain_timer(sess);
 			spin_unlock_bh(&se_tpg->session_lock);
 		}
 		iscsit_dec_session_usage_count(sess);
-		return 0;
+		return;
 	}
 
 	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -795,7 +843,6 @@
 	spin_unlock_bh(&se_tpg->session_lock);
 
 	iscsi_post_login_start_timers(conn);
-	iscsi_activate_thread_set(conn, ts);
 	/*
 	 * Determine CPU mask to ensure connection's RX and TX kthreads
 	 * are scheduled on the same CPU.
@@ -803,10 +850,12 @@
 	iscsit_thread_get_cpumask(conn);
 	conn->conn_rx_reset_cpumask = 1;
 	conn->conn_tx_reset_cpumask = 1;
-
+	/*
+	 * Wakeup the sleeping iscsi_target_rx_thread() now that
+	 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+	 */
+	complete(&conn->rx_login_comp);
 	iscsit_dec_conn_usage_count(conn);
-
-	return 0;
 }
 
 static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1282,20 +1331,9 @@
 	if (iscsi_target_start_negotiation(login, conn) < 0)
 		goto new_sess_out;
 
-	if (!conn->sess) {
-		pr_err("struct iscsi_conn session pointer is NULL!\n");
-		goto new_sess_out;
-	}
-
 	iscsi_stop_login_thread_timer(np);
 
-	if (signal_pending(current))
-		goto new_sess_out;
-
-	ret = iscsi_post_login_handler(np, conn, zero_tsih);
-
-	if (ret < 0)
-		goto new_sess_out;
+	iscsi_post_login_handler(np, conn, zero_tsih);
 
 	iscsit_deaccess_np(np, tpg);
 	tpg = NULL;
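iscsit_start_kthreads() above reserves a per-connection bitmap_id under ts_bitmap_lock, and every failure path, as well as iscsit_close_connection(), releases it again under the same lock. A minimal userspace sketch of that allocate-on-start / release-on-teardown pattern is shown below; it uses a mutex-protected bit array instead of the kernel bitmap_find_free_region()/bitmap_release_region() helpers, and ID_BITS, id_alloc() and id_free() are invented for the example.

/* Illustrative sketch of a locked bit-ID allocator, not the kernel bitmap API. */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define ID_BITS		256
#define WORD_BITS	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long id_map[ID_BITS / WORD_BITS];
static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;

static int id_alloc(void)
{
	int id = -1;

	pthread_mutex_lock(&id_lock);
	for (int i = 0; i < ID_BITS; i++) {
		if (!(id_map[i / WORD_BITS] & (1UL << (i % WORD_BITS)))) {
			id_map[i / WORD_BITS] |= 1UL << (i % WORD_BITS);
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&id_lock);
	return id;		/* -1 on exhaustion, like the -ENOMEM case above */
}

static void id_free(int id)
{
	pthread_mutex_lock(&id_lock);
	id_map[id / WORD_BITS] &= ~(1UL << (id % WORD_BITS));
	pthread_mutex_unlock(&id_lock);
}

int main(void)
{
	int a = id_alloc(), b = id_alloc();

	printf("allocated ids %d and %d\n", a, b);
	id_free(b);
	id_free(a);
	return 0;
}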
diff -ur a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
--- a/drivers/target/iscsi/iscsi_target_login.h	2017-03-23 14:39:58.000000000 +0100
+++ b/drivers/target/iscsi/iscsi_target_login.h	2017-03-14 02:15:18.000000000 +0100
@@ -12,6 +12,7 @@
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
 extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
 extern int iscsi_target_login_thread(void *);
 extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
 
diff -ur a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
--- a/drivers/target/iscsi/iscsi_target_nego.c	2017-03-23 14:39:57.000000000 +0100
+++ b/drivers/target/iscsi/iscsi_target_nego.c	2017-03-14 02:15:16.000000000 +0100
@@ -19,6 +19,7 @@
  ******************************************************************************/
 
 #include <linux/ctype.h>
+#include <linux/kthread.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
@@ -352,10 +353,24 @@
 		ntohl(login_rsp->statsn), login->rsp_length);
 
 	padding = ((-login->rsp_length) & 3);
+	/*
+	 * Before sending the last login response containing the transition
+	 * bit for full-feature-phase, go ahead and start up TX/RX threads
+	 * now to avoid potential resource allocation failures after the
+	 * final login response has been sent.
+	 */
+	if (login->login_complete) {
+		int rc = iscsit_start_kthreads(conn);
+		if (rc) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+	}
 
 	if (conn->conn_transport->iscsit_put_login_tx(conn, login,
 					login->rsp_length + padding) < 0)
-		return -1;
+		goto err;
 
 	login->rsp_length		= 0;
 	mutex_lock(&sess->cmdsn_mutex);
@@ -364,6 +379,24 @@
 	mutex_unlock(&sess->cmdsn_mutex);
 
 	return 0;
+
+err:
+	if (login->login_complete) {
+		if (conn->rx_thread && conn->rx_thread_active) {
+			send_sig(SIGINT, conn->rx_thread, 1);
+			complete(&conn->rx_login_comp);
+			kthread_stop(conn->rx_thread);
+		}
+		if (conn->tx_thread && conn->tx_thread_active) {
+			send_sig(SIGINT, conn->tx_thread, 1);
+			kthread_stop(conn->tx_thread);
+		}
+		spin_lock(&iscsit_global->ts_bitmap_lock);
+		bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+				      get_order(1));
+		spin_unlock(&iscsit_global->ts_bitmap_lock);
+	}
+	return -1;
 }
 
 static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
diff -ur a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
--- a/drivers/target/target_core_pscsi.c	2017-03-23 14:39:53.000000000 +0100
+++ b/drivers/target/target_core_pscsi.c	2017-03-14 02:15:10.000000000 +0100
@@ -520,6 +520,7 @@
 					" pdv_host_id: %d\n", pdv->pdv_host_id);
 				return -EINVAL;
 			}
+			pdv->pdv_lld_host = sh;
 		}
 	} else {
 		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
@@ -602,6 +603,8 @@
 		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
 		    (phv->phv_lld_host != NULL))
 			scsi_host_put(phv->phv_lld_host);
+		else if (pdv->pdv_lld_host)
+			scsi_host_put(pdv->pdv_lld_host);
 
 		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
 			scsi_device_put(sd);
diff -ur a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
--- a/drivers/target/target_core_pscsi.h	2017-03-23 14:39:54.000000000 +0100
+++ b/drivers/target/target_core_pscsi.h	2017-03-14 02:15:11.000000000 +0100
@@ -45,6 +45,7 @@
 	int	pdv_lun_id;
 	struct block_device *pdv_bd;
 	struct scsi_device *pdv_sd;
+	struct Scsi_Host *pdv_lld_host;
 } ____cacheline_aligned;
 
 typedef enum phv_modes {
diff -ur a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
--- a/drivers/tty/hvc/hvc_xen.c	2017-03-23 14:42:11.000000000 +0100
+++ b/drivers/tty/hvc/hvc_xen.c	2017-03-14 02:17:55.000000000 +0100
@@ -299,11 +299,27 @@
 	return 0;
 }
 
+static void xen_console_update_evtchn(struct xencons_info *info)
+{
+	if (xen_hvm_domain()) {
+		uint64_t v;
+		int err;
+
+		err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+		if (!err && v)
+			info->evtchn = v;
+	} else
+		info->evtchn = xen_start_info->console.domU.evtchn;
+}
+
 void xen_console_resume(void)
 {
 	struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
-	if (info != NULL && info->irq)
+	if (info != NULL && info->irq) {
+		if (!xen_initial_domain())
+			xen_console_update_evtchn(info);
 		rebind_evtchn_irq(info->evtchn, info->irq);
+	}
 }
 
 static void xencons_disconnect_backend(struct xencons_info *info)
diff -ur a/drivers/tty/pty.c b/drivers/tty/pty.c
--- a/drivers/tty/pty.c	2017-03-23 14:41:50.000000000 +0100
+++ b/drivers/tty/pty.c	2017-03-14 02:17:36.000000000 +0100
@@ -622,7 +622,14 @@
 /* this is called once with whichever end is closed last */
 static void pty_unix98_shutdown(struct tty_struct *tty)
 {
-	devpts_kill_index(tty->driver_data, tty->index);
+	struct inode *ptmx_inode;
+
+	if (tty->driver->subtype == PTY_TYPE_MASTER)
+		ptmx_inode = tty->driver_data;
+	else
+		ptmx_inode = tty->link->driver_data;
+	devpts_kill_index(ptmx_inode, tty->index);
+	devpts_del_ref(ptmx_inode);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -713,6 +720,18 @@
 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
 	tty->driver_data = inode;
 
+	/*
+	 * In the case where all references to ptmx inode are dropped and we
+	 * still have /dev/tty opened pointing to the master/slave pair (ptmx
+	 * is closed/released before /dev/tty), we must make sure that the inode
+	 * is still valid when we call the final pty_unix98_shutdown, thus we
+	 * hold an additional reference to the ptmx inode. For the same /dev/tty
+	 * last close case, we also need to make sure the super_block isn't
+	 * destroyed (devpts instance unmounted), before /dev/tty is closed and
+	 * on its release devpts_kill_index is called.
+	 */
+	devpts_add_ref(inode);
+
 	tty_add_file(tty, filp);
 
 	slave_inode = devpts_pty_new(inode,
diff -ur a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
--- a/drivers/tty/serial/8250/8250_core.c	2017-03-23 14:42:12.000000000 +0100
+++ b/drivers/tty/serial/8250/8250_core.c	2017-03-14 02:17:56.000000000 +0100
@@ -613,22 +613,16 @@
 
 static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
 {
-	unsigned char old_dll, old_dlm, old_lcr;
-	unsigned int id;
+	unsigned char old_lcr;
+	unsigned int id, old_dl;
 
 	old_lcr = serial_in(p, UART_LCR);
 	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+	old_dl = serial_dl_read(p);
+	serial_dl_write(p, 0);
+	id = serial_dl_read(p);
+	serial_dl_write(p, old_dl);
 
-	old_dll = serial_in(p, UART_DLL);
-	old_dlm = serial_in(p, UART_DLM);
-
-	serial_out(p, UART_DLL, 0);
-	serial_out(p, UART_DLM, 0);
-
-	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
-	serial_out(p, UART_DLL, old_dll);
-	serial_out(p, UART_DLM, old_dlm);
 	serial_out(p, UART_LCR, old_lcr);
 
 	return id;
diff -ur a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
--- a/drivers/tty/serial/8250/8250_pnp.c	2017-03-23 14:42:09.000000000 +0100
+++ b/drivers/tty/serial/8250/8250_pnp.c	2017-03-14 02:17:54.000000000 +0100
@@ -365,6 +365,11 @@
 	/* Winbond CIR port, should not be probed. We should keep track
 	   of it to prevent the legacy serial driver from probing it */
 	{	"WEC1022",		CIR_PORT	},
+	/*
+	 * SMSC IrCC SIR/FIR port, should not be probed by the serial driver
+	 * either, so that its own driver can bind to it.
+	 */
+	{	"SMCF010",		CIR_PORT	},
 	{	"",			0	}
 };
 
diff -ur a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
--- a/drivers/tty/serial/of_serial.c	2017-03-23 14:42:06.000000000 +0100
+++ b/drivers/tty/serial/of_serial.c	2017-03-14 02:17:50.000000000 +0100
@@ -262,7 +262,6 @@
 	{ .compatible = "ibm,qpace-nwp-serial",
 		.data = (void *)PORT_NWPSERIAL, },
 #endif
-	{ .type = "serial",         .data = (void *)PORT_UNKNOWN, },
 	{ /* end of list */ },
 };
 
diff -ur a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
--- a/drivers/tty/serial/samsung.c	2017-03-23 14:42:00.000000000 +0100
+++ b/drivers/tty/serial/samsung.c	2017-03-14 02:17:44.000000000 +0100
@@ -723,6 +723,8 @@
 	/* check to see if we need  to change clock source */
 
 	if (ourport->baudclk != clk) {
+		clk_prepare_enable(clk);
+
 		s3c24xx_serial_setsource(port, clk_sel);
 
 		if (!IS_ERR(ourport->baudclk)) {
@@ -730,8 +732,6 @@
 			ourport->baudclk = ERR_PTR(-EINVAL);
 		}
 
-		clk_prepare_enable(clk);
-
 		ourport->baudclk = clk;
 		ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
 	}
diff -ur a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
--- a/drivers/tty/tty_io.c	2017-03-23 14:41:55.000000000 +0100
+++ b/drivers/tty/tty_io.c	2017-03-14 02:17:40.000000000 +0100
@@ -1747,6 +1747,17 @@
 	return ret;
 }
 
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+	struct tty_ldisc *ld;
+	int ret;
+
+	ld = tty_ldisc_ref_wait(tty);
+	ret = put_user(ld->ops->num, p);
+	tty_ldisc_deref(ld);
+	return ret;
+}
+
 static int send_break(struct tty_struct *tty, unsigned int duration)
 {
 	int retval;
@@ -1915,7 +1926,7 @@
 	case TIOCGSID:
 		return tiocgsid(tty, real_tty, p);
 	case TIOCGETD:
-		return put_user(tty->ldisc->ops->num, (int __user *)p);
+		return tiocgetd(tty, p);
 	case TIOCSETD:
 		return tiocsetd(tty, p);
 	case TIOCVHANGUP:
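The tiocgetd() helper above only changes how TIOCGETD reads the line-discipline number (through tty_ldisc_ref_wait() instead of dereferencing tty->ldisc directly while it may be changing); the userspace interface is unchanged. For reference, a minimal caller looks roughly like this, assuming a Linux TTY on stdin and that line discipline 0 is N_TTY:

/* Illustrative userspace caller of the ioctl hardened above. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int ldisc;

	if (ioctl(STDIN_FILENO, TIOCGETD, &ldisc) == 0)
		printf("line discipline: %d\n", ldisc);	/* 0 is N_TTY */
	else
		perror("TIOCGETD");
	return 0;
}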
diff -ur a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
--- a/drivers/tty/vt/vt.c	2017-03-23 14:42:02.000000000 +0100
+++ b/drivers/tty/vt/vt.c	2017-03-14 02:17:44.000000000 +0100
@@ -3083,9 +3083,10 @@
 		goto err;
 
 	desc = csw->con_startup();
-
-	if (!desc)
+	if (!desc) {
+		retval = -ENODEV;
 		goto err;
+	}
 
 	retval = -EINVAL;
 
diff -ur a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
--- a/drivers/usb/class/cdc-acm.c	2017-03-23 14:47:36.000000000 +0100
+++ b/drivers/usb/class/cdc-acm.c	2017-03-14 02:23:55.000000000 +0100
@@ -996,6 +996,9 @@
 	if (quirks == NO_UNION_NORMAL) {
 		data_interface = usb_ifnum_to_if(usb_dev, 1);
 		control_interface = usb_ifnum_to_if(usb_dev, 0);
+		/* we would crash */
+		if (!data_interface || !control_interface)
+			return -ENODEV;
 		goto skip_normal_probe;
 	}
 
@@ -1723,6 +1726,16 @@
 	},
 #endif
 
+	/* Samsung phone in firmware update mode */
+	{ USB_DEVICE(0x04e8, 0x685d),
+	.driver_info = IGNORE_DEVICE,
+	},
+
+	/* Exclude Infineon Flash Loader utility */
+	{ USB_DEVICE(0x058b, 0x0041),
+	.driver_info = IGNORE_DEVICE,
+	},
+
 	/* control interfaces without any protocol set */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
 		USB_CDC_PROTO_NONE) },
diff -ur a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
--- a/drivers/usb/class/usblp.c	2017-03-23 14:47:35.000000000 +0100
+++ b/drivers/usb/class/usblp.c	2017-03-14 02:23:55.000000000 +0100
@@ -870,11 +870,11 @@
 
 	add_wait_queue(&usblp->wwait, &waita);
 	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
 		if (mutex_lock_interruptible(&usblp->mut)) {
 			rc = -EINTR;
 			break;
 		}
+		set_current_state(TASK_INTERRUPTIBLE);
 		rc = usblp_wtest(usblp, nonblock);
 		mutex_unlock(&usblp->mut);
 		if (rc <= 0)
diff -ur a/drivers/usb/core/config.c b/drivers/usb/core/config.c
--- a/drivers/usb/core/config.c	2017-03-23 14:47:04.000000000 +0100
+++ b/drivers/usb/core/config.c	2017-03-14 02:23:16.000000000 +0100
@@ -112,16 +112,18 @@
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		ep->ss_ep_comp.bmAttributes = 16;
 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
-			desc->bmAttributes > 2) {
+		   USB_SS_MULT(desc->bmAttributes) > 3) {
 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
 				"config %d interface %d altsetting %d ep %d: "
-				"setting to 3\n", desc->bmAttributes + 1,
+				"setting to 3\n",
+				USB_SS_MULT(desc->bmAttributes),
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		ep->ss_ep_comp.bmAttributes = 2;
 	}
 
 	if (usb_endpoint_xfer_isoc(&ep->desc))
-		max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+		max_tx = (desc->bMaxBurst + 1) *
+			(USB_SS_MULT(desc->bmAttributes)) *
 			usb_endpoint_maxp(&ep->desc);
 	else if (usb_endpoint_xfer_int(&ep->desc))
 		max_tx = usb_endpoint_maxp(&ep->desc) *
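The config.c hunk above sizes isochronous transfers from the USB_SS_MULT() burst multiplier rather than the raw bmAttributes byte. Assuming the usual definition, USB_SS_MULT(p) expands to 1 + (p & 0x3), so max_tx becomes (bMaxBurst + 1) * mult * maxpacket. A short worked example with made-up descriptor values:

/* Illustrative only; the macro is assumed to match the UAPI definition. */
#include <stdio.h>

#define USB_SS_MULT(p)	(1 + ((p) & 0x3))

int main(void)
{
	unsigned int bMaxBurst = 2;		/* 3 packets per burst */
	unsigned int bmAttributes = 1;		/* Mult field: 2 bursts per interval */
	unsigned int wMaxPacketSize = 1024;
	unsigned int max_tx;

	max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * wMaxPacketSize;
	printf("max_tx = %u bytes per service interval\n", max_tx);	/* 6144 */
	return 0;
}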
diff -ur a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
--- a/drivers/usb/core/devio.c	2017-03-23 14:47:06.000000000 +0100
+++ b/drivers/usb/core/devio.c	2017-03-14 02:23:18.000000000 +0100
@@ -459,7 +459,7 @@
 	snoop(&urb->dev->dev, "urb complete\n");
 	snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
 			as->status, COMPLETE, NULL, 0);
-	if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN)
+	if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
 		snoop_urb_data(urb, urb->actual_length);
 
 	if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
@@ -1480,7 +1480,7 @@
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		as = async_getcompleted(ps);
-		if (as)
+		if (as || !connected(ps))
 			break;
 		if (signal_pending(current))
 			break;
@@ -1503,7 +1503,7 @@
 	}
 	if (signal_pending(current))
 		return -EINTR;
-	return -EIO;
+	return -ENODEV;
 }
 
 static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
@@ -1512,10 +1512,11 @@
 	struct async *as;
 
 	as = async_getcompleted(ps);
-	retval = -EAGAIN;
 	if (as) {
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
+	} else {
+		retval = (connected(ps) ? -EAGAIN : -ENODEV);
 	}
 	return retval;
 }
@@ -1645,7 +1646,7 @@
 	}
 	if (signal_pending(current))
 		return -EINTR;
-	return -EIO;
+	return -ENODEV;
 }
 
 static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
@@ -1653,11 +1654,12 @@
 	int retval;
 	struct async *as;
 
-	retval = -EAGAIN;
 	as = async_getcompleted(ps);
 	if (as) {
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
+	} else {
+		retval = (connected(ps) ? -EAGAIN : -ENODEV);
 	}
 	return retval;
 }
@@ -1822,7 +1824,8 @@
 {
 	__u32 caps;
 
-	caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM;
+	caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
+			USBDEVFS_CAP_REAP_AFTER_DISCONNECT;
 	if (!ps->dev->bus->no_stop_on_short)
 		caps |= USBDEVFS_CAP_BULK_CONTINUATION;
 	if (ps->dev->bus->sg_tablesize)
@@ -1878,6 +1881,31 @@
 		return -EPERM;
 
 	usb_lock_device(dev);
+
+	switch (cmd) {
+	case USBDEVFS_REAPURB:
+		snoop(&dev->dev, "%s: REAPURB\n", __func__);
+		ret = proc_reapurb(ps, p);
+		goto done;
+
+	case USBDEVFS_REAPURBNDELAY:
+		snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
+		ret = proc_reapurbnonblock(ps, p);
+		goto done;
+
+#ifdef CONFIG_COMPAT
+	case USBDEVFS_REAPURB32:
+		snoop(&dev->dev, "%s: REAPURB32\n", __func__);
+		ret = proc_reapurb_compat(ps, p);
+		goto done;
+
+	case USBDEVFS_REAPURBNDELAY32:
+		snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
+		ret = proc_reapurbnonblock_compat(ps, p);
+		goto done;
+#endif
+	}
+
 	if (!connected(ps)) {
 		usb_unlock_device(dev);
 		return -ENODEV;
@@ -1971,16 +1999,6 @@
 			inode->i_mtime = CURRENT_TIME;
 		break;
 
-	case USBDEVFS_REAPURB32:
-		snoop(&dev->dev, "%s: REAPURB32\n", __func__);
-		ret = proc_reapurb_compat(ps, p);
-		break;
-
-	case USBDEVFS_REAPURBNDELAY32:
-		snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
-		ret = proc_reapurbnonblock_compat(ps, p);
-		break;
-
 	case USBDEVFS_IOCTL32:
 		snoop(&dev->dev, "%s: IOCTL32\n", __func__);
 		ret = proc_ioctl_compat(ps, ptr_to_compat(p));
@@ -1992,16 +2010,6 @@
 		ret = proc_unlinkurb(ps, p);
 		break;
 
-	case USBDEVFS_REAPURB:
-		snoop(&dev->dev, "%s: REAPURB\n", __func__);
-		ret = proc_reapurb(ps, p);
-		break;
-
-	case USBDEVFS_REAPURBNDELAY:
-		snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
-		ret = proc_reapurbnonblock(ps, p);
-		break;
-
 	case USBDEVFS_DISCSIGNAL:
 		snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
 		ret = proc_disconnectsignal(ps, p);
@@ -2038,6 +2046,8 @@
 		ret = proc_disconnect_claim(ps, p);
 		break;
 	}
+
+ done:
 	usb_unlock_device(dev);
 	if (ret >= 0)
 		inode->i_atime = CURRENT_TIME;
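With the reordering above, the reap ioctls run before the connected() check and report -ENODEV once the device is gone, so userspace can keep reaping completed URBs after a disconnect and can tell "nothing ready yet" apart from "device removed". A rough userspace sketch follows; it assumes an already-open usbfs file descriptor with URBs submitted, and it only shows the error handling, not full URB management.

/* Illustrative drain loop for an already-open usbfs fd. */
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

static int drain_completed_urbs(int fd)
{
	struct usbdevfs_urb *urb;

	for (;;) {
		if (ioctl(fd, USBDEVFS_REAPURBNDELAY, &urb) == 0) {
			printf("reaped urb %p, status %d\n", (void *)urb, urb->status);
			continue;
		}
		if (errno == EAGAIN)
			return 0;	/* still connected, nothing completed yet */
		if (errno == ENODEV)
			return 1;	/* device disconnected, drain is finished */
		return -1;		/* some other failure */
	}
}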
diff -ur a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
--- a/drivers/usb/core/driver.c	2017-03-23 14:47:04.000000000 +0100
+++ b/drivers/usb/core/driver.c	2017-03-14 02:23:17.000000000 +0100
@@ -381,11 +381,15 @@
 int usb_driver_claim_interface(struct usb_driver *driver,
 				struct usb_interface *iface, void *priv)
 {
-	struct device *dev = &iface->dev;
+	struct device *dev;
 	struct usb_device *udev;
 	int retval = 0;
 	int lpm_disable_error;
 
+	if (!iface)
+		return -ENODEV;
+
+	dev = &iface->dev;
 	if (dev->driver)
 		return -EBUSY;
 
diff -ur a/drivers/usb/core/ethub.c b/drivers/usb/core/ethub.c
--- a/drivers/usb/core/ethub.c	2017-03-23 14:47:08.000000000 +0100
+++ b/drivers/usb/core/ethub.c	2017-03-14 02:23:20.000000000 +0100
@@ -1453,7 +1453,10 @@
 	udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
 	udev->manufacturer = usb_cache_string(udev,
 					      udev->descriptor.iManufacturer);
-	udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+	do {
+		udelay(500);
+		udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+	} while (!udev->serial && retry--);
 
 #ifdef MY_DEF_HERE
 	if (0x054c == le16_to_cpu(udev->descriptor.idVendor) &&
diff -ur a/drivers/usb/core/file.c b/drivers/usb/core/file.c
--- a/drivers/usb/core/file.c	2017-03-23 14:47:03.000000000 +0100
+++ b/drivers/usb/core/file.c	2017-03-14 02:23:15.000000000 +0100
@@ -145,7 +145,7 @@
 #endif
 	intf->minor = -1;
 
-	dev_dbg("looking for a minor, starting at %d\n", minor_base);
+	dev_dbg(&intf->dev, "looking for a minor, starting at %d\n", minor_base);
 
 	if (class_driver->fops == NULL)
 		goto exit;
diff -ur a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
--- a/drivers/usb/core/hcd-pci.c	2017-03-23 14:47:03.000000000 +0100
+++ b/drivers/usb/core/hcd-pci.c	2017-03-14 02:23:15.000000000 +0100
@@ -73,6 +73,15 @@
 		if (companion->bus != pdev->bus ||
 				PCI_SLOT(companion->devfn) != slot)
 			continue;
+
+		/*
+	 * Companion device should be either UHCI, OHCI or EHCI host
+		 * controller, otherwise skip.
+		 */
+		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+				companion->class != CL_EHCI)
+			continue;
+
 		companion_hcd = pci_get_drvdata(companion);
 		if (!companion_hcd || !companion_hcd->self.root_hub)
 			continue;
diff -ur a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
--- a/drivers/usb/core/hub.c	2017-03-23 14:47:09.000000000 +0100
+++ b/drivers/usb/core/hub.c	2017-03-14 02:23:21.000000000 +0100
@@ -108,6 +108,9 @@
 static int usb_device_supports_lpm(struct usb_device *udev)
 {
 	 
+	if (udev->quirks & USB_QUIRK_NO_LPM)
+		return 0;
+
 	if (udev->speed == USB_SPEED_HIGH) {
 		if (udev->bos->ext_cap &&
 			(USB_LPM_SUPPORT &
@@ -3606,7 +3609,8 @@
 						r = -EPROTO;
 					break;
 				}
-				if (r == 0)
+				 
+				if (r == 0  || (r == -ETIMEDOUT && j == 0))
 					break;
 			}
 			udev->descriptor.bMaxPacketSize0 =
@@ -3717,6 +3721,8 @@
 		goto fail;
 	}
 
+	usb_detect_quirks(udev);
+
 #if defined (MY_ABC_HERE)
 	if (IS_XHCI(hcd)
 #ifdef MY_ABC_HERE
@@ -4017,7 +4023,6 @@
 		if (status < 0)
 			goto loop;
 
-		usb_detect_quirks(udev);
 		if (udev->quirks & USB_QUIRK_DELAY_INIT)
 			msleep(1000);
 
diff -ur a/drivers/usb/core/port.c b/drivers/usb/core/port.c
--- a/drivers/usb/core/port.c	2017-03-23 14:47:03.000000000 +0100
+++ b/drivers/usb/core/port.c	2017-03-14 02:23:15.000000000 +0100
@@ -11,6 +11,9 @@
 static const struct attribute_group *port_dev_group[];
 extern inline int hub_is_superspeed(struct usb_device *hdev);
 
+#ifdef MY_DEF_HERE
+extern u32 syno_pch_lpc_gpio_pin(int pin, int *pValue, int isWrite);
+#endif  
 static ssize_t show_port_connect_type(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
@@ -138,6 +141,7 @@
 #if defined(MY_DEF_HERE) ||\
 	defined(MY_DEF_HERE)
 	struct usb_device *hdev = hub->hdev;
+	int i = 0;
 #endif  
 #ifdef MY_DEF_HERE
 	extern char gSynoCastratedXhcAddr[CONFIG_SYNO_NUM_CASTRATED_XHC][13];
@@ -147,6 +151,7 @@
 	extern char gSynoUsbVbusHostAddr[CONFIG_SYNO_USB_VBUS_NUM_GPIO][13];
 	extern int gSynoUsbVbusPort[CONFIG_SYNO_USB_VBUS_NUM_GPIO];
 	extern unsigned gSynoUsbVbusGpp[CONFIG_SYNO_USB_VBUS_NUM_GPIO];
+	int value = 0;
 #endif  
 
 	port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
@@ -168,7 +173,6 @@
 
 #ifdef MY_DEF_HERE
 	if (hdev && hdev->serial) {
-		int i;
 		for (i = 0; i < CONFIG_SYNO_NUM_CASTRATED_XHC; i++) {
 			if (0 == strcmp(gSynoCastratedXhcAddr[i], hdev->serial) &&
 				gSynoCastratedXhcPortBitmap[i] & (0x01 << (port1 - 1))) {
@@ -183,15 +187,26 @@
 
 #ifdef MY_DEF_HERE
 	if (hdev && hdev->serial) {
-		int i;
 		for (i = 0; i < CONFIG_SYNO_USB_VBUS_NUM_GPIO; i++) {
 			if (0 == strcmp(gSynoUsbVbusHostAddr[i], hdev->serial)) {
+#ifdef MY_DEF_HERE
+				value = 0;
+				if (0 == syno_pch_lpc_gpio_pin(gSynoUsbVbusGpp[i], &value, 0) && 0 == value) {
+					value = 1;
+					if (0 == syno_pch_lpc_gpio_pin(gSynoUsbVbusGpp[i], &value, 1)) {
+						printk(KERN_INFO " port%d is going to power up Vbus by "
+								"GPIO#%d\n", port1, gSynoUsbVbusGpp[i]);
+						mdelay(100);
+					}
+				}
+#else  
 				if (0 == gpio_get_value(gSynoUsbVbusGpp[i])) {
 					gpio_set_value(gSynoUsbVbusGpp[i], 1);
-					printk(KERN_INFO " port%d is going to power up Vbus by"
+					printk(KERN_INFO " port%d is going to power up Vbus by "
 							"GPIO#%d\n", port1, gSynoUsbVbusGpp[i]);
 					mdelay(100);
 				}
+#endif  
 				if (port1 == gSynoUsbVbusPort[i])
 					port_dev->syno_vbus_gpp = gSynoUsbVbusGpp[i];
 			}
diff -ur a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
--- a/drivers/usb/core/quirks.c	2017-03-23 14:47:02.000000000 +0100
+++ b/drivers/usb/core/quirks.c	2017-03-14 02:23:14.000000000 +0100
@@ -25,6 +25,11 @@
 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
 
+	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
+
+	{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
 	{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
 
 	{ USB_DEVICE(0x046d, 0x08c2), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -41,6 +46,10 @@
 
 	{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	{ USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
+
+	{ USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
@@ -140,6 +149,10 @@
 	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
 			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
 
+	{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
+
+	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+
 	{ }   
 };
 
diff -ur a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
--- a/drivers/usb/dwc3/ep0.c	2017-03-23 14:47:24.000000000 +0100
+++ b/drivers/usb/dwc3/ep0.c	2017-03-14 02:23:43.000000000 +0100
@@ -718,6 +718,10 @@
 		dev_vdbg(dwc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
 		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
 		break;
+	case USB_REQ_SET_INTERFACE:
+		dev_vdbg(dwc->dev, "USB_REQ_SET_INTERFACE\n");
+		dwc->start_config_issued = false;
+		/* Fall through */
 	default:
 		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
@@ -800,6 +804,11 @@
 		unsigned maxp = ep0->endpoint.maxpacket;
 
 		transfer_size += (maxp - (transfer_size % maxp));
+
+		/* No more than DWC3_EP0_BOUNCE_SIZE bytes can be received */
+		if (transfer_size > DWC3_EP0_BOUNCE_SIZE)
+			transfer_size = DWC3_EP0_BOUNCE_SIZE;
+
 		transferred = min_t(u32, ur->length,
 				transfer_size - length);
 		memcpy(ur->buf, dwc->ep0_bounce, transferred);
@@ -912,11 +921,14 @@
 			return;
 		}
 
-		WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
-
 		maxpacket = dep->endpoint.maxpacket;
 		transfer_size = roundup(req->request.length, maxpacket);
 
+		if (transfer_size > DWC3_EP0_BOUNCE_SIZE) {
+			dev_WARN(dwc->dev, "bounce buf can't handle req len\n");
+			transfer_size = DWC3_EP0_BOUNCE_SIZE;
+		}
+
 		dwc->ep0_bounced = true;
 
 		/*
diff -ur a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
--- a/drivers/usb/dwc3/gadget.c	2017-03-23 14:47:26.000000000 +0100
+++ b/drivers/usb/dwc3/gadget.c	2017-03-14 02:23:45.000000000 +0100
@@ -319,6 +319,8 @@
 		if (!(reg & DWC3_DGCMD_CMDACT)) {
 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
 					DWC3_DGCMD_STATUS(reg));
+			if (DWC3_DGCMD_STATUS(reg))
+				return -EINVAL;
 			return 0;
 		}
 
@@ -355,6 +357,8 @@
 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
 					DWC3_DEPCMD_STATUS(reg));
+			if (DWC3_DEPCMD_STATUS(reg))
+				return -EINVAL;
 			return 0;
 		}
 
diff -ur a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
--- a/drivers/usb/gadget/configfs.c	2017-03-23 14:47:32.000000000 +0100
+++ b/drivers/usb/gadget/configfs.c	2017-03-14 02:23:51.000000000 +0100
@@ -755,6 +755,7 @@
 			}
 		}
 		c->next_interface_id = 0;
+		memset(c->interface, 0, sizeof(c->interface));
 		c->superspeed = 0;
 		c->highspeed = 0;
 		c->fullspeed = 0;
diff -ur a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
--- a/drivers/usb/gadget/printer.c	2017-03-23 14:47:32.000000000 +0100
+++ b/drivers/usb/gadget/printer.c	2017-03-14 02:23:51.000000000 +0100
@@ -975,6 +975,15 @@
 		break;
 	}
 	/* host either stalls (value < 0) or reports success */
+	if (value >= 0) {
+		req->length = value;
+		req->zero = value < wLength;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
+			req->status = 0;
+		}
+	}
 	return value;
 }
 
diff -ur a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
--- a/drivers/usb/host/ehci-sysfs.c	2017-03-23 14:47:40.000000000 +0100
+++ b/drivers/usb/host/ehci-sysfs.c	2017-03-14 02:24:01.000000000 +0100
@@ -28,7 +28,7 @@
 	int			count = PAGE_SIZE;
 	char			*ptr = buf;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	nports = HCS_N_PORTS(ehci->hcs_params);
 
 	for (index = 0; index < nports; ++index) {
@@ -53,7 +53,7 @@
 	struct ehci_hcd		*ehci;
 	int			portnum, new_owner;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	new_owner = PORT_OWNER;		/* Owned by companion */
 	if (sscanf(buf, "%d", &portnum) != 1)
 		return -EINVAL;
@@ -83,7 +83,7 @@
 	struct ehci_hcd		*ehci;
 	int			n;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
 	return n;
 }
@@ -99,7 +99,7 @@
 	unsigned long		flags;
 	ssize_t			ret;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
 		return -EINVAL;
 
diff -ur a/drivers/usb/host/etxhci.c b/drivers/usb/host/etxhci.c
--- a/drivers/usb/host/etxhci.c	2017-03-23 14:47:40.000000000 +0100
+++ b/drivers/usb/host/etxhci.c	2017-03-14 02:23:59.000000000 +0100
@@ -1664,8 +1664,11 @@
 		goto fail;
 
 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
+	 
 	if (udev->descriptor.bDeviceProtocol == 2)
 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+	else if (udev->speed == USB_SPEED_FULL)
+		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
 
 	if (xhci->hci_version > 0x95) {
 		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(descriptor->bNbrPorts));
@@ -2774,8 +2777,12 @@
 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 	slot_ctx = etxhci_get_slot_ctx(xhci, config_cmd->in_ctx);
 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
+	 
 	if (tt->multi)
 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+	else if (hdev->speed == USB_SPEED_FULL)
+		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
+
 	if (xhci->hci_version > 0x95) {
 		xhci_dbg(xhci, "xHCI version %x needs hub "
 				"TT think time and number of ports\n",
diff -ur a/drivers/usb/host/etxhci-mem.c b/drivers/usb/host/etxhci-mem.c
--- a/drivers/usb/host/etxhci-mem.c	2017-03-23 14:47:43.000000000 +0100
+++ b/drivers/usb/host/etxhci-mem.c	2017-03-14 02:24:04.000000000 +0100
@@ -1350,8 +1350,6 @@
 		{ USB_DEVICE(0x054c, 0x05bf), .driver_info = XHCI_QUIRK_DONOT_DOWNGRADE },
 		{ USB_DEVICE(0x04c5, 0x120e), .driver_info = XHCI_QUIRK_DONOT_DOWNGRADE },
 		{ USB_DEVICE(0x8564, 0x1000), .driver_info = XHCI_QUIRK_DONOT_DOWNGRADE },
-		{ USB_DEVICE(0x0bc2, 0x2312), .driver_info = XHCI_QUIRK_DONOT_DOWNGRADE },
-		{ USB_DEVICE(0x13fe, 0x5200), .driver_info = XHCI_QUIRK_DONOT_DOWNGRADE },
 		{ USB_DEVICE(0x0471, 0x0151), .driver_info = XHCI_QUIRK_DONOT_DATA_TOGGLE_CLEANUP },
 	};
 	struct xhci_virt_device *virt_dev;
@@ -1429,10 +1427,10 @@
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
 			return -ENOMEM;
+		virt_dev->num_rings_cached--;
 		virt_dev->eps[ep_index].new_ring =
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		virt_dev->num_rings_cached--;
 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
 					1, type);
 	}
diff -ur a/drivers/usb/host/etxhci-pci.c b/drivers/usb/host/etxhci-pci.c
--- a/drivers/usb/host/etxhci-pci.c	2017-03-23 14:47:38.000000000 +0100
+++ b/drivers/usb/host/etxhci-pci.c	2017-03-14 02:23:59.000000000 +0100
@@ -31,7 +31,7 @@
 #define PCI_DEVICE_ID_ETRON_EJ168	0x7023
 #define PCI_DEVICE_ID_ETRON_EJ188	0x7052
 
-static const char hcd_name[] = "etxhci_hcd-150603d1";
+static const char hcd_name[] = "etxhci_hcd-161024";
 
 /* called after powerup, by probe or system-pm "wakeup" */
 static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
@@ -119,6 +119,7 @@
 	struct xhci_hcd *xhci;
 	struct hc_driver *driver;
 	struct usb_hcd *hcd;
+	char name[16];
 
 	if (dev->vendor != PCI_VENDOR_ID_ETRON)
 		return -ENODEV;
@@ -160,7 +161,8 @@
 		goto put_usb3_hcd;
 	/* Roothub already marked as USB 3.0 speed */
 
-	xhci->bulk_xfer_wq = create_singlethread_workqueue(pci_name(dev));
+	snprintf(name, sizeof(name), "etxhci_wq%d", hcd->self.busnum);
+	xhci->bulk_xfer_wq = create_singlethread_workqueue(name);
 	if (!xhci->bulk_xfer_wq) {
 		retval = -ENOMEM;
 		goto put_usb3_hcd;
diff -ur a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
--- a/drivers/usb/host/oxu210hp-hcd.c	2017-03-23 14:47:40.000000000 +0100
+++ b/drivers/usb/host/oxu210hp-hcd.c	2017-03-14 02:23:58.000000000 +0100
@@ -2495,11 +2495,12 @@
 					|| oxu->reset_done[i] != 0)
 				continue;
 
-			/* start 20 msec resume signaling from this port,
-			 * and make khubd collect PORT_STAT_C_SUSPEND to
+			/* start USB_RESUME_TIMEOUT resume signaling from this
+			 * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
 			 * stop that signaling.
 			 */
-			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
+			oxu->reset_done[i] = jiffies +
+				msecs_to_jiffies(USB_RESUME_TIMEOUT);
 			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
 			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
 		}
diff -ur a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
--- a/drivers/usb/host/whci/qset.c	2017-03-23 14:47:45.000000000 +0100
+++ b/drivers/usb/host/whci/qset.c	2017-03-14 02:24:07.000000000 +0100
@@ -377,6 +377,10 @@
 	if (std->pl_virt == NULL)
 		return -ENOMEM;
 	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
+		kfree(std->pl_virt);
+		return -EFAULT;
+	}
 
 	for (p = 0; p < std->num_pointers; p++) {
 		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
diff -ur a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
--- a/drivers/usb/host/xhci.c	2017-03-23 14:47:41.000000000 +0100
+++ b/drivers/usb/host/xhci.c	2017-03-14 02:24:01.000000000 +0100
@@ -88,7 +88,8 @@
 				"waited %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
 	if (!ret)
-		xhci->xhc_state &= ~XHCI_STATE_HALTED;
+		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
 	return ret;
 }
 
@@ -1060,6 +1061,11 @@
 
 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
 		size = urb->number_of_packets;
+	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
+	    urb->transfer_buffer_length > 0 &&
+	    urb->transfer_flags & URB_ZERO_PACKET &&
+	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
+		size = 2;
 	else
 		size = 1;
 
@@ -1224,7 +1230,9 @@
 	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
 		xhci_dbg(xhci, "HW died, freeing TD.\n");
 		urb_priv = urb->hcpriv;
-		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+		for (i = urb_priv->td_cnt;
+		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+		     i++) {
 			td = urb_priv->td[i];
 			if (!list_empty(&td->td_list))
 				list_del_init(&td->td_list);
@@ -2733,6 +2741,9 @@
 			return -EINVAL;
 	}
 
+	if (virt_dev->tt_info)
+		old_active_eps = virt_dev->tt_info->active_eps;
+
 	if (virt_dev->udev != udev) {
 		 
 		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
@@ -3815,8 +3826,12 @@
 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
+	 
 	if (tt->multi)
 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+	else if (hdev->speed == USB_SPEED_FULL)
+		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
+
 	if (xhci->hci_version > 0x95) {
 		xhci_dbg(xhci, "xHCI version %x needs hub "
 				"TT think time and number of ports\n",
@@ -3957,6 +3972,9 @@
 {
 	int retval;
 
+	if (usb_disabled())
+		return -ENODEV;
+
 	retval = xhci_register_pci();
 	if (retval < 0) {
 		printk(KERN_DEBUG "Problem registering PCI driver.");
@@ -3979,6 +3997,7 @@
 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
 	 
 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+
 	return 0;
 unreg_pci:
 	xhci_unregister_pci();
diff -ur a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
--- a/drivers/usb/host/xhci.h	2017-03-23 14:47:45.000000000 +0100
+++ b/drivers/usb/host/xhci.h	2017-03-14 02:24:06.000000000 +0100
@@ -165,6 +165,7 @@
 #define XDEV_U0		(0x0 << 5)
 #define XDEV_U2		(0x2 << 5)
 #define XDEV_U3		(0x3 << 5)
+#define XDEV_INACTIVE	(0x6 << 5)
 #define XDEV_RESUME	(0xf << 5)
  
 #define PORT_POWER	(1 << 9)
@@ -837,7 +838,7 @@
 #define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
 #define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)
 
-#define TRBS_PER_SEGMENT	64
+#define TRBS_PER_SEGMENT	256
  
 #define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
 #define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT*16)
diff -ur a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
--- a/drivers/usb/host/xhci-hub.c	2017-03-23 14:47:43.000000000 +0100
+++ b/drivers/usb/host/xhci-hub.c	2017-03-14 02:24:05.000000000 +0100
@@ -390,8 +390,10 @@
 {
 	u32 pls = status_reg & PORT_PLS_MASK;
 
-	if (pls == XDEV_RESUME)
+	if (pls == XDEV_RESUME) {
+		*status |= USB_SS_PORT_LS_U3;
 		return;
+	}
 
 	if (status_reg & PORT_CAS) {
 		 
@@ -947,10 +949,10 @@
 	spin_lock_irqsave(&xhci->lock, flags);
 
 	if (hcd->self.root_hub->do_remote_wakeup) {
-		if (bus_state->resuming_ports) {
+		if (bus_state->resuming_ports ||	 
+		    bus_state->port_remote_wakeup) {	 
 			spin_unlock_irqrestore(&xhci->lock, flags);
-			xhci_dbg(xhci, "suspend failed because "
-						"a port is resuming\n");
+			xhci_dbg(xhci, "suspend failed because a port is resuming\n");
 			return -EBUSY;
 		}
 	}
diff -ur a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
--- a/drivers/usb/host/xhci-mem.c	2017-03-23 14:47:40.000000000 +0100
+++ b/drivers/usb/host/xhci-mem.c	2017-03-14 02:23:59.000000000 +0100
@@ -1396,10 +1396,10 @@
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
 			return -ENOMEM;
+		virt_dev->num_rings_cached--;
 		virt_dev->eps[ep_index].new_ring =
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		virt_dev->num_rings_cached--;
 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
 					1, type);
 	}
@@ -1469,10 +1469,10 @@
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
 	 *
-	 * xHCI 1.0 specification indicates that the Average TRB Length should
-	 * be set to 8 for control endpoints.
+	 * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+	 * should be set to 8 for control endpoints.
 	 */
-	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
 		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
 	else
 		ep_ctx->tx_info |=
@@ -1857,6 +1857,11 @@
 	kfree(xhci->port_array);
 	kfree(xhci->rh_bw);
 
+	xhci->usb2_ports = NULL;
+	xhci->usb3_ports = NULL;
+	xhci->port_array = NULL;
+	xhci->rh_bw = NULL;
+
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
 	xhci->bus_state[0].bus_suspended = 0;
diff -ur a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
--- a/drivers/usb/host/xhci-ring.c	2017-03-23 14:47:39.000000000 +0100
+++ b/drivers/usb/host/xhci-ring.c	2017-03-14 02:23:58.000000000 +0100
@@ -19,7 +19,7 @@
 		return 0;
 	 
 	segment_offset = trb - seg->trbs;
-	if (segment_offset > TRBS_PER_SEGMENT)
+	if (segment_offset >= TRBS_PER_SEGMENT)
 		return 0;
 	return seg->dma + (segment_offset * sizeof(*trb));
 }
@@ -1283,6 +1283,9 @@
 		usb_hcd_resume_root_hub(hcd);
 	}
 
+	if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
 	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
 		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
 
@@ -1688,8 +1691,13 @@
 		break;
 	case COMP_DEV_ERR:
 	case COMP_STALL:
+		frame->status = -EPROTO;
+		skip_td = true;
+		break;
 	case COMP_TX_ERR:
 		frame->status = -EPROTO;
+		if (event_trb != td->last_trb)
+			return 0;
 		skip_td = true;
 		break;
 	case COMP_STOP:
@@ -1867,6 +1875,7 @@
 	u32 trb_comp_code;
 	int ret = 0;
 	int td_num = 0;
+	bool handling_skipped_tds = false;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	xdev = xhci->devs[slot_id];
@@ -1987,6 +1996,10 @@
 		ep->skip = true;
 		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
 		goto cleanup;
+	case COMP_PING_ERR:
+		ep->skip = true;
+		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
+		goto cleanup;
 	default:
 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
 			status = 0;
@@ -2091,10 +2104,13 @@
 						 ep, &status);
 
 cleanup:
-		 
-		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+
+		handling_skipped_tds = ep->skip &&
+			trb_comp_code != COMP_MISSED_INT &&
+			trb_comp_code != COMP_PING_ERR;
+
+		if (!handling_skipped_tds)
 			inc_deq(xhci, xhci->event_ring);
-		}
 
 		if (ret) {
 			urb = td->urb;
@@ -2126,7 +2142,7 @@
 			spin_lock(&xhci->lock);
 		}
 
-	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);
+	} while (handling_skipped_tds);
 
 	return 0;
 }
@@ -2214,7 +2230,7 @@
 		xhci_halt(xhci);
 hw_died:
 		spin_unlock(&xhci->lock);
-		return -ESHUTDOWN;
+		return IRQ_HANDLED;
 	}
 
 	status |= STS_EINT;
@@ -2538,9 +2554,11 @@
 	struct xhci_td *td;
 	struct scatterlist *sg;
 	int num_sgs;
-	int trb_buff_len, this_sg_len, running_total;
+	int trb_buff_len, this_sg_len, running_total, ret;
 	unsigned int total_packet_count;
+	bool zero_length_needed;
 	bool first_trb;
+	int last_trb_num;
 	u64 addr;
 	bool more_trbs_coming;
 
@@ -2556,13 +2574,26 @@
 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
 			usb_endpoint_maxp(&urb->ep->desc));
 
-	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
 			num_trbs, urb, 0, mem_flags);
-	if (trb_buff_len < 0)
-		return trb_buff_len;
+	if (ret < 0)
+		return ret;
 
 	urb_priv = urb->hcpriv;
+
+	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+		urb_priv->length == 2;
+	if (zero_length_needed) {
+		num_trbs++;
+		xhci_dbg(xhci, "Creating zero length td.\n");
+		ret = prepare_transfer(xhci, xhci->devs[slot_id],
+				ep_index, urb->stream_id,
+				1, urb, 1, mem_flags);
+		if (ret < 0)
+			return ret;
+	}
+
 	td = urb_priv->td[0];
 
 	start_trb = &ep_ring->enqueue->generic;
@@ -2579,6 +2610,7 @@
 		trb_buff_len = urb->transfer_buffer_length;
 
 	first_trb = true;
+	last_trb_num = zero_length_needed ? 2 : 1;
 	 
 	do {
 		u32 field = 0;
@@ -2592,12 +2624,15 @@
 		} else
 			field |= ep_ring->cycle_state;
 
-		if (num_trbs > 1) {
+		if (num_trbs > last_trb_num) {
 			field |= TRB_CHAIN;
-		} else {
-			 
+		} else if (num_trbs == last_trb_num) {
 			td->last_trb = ep_ring->enqueue;
 			field |= TRB_IOC;
+		} else if (zero_length_needed && num_trbs == 1) {
+			trb_buff_len = 0;
+			urb_priv->td[1]->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
 		}
 
 		if (usb_urb_dir_in(urb))
@@ -2654,7 +2689,7 @@
 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
 			trb_buff_len =
 				urb->transfer_buffer_length - running_total;
-	} while (running_total < urb->transfer_buffer_length);
+	} while (num_trbs > 0);
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -2671,7 +2706,9 @@
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
 	bool first_trb;
+	int last_trb_num;
 	bool more_trbs_coming;
+	bool zero_length_needed;
 	int start_cycle;
 	u32 field, length_field;
 
@@ -2699,7 +2736,7 @@
 		num_trbs++;
 		running_total += TRB_MAX_BUFF_SIZE;
 	}
-	 
+
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
 			num_trbs, urb, 0, mem_flags);
@@ -2707,6 +2744,19 @@
 		return ret;
 
 	urb_priv = urb->hcpriv;
+
+	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+		urb_priv->length == 2;
+	if (zero_length_needed) {
+		num_trbs++;
+		xhci_dbg(xhci, "Creating zero length td.\n");
+		ret = prepare_transfer(xhci, xhci->devs[slot_id],
+				ep_index, urb->stream_id,
+				1, urb, 1, mem_flags);
+		if (ret < 0)
+			return ret;
+	}
+
 	td = urb_priv->td[0];
 
 	start_trb = &ep_ring->enqueue->generic;
@@ -2723,7 +2773,8 @@
 		trb_buff_len = urb->transfer_buffer_length;
 
 	first_trb = true;
-
+	last_trb_num = zero_length_needed ? 2 : 1;
+	 
 	do {
 		u32 remainder = 0;
 		field = 0;
@@ -2735,12 +2786,15 @@
 		} else
 			field |= ep_ring->cycle_state;
 
-		if (num_trbs > 1) {
+		if (num_trbs > last_trb_num) {
 			field |= TRB_CHAIN;
-		} else {
-			 
+		} else if (num_trbs == last_trb_num) {
 			td->last_trb = ep_ring->enqueue;
 			field |= TRB_IOC;
+		} else if (zero_length_needed && num_trbs == 1) {
+			trb_buff_len = 0;
+			urb_priv->td[1]->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
 		}
 
 		if (usb_urb_dir_in(urb))
@@ -2775,7 +2829,7 @@
 		trb_buff_len = urb->transfer_buffer_length - running_total;
 		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
 			trb_buff_len = TRB_MAX_BUFF_SIZE;
-	} while (running_total < urb->transfer_buffer_length);
+	} while (num_trbs > 0);
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -2825,7 +2879,7 @@
 	if (start_cycle == 0)
 		field |= 0x1;
 
-	if (xhci->hci_version == 0x100) {
+	if (xhci->hci_version >= 0x100) {
 		if (urb->transfer_buffer_length > 0) {
 			if (setup->bRequestType & USB_DIR_IN)
 				field |= TRB_TX_TYPE(TRB_DATA_IN);
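The xhci.c and xhci-ring.c changes above reserve a second TD and queue a trailing zero-length TRB for bulk OUT URBs that set URB_ZERO_PACKET and whose length is a non-zero multiple of the endpoint's max packet size. The condition is small enough to state on its own; the sketch below is illustrative, with the URB_ZERO_PACKET value mirroring include/linux/usb.h.

/* Illustrative restatement of the zero-length-packet condition. */
#include <stdbool.h>
#include <stdio.h>

#define URB_ZERO_PACKET	0x0040	/* assumed to match include/linux/usb.h */

static bool needs_trailing_zlp(unsigned int len, unsigned int maxp,
			       unsigned int transfer_flags)
{
	return len > 0 &&
	       (transfer_flags & URB_ZERO_PACKET) &&
	       (len % maxp) == 0;
}

int main(void)
{
	/* 1024 bytes on a 512-byte bulk endpoint, URB_ZERO_PACKET set */
	printf("zlp needed: %d\n", needs_trailing_zlp(1024, 512, URB_ZERO_PACKET));
	/* 1000 bytes is not a maxp multiple, so no extra packet */
	printf("zlp needed: %d\n", needs_trailing_zlp(1000, 512, URB_ZERO_PACKET));
	return 0;
}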
diff -ur a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
--- a/drivers/usb/misc/iowarrior.c	2017-03-23 14:47:10.000000000 +0100
+++ b/drivers/usb/misc/iowarrior.c	2017-03-14 02:23:23.000000000 +0100
@@ -792,6 +792,12 @@
 	iface_desc = interface->cur_altsetting;
 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
+	if (iface_desc->desc.bNumEndpoints < 1) {
+		dev_err(&interface->dev, "Invalid number of endpoints\n");
+		retval = -EINVAL;
+		goto error;
+	}
+
 	/* set up the endpoint information */
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
 		endpoint = &iface_desc->endpoint[i].desc;
diff -ur a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
--- a/drivers/usb/musb/musb_core.c	2017-03-23 14:47:50.000000000 +0100
+++ b/drivers/usb/musb/musb_core.c	2017-03-14 02:24:13.000000000 +0100
@@ -131,7 +131,7 @@
 /*-------------------------------------------------------------------------*/
 
 #ifndef CONFIG_BLACKFIN
-static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
+static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
 {
 	void __iomem *addr = phy->io_priv;
 	int	i = 0;
@@ -150,7 +150,7 @@
 	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
 	 */
 
-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
 			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
 
@@ -175,7 +175,7 @@
 	return ret;
 }
 
-static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
+static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
 {
 	void __iomem *addr = phy->io_priv;
 	int	i = 0;
@@ -190,8 +190,8 @@
 	power &= ~MUSB_POWER_SUSPENDM;
 	musb_writeb(addr, MUSB_POWER, power);
 
-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
-	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
+	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
 
 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
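The musb_ulpi_read()/musb_ulpi_write() renames follow the usb_phy_io_ops convention, which, as far as I can tell from include/linux/usb/phy.h, declares the write callback as write(phy, val, reg) with the value first. With the old parameter names the write path programmed the value into the ULPI address latch and the register number into the data latch. A tiny sketch of the convention the callback table imposes (toy types, not the kernel's):

/* Toy callback table; the point is the (value, register) parameter order. */
#include <stdio.h>

struct phy_io_ops {
        /* value first, register second -- mirrors the kernel convention */
        int (*write)(void *priv, unsigned int val, unsigned int reg);
};

static int ulpi_write(void *priv, unsigned int val, unsigned int reg)
{
        printf("reg 0x%02x <- 0x%02x\n", reg, val);
        return 0;
}

int main(void)
{
        struct phy_io_ops ops = { .write = ulpi_write };

        ops.write(NULL, 0x55, 0x07);    /* writes 0x55 into register 7 */
        return 0;
}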
diff -ur a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
--- a/drivers/usb/renesas_usbhs/fifo.c	2017-03-23 14:47:22.000000000 +0100
+++ b/drivers/usb/renesas_usbhs/fifo.c	2017-03-14 02:23:41.000000000 +0100
@@ -166,7 +166,8 @@
 		goto __usbhs_pkt_handler_end;
 	}
 
-	ret = func(pkt, &is_done);
+	if (likely(func))
+		ret = func(pkt, &is_done);
 
 	if (is_done)
 		__usbhsf_pkt_del(pkt);
@@ -933,6 +934,7 @@
 
 	pkt->trans = len;
 
+	usbhsf_tx_irq_ctrl(pipe, 0);
 	INIT_WORK(&pkt->work, xfer_work);
 	schedule_work(&pkt->work);
 
diff -ur a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
--- a/drivers/usb/serial/cp210x.c	2017-03-23 14:47:19.000000000 +0100
+++ b/drivers/usb/serial/cp210x.c	2017-03-14 02:23:37.000000000 +0100
@@ -98,6 +98,7 @@
 	{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
 	{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
 	{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+	{ USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
 	{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
 	{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
 	{ USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
@@ -107,6 +108,7 @@
 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -116,6 +118,7 @@
 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -127,10 +130,11 @@
 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
+	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
-	{ USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
 	{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
 	{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
@@ -138,6 +142,8 @@
 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
@@ -159,6 +165,11 @@
 	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
@@ -185,6 +196,7 @@
 	{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
 	{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
 	{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+	{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
diff -ur a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
--- a/drivers/usb/serial/cypress_m8.c	2017-03-23 14:47:19.000000000 +0100
+++ b/drivers/usb/serial/cypress_m8.c	2017-03-14 02:23:37.000000000 +0100
@@ -443,6 +443,11 @@
 	struct usb_serial *serial = port->serial;
 	struct cypress_private *priv;
 
+	if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+		dev_err(&port->dev, "required endpoint is missing\n");
+		return -ENODEV;
+	}
+
 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
@@ -599,12 +604,6 @@
 		cypress_set_termios(tty, port, &priv->tmp_termios);
 
 	/* setup the port and start reading from the device */
-	if (!port->interrupt_in_urb) {
-		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
-			__func__);
-		return -1;
-	}
-
 	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
 		usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
 		port->interrupt_in_urb->transfer_buffer,
diff -ur a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
--- a/drivers/usb/serial/digi_acceleport.c	2017-03-23 14:47:16.000000000 +0100
+++ b/drivers/usb/serial/digi_acceleport.c	2017-03-14 02:23:33.000000000 +0100
@@ -1236,8 +1236,27 @@
 
 static int digi_startup(struct usb_serial *serial)
 {
+	struct device *dev = &serial->interface->dev;
 	struct digi_serial *serial_priv;
 	int ret;
+	int i;
+
+	/* check whether the device has the expected number of endpoints */
+	if (serial->num_port_pointers < serial->type->num_ports + 1) {
+		dev_err(dev, "OOB endpoints missing\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < serial->type->num_ports + 1 ; i++) {
+		if (!serial->port[i]->read_urb) {
+			dev_err(dev, "bulk-in endpoint missing\n");
+			return -ENODEV;
+		}
+		if (!serial->port[i]->write_urb) {
+			dev_err(dev, "bulk-out endpoint missing\n");
+			return -ENODEV;
+		}
+	}
 
 	serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
 	if (!serial_priv)
diff -ur a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
--- a/drivers/usb/serial/ftdi_sio.c	2017-03-23 14:47:20.000000000 +0100
+++ b/drivers/usb/serial/ftdi_sio.c	2017-03-14 02:23:36.000000000 +0100
@@ -619,6 +619,10 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
 	/*
 	 * ELV devices:
 	 */
@@ -713,6 +717,7 @@
 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+	{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
 	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
@@ -833,6 +838,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
+	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
 	{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
 
 	/* Papouch devices based on FTDI chip */
diff -ur a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
--- a/drivers/usb/serial/ftdi_sio_ids.h	2017-03-23 14:47:23.000000000 +0100
+++ b/drivers/usb/serial/ftdi_sio_ids.h	2017-03-14 02:23:41.000000000 +0100
@@ -151,6 +151,7 @@
 #define XSENS_AWINDA_STATION_PID 0x0101
 #define XSENS_AWINDA_DONGLE_PID 0x0102
 #define XSENS_MTW_PID		0x0200	/* Xsens MTw */
+#define XSENS_MTDEVBOARD_PID	0x0300	/* Motion Tracker Development Board */
 #define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */
 
 /* Xsens devices using FTDI VID */
@@ -563,6 +564,14 @@
  */
 #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
 
+/*
+ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
+ */
+#define FTDI_CUSTOMWARE_MINIPLEX_PID	0xfd48	/* MiniPlex first generation NMEA Multiplexer */
+#define FTDI_CUSTOMWARE_MINIPLEX2_PID	0xfd49	/* MiniPlex-USB and MiniPlex-2 series */
+#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID	0xfd4a	/* MiniPlex-2Wi */
+#define FTDI_CUSTOMWARE_MINIPLEX3_PID	0xfd4b	/* MiniPlex-3 series */
+
 /********************************/
 /** third-party VID/PID combos **/
 /********************************/
@@ -599,6 +608,7 @@
  */
 #define RATOC_VENDOR_ID		0x0584
 #define RATOC_PRODUCT_ID_USB60F	0xb020
+#define RATOC_PRODUCT_ID_SCU18	0xb03a
 
 /*
  * Infineon Technologies
diff -ur a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
--- a/drivers/usb/serial/io_edgeport.c	2017-03-23 14:47:21.000000000 +0100
+++ b/drivers/usb/serial/io_edgeport.c	2017-03-14 02:23:38.000000000 +0100
@@ -2944,16 +2944,9 @@
 {
 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
 
-	/* stop reads and writes on all ports */
-	/* free up our endpoint stuff */
 	if (edge_serial->is_epic) {
 		usb_kill_urb(edge_serial->interrupt_read_urb);
-		usb_free_urb(edge_serial->interrupt_read_urb);
-		kfree(edge_serial->interrupt_in_buffer);
-
 		usb_kill_urb(edge_serial->read_urb);
-		usb_free_urb(edge_serial->read_urb);
-		kfree(edge_serial->bulk_in_buffer);
 	}
 }
 
@@ -2965,6 +2958,16 @@
 {
 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
 
+	if (edge_serial->is_epic) {
+		usb_kill_urb(edge_serial->interrupt_read_urb);
+		usb_free_urb(edge_serial->interrupt_read_urb);
+		kfree(edge_serial->interrupt_in_buffer);
+
+		usb_kill_urb(edge_serial->read_urb);
+		usb_free_urb(edge_serial->read_urb);
+		kfree(edge_serial->bulk_in_buffer);
+	}
+
 	kfree(edge_serial);
 }
 
diff -ur a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
--- a/drivers/usb/serial/ipaq.c	2017-03-23 14:47:18.000000000 +0100
+++ b/drivers/usb/serial/ipaq.c	2017-03-14 02:23:35.000000000 +0100
@@ -531,7 +531,8 @@
 	 * through. Since this has a reasonably high failure rate, we retry
 	 * several times.
 	 */
-	while (retries--) {
+	while (retries) {
+		retries--;
 		result = usb_control_msg(serial->dev,
 				usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
 				0x1, 0, NULL, 0, 100);
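The ipaq loop rewrite above matters because of what the counter holds after the loop: with while (retries--) the counter ends at -1 when every attempt fails, so a follow-up test of the form if (!retries && result), which is how the driver reports the error just after this hunk as far as I can tell, never fires. Decrementing inside the body leaves the counter at 0 on exhaustion. A minimal demonstration:

#include <stdio.h>

int main(void)
{
        int retries;

        retries = 3;
        while (retries--)
                ;               /* pretend every attempt failed */
        printf("post-decrement form:     retries == %d\n", retries); /* -1 */

        retries = 3;
        while (retries) {
                retries--;      /* pretend every attempt failed */
        }
        printf("decrement-in-body form:  retries == %d\n", retries); /* 0 */
        return 0;
}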
diff -ur a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
--- a/drivers/usb/serial/keyspan.c	2017-03-23 14:47:18.000000000 +0100
+++ b/drivers/usb/serial/keyspan.c	2017-03-14 02:23:34.000000000 +0100
@@ -2382,6 +2382,10 @@
 
 	s_priv = usb_get_serial_data(serial);
 
+	/* Make sure to unlink the URBs submitted in attach. */
+	usb_kill_urb(s_priv->instat_urb);
+	usb_kill_urb(s_priv->indat_urb);
+
 	usb_free_urb(s_priv->instat_urb);
 	usb_free_urb(s_priv->indat_urb);
 	usb_free_urb(s_priv->glocont_urb);
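The keyspan change (and the identical one in quatech2.c further down) follows the usual teardown rule: usb_free_urb() only drops a reference and does not cancel an in-flight transfer, so the URB must be killed first or its completion handler can run against memory that release() is about to free. A userspace analog of that ordering, using a worker thread as a stand-in for the completion handler (pthreads purely for illustration):

/* Cancel-and-wait before freeing shared state; not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct ctx { int stop; int counter; };

static void *worker(void *arg)
{
        struct ctx *c = arg;

        while (!__atomic_load_n(&c->stop, __ATOMIC_ACQUIRE)) {
                c->counter++;           /* "completion handler" touching ctx */
                usleep(1000);
        }
        return NULL;
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));
        pthread_t t;

        pthread_create(&t, NULL, worker, c);
        usleep(10000);

        /* "usb_kill_urb": stop and wait for the in-flight work ... */
        __atomic_store_n(&c->stop, 1, __ATOMIC_RELEASE);
        pthread_join(t, NULL);

        /* ... only then free the state it was using */
        printf("worker ran %d times\n", c->counter);
        free(c);
        return 0;
}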
diff -ur a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
--- a/drivers/usb/serial/mct_u232.c	2017-03-23 14:47:18.000000000 +0100
+++ b/drivers/usb/serial/mct_u232.c	2017-03-14 02:23:35.000000000 +0100
@@ -376,14 +376,21 @@
 
 static int mct_u232_port_probe(struct usb_serial_port *port)
 {
+	struct usb_serial *serial = port->serial;
 	struct mct_u232_private *priv;
 
+	/* check first to simplify error handling */
+	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+		dev_err(&port->dev, "expected endpoint missing\n");
+		return -ENODEV;
+	}
+
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
 	/* Use second interrupt-in endpoint for reading. */
-	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+	priv->read_urb = serial->port[1]->interrupt_in_urb;
 	priv->read_urb->context = port;
 
 	spin_lock_init(&priv->lock);
diff -ur a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
--- a/drivers/usb/serial/option.c	2017-03-23 14:47:19.000000000 +0100
+++ b/drivers/usb/serial/option.c	2017-03-14 02:23:36.000000000 +0100
@@ -162,6 +162,7 @@
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
 #define NOVATELWIRELESS_PRODUCT_E371		0x9011
+#define NOVATELWIRELESS_PRODUCT_U620L		0x9022
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
@@ -268,6 +269,9 @@
 #define TELIT_PRODUCT_CC864_SINGLE		0x1006
 #define TELIT_PRODUCT_DE910_DUAL		0x1010
 #define TELIT_PRODUCT_UE910_V2			0x1012
+#define TELIT_PRODUCT_LE922_USBCFG0		0x1042
+#define TELIT_PRODUCT_LE922_USBCFG3		0x1043
+#define TELIT_PRODUCT_LE922_USBCFG5		0x1045
 #define TELIT_PRODUCT_LE920			0x1200
 #define TELIT_PRODUCT_LE910			0x1201
 
@@ -276,6 +280,10 @@
 #define ZTE_PRODUCT_MF622			0x0001
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
+#define ZTE_PRODUCT_ZM8620_X			0x0396
+#define ZTE_PRODUCT_ME3620_MBIM			0x0426
+#define ZTE_PRODUCT_ME3620_X			0x1432
+#define ZTE_PRODUCT_ME3620_L			0x1433
 #define ZTE_PRODUCT_AC2726			0xfff1
 #define ZTE_PRODUCT_CDMA_TECH			0xfffe
 #define ZTE_PRODUCT_AC8710T			0xffff
@@ -308,6 +316,7 @@
 #define TOSHIBA_PRODUCT_G450			0x0d45
 
 #define ALINK_VENDOR_ID				0x1e0e
+#define SIMCOM_PRODUCT_SIM7100E			0x9001 /* Yes, ALINK_VENDOR_ID */
 #define ALINK_PRODUCT_PH300			0x9100
 #define ALINK_PRODUCT_3GU			0x9200
 
@@ -350,6 +359,7 @@
 /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
 #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
+#define FOUR_G_SYSTEMS_PRODUCT_W100		0x9b01
 
 /* iBall 3.5G connect wireless modem */
 #define IBALL_3_5G_CONNECT			0x9605
@@ -365,18 +375,22 @@
 #define HAIER_PRODUCT_CE81B			0x10f8
 #define HAIER_PRODUCT_CE100			0x2009
 
-/* Cinterion (formerly Siemens) products */
-#define SIEMENS_VENDOR_ID				0x0681
-#define CINTERION_VENDOR_ID				0x1e2d
+/* Gemalto's Cinterion products (formerly Siemens) */
+#define SIEMENS_VENDOR_ID			0x0681
+#define CINTERION_VENDOR_ID			0x1e2d
+#define CINTERION_PRODUCT_HC25_MDMNET		0x0040
 #define CINTERION_PRODUCT_HC25_MDM		0x0047
-#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
+#define CINTERION_PRODUCT_HC28_MDMNET		0x004A /* same for HC28J */
 #define CINTERION_PRODUCT_HC28_MDM		0x004C
-#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
 #define CINTERION_PRODUCT_EU3_E			0x0051
 #define CINTERION_PRODUCT_EU3_P			0x0052
 #define CINTERION_PRODUCT_PH8			0x0053
 #define CINTERION_PRODUCT_AHXX			0x0055
 #define CINTERION_PRODUCT_PLXX			0x0060
+#define CINTERION_PRODUCT_PH8_2RMNET		0x0082
+#define CINTERION_PRODUCT_PH8_AUDIO		0x0083
+#define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
+#define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID			0x0b3c
@@ -523,6 +537,11 @@
 	.sendsetup = BIT(0) | BIT(1),
 };
 
+static const struct option_blacklist_info four_g_w100_blacklist = {
+	.sendsetup = BIT(1) | BIT(2),
+	.reserved = BIT(3),
+};
+
 static const struct option_blacklist_info alcatel_x200_blacklist = {
 	.sendsetup = BIT(0) | BIT(1),
 	.reserved = BIT(4),
@@ -549,6 +568,18 @@
 	.sendsetup = BIT(1) | BIT(2) | BIT(3),
 };
 
+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+	.reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
 	.reserved = BIT(1) | BIT(2),
 };
@@ -590,6 +621,10 @@
 	.reserved = BIT(3) | BIT(4),
 };
 
+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+	.reserved = BIT(5) | BIT(6),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
 	.sendsetup = BIT(0),
 	.reserved = BIT(1) | BIT(2),
@@ -600,6 +635,20 @@
 	.reserved = BIT(1) | BIT(5),
 };
 
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+	.sendsetup = BIT(2),
+	.reserved = BIT(0) | BIT(1) | BIT(3),
+};
+
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+	.sendsetup = BIT(0),
+	.reserved = BIT(1) | BIT(2) | BIT(3),
+};
+
+static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
+	.reserved = BIT(4) | BIT(5),
+};
+
 static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1043,6 +1092,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
 
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1093,9 +1143,13 @@
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1143,6 +1197,12 @@
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1578,6 +1638,14 @@
 	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
 	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1596,6 +1664,8 @@
 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
 	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
 	},
@@ -1616,6 +1686,9 @@
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
   	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
   	},
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+	},
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
@@ -1643,10 +1716,16 @@
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
+		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
+		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
@@ -1752,8 +1831,11 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
 	{ } /* Terminating entry */
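The option_blacklist_info entries added above are per-interface bitmasks: BIT(n) names interface n of the composite modem, .reserved interfaces are left unclaimed so another driver (typically the wwan/network function) can bind them, and .sendsetup interfaces get the extra modem-control setup message. A sketch of that convention with illustrative helper names, not the driver's own:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

struct serial_quirks {
        unsigned long sendsetup;   /* interfaces needing a setup message   */
        unsigned long reserved;    /* interfaces this driver must not claim */
};

/* mirrors the simcom_sim7100e_blacklist entry above */
static const struct serial_quirks sim7100e_quirks = {
        .reserved = BIT(5) | BIT(6),
};

static bool interface_reserved(const struct serial_quirks *q, unsigned int ifnum)
{
        return q && (q->reserved & BIT(ifnum));
}

int main(void)
{
        printf("if 4 reserved? %d\n", interface_reserved(&sim7100e_quirks, 4)); /* 0 */
        printf("if 5 reserved? %d\n", interface_reserved(&sim7100e_quirks, 5)); /* 1 */
        return 0;
}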
diff -ur a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
--- a/drivers/usb/serial/pl2303.c	2017-03-23 14:47:18.000000000 +0100
+++ b/drivers/usb/serial/pl2303.c	2017-03-14 02:23:36.000000000 +0100
@@ -63,7 +63,6 @@
 	{ USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
 	{ USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
-	{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) },
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) },
 	{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) },
diff -ur a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
--- a/drivers/usb/serial/pl2303.h	2017-03-23 14:47:21.000000000 +0100
+++ b/drivers/usb/serial/pl2303.h	2017-03-14 02:23:39.000000000 +0100
@@ -62,10 +62,6 @@
 #define ALCATEL_VENDOR_ID	0x11f7
 #define ALCATEL_PRODUCT_ID	0x02df
 
-/* Samsung I330 phone cradle */
-#define SAMSUNG_VENDOR_ID	0x04e8
-#define SAMSUNG_PRODUCT_ID	0x8001
-
 #define SIEMENS_VENDOR_ID	0x11f5
 #define SIEMENS_PRODUCT_ID_SX1	0x0001
 #define SIEMENS_PRODUCT_ID_X65	0x0003
diff -ur a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
--- a/drivers/usb/serial/quatech2.c	2017-03-23 14:47:20.000000000 +0100
+++ b/drivers/usb/serial/quatech2.c	2017-03-14 02:23:37.000000000 +0100
@@ -141,6 +141,7 @@
 
 	serial_priv = usb_get_serial_data(serial);
 
+	usb_kill_urb(serial_priv->read_urb);
 	usb_free_urb(serial_priv->read_urb);
 	kfree(serial_priv);
 }
diff -ur a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
--- a/drivers/usb/serial/sierra.c	2017-03-23 14:47:20.000000000 +0100
+++ b/drivers/usb/serial/sierra.c	2017-03-14 02:23:38.000000000 +0100
@@ -289,6 +289,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
 	/* AT&T Direct IP LTE modems */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff -ur a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
--- a/drivers/usb/serial/symbolserial.c	2017-03-23 14:47:16.000000000 +0100
+++ b/drivers/usb/serial/symbolserial.c	2017-03-14 02:23:33.000000000 +0100
@@ -97,7 +97,7 @@
 
 static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct symbol_private *priv = usb_get_serial_data(port->serial);
+	struct symbol_private *priv = usb_get_serial_port_data(port);
 	unsigned long flags;
 	int result = 0;
 
@@ -123,7 +123,7 @@
 static void symbol_throttle(struct tty_struct *tty)
 {
 	struct usb_serial_port *port = tty->driver_data;
-	struct symbol_private *priv = usb_get_serial_data(port->serial);
+	struct symbol_private *priv = usb_get_serial_port_data(port);
 
 	spin_lock_irq(&priv->lock);
 	priv->throttled = true;
@@ -133,7 +133,7 @@
 static void symbol_unthrottle(struct tty_struct *tty)
 {
 	struct usb_serial_port *port = tty->driver_data;
-	struct symbol_private *priv = usb_get_serial_data(port->serial);
+	struct symbol_private *priv = usb_get_serial_port_data(port);
 	int result;
 	bool was_throttled;
 
diff -ur a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
--- a/drivers/usb/serial/ti_usb_3410_5052.c	2017-03-23 14:47:18.000000000 +0100
+++ b/drivers/usb/serial/ti_usb_3410_5052.c	2017-03-14 02:23:35.000000000 +0100
@@ -155,7 +155,7 @@
 /* the array dimension is the number of default entries plus */
 /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
 /* null entry */
-static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[16+TI_EXTRA_VID_PID_COUNT+1] = {
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
 	{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -181,7 +181,7 @@
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
 };
 
-static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[20+2*TI_EXTRA_VID_PID_COUNT+1] = {
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
 	{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
diff -ur a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
--- a/drivers/usb/serial/visor.c	2017-03-23 14:47:16.000000000 +0100
+++ b/drivers/usb/serial/visor.c	2017-03-14 02:23:33.000000000 +0100
@@ -96,7 +96,7 @@
 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
 	{ USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
-	{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
+	{ USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
 	{ USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
 		.driver_info = (kernel_ulong_t)&palm_os_4_probe },
@@ -549,6 +549,11 @@
 		(serial->num_interrupt_in == 0))
 		return 0;
 
+	if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
 	/*
 	* It appears that Treos and Kyoceras want to use the
 	* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
@@ -602,8 +607,10 @@
 	 */
 
 	/* some sanity check */
-	if (serial->num_ports < 2)
-		return -1;
+	if (serial->num_bulk_out < 2) {
+		dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
+		return -ENODEV;
+	}
 
 	/* port 0 now uses the modified endpoint Address */
 	port = serial->port[0];
diff -ur a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
--- a/drivers/usb/serial/whiteheat.c	2017-03-23 14:47:19.000000000 +0100
+++ b/drivers/usb/serial/whiteheat.c	2017-03-14 02:23:36.000000000 +0100
@@ -80,6 +80,8 @@
 static int  whiteheat_firmware_attach(struct usb_serial *serial);
 
 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id);
 static int  whiteheat_attach(struct usb_serial *serial);
 static void whiteheat_release(struct usb_serial *serial);
 static int  whiteheat_port_probe(struct usb_serial_port *port);
@@ -116,6 +118,7 @@
 	.description =		"Connect Tech - WhiteHEAT",
 	.id_table =		id_table_std,
 	.num_ports =		4,
+	.probe =		whiteheat_probe,
 	.attach =		whiteheat_attach,
 	.release =		whiteheat_release,
 	.port_probe =		whiteheat_port_probe,
@@ -212,6 +215,34 @@
 /*****************************************************************************
  * Connect Tech's White Heat serial driver functions
  *****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id)
+{
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	size_t num_bulk_in = 0;
+	size_t num_bulk_out = 0;
+	size_t min_num_bulk;
+	unsigned int i;
+
+	iface_desc = serial->interface->cur_altsetting;
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+		endpoint = &iface_desc->endpoint[i].desc;
+		if (usb_endpoint_is_bulk_in(endpoint))
+			++num_bulk_in;
+		if (usb_endpoint_is_bulk_out(endpoint))
+			++num_bulk_out;
+	}
+
+	min_num_bulk = COMMAND_PORT + 1;
+	if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+		return -ENODEV;
+
+	return 0;
+}
+
 static int whiteheat_attach(struct usb_serial *serial)
 {
 	struct usb_serial_port *command_port;
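whiteheat_probe() above counts bulk-in and bulk-out endpoints on the current altsetting and refuses to bind unless there are at least COMMAND_PORT + 1 of each (COMMAND_PORT is 4 in this driver, so five of each), because attach() later indexes serial->port[COMMAND_PORT] and submits URBs on it. A stand-alone version of the counting, decoding the descriptor fields directly instead of the usb_endpoint_is_bulk_*() helpers:

/* Self-contained sketch; field names match the USB spec, types are toys. */
#include <stdio.h>

#define USB_DIR_IN                 0x80
#define USB_ENDPOINT_XFER_BULK     0x02
#define USB_ENDPOINT_XFERTYPE_MASK 0x03

struct ep_desc {
        unsigned char bEndpointAddress;
        unsigned char bmAttributes;
};

static int enough_bulk_endpoints(const struct ep_desc *eps, unsigned int n,
                                 unsigned int min_each)
{
        unsigned int in = 0, out = 0, i;

        for (i = 0; i < n; i++) {
                if ((eps[i].bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
                    USB_ENDPOINT_XFER_BULK)
                        continue;
                if (eps[i].bEndpointAddress & USB_DIR_IN)
                        in++;
                else
                        out++;
        }
        return in >= min_each && out >= min_each;
}

int main(void)
{
        /* five bulk IN and five bulk OUT pairs -- the minimum the probe accepts */
        struct ep_desc eps[10];
        unsigned int i;

        for (i = 0; i < 5; i++) {
                eps[i].bEndpointAddress = 0x80 | (i + 1); eps[i].bmAttributes = 0x02;
                eps[i + 5].bEndpointAddress = i + 1;      eps[i + 5].bmAttributes = 0x02;
        }
        printf("%d\n", enough_bulk_endpoints(eps, 10, 5)); /* 1 */
        printf("%d\n", enough_bulk_endpoints(eps, 4, 5));  /* 0 */
        return 0;
}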
diff -ur a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
--- a/drivers/usb/storage/unusual_devs.h	2017-03-23 14:47:16.000000000 +0100
+++ b/drivers/usb/storage/unusual_devs.h	2017-03-14 02:23:31.000000000 +0100
@@ -759,6 +759,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_GO_SLOW ),
 
+/* Reported by Christian Schaller <cschalle@redhat.com> */
+UNUSUAL_DEV(  0x059f, 0x0651, 0x0000, 0x0000,
+		"LaCie",
+		"External HDD",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_WP_DETECT ),
+
 /* Submitted by Joel Bourquard <numlock@freesurf.ch>
  * Some versions of this device need the SubClass and Protocol overrides
  * while others don't.
@@ -2023,6 +2030,18 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_READ_DISC_INFO ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com>
+ * This device morphs spontaneously into another device if the access
+ * pattern of Windows isn't followed. Thus writable media would be dirty
+ * if the initial instance is used. So the device is limited to its
+ * virtual CD.
+ * And yes, the concept that BCD goes up to 9 is not heeded */
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+		"ZTE,Incorporated",
+		"ZTE WCDMA Technologies MSM",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_SINGLE_LUN ),
+
 /* Reported by Sven Geggus <sven-usbst@geggus.net>
  * This encrypted pen drive returns bogus data for the initial READ(10).
  */
diff -ur a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
--- a/drivers/vhost/scsi.c	2017-03-23 14:38:36.000000000 +0100
+++ b/drivers/vhost/scsi.c	2017-03-14 02:13:18.000000000 +0100
@@ -1088,7 +1088,7 @@
 		 * lun[4-7] need to be zero according to virtio-scsi spec.
 		 */
 		evt->event.lun[0] = 0x01;
-		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+		evt->event.lun[1] = tpg->tport_tpgt;
 		if (lun->unpacked_lun >= 256)
 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
@@ -1894,12 +1894,12 @@
 			struct tcm_vhost_tport, tport_wwn);
 
 	struct tcm_vhost_tpg *tpg;
-	unsigned long tpgt;
+	u16 tpgt;
 	int ret;
 
 	if (strstr(name, "tpgt_") != name)
 		return ERR_PTR(-EINVAL);
-	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
 		return ERR_PTR(-EINVAL);
 
 	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
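The vhost/scsi change parses the tpgt_N suffix with kstrtou16() and bounds it against VHOST_SCSI_MAX_TARGET, so a value that does not fit the u16 tport_tpgt field is rejected up front instead of being silently truncated later; that is presumably also why the earlier hunk can drop the & 0xFF when copying tport_tpgt into the event LUN byte. A userspace sketch of the stricter parse (strtoul standing in for kstrtou16, MAX_TARGET standing in for VHOST_SCSI_MAX_TARGET):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGET 256          /* stands in for VHOST_SCSI_MAX_TARGET */

static int parse_tpgt(const char *name, uint16_t *out)
{
        char *end;
        unsigned long v;

        if (strncmp(name, "tpgt_", 5) != 0)
                return -EINVAL;
        errno = 0;
        v = strtoul(name + 5, &end, 10);
        if (errno || *end != '\0' || v > UINT16_MAX || v >= MAX_TARGET)
                return -EINVAL;
        *out = (uint16_t)v;
        return 0;
}

int main(void)
{
        uint16_t tpgt;

        printf("%d\n", parse_tpgt("tpgt_7", &tpgt));     /* 0: accepted */
        printf("%d\n", parse_tpgt("tpgt_70000", &tpgt)); /* -EINVAL: out of range */
        return 0;
}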
diff -ur a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
--- a/drivers/vhost/vhost.c	2017-03-23 14:38:35.000000000 +0100
+++ b/drivers/vhost/vhost.c	2017-03-14 02:13:17.000000000 +0100
@@ -855,6 +855,7 @@
 		}
 		if (eventfp != d->log_file) {
 			filep = d->log_file;
+			d->log_file = eventfp;
 			ctx = d->log_ctx;
 			d->log_ctx = eventfp ?
 				eventfd_ctx_fileget(eventfp) : NULL;
diff -ur a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
--- a/drivers/video/da8xx-fb.c	2017-03-23 14:48:58.000000000 +0100
+++ b/drivers/video/da8xx-fb.c	2017-03-14 02:25:55.000000000 +0100
@@ -226,8 +226,7 @@
 		.lower_margin   = 2,
 		.hsync_len      = 0,
 		.vsync_len      = 0,
-		.sync           = FB_SYNC_CLK_INVERT |
-			FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		.sync           = FB_SYNC_CLK_INVERT,
 	},
 	/* Sharp LK043T1DG01 */
 	[1] = {
@@ -241,7 +240,7 @@
 		.lower_margin   = 2,
 		.hsync_len      = 41,
 		.vsync_len      = 10,
-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		.sync           = 0,
 		.flag           = 0,
 	},
 	[2] = {
@@ -256,7 +255,7 @@
 		.lower_margin   = 10,
 		.hsync_len      = 10,
 		.vsync_len      = 10,
-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		.sync           = 0,
 		.flag           = 0,
 	},
 };
diff -ur a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
--- a/drivers/virtio/virtio.c	2017-03-23 14:46:27.000000000 +0100
+++ b/drivers/virtio/virtio.c	2017-03-14 02:22:26.000000000 +0100
@@ -238,6 +238,7 @@
 static void __exit virtio_exit(void)
 {
 	bus_unregister(&virtio_bus);
+	ida_destroy(&virtio_index_ida);
 }
 core_initcall(virtio_init);
 module_exit(virtio_exit);
diff -ur a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
--- a/drivers/watchdog/omap_wdt.c	2017-03-23 14:48:28.000000000 +0100
+++ b/drivers/watchdog/omap_wdt.c	2017-03-14 02:25:10.000000000 +0100
@@ -134,6 +134,13 @@
 
 	pm_runtime_get_sync(wdev->dev);
 
+	/*
+	 * Make sure the watchdog is disabled. This is unfortunately required
+	 * because writing to various registers with the watchdog running has no
+	 * effect.
+	 */
+	omap_wdt_disable(wdev);
+
 	/* initialize prescaler */
 	while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
 		cpu_relax();
diff -ur a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
--- a/drivers/watchdog/rc32434_wdt.c	2017-03-23 14:48:28.000000000 +0100
+++ b/drivers/watchdog/rc32434_wdt.c	2017-03-14 02:25:10.000000000 +0100
@@ -237,7 +237,7 @@
 			return -EINVAL;
 		/* Fall through */
 	case WDIOC_GETTIMEOUT:
-		return copy_to_user(argp, &timeout, sizeof(int));
+		return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
 	default:
 		return -ENOTTY;
 	}
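The rc32434_wdt one-liner is the usual copy_to_user() convention fix: copy_to_user() returns the number of bytes it could not copy, not a negative errno, so returning it directly hands a positive byte count to userspace when the buffer faults. The idiom is to map any nonzero remainder to -EFAULT. A userspace model of that convention (the fake_copy_to_user() helper is illustrative only):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* pretend copy: returns how many bytes could NOT be copied */
static unsigned long fake_copy_to_user(void *dst, const void *src, unsigned long n)
{
        if (!dst)               /* simulate a faulting user pointer */
                return n;
        memcpy(dst, src, n);
        return 0;
}

static int gettimeout_ioctl(void *argp, int timeout)
{
        return fake_copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
}

int main(void)
{
        int buf;

        printf("%d\n", gettimeout_ioctl(&buf, 60));  /*  0: value copied       */
        printf("%d\n", gettimeout_ioctl(NULL, 60));  /* -EFAULT: faulting dest */
        return 0;
}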
diff -ur a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
--- a/drivers/xen/gntdev.c	2017-03-23 14:41:10.000000000 +0100
+++ b/drivers/xen/gntdev.c	2017-03-14 02:16:40.000000000 +0100
@@ -65,7 +65,7 @@
 	 * Only populated if populate_freeable_maps == 1 */
 	struct list_head freeable_maps;
 	/* lock protects maps and freeable_maps */
-	spinlock_t lock;
+	struct mutex lock;
 	struct mm_struct *mm;
 	struct mmu_notifier mn;
 };
@@ -214,9 +214,9 @@
 	}
 
 	if (populate_freeable_maps && priv) {
-		spin_lock(&priv->lock);
+		mutex_lock(&priv->lock);
 		list_del(&map->next);
-		spin_unlock(&priv->lock);
+		mutex_unlock(&priv->lock);
 	}
 
 	if (map->pages && !use_ptemod)
@@ -392,9 +392,9 @@
 		 * not do any unmapping, since that has been done prior to
 		 * closing the vma, but it may still iterate the unmap_ops list.
 		 */
-		spin_lock(&priv->lock);
+		mutex_lock(&priv->lock);
 		map->vma = NULL;
-		spin_unlock(&priv->lock);
+		mutex_unlock(&priv->lock);
 	}
 	vma->vm_private_data = NULL;
 	gntdev_put_map(priv, map);
@@ -438,14 +438,14 @@
 	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 	struct grant_map *map;
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	list_for_each_entry(map, &priv->maps, next) {
 		unmap_if_in_range(map, start, end);
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
 		unmap_if_in_range(map, start, end);
 	}
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 }
 
 static void mn_invl_page(struct mmu_notifier *mn,
@@ -462,7 +462,7 @@
 	struct grant_map *map;
 	int err;
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	list_for_each_entry(map, &priv->maps, next) {
 		if (!map->vma)
 			continue;
@@ -481,7 +481,7 @@
 		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 		WARN_ON(err);
 	}
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 }
 
 static struct mmu_notifier_ops gntdev_mmu_ops = {
@@ -503,7 +503,7 @@
 
 	INIT_LIST_HEAD(&priv->maps);
 	INIT_LIST_HEAD(&priv->freeable_maps);
-	spin_lock_init(&priv->lock);
+	mutex_init(&priv->lock);
 
 	if (use_ptemod) {
 		priv->mm = get_task_mm(current);
@@ -534,12 +534,14 @@
 
 	pr_debug("priv %p\n", priv);
 
+	mutex_lock(&priv->lock);
 	while (!list_empty(&priv->maps)) {
 		map = list_entry(priv->maps.next, struct grant_map, next);
 		list_del(&map->next);
 		gntdev_put_map(NULL /* already removed */, map);
 	}
 	WARN_ON(!list_empty(&priv->freeable_maps));
+	mutex_unlock(&priv->lock);
 
 	if (use_ptemod)
 		mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -577,10 +579,10 @@
 		return -EFAULT;
 	}
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	gntdev_add_map(priv, map);
 	op.index = map->index << PAGE_SHIFT;
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 
 	if (copy_to_user(u, &op, sizeof(op)) != 0)
 		return -EFAULT;
@@ -599,7 +601,7 @@
 		return -EFAULT;
 	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 	if (map) {
 		list_del(&map->next);
@@ -607,7 +609,7 @@
 			list_add_tail(&map->next, &priv->freeable_maps);
 		err = 0;
 	}
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 	if (map)
 		gntdev_put_map(priv, map);
 	return err;
@@ -675,7 +677,7 @@
 	out_flags = op.action;
 	out_event = op.event_channel_port;
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 
 	list_for_each_entry(map, &priv->maps, next) {
 		uint64_t begin = map->index << PAGE_SHIFT;
@@ -703,7 +705,7 @@
 	rc = 0;
 
  unlock_out:
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 
 	/* Drop the reference to the event channel we did not save in the map */
 	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
@@ -753,7 +755,7 @@
 	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
 			index, count, vma->vm_start, vma->vm_pgoff);
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	map = gntdev_find_map_index(priv, index, count);
 	if (!map)
 		goto unlock_out;
@@ -768,7 +770,7 @@
 
 	vma->vm_ops = &gntdev_vmops;
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 
 	if (use_ptemod)
 		vma->vm_flags |= VM_DONTCOPY;
@@ -788,7 +790,7 @@
 			map->flags |= GNTMAP_readonly;
 	}
 
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 
 	if (use_ptemod) {
 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -816,11 +818,11 @@
 	return 0;
 
 unlock_out:
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 	return err;
 
 out_unlock_put:
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 out_put_map:
 	if (use_ptemod)
 		map->vma = NULL;
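The gntdev conversion from a spinlock to a mutex is needed because the lock is now held across paths that may sleep: the new lock-protected drain in gntdev_release() calls gntdev_put_map(), and, as I understand the change, tearing a map down can unmap grants and block, which is not allowed under a spinlock. A userspace analog of that drain-under-a-sleepable-lock pattern (pthreads purely for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct map { struct map *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct map *maps;

static void put_map(struct map *m)
{
        /* in the kernel this may unmap grants and sleep */
        printf("tearing down map %d\n", m->id);
        free(m);
}

static void release_all(void)
{
        pthread_mutex_lock(&lock);
        while (maps) {
                struct map *m = maps;

                maps = m->next;
                put_map(m);     /* may block; fine under a mutex */
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct map *m = malloc(sizeof(*m));

                m->id = i;
                m->next = maps;
                maps = m;
        }
        release_all();
        return 0;
}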
diff -ur a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
--- a/fs/9p/vfs_inode.c	2017-03-23 15:08:19.000000000 +0100
+++ b/fs/9p/vfs_inode.c	2017-03-14 02:46:14.000000000 +0100
@@ -536,8 +536,7 @@
 	unlock_new_inode(inode);
 	return inode;
 error:
-	unlock_new_inode(inode);
-	iput(inode);
+	iget_failed(inode);
 	return ERR_PTR(retval);
 
 }
diff -ur a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
--- a/fs/9p/vfs_inode_dotl.c	2017-03-23 15:08:19.000000000 +0100
+++ b/fs/9p/vfs_inode_dotl.c	2017-03-14 02:46:14.000000000 +0100
@@ -151,8 +151,7 @@
 	unlock_new_inode(inode);
 	return inode;
 error:
-	unlock_new_inode(inode);
-	iput(inode);
+	iget_failed(inode);
 	return ERR_PTR(retval);
 
 }
diff -ur a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c	2017-03-23 15:07:47.000000000 +0100
+++ b/fs/aio.c	2017-03-14 02:45:43.000000000 +0100
@@ -976,12 +976,17 @@
 
 static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb)
 {
-	if (unlikely(!access_ok(!rw, kiocb->ki_buf, kiocb->ki_nbytes)))
-		return -EFAULT;
+	size_t len = kiocb->ki_nbytes;
+
+	if (len > MAX_RW_COUNT)
+		len = MAX_RW_COUNT;
+
+	if (unlikely(!access_ok(!rw, kiocb->ki_buf, len)))
+                return -EFAULT;
 
 	kiocb->ki_iovec = &kiocb->ki_inline_vec;
 	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-	kiocb->ki_iovec->iov_len = kiocb->ki_nbytes;
+	kiocb->ki_iovec->iov_len = len;
 	kiocb->ki_nr_segs = 1;
 	return 0;
 }
diff -ur a/fs/binfmt_elf.c b/fs/binfmt_elf.c
--- a/fs/binfmt_elf.c	2017-03-23 15:07:49.000000000 +0100
+++ b/fs/binfmt_elf.c	2017-03-14 02:45:45.000000000 +0100
@@ -680,16 +680,16 @@
 			 */
 			would_dump(bprm, interpreter);
 
-			retval = kernel_read(interpreter, 0, bprm->buf,
-					     BINPRM_BUF_SIZE);
-			if (retval != BINPRM_BUF_SIZE) {
+			/* Get the exec headers */
+			retval = kernel_read(interpreter, 0,
+					     (void *)&loc->interp_elf_ex,
+					     sizeof(loc->interp_elf_ex));
+			if (retval != sizeof(loc->interp_elf_ex)) {
 				if (retval >= 0)
 					retval = -EIO;
 				goto out_free_dentry;
 			}
 
-			/* Get the exec headers */
-			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
 			break;
 		}
 		elf_ppnt++;
@@ -822,7 +822,7 @@
 			total_size = total_mapping_size(elf_phdata,
 							loc->elf_ex.e_phnum);
 			if (!total_size) {
-				error = -EINVAL;
+				retval = -EINVAL;
 				goto out_free_dentry;
 			}
 		}
diff -ur a/fs/btrfs/backref.c b/fs/btrfs/backref.c
--- a/fs/btrfs/backref.c	2017-03-23 15:07:53.000000000 +0100
+++ b/fs/btrfs/backref.c	2017-03-14 02:45:51.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
  
 #include <linux/vmalloc.h>
 #include "ctree.h"
@@ -9,6 +12,21 @@
 #include "locking.h"
 
 #define BACKREF_FOUND_SHARED 6
+#ifdef MY_ABC_HERE
+#define BACKREF_NEXT_ITEM 253
+#define BACKREF_FOUND_SHARED_ROOT 254
+#endif  
+
+#ifdef MY_ABC_HERE
+enum btrfs_backref_mode {
+	 
+	BTRFS_BACKREF_NORMAL,
+#ifdef MY_ABC_HERE
+	 
+	BTRFS_BACKREF_FIND_SHARED_ROOT,
+#endif  
+};
+#endif  
 
 struct extent_inode_elem {
 	u64 inum;
@@ -129,6 +147,9 @@
 static int __add_prelim_ref(struct list_head *head, u64 root_id,
 			    struct btrfs_key *key, int level,
 			    u64 parent, u64 wanted_disk_byte, int count,
+#ifdef MY_ABC_HERE
+			    enum btrfs_backref_mode mode,
+#endif  
 			    gfp_t gfp_mask)
 {
 	struct __prelim_ref *ref;
@@ -141,10 +162,19 @@
 		return -ENOMEM;
 
 	ref->root_id = root_id;
-	if (key)
+	if (key) {
 		ref->key_for_search = *key;
-	else
+		 
+#ifdef MY_ABC_HERE
+		 
+		if (mode == BTRFS_BACKREF_NORMAL)
+#endif  
+		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
+		    ref->key_for_search.offset >= LLONG_MAX)
+			ref->key_for_search.offset = 0;
+	} else {
 		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
+	}
 
 	ref->inode_list = NULL;
 	ref->level = level;
@@ -159,6 +189,12 @@
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 			   struct ulist *parents, struct __prelim_ref *ref,
 			   int level, u64 time_seq, const u64 *extent_item_pos,
+#ifdef MY_ABC_HERE
+			   enum btrfs_backref_mode mode, u64 num_bytes,
+#endif  
+#ifdef MY_ABC_HERE
+			   u64 file_offset, int check_first_ref,
+#endif  
 			   u64 total_refs)
 {
 	int ret = 0;
@@ -171,6 +207,15 @@
 	u64 disk_byte;
 	u64 wanted_disk_byte = ref->wanted_disk_byte;
 	u64 count = 0;
+#ifdef MY_ABC_HERE
+	u64 total_count;
+	u64 datao;
+
+	if (mode == BTRFS_BACKREF_NORMAL || key_for_search->type != BTRFS_EXTENT_DATA_KEY)
+		total_count = total_refs;
+	else
+		total_count = ref->count + total_refs;
+#endif  
 
 	if (level != 0) {
 		eb = path->nodes[level];
@@ -183,7 +228,11 @@
 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
 		ret = btrfs_next_old_leaf(root, path, time_seq);
 
+#ifdef MY_ABC_HERE
+	while (!ret && count < total_count) {
+#else
 	while (!ret && count < total_refs) {
+#endif  
 		eb = path->nodes[0];
 		slot = path->slots[0];
 
@@ -196,9 +245,29 @@
 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 
+#ifdef MY_ABC_HERE
+		if (mode != BTRFS_BACKREF_NORMAL &&
+		    key_for_search->type == BTRFS_EXTENT_DATA_KEY &&
+		    key.offset >= key_for_search->offset + num_bytes)
+			break;
+#endif  
 		if (disk_byte == wanted_disk_byte) {
 			eie = NULL;
 			old = NULL;
+#ifdef MY_ABC_HERE
+			if (mode != BTRFS_BACKREF_NORMAL) {
+				datao = key.offset - btrfs_file_extent_offset(eb, fi);
+				if (datao != key_for_search->offset)
+					goto next;
+#ifdef MY_ABC_HERE
+				if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT && check_first_ref &&
+					key.offset < file_offset) {
+					 
+					return BACKREF_NEXT_ITEM;
+				}
+#endif  
+			}
+#endif  
 			count++;
 			if (extent_item_pos) {
 				ret = check_extent_in_eb(&key, eb, fi,
@@ -236,7 +305,16 @@
 				  struct btrfs_path *path, u64 time_seq,
 				  struct __prelim_ref *ref,
 				  struct ulist *parents,
+#ifdef MY_ABC_HERE
+				  const u64 *extent_item_pos, enum btrfs_backref_mode mode,
+				  u64 num_bytes,
+#ifdef MY_ABC_HERE
+				  int check_first_ref, u64 file_offset,
+#endif  
+				  u64 total_refs)
+#else
 				  const u64 *extent_item_pos, u64 total_refs)
+#endif  
 {
 	struct btrfs_root *root;
 	struct btrfs_key root_key;
@@ -245,6 +323,12 @@
 	int root_level;
 	int level = ref->level;
 	int index;
+#ifdef MY_ABC_HERE
+	u64 origin_offset = ref->key_for_search.offset;
+
+	if (ref->key_for_search.offset >= LLONG_MAX)
+		ref->key_for_search.offset = 0;
+#endif  
 
 	root_key.objectid = ref->root_id;
 	root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -252,7 +336,7 @@
 
 	index = srcu_read_lock(&fs_info->subvol_srcu);
 
-	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+	root = btrfs_get_fs_root(fs_info, &root_key, false);
 	if (IS_ERR(root)) {
 		srcu_read_unlock(&fs_info->subvol_srcu, index);
 		ret = PTR_ERR(root);
@@ -292,8 +376,21 @@
 		eb = path->nodes[level];
 	}
 
+#ifdef MY_ABC_HERE
+	 
+	ref->key_for_search.offset = origin_offset;
+#ifdef MY_ABC_HERE
+	ret = add_all_parents(root, path, parents, ref, level, time_seq,
+			      extent_item_pos, mode, num_bytes, file_offset,
+			      check_first_ref, total_refs);
+#else
+	ret = add_all_parents(root, path, parents, ref, level, time_seq,
+			      extent_item_pos, mode, num_bytes, total_refs);
+#endif  
+#else
 	ret = add_all_parents(root, path, parents, ref, level, time_seq,
 			      extent_item_pos, total_refs);
+#endif  
 out:
 	path->lowest_level = 0;
 	btrfs_release_path(path);
@@ -304,7 +401,15 @@
 				   struct btrfs_path *path, u64 time_seq,
 				   struct list_head *head,
 				   const u64 *extent_item_pos, u64 total_refs,
+#ifdef MY_ABC_HERE
+				   u64 root_objectid,
+#ifdef MY_ABC_HERE
+				   u64 inum, u64 file_offset, u64 datao,
+#endif  
+				   enum btrfs_backref_mode mode, u64 num_bytes)
+#else
 				   u64 root_objectid)
+#endif  
 {
 	int err;
 	int ret = 0;
@@ -320,16 +425,38 @@
 		return -ENOMEM;
 
 	list_for_each_entry_safe(ref, ref_safe, head, list) {
+#ifdef MY_ABC_HERE
+		int check_first_ref = 0;
+#endif  
 		if (ref->parent)	 
 			continue;
 		if (ref->count == 0)
 			continue;
+#ifdef MY_ABC_HERE
+		if (mode == BTRFS_BACKREF_NORMAL)
+#endif  
 		if (root_objectid && ref->root_id != root_objectid) {
 			ret = BACKREF_FOUND_SHARED;
 			goto out;
 		}
+#ifdef MY_ABC_HERE
+		if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT) {
+			if (ref->level == 0 && ref->root_id == root_objectid &&
+				ref->key_for_search.objectid == inum &&
+				ref->key_for_search.offset == file_offset - datao) {
+				WARN_ON(root_objectid == 0);
+				check_first_ref = 1;
+			}
+		}
+#endif  
 		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
 					     parents, extent_item_pos,
+#ifdef MY_ABC_HERE
+					     mode, num_bytes,
+#endif  
+#ifdef MY_ABC_HERE
+					     check_first_ref, file_offset,
+#endif  
 					     total_refs);
 		 
 		if (err == -ENOENT) {
@@ -466,7 +593,11 @@
 
 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
 			      struct list_head *prefs, u64 *total_refs,
+#ifdef MY_ABC_HERE
+			      u64 root_objectid, u64 inum, enum btrfs_backref_mode mode)
+#else
 			      u64 inum)
+#endif  
 {
 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 	struct rb_node *n = &head->node.rb_node;
@@ -502,6 +633,9 @@
 		default:
 			BUG_ON(1);
 		}
+#ifdef MY_ABC_HERE
+		if (mode == BTRFS_BACKREF_NORMAL || node->type != BTRFS_EXTENT_DATA_REF_KEY)
+#endif  
 		*total_refs += (node->ref_mod * sgn);
 		switch (node->type) {
 		case BTRFS_TREE_BLOCK_REF_KEY: {
@@ -510,17 +644,25 @@
 			ref = btrfs_delayed_node_to_tree_ref(node);
 			ret = __add_prelim_ref(prefs, ref->root, &op_key,
 					       ref->level + 1, 0, node->bytenr,
+#ifdef MY_ABC_HERE
+					       node->ref_mod * sgn, 0, GFP_ATOMIC);
+#else
 					       node->ref_mod * sgn, GFP_ATOMIC);
+#endif  
 			break;
 		}
 		case BTRFS_SHARED_BLOCK_REF_KEY: {
 			struct btrfs_delayed_tree_ref *ref;
 
 			ref = btrfs_delayed_node_to_tree_ref(node);
-			ret = __add_prelim_ref(prefs, ref->root, NULL,
+			ret = __add_prelim_ref(prefs, 0, NULL,
 					       ref->level + 1, ref->parent,
 					       node->bytenr,
+#ifdef MY_ABC_HERE
+					       node->ref_mod * sgn, 0, GFP_ATOMIC);
+#else
 					       node->ref_mod * sgn, GFP_ATOMIC);
+#endif  
 			break;
 		}
 		case BTRFS_EXTENT_DATA_REF_KEY: {
@@ -538,20 +680,28 @@
 
 			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
 					       node->bytenr,
+#ifdef MY_ABC_HERE
+					       node->ref_mod * sgn, mode, GFP_ATOMIC);
+#else
 					       node->ref_mod * sgn, GFP_ATOMIC);
+#endif  
 			break;
 		}
 		case BTRFS_SHARED_DATA_REF_KEY: {
 			struct btrfs_delayed_data_ref *ref;
 
 			ref = btrfs_delayed_node_to_data_ref(node);
-
-			key.objectid = ref->objectid;
-			key.type = BTRFS_EXTENT_DATA_KEY;
-			key.offset = ref->offset;
-			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
+#ifdef MY_ABC_HERE
+			if (mode != BTRFS_BACKREF_NORMAL)
+				*total_refs += (node->ref_mod * sgn);
+#endif  
+			ret = __add_prelim_ref(prefs, 0, NULL, 0,
 					       ref->parent, node->bytenr,
+#ifdef MY_ABC_HERE
+					       node->ref_mod * sgn, 0, GFP_ATOMIC);
+#else
 					       node->ref_mod * sgn, GFP_ATOMIC);
+#endif  
 			break;
 		}
 		default:
@@ -567,7 +717,16 @@
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 			     struct btrfs_path *path, u64 bytenr,
 			     int *info_level, struct list_head *prefs,
+#ifdef MY_ABC_HERE
+			     struct ulist *roots, u64 *lowest_full_backref,
+			     u64 *lowest_rootid, u64 *lowest_inum, u64 *lowest_offset,
+#endif  
+#ifdef MY_ABC_HERE
+			     u64 *total_refs, u64 root_objectid, u64 inum,
+			     enum btrfs_backref_mode mode)
+#else
 			     u64 *total_refs, u64 inum)
+#endif  
 {
 	int ret = 0;
 	int slot;
@@ -588,6 +747,9 @@
 
 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 	flags = btrfs_extent_flags(leaf, ei);
+#ifdef MY_ABC_HERE
+	if (mode == BTRFS_BACKREF_NORMAL || !(flags & BTRFS_EXTENT_FLAG_DATA))
+#endif  
 	*total_refs += btrfs_extent_refs(leaf, ei);
 	btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
@@ -621,22 +783,49 @@
 		case BTRFS_SHARED_BLOCK_REF_KEY:
 			ret = __add_prelim_ref(prefs, 0, NULL,
 						*info_level + 1, offset,
+#ifdef MY_ABC_HERE
+						bytenr, 1, 0, GFP_NOFS);
+#else
 						bytenr, 1, GFP_NOFS);
+#endif  
 			break;
 		case BTRFS_SHARED_DATA_REF_KEY: {
 			struct btrfs_shared_data_ref *sdref;
 			int count;
 
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT &&
+			    *lowest_full_backref > offset)
+				*lowest_full_backref = offset;
+#endif  
 			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
 			count = btrfs_shared_data_ref_count(leaf, sdref);
+#ifdef MY_ABC_HERE
+			if (mode != BTRFS_BACKREF_NORMAL)
+				*total_refs += count;
+			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
+					       bytenr, count, 0, GFP_NOFS);
+#else
 			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
 					       bytenr, count, GFP_NOFS);
+#endif  
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT &&
+				!ulist_search(roots, offset)) {
+				ret = BACKREF_FOUND_SHARED_ROOT;
+				break;
+			}
+#endif  
 			ret = __add_prelim_ref(prefs, offset, NULL,
 					       *info_level + 1, 0,
+#ifdef MY_ABC_HERE
+					       bytenr, 1, 0, GFP_NOFS);
+#else
 					       bytenr, 1, GFP_NOFS);
+#endif  
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY: {
 			struct btrfs_extent_data_ref *dref;
@@ -650,14 +839,39 @@
 			key.type = BTRFS_EXTENT_DATA_KEY;
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_NORMAL)
+#endif  
 			if (inum && key.objectid != inum) {
 				ret = BACKREF_FOUND_SHARED;
 				break;
 			}
 
 			root = btrfs_extent_data_ref_root(leaf, dref);
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT) {
+				WARN_ON(!root_objectid || !inum);
+				if (!ulist_search(roots, root)) {
+					ret = BACKREF_FOUND_SHARED_ROOT;
+					break;
+				}
+				if (*lowest_rootid > root ||
+					(*lowest_rootid == root && *lowest_inum > key.objectid) ||
+					(*lowest_rootid == root && *lowest_inum == key.objectid &&
+					 *lowest_offset > key.offset)) {
+					*lowest_rootid = root;
+					*lowest_inum = key.objectid;
+					*lowest_offset = key.offset;
+				}
+			}
+#endif  
+#ifdef MY_ABC_HERE
+			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+					       bytenr, count, mode, GFP_NOFS);
+#else
 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
 					       bytenr, count, GFP_NOFS);
+#endif  
 			break;
 		}
 		default:
@@ -673,7 +887,17 @@
 
 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
 			    struct btrfs_path *path, u64 bytenr,
+#ifdef MY_ABC_HERE
+			    struct ulist *roots, u64 *lowest_full_backref,
+			    u64 *lowest_rootid, u64 *lowest_inum, u64 *lowest_offset,
+#endif  
+#ifdef MY_ABC_HERE
+			    int info_level, struct list_head *prefs,
+			    u64 *total_refs, u64 root_objectid,
+			    u64 inum, enum btrfs_backref_mode mode)
+#else
 			    int info_level, struct list_head *prefs, u64 inum)
+#endif  
 {
 	struct btrfs_root *extent_root = fs_info->extent_root;
 	int ret;
@@ -705,23 +929,50 @@
 		case BTRFS_SHARED_BLOCK_REF_KEY:
 			ret = __add_prelim_ref(prefs, 0, NULL,
 						info_level + 1, key.offset,
+#ifdef MY_ABC_HERE
+						bytenr, 1, 0, GFP_NOFS);
+#else
 						bytenr, 1, GFP_NOFS);
+#endif  
 			break;
 		case BTRFS_SHARED_DATA_REF_KEY: {
 			struct btrfs_shared_data_ref *sdref;
 			int count;
 
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT &&
+			    *lowest_full_backref > key.offset)
+				*lowest_full_backref = key.offset;
+#endif  
 			sdref = btrfs_item_ptr(leaf, slot,
 					      struct btrfs_shared_data_ref);
 			count = btrfs_shared_data_ref_count(leaf, sdref);
+#ifdef MY_ABC_HERE
+			if (mode != BTRFS_BACKREF_NORMAL)
+				*total_refs += count;
+			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
+						bytenr, count, 0, GFP_NOFS);
+#else
 			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
 						bytenr, count, GFP_NOFS);
+#endif  
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT &&
+				!ulist_search(roots, key.offset)) {
+				ret = BACKREF_FOUND_SHARED_ROOT;
+				break;
+			}
+#endif  
 			ret = __add_prelim_ref(prefs, key.offset, NULL,
 					       info_level + 1, 0,
+#ifdef MY_ABC_HERE
+					       bytenr, 1, 0, GFP_NOFS);
+#else
 					       bytenr, 1, GFP_NOFS);
+#endif  
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY: {
 			struct btrfs_extent_data_ref *dref;
@@ -736,14 +987,39 @@
 			key.type = BTRFS_EXTENT_DATA_KEY;
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_NORMAL)
+#endif  
 			if (inum && key.objectid != inum) {
 				ret = BACKREF_FOUND_SHARED;
 				break;
 			}
 
 			root = btrfs_extent_data_ref_root(leaf, dref);
+#ifdef MY_ABC_HERE
+			if (mode == BTRFS_BACKREF_FIND_SHARED_ROOT) {
+				WARN_ON(!root_objectid || !inum);
+				if (!ulist_search(roots, root)) {
+					ret = BACKREF_FOUND_SHARED_ROOT;
+					break;
+				}
+				if (*lowest_rootid > root ||
+					(*lowest_rootid == root && *lowest_inum > key.objectid) ||
+					(*lowest_rootid == root && *lowest_inum == key.objectid &&
+					 *lowest_offset > key.offset)) {
+					*lowest_rootid = root;
+					*lowest_inum = key.objectid;
+					*lowest_offset = key.offset;
+				}
+			}
+#endif  
+#ifdef MY_ABC_HERE
+			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+					       bytenr, count, mode, GFP_NOFS);
+#else
 			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
 					       bytenr, count, GFP_NOFS);
+#endif  
 			break;
 		}
 		default:
@@ -757,11 +1033,200 @@
 	return ret;
 }
 
+#ifdef MY_ABC_HERE
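+/*
+ * Return 1 if (@inum, @file_offset) is the first (lowest) reference to
+ * @bytenr among the EXTENT_DATA items of this leaf, 0 otherwise.  This
+ * lets the caller account a shared extent only once, at its first ref.
+ */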
+static int check_first_ref(struct extent_buffer *eb, u64 bytenr,
+		  u64 inum, u64 file_offset)
+{
+	u64 disk_byte;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	int slot;
+	int nritems;
+	int extent_type;
+
+	nritems = btrfs_header_nritems(eb);
+	for (slot = 0; slot < nritems; ++slot) {
+		btrfs_item_key_to_cpu(eb, &key, slot);
+		if (key.type != BTRFS_EXTENT_DATA_KEY)
+			continue;
+		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+		extent_type = btrfs_file_extent_type(eb, fi);
+		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+			continue;
+		 
+		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+		if (disk_byte != bytenr)
+			continue;
+
+		if (key.objectid < inum ||
+		    (key.objectid == inum && key.offset < file_offset))
+			return 0;
+	}
+
+	return 1;
+}
+
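+/*
+ * Commit-root-only variant of find_parent_nodes() used by the snapshot
+ * size query.  It resolves the backrefs of @bytenr and bails out early
+ * with BACKREF_FOUND_SHARED_ROOT as soon as a referencing root outside
+ * @roots is found, or with BACKREF_NEXT_ITEM when (@root_objectid,
+ * @inum, @offset) is not the lowest reference, so that every extent is
+ * accounted exactly once.
+ */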
+static int find_parent_nodes_shared_root(struct btrfs_fs_info *fs_info,
+			     u64 bytenr, u64 leaf_bytenr, u64 datao,
+			     struct ulist *refs, struct ulist *roots,
+			     u64 root_objectid, u64 inum, u64 offset)
+{
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	int info_level = 0;
+	int ret;
+	struct list_head prefs;
+	struct __prelim_ref *ref;
+	u64 total_refs = 0;
+	u64 num_bytes = 0x10000000;  
+	u64 lowest_full_backref = (u64)-1;
+	u64 lowest_rootid = (u64)-1;
+	u64 lowest_inum = (u64)-1;
+	u64 lowest_offset = (u64)-1;
+	enum btrfs_backref_mode mode = BTRFS_BACKREF_FIND_SHARED_ROOT;
+
+	INIT_LIST_HEAD(&prefs);
+
+	key.objectid = bytenr;
+	key.offset = (u64)-1;
+	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+		key.type = BTRFS_METADATA_ITEM_KEY;
+	else
+		key.type = BTRFS_EXTENT_ITEM_KEY;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->search_commit_root = 1;
+	path->skip_locking = 1;
+
+	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	BUG_ON(ret == 0);
+
+	if (path->slots[0]) {
+		struct extent_buffer *leaf;
+		int slot;
+
+		path->slots[0]--;
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (key.objectid == bytenr &&
+		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
+		     key.type == BTRFS_METADATA_ITEM_KEY)) {
+			num_bytes = key.offset;
+			ret = __add_inline_refs(fs_info, path, bytenr,
+						&info_level, &prefs,
+						roots, &lowest_full_backref,
+						&lowest_rootid, &lowest_inum, &lowest_offset,
+						&total_refs, root_objectid,
+						inum, mode);
+			if (ret)
+				goto out;
+			ret = __add_keyed_refs(fs_info, path, bytenr,
+					       roots, &lowest_full_backref,
+					       &lowest_rootid, &lowest_inum, &lowest_offset,
+					       info_level, &prefs,
+					       &total_refs, root_objectid,
+					       inum, mode);
+			if (ret)
+				goto out;
+			if (key.type == BTRFS_EXTENT_ITEM_KEY) {
+				if (lowest_full_backref != (u64)-1) {
+					if (leaf_bytenr != lowest_full_backref) {
+						ret = BACKREF_NEXT_ITEM;
+						goto out;
+					}
+				} else if (lowest_rootid != (u64)-1) {
+					if (lowest_rootid != root_objectid || lowest_inum != inum ||
+						lowest_offset != offset - datao) {
+						ret = BACKREF_NEXT_ITEM;
+						goto out;
+					}
+				}
+			}
+		}
+	}
+	btrfs_release_path(path);
+
+	ret = __add_missing_keys(fs_info, &prefs);
+	if (ret)
+		goto out;
+
+	__merge_refs(&prefs, 1);
+
+	WARN_ON(!path->search_commit_root);
+	 
+	ret = __resolve_indirect_refs(fs_info, path, 0, &prefs,
+				      NULL, total_refs,
+				      lowest_full_backref == (u64) -1 ? root_objectid : 0,
+				      inum, offset, datao, mode, num_bytes);
+	if (ret)
+		goto out;
+
+	__merge_refs(&prefs, 2);
+
+	while (!list_empty(&prefs)) {
+		ref = list_first_entry(&prefs, struct __prelim_ref, list);
+		WARN_ON(ref->count < 0);
+		if (roots && ref->count && ref->root_id && ref->parent == 0) {
+			if (!ulist_search(roots, ref->root_id)) {
+				ret = BACKREF_FOUND_SHARED_ROOT;
+				goto out;
+			}
+		}
+		if (ref->count && ref->parent) {
+			if (ref->level == 0 &&
+			    ref->key_for_search.type == 0) {
+				struct extent_buffer *eb;
+				eb = read_tree_block(fs_info->extent_root,
+						    ref->parent, fs_info->extent_root->leafsize, 0);
+				if (!eb || !extent_buffer_uptodate(eb)) {
+					free_extent_buffer(eb);
+					ret = -EIO;
+					goto out;
+				}
+				btrfs_tree_read_lock(eb);
+				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+				ret = check_first_ref(eb, bytenr, inum, offset);
+				btrfs_tree_read_unlock_blocking(eb);
+				free_extent_buffer(eb);
+				if (!ret) {
+					ret = BACKREF_NEXT_ITEM;
+					goto out;
+				}
+			}
+			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
+			if (ret < 0)
+				goto out;
+		}
+		list_del(&ref->list);
+		kmem_cache_free(btrfs_prelim_ref_cache, ref);
+	}
+
+out:
+	btrfs_free_path(path);
+	while (!list_empty(&prefs)) {
+		ref = list_first_entry(&prefs, struct __prelim_ref, list);
+		list_del(&ref->list);
+		kmem_cache_free(btrfs_prelim_ref_cache, ref);
+	}
+	return ret;
+}
+#endif  
+
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
 			     struct btrfs_fs_info *fs_info, u64 bytenr,
 			     u64 time_seq, struct ulist *refs,
 			     struct ulist *roots, const u64 *extent_item_pos,
+#ifdef MY_ABC_HERE
+			     u64 root_objectid, u64 inum, enum btrfs_backref_mode mode,
+			     int in_run_delayed)
+#else
 			     u64 root_objectid, u64 inum)
+#endif  
 {
 	struct btrfs_key key;
 	struct btrfs_path *path;
@@ -774,6 +1239,9 @@
 	struct __prelim_ref *ref;
 	struct extent_inode_elem *eie = NULL;
 	u64 total_refs = 0;
+#ifdef MY_ABC_HERE
+	u64 num_bytes = 0x10000000;  
+#endif  
 
 	INIT_LIST_HEAD(&prefs);
 	INIT_LIST_HEAD(&prefs_delayed);
@@ -811,6 +1279,11 @@
 		spin_lock(&delayed_refs->lock);
 		head = btrfs_find_delayed_ref_head(trans, bytenr);
 		if (head) {
+#ifdef MY_ABC_HERE
+			if (in_run_delayed) {
+				 
+			} else
+#endif  
 			if (!mutex_trylock(&head->mutex)) {
 				atomic_inc(&head->node.refs);
 				spin_unlock(&delayed_refs->lock);
@@ -825,7 +1298,12 @@
 			spin_unlock(&delayed_refs->lock);
 			ret = __add_delayed_refs(head, time_seq,
 						 &prefs_delayed, &total_refs,
+#ifdef MY_ABC_HERE
+						 root_objectid, inum, mode);
+			if (!in_run_delayed)
+#else
 						 inum);
+#endif  
 			mutex_unlock(&head->mutex);
 			if (ret)
 				goto out;
@@ -845,13 +1323,33 @@
 		if (key.objectid == bytenr &&
 		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
 		     key.type == BTRFS_METADATA_ITEM_KEY)) {
+#ifdef MY_ABC_HERE
+			num_bytes = key.offset;
+#endif  
 			ret = __add_inline_refs(fs_info, path, bytenr,
 						&info_level, &prefs,
+#ifdef MY_ABC_HERE
+						NULL, NULL, NULL, NULL, NULL,
+#endif  
+#ifdef MY_ABC_HERE
+						&total_refs, root_objectid,
+						inum, mode);
+#else
 						&total_refs, inum);
+#endif  
 			if (ret)
 				goto out;
 			ret = __add_keyed_refs(fs_info, path, bytenr,
+#ifdef MY_ABC_HERE
+					       NULL, NULL, NULL, NULL, NULL,
+#endif  
+#ifdef MY_ABC_HERE
+					       info_level, &prefs,
+					       &total_refs, root_objectid,
+					       inum, mode);
+#else
 					       info_level, &prefs, inum);
+#endif  
 			if (ret)
 				goto out;
 		}
@@ -868,7 +1366,15 @@
 
 	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
 				      extent_item_pos, total_refs,
+#ifdef MY_ABC_HERE
+				      root_objectid,
+#ifdef MY_ABC_HERE
+				      0, 0, 0,
+#endif  
+				      mode, num_bytes);
+#else
 				      root_objectid);
+#endif  
 	if (ret)
 		goto out;
 
@@ -977,7 +1483,11 @@
 		return -ENOMEM;
 
 	ret = find_parent_nodes(trans, fs_info, bytenr,
+#ifdef MY_ABC_HERE
+				time_seq, *leafs, NULL, extent_item_pos, 0, 0, 0, 0);
+#else
 				time_seq, *leafs, NULL, extent_item_pos, 0, 0);
+#endif  
 	if (ret < 0 && ret != -ENOENT) {
 		free_leaf_list(*leafs);
 		return ret;
@@ -988,7 +1498,12 @@
 
 static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 				  struct btrfs_fs_info *fs_info, u64 bytenr,
+#ifdef MY_ABC_HERE
+				  u64 time_seq, struct ulist **roots,
+				  u64 root_objectid, enum btrfs_backref_mode mode)
+#else
 				  u64 time_seq, struct ulist **roots)
+#endif  
 {
 	struct ulist *tmp;
 	struct ulist_node *node = NULL;
@@ -1007,7 +1522,12 @@
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
 		ret = find_parent_nodes(trans, fs_info, bytenr,
+#ifdef MY_ABC_HERE
+					time_seq, tmp, *roots, NULL,
+					0, 0, mode, 0);
+#else
 					time_seq, tmp, *roots, NULL, 0, 0);
+#endif  
 		if (ret < 0 && ret != -ENOENT) {
 			ulist_free(tmp);
 			ulist_free(*roots);
@@ -1024,6 +1544,68 @@
 	return 0;
 }
 
+#ifdef MY_ABC_HERE
+ 
+static int __btrfs_find_all_roots_shared(struct btrfs_fs_info *fs_info,
+				  u64 bytenr, u64 leaf_bytenr, u64 datao, struct ulist *roots,
+				  u64 root_id, u64 inum, u64 file_offset)
+{
+	struct ulist *tmp;
+	struct ulist_node *node = NULL;
+	struct ulist_iterator uiter;
+	int ret = 0;
+
+	tmp = ulist_alloc(GFP_NOFS);
+	if (!tmp)
+		return -ENOMEM;
+
+	ULIST_ITER_INIT(&uiter);
+	while (1) {
+		ret = find_parent_nodes_shared_root(fs_info, bytenr, leaf_bytenr, datao,
+					tmp, roots, root_id, inum, file_offset);
+		if (ret == BACKREF_NEXT_ITEM || ret == BACKREF_FOUND_SHARED_ROOT) {
+			ulist_free(tmp);
+			return ret;
+		}
+		if (ret < 0 && ret != -ENOENT) {
+			ulist_free(tmp);
+			return ret;
+		}
+
+		node = ulist_next(tmp, &uiter);
+		if (!node)
+			break;
+		bytenr = node->val;
+		cond_resched();
+	}
+
+	ulist_free(tmp);
+	return 0;
+}
+
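+/*
+ * Matches the callback signature of btrfs_snapshot_size_query().  Under
+ * commit_root_sem, resolve all backrefs of @bytenr and return 0 when the
+ * extent is referenced only by roots in @root_list,
+ * BACKREF_FOUND_SHARED_ROOT when another root still references it, or
+ * BACKREF_NEXT_ITEM when a later item will account for it.
+ */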
+int btrfs_find_shared_root(struct btrfs_fs_info *fs_info, u64 bytenr, u64 datao,
+			 struct ulist *root_list, struct btrfs_snapshot_size_entry *entry,
+			 struct btrfs_snapshot_size_ctx *ctx)
+{
+	int ret;
+	u64 leaf_bytenr = 0;
+
+	if (entry->level == 0)
+		leaf_bytenr = entry->path->nodes[0]->start;
+
+	down_read(&fs_info->commit_root_sem);
+	ret = __btrfs_find_all_roots_shared(fs_info, bytenr, leaf_bytenr, datao,
+						root_list, entry->root_id, entry->key.objectid,
+						entry->key.offset);
+	up_read(&fs_info->commit_root_sem);
+
+	WARN_ON(ret > 0 && ret != BACKREF_NEXT_ITEM && ret != BACKREF_FOUND_SHARED_ROOT);
+
+	return ret;
+}
+
+#endif  
+
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 			 struct btrfs_fs_info *fs_info, u64 bytenr,
 			 u64 time_seq, struct ulist **roots)
@@ -1032,7 +1614,11 @@
 
 	if (!trans)
 		down_read(&fs_info->commit_root_sem);
+#ifdef MY_ABC_HERE
+	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots, 0, 0);
+#else
 	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
+#endif  
 	if (!trans)
 		up_read(&fs_info->commit_root_sem);
 	return ret;
@@ -1064,7 +1650,11 @@
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
+#ifdef MY_ABC_HERE
+					roots, NULL, root_objectid, inum, 0, 0);
+#else
 					roots, NULL, root_objectid, inum);
+#endif  
 		if (ret == BACKREF_FOUND_SHARED) {
 			 
 			ret = 1;
@@ -1182,7 +1772,8 @@
 			read_extent_buffer(eb, dest + bytes_left,
 					   name_off, name_len);
 		if (eb != eb_in) {
-			btrfs_tree_read_unlock_blocking(eb);
+			if (!path->skip_locking)
+				btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
 		}
 		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
@@ -1200,9 +1791,10 @@
 		eb = path->nodes[0];
 		 
 		if (eb != eb_in) {
-			atomic_inc(&eb->refs);
-			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			if (!path->skip_locking)
+				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			path->nodes[0] = NULL;
+			path->locks[0] = 0;
 		}
 		btrfs_release_path(path);
 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
@@ -1342,7 +1934,6 @@
 {
 	int ret;
 	int type;
-	struct btrfs_tree_block_info *info;
 	struct btrfs_extent_inline_ref *eiref;
 
 	if (*ptr == (unsigned long)-1)
@@ -1362,9 +1953,17 @@
 			return 1;
 	}
 
-	info = (struct btrfs_tree_block_info *)(ei + 1);
 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
-	*out_level = btrfs_tree_block_level(eb, info);
+
+	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+		struct btrfs_tree_block_info *info;
+
+		info = (struct btrfs_tree_block_info *)(ei + 1);
+		*out_level = btrfs_tree_block_level(eb, info);
+	} else {
+		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+		*out_level = (u8)key->offset;
+	}
 
 	if (ret == 1)
 		*ptr = (unsigned long)-1;
@@ -1430,7 +2029,11 @@
 	ULIST_ITER_INIT(&ref_uiter);
 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
 		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
+#ifdef MY_ABC_HERE
+					     tree_mod_seq_elem.seq, &roots, 0, 0);
+#else
 					     tree_mod_seq_elem.seq, &roots);
+#endif  
 		if (ret)
 			break;
 		ULIST_ITER_INIT(&root_uiter);
diff -ur a/fs/btrfs/backref.h b/fs/btrfs/backref.h
--- a/fs/btrfs/backref.h	2017-03-23 15:07:55.000000000 +0100
+++ b/fs/btrfs/backref.h	2017-03-14 02:45:53.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
  
 #ifndef __BTRFS_BACKREF__
 #define __BTRFS_BACKREF__
@@ -37,6 +40,12 @@
 
 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
 
+#ifdef MY_ABC_HERE
+int btrfs_find_shared_root(struct btrfs_fs_info *fs_info,
+			 u64 bytenr, u64 datao, struct ulist *root_list,
+			 struct btrfs_snapshot_size_entry *entry,
+			 struct btrfs_snapshot_size_ctx *ctx);
+#endif  
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 			 struct btrfs_fs_info *fs_info, u64 bytenr,
 			 u64 time_seq, struct ulist **roots);
diff -ur a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
--- a/fs/btrfs/btrfs_inode.h	2017-03-23 15:07:56.000000000 +0100
+++ b/fs/btrfs/btrfs_inode.h	2017-03-14 02:45:54.000000000 +0100
@@ -23,6 +23,9 @@
 #define BTRFS_INODE_IN_DELALLOC_LIST		9
 #define BTRFS_INODE_READDIO_NEED_LOCK		10
 #define BTRFS_INODE_HAS_PROPS		        11
+#ifdef MY_ABC_HERE
+#define BTRFS_INODE_IN_SYNO_DEFRAG			31
+#endif  
  
 #define BTRFS_INODE_BTREE_ERR		        12
 #define BTRFS_INODE_BTREE_LOG1_ERR		13
diff -ur a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
--- a/fs/btrfs/ctree.c	2017-03-23 15:08:00.000000000 +0100
+++ b/fs/btrfs/ctree.c	2017-03-14 02:45:56.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 /*
  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
  *
@@ -213,11 +216,19 @@
  */
 static void add_root_to_dirty_list(struct btrfs_root *root)
 {
+	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
+	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
+		return;
+
 	spin_lock(&root->fs_info->trans_lock);
-	if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
-	    list_empty(&root->dirty_list)) {
-		list_add(&root->dirty_list,
-			 &root->fs_info->dirty_cowonly_roots);
+	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
+		/* Want the extent tree to be the last on the list */
+		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+			list_move_tail(&root->dirty_list,
+				       &root->fs_info->dirty_cowonly_roots);
+		else
+			list_move(&root->dirty_list,
+				  &root->fs_info->dirty_cowonly_roots);
 	}
 	spin_unlock(&root->fs_info->trans_lock);
 }
@@ -1775,10 +1786,12 @@
 			if (!err) {
 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
 							map_start);
-			} else {
+			} else if (err == 1) {
 				read_extent_buffer(eb, &unaligned,
 						   offset, sizeof(unaligned));
 				tmp = &unaligned;
+			} else {
+				return err;
 			}
 
 		} else {
@@ -2828,6 +2841,8 @@
 		}
 
 		ret = key_search(b, key, level, &prev_cmp, &slot);
+		if (ret < 0)
+			goto done;
 
 		if (level != 0) {
 			int dec = 0;
@@ -5319,6 +5334,9 @@
 
 #define ADVANCE 1
 #define ADVANCE_ONLY_NEXT -1
+#ifdef MY_ABC_HERE
+#define ADVANCE_ONLY_UPNEXT 0
+#endif /* MY_ABC_HERE */
 
 /*
  * This function compares two trees and calls the provided callback for
@@ -5576,6 +5594,336 @@
 	return ret;
 }
 
+#ifdef MY_ABC_HERE
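+/*
+ * Pop out of the current node without visiting its children: free it,
+ * move up one level and advance to the next slot, repeating until a valid
+ * slot is found.  Returns -1 once the root level is reached and the walk
+ * of this tree is done.
+ */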
+static int tree_move_upnext(struct btrfs_root *root,
+				    struct btrfs_path *path,
+				    int *level, int root_level)
+{
+	int nritems;
+	nritems = btrfs_header_nritems(path->nodes[*level]);
+
+	do {
+		if (*level == root_level)
+			return -1;
+		path->slots[*level] = 0;
+		free_extent_buffer(path->nodes[*level]);
+		path->nodes[*level] = NULL;
+		(*level)++;
+
+		path->slots[*level]++;
+
+		nritems = btrfs_header_nritems(path->nodes[*level]);
+	} while (path->slots[*level] >= nritems);
+
+	return 0;
+}
+
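+/*
+ * Advance one step of the walk according to @mode (up-and-next,
+ * next-or-up, or down) and reload @key from the new position.
+ * Returns < 0 when this root has been fully traversed.
+ */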
+static int tree_advance_with_mode(struct btrfs_root *root,
+			struct btrfs_path *path,
+			int *level, int root_level,
+			int mode,
+			struct btrfs_key *key)
+{
+	int ret;
+
+	if (mode == ADVANCE_ONLY_UPNEXT) {
+		ret = tree_move_upnext(root, path, level, root_level);
+	} else if (*level == 0 || mode == ADVANCE_ONLY_NEXT) {
+		ret = tree_move_next_or_upnext(root, path, level, root_level);
+	} else {
+		tree_move_down(root, path, level, root_level);
+		ret = 0;
+	}
+	if (ret >= 0) {
+		if (*level == 0)
+			btrfs_item_key_to_cpu(path->nodes[*level], key,
+					path->slots[*level]);
+		else
+			btrfs_node_key_to_cpu(path->nodes[*level], key,
+					path->slots[*level]);
+	}
+	return ret;
+}
+
+static int compare_snapshot_entry(struct btrfs_snapshot_size_entry *e1,
+		struct btrfs_snapshot_size_entry *e2)
+{
+	int cmp;
+	u64 e1_blockptr;
+	u64 e2_blockptr;
+	u64 e1_gen;
+	u64 e2_gen;
+
+	/*
+	 * We process the entries in the following order:
+	 * 1. lower key first
+	 * 2. "higher" level first
+	 * 3. lower block bytenr first
+	 */
+	cmp = btrfs_comp_cpu_keys(&e1->key, &e2->key);
+	if (cmp != 0)
+		return cmp;
+	if (e1->level > e2->level)
+		return -1;
+	if (e1->level < e2->level)
+		return 1;
+	if (e1->level != 0) {
+		e1_blockptr = btrfs_node_blockptr(
+				e1->path->nodes[e1->level],
+				e1->path->slots[e1->level]);
+		e2_blockptr = btrfs_node_blockptr(
+				e2->path->nodes[e2->level],
+				e2->path->slots[e2->level]);
+		e1_gen = btrfs_node_ptr_generation(
+				e1->path->nodes[e1->level],
+				e1->path->slots[e1->level]);
+		e2_gen = btrfs_node_ptr_generation(
+				e2->path->nodes[e2->level],
+				e2->path->slots[e2->level]);
+		if (e1_blockptr == e2_blockptr &&
+		    e1_gen == e2_gen)
+			return 0;
+		if (e1_blockptr < e2_blockptr)
+			return -1;
+		if (e1_blockptr > e2_blockptr)
+			return 1;
+		else
+			WARN_ON(1);
+	} else {
+		e1_blockptr = e1->path->nodes[e1->level]->start;
+		e2_blockptr = e2->path->nodes[e2->level]->start;
+		if (e1_blockptr < e2_blockptr)
+			return -1;
+		if (e1_blockptr > e2_blockptr)
+			return 1;
+	}
+	return 0;
+}
+
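+/*
+ * Link *@insert into the per-query rbtree ordered by compare_snapshot_entry().
+ * Returns 0 on success and 1 when an equal entry (same key, same block)
+ * already exists.  With @replace set, the entry with the smaller subvolume
+ * id stays in the tree and the other one is handed back via @insert so the
+ * caller can advance it.
+ */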
+static int snap_entry_insert(struct btrfs_snapshot_size_ctx *ctx,
+			struct btrfs_snapshot_size_entry **insert, int replace)
+{
+	struct rb_node **p = &ctx->root.rb_node;
+	struct rb_node *parent_node = NULL;
+	struct btrfs_snapshot_size_entry *entry;
+	int cmp = 0;
+
+	while (*p) {
+		parent_node = *p;
+		entry = rb_entry(parent_node, struct btrfs_snapshot_size_entry, node);
+
+		cmp = compare_snapshot_entry(*insert, entry);
+		if (cmp < 0) {
+			p = &(*p)->rb_left;
+		} else if (cmp > 0) {
+			p = &(*p)->rb_right;
+		} else {
+			/*
+			 * If the newly added entry has the same key as an existing node
+			 * in the rbtree and a smaller subvolume id, keep the new entry in
+			 * the tree and advance the existing node instead.
+			 */
+			if (replace && (*insert)->root_id < entry->root_id) {
+				rb_replace_node(*p, &(*insert)->node, &ctx->root);
+				*insert = entry;
+			}
+			return 1;
+		}
+	}
+
+	rb_link_node(&(*insert)->node, parent_node, p);
+	rb_insert_color(&(*insert)->node, &ctx->root);
+	return 0;
+}
+
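+/*
+ * Merge-walk all snapshot trees listed in @snap_args in key order, using an
+ * rbtree of btrfs_snapshot_size_entry.  Subtrees above level 2 that @cb
+ * reports as shared (or already accounted) are skipped in one step; for each
+ * regular/prealloc EXTENT_DATA item, @cb decides whether the extent is
+ * exclusive to the roots in @roots and, if so, its disk_num_bytes is added
+ * to the total returned in snap_args->calc_size.
+ */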
+int btrfs_snapshot_size_query(struct file *file,
+		struct btrfs_ioctl_snapshot_size_query_args *snap_args,
+		struct ulist *roots,
+		int (*cb)(struct btrfs_fs_info *, u64,
+			      u64, struct ulist *,
+			      struct btrfs_snapshot_size_entry *,
+			      struct btrfs_snapshot_size_ctx *))
+{
+	int i;
+	int ret = 0;
+	int level;
+	int nritems;
+	u64 snap_count = snap_args->snap_count;
+	struct btrfs_root *snap_root = NULL;
+	struct btrfs_key location;
+	struct btrfs_fs_info *fs_info = BTRFS_I(file_inode(file))->root->fs_info;
+	struct rb_node *node;
+	struct btrfs_snapshot_size_ctx *ctx;
+	struct btrfs_snapshot_size_entry *entry;
+
+	ctx = kzalloc(sizeof(*ctx) + sizeof(struct btrfs_snapshot_size_entry) * snap_count, GFP_KERNEL);
+	if (!ctx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ctx->root = RB_ROOT;
+
+	for (i = 0; i < snap_count; ++i) {
+		location.objectid = snap_args->snap_id[i];
+		location.type = BTRFS_ROOT_ITEM_KEY;
+		location.offset = (u64) -1;
+		snap_root = btrfs_read_fs_root_no_name(fs_info, &location);
+		if (IS_ERR(snap_root)) {
+			ret = PTR_ERR(snap_root);
+			goto out;
+		}
+		if (btrfs_root_dead(snap_root)) {
+			ret = -EPERM;
+			goto out;
+		}
+		if (!btrfs_root_readonly(snap_root)) {
+			ret = -EPERM;
+			goto out;
+		}
+		entry = &ctx->snaps[i];
+		entry->root_id = snap_args->snap_id[i];
+		entry->root = snap_root;
+		spin_lock(&snap_root->root_item_lock);
+		snap_root->send_in_progress++;
+		spin_unlock(&snap_root->root_item_lock);
+
+		entry->path = btrfs_alloc_path();
+		if (!ctx->snaps[i].path) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		entry->path->search_commit_root = 1;
+		entry->path->skip_locking = 1;
+
+		down_read(&snap_root->fs_info->commit_root_sem);
+		level = btrfs_header_level(snap_root->commit_root);
+		entry->root_level = entry->level = level;
+		entry->path->nodes[level] = snap_root->commit_root;
+		extent_buffer_get(entry->path->nodes[level]);
+		up_read(&snap_root->fs_info->commit_root_sem);
+
+		if (level == 0)
+			btrfs_item_key_to_cpu(entry->path->nodes[level],
+			&entry->key, entry->path->slots[level]);
+		else
+			btrfs_node_key_to_cpu(entry->path->nodes[level],
+			&entry->key, entry->path->slots[level]);
+
+		while (snap_entry_insert(ctx, &entry, 0)) {
+			if (0 > tree_advance_with_mode(entry->root, entry->path,
+				   &entry->level, entry->root_level, ADVANCE_ONLY_NEXT, &entry->key)) {
+				break;
+			}
+		}
+	}
+
+	while (!RB_EMPTY_ROOT(&ctx->root)) {
+		u64 bytenr;
+		int advance = ADVANCE;
+		int node_removed = 0;
+		struct btrfs_snapshot_size_entry *next_entry;
+		struct rb_node *next_node;
+
+		ret = 0;
+		node = rb_first(&ctx->root);
+		entry = rb_entry(node, struct btrfs_snapshot_size_entry, node);
+		/*
+		 * Only skip nodes above level 2. If we can skip one shared node at
+		 * level 3, we save the time of traversing levels 0~2 beneath it.
+		 * Shared nodes usually occur at the higher levels.
+		 */
+		if (entry->path->slots[entry->level] == 0 && entry->level > 2) {
+			bytenr = entry->path->nodes[entry->level]->start;
+			ret = cb(fs_info, bytenr, 0, roots, entry, ctx);
+			if (ret < 0) {
+				goto out;
+			} else if (ret > 0) {
+				advance = ADVANCE_ONLY_UPNEXT;
+				goto advance;
+			}
+		}
+
+		nritems = btrfs_header_nritems(entry->path->nodes[entry->level]);
+		while (entry->level == 0 && entry->key.type != BTRFS_EXTENT_DATA_KEY) {
+			if (0 > tree_advance_with_mode(entry->root, entry->path, &entry->level,
+					entry->root_level, ADVANCE, &entry->key)) {
+				rb_erase(node, &ctx->root);
+				node_removed = 1;
+				break;
+			}
+			if (entry->path->slots[entry->level] == nritems - 1)
+				break;
+		}
+		if (node_removed)
+			continue;
+
+		/*
+		 * OK, the leaf containing the BTRFS_EXTENT_DATA_KEY items is not
+		 * shared. Now check the EXTENT_ITEM of each extent itself.
+		 */
+		if (entry->level == 0 && entry->key.type == BTRFS_EXTENT_DATA_KEY) {
+			struct btrfs_file_extent_item *ei;
+			u64 datao;
+			u8 type;
+
+			ei = btrfs_item_ptr(entry->path->nodes[0], entry->path->slots[0],
+					    struct btrfs_file_extent_item);
+			bytenr = btrfs_file_extent_disk_bytenr(entry->path->nodes[0], ei);
+			datao = btrfs_file_extent_offset(entry->path->nodes[0], ei);
+			type = btrfs_file_extent_type(entry->path->nodes[0], ei);
+			if (type == BTRFS_FILE_EXTENT_PREALLOC ||
+			    (type == BTRFS_FILE_EXTENT_REG && bytenr != 0)) {
+				ret = cb(fs_info, bytenr, datao, roots, entry, ctx);
+				if (0 > ret)
+					goto out;
+				if (ret == 0)
+					ctx->size += btrfs_file_extent_disk_num_bytes(entry->path->nodes[0], ei);
+			}
+		}
+advance:
+		next_node = rb_next(node);
+		if (next_node)
+			next_entry = rb_entry(next_node, struct btrfs_snapshot_size_entry, node);
+		do {
+			int cmp = 0;
+			if (0 > tree_advance_with_mode(entry->root, entry->path,
+					&entry->level, entry->root_level, advance, &entry->key)) {
+				rb_erase(node, &ctx->root);
+				break;
+			}
+			if (!next_node)
+				break;
+			advance = ADVANCE_ONLY_NEXT;
+			if (node_removed)
+				continue;
+			cmp = compare_snapshot_entry(entry, next_entry);
+			/*
+			 * If, after advancing, this entry is still the lowest key in the
+			 * tree, don't remove and re-insert it; that would just waste time.
+			 */
+			if (cmp < 0)
+				break;
+			rb_erase(node, &ctx->root);
+			node_removed = 1;
+		} while (snap_entry_insert(ctx, &entry, 1));
+	}
+out:
+	if (ctx) {
+		snap_args->calc_size = ctx->size;
+		for (i = 0; i < snap_count; ++i) {
+			btrfs_free_path(ctx->snaps[i].path);
+			if (ctx->snaps[i].root) {
+				spin_lock(&ctx->snaps[i].root->root_item_lock);
+				ctx->snaps[i].root->send_in_progress--;
+				spin_unlock(&ctx->snaps[i].root->root_item_lock);
+			}
+		}
+	}
+	kfree(ctx);
+	return ret;
+}
+#endif /* MY_ABC_HERE */
+
 /*
  * this is similar to btrfs_next_leaf, but does not try to preserve
  * and fixup the path.  It looks for and returns the next key in the
diff -ur a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
--- a/fs/btrfs/ctree.h	2017-03-23 15:08:03.000000000 +0100
+++ b/fs/btrfs/ctree.h	2017-03-14 02:46:00.000000000 +0100
@@ -854,6 +854,8 @@
 	struct percpu_counter total_bytes_pinned;
 
 	struct list_head list;
+	 
+	struct list_head ro_bgs;
 
 	struct rw_semaphore groups_sem;
 	 
@@ -915,7 +917,6 @@
 	BTRFS_DC_ERROR		= 1,
 	BTRFS_DC_CLEAR		= 2,
 	BTRFS_DC_SETUP		= 3,
-	BTRFS_DC_NEED_WRITE	= 4,
 };
 
 struct btrfs_caching_control {
@@ -928,6 +929,20 @@
 	atomic_t count;
 };
 
+struct btrfs_io_ctl {
+	void *cur, *orig;
+	struct page *page;
+	struct page **pages;
+	struct btrfs_root *root;
+	struct inode *inode;
+	unsigned long size;
+	int index;
+	int num_pages;
+	int entries;
+	int bitmaps;
+	unsigned check_crcs:1;
+};
+
 struct btrfs_block_group_cache {
 	struct btrfs_key key;
 	struct btrfs_block_group_item item;
@@ -947,7 +962,6 @@
 	unsigned long full_stripe_len;
 
 	unsigned int ro:1;
-	unsigned int dirty:1;
 	unsigned int iref:1;
 	unsigned int has_caching_ctl:1;
 	unsigned int removed:1;
@@ -972,7 +986,14 @@
 
 	struct list_head bg_list;
 
+	struct list_head ro_list;
+
 	atomic_t trimming;
+
+	struct list_head dirty_list;
+	struct list_head io_list;
+
+	struct btrfs_io_ctl io_ctl;
 };
 
 struct seq_list {
@@ -1089,6 +1110,8 @@
 	struct mutex chunk_mutex;
 	struct mutex volume_mutex;
 
+	struct mutex ro_block_group_mutex;
+
 	struct btrfs_stripe_hash_table *stripe_hash_table;
 
 	struct mutex ordered_operations_mutex;
@@ -1322,6 +1345,7 @@
 #define BTRFS_ROOT_DEFRAG_RUNNING	6
 #define BTRFS_ROOT_FORCE_COW		7
 #define BTRFS_ROOT_MULTI_LOG_TASKS	8
+#define BTRFS_ROOT_DIRTY		9
 
 struct btrfs_root {
 	struct extent_buffer *node;
@@ -1450,9 +1474,36 @@
 
 	__u32 compress_type;
 
+#ifdef MY_ABC_HERE
+	 
+	__u16 syno_thresh;
+	 
+	__u8 syno_ratio_denom;
+	__u8 syno_ratio_nom;
+	__u32 unused[3];
+#else
 	__u32 unused[4];
+#endif  
 };
 
+#ifdef MY_ABC_HERE
+struct btrfs_snapshot_size_entry {
+	u64 root_id;
+	struct btrfs_root *root;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct rb_node node;
+	int root_level;
+	int level;
+};
+
+struct btrfs_snapshot_size_ctx {
+	u64 size;
+	struct rb_root root;
+	struct btrfs_snapshot_size_entry snaps[0];
+};
+#endif  
+
 #define BTRFS_INODE_ITEM_KEY		1
 #define BTRFS_INODE_REF_KEY		12
 #define BTRFS_INODE_EXTREF_KEY		13
@@ -2752,8 +2803,13 @@
 			 struct btrfs_root *root,
 			 u64 bytenr, u64 num_bytes, u64 parent,
 			 u64 root_objectid, u64 owner, u64 offset, int no_quota);
+
+int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root);
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 				    struct btrfs_root *root);
+int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root);
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
 int btrfs_free_block_groups(struct btrfs_fs_info *info);
 int btrfs_read_block_groups(struct btrfs_root *root);
@@ -2882,6 +2938,15 @@
 int btrfs_compare_trees(struct btrfs_root *left_root,
 			struct btrfs_root *right_root,
 			btrfs_changed_cb_t cb, void *ctx);
+#ifdef MY_ABC_HERE
+int btrfs_snapshot_size_query(struct file *file,
+				  struct btrfs_ioctl_snapshot_size_query_args *snap_args,
+				  struct ulist *root_list,
+				  int (*cb)(struct btrfs_fs_info *, u64,
+				            u64, struct ulist *,
+				            struct btrfs_snapshot_size_entry *,
+				            struct btrfs_snapshot_size_ctx *));
+#endif  
 int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		    struct btrfs_root *root, struct extent_buffer *buf,
 		    struct extent_buffer *parent, int parent_slot,
@@ -3306,6 +3371,7 @@
 		      struct page **pages, size_t num_pages,
 		      loff_t pos, size_t write_bytes,
 		      struct extent_state **cached);
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
 
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root);
diff -ur a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
--- a/fs/btrfs/disk-io.c	2017-03-23 15:07:59.000000000 +0100
+++ b/fs/btrfs/disk-io.c	2017-03-14 02:45:55.000000000 +0100
@@ -491,6 +491,7 @@
 		goto err;
 
 	eb->read_mirror = mirror;
+
 	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
 		ret = -EIO;
 		goto err;
@@ -2184,6 +2185,7 @@
 	mutex_init(&fs_info->transaction_kthread_mutex);
 	mutex_init(&fs_info->cleaner_mutex);
 	mutex_init(&fs_info->volume_mutex);
+	mutex_init(&fs_info->ro_block_group_mutex);
 	init_rwsem(&fs_info->commit_root_sem);
 	init_rwsem(&fs_info->cleanup_work_sem);
 	init_rwsem(&fs_info->subvol_sem);
@@ -2200,7 +2202,10 @@
 #ifdef MY_ABC_HERE
 	fs_info->ordered_extent_nr = 0;
 	fs_info->delalloc_inodes_nr = 0;
-	fs_info->flushoncommit_threshold = 1000;
+	if (totalram_pages > ((2ULL*1024*1024*1024)/PAGE_SIZE))
+		fs_info->flushoncommit_threshold = 0;
+	else
+		fs_info->flushoncommit_threshold = 1000;
 #endif
 
 	spin_lock_init(&fs_info->qgroup_lock);
@@ -2348,6 +2353,7 @@
 		       "unsupported option features (%Lx).\n",
 		       features);
 		err = -EINVAL;
+		brelse(bh);
 		goto fail_alloc;
 	}
 
diff -ur a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
--- a/fs/btrfs/extent_io.c	2017-03-23 15:08:01.000000000 +0100
+++ b/fs/btrfs/extent_io.c	2017-03-14 02:45:57.000000000 +0100
@@ -1712,8 +1712,7 @@
 	int in_validation;
 };
 
-static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
-				int did_repair)
+static int free_io_failure(struct inode *inode, struct io_failure_record *rec)
 {
 	int ret;
 	int err = 0;
@@ -1810,7 +1809,6 @@
 	u64 start = eb->start;
 	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
 	int ret = 0;
-
 	if (root->fs_info->sb->s_flags & MS_RDONLY)
 		return -EROFS;
 
@@ -1835,7 +1833,6 @@
 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 	struct extent_state *state;
 	int num_copies;
-	int did_repair = 0;
 	int ret;
 
 	private = 0;
@@ -1856,7 +1853,6 @@
 		 
 		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
 			 failrec->start);
-		did_repair = 1;
 		goto out;
 	}
 	if (fs_info->sb->s_flags & MS_RDONLY)
@@ -1873,19 +1869,44 @@
 		num_copies = btrfs_num_copies(fs_info, failrec->logical,
 					      failrec->len);
 		if (num_copies > 1)  {
-			ret = repair_io_failure(fs_info, start, failrec->len,
-						failrec->logical, page,
-						failrec->failed_mirror);
-			did_repair = !ret;
+			repair_io_failure(fs_info, start, failrec->len,
+					  failrec->logical, page,
+					  failrec->failed_mirror);
 		}
-		ret = 0;
 	}
 
 out:
-	if (!ret)
-		ret = free_io_failure(inode, failrec, did_repair);
+	free_io_failure(inode, failrec);
 
-	return ret;
+	return 0;
+}
+
+void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
+{
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+	struct io_failure_record *failrec;
+	struct extent_state *state, *next;
+
+	if (RB_EMPTY_ROOT(&failure_tree->state))
+		return;
+
+	spin_lock(&failure_tree->lock);
+	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
+	while (state) {
+		if (state->start > end)
+			break;
+
+		ASSERT(state->end <= end);
+
+		next = next_state(state);
+
+		failrec = (struct io_failure_record *)state->private;
+		free_extent_state(state);
+		kfree(failrec);
+
+		state = next;
+	}
+	spin_unlock(&failure_tree->lock);
 }
 
 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
@@ -1972,13 +1993,14 @@
 			 failrec->in_validation);
 		 
 	}
+
 	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
 				      failrec->logical, failrec->len);
 	if (num_copies == 1) {
 		 
 		pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
 			 num_copies, failrec->this_mirror, failed_mirror);
-		free_io_failure(inode, failrec, 0);
+		free_io_failure(inode, failrec);
 		return -EIO;
 	}
 
@@ -2005,13 +2027,13 @@
 	if (failrec->this_mirror > num_copies) {
 		pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
 			 num_copies, failrec->this_mirror, failed_mirror);
-		free_io_failure(inode, failrec, 0);
+		free_io_failure(inode, failrec);
 		return -EIO;
 	}
 
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
 	if (!bio) {
-		free_io_failure(inode, failrec, 0);
+		free_io_failure(inode, failrec);
 		return -EIO;
 	}
 	bio->bi_end_io = failed_bio->bi_end_io;
@@ -2041,6 +2063,11 @@
 	ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
 					 failrec->this_mirror,
 					 failrec->bio_flags, 0);
+	if (ret) {
+		free_io_failure(inode, failrec);
+		bio_put(bio);
+	}
+
 	return ret;
 }
 
@@ -2131,7 +2158,6 @@
 	u64 extent_len = 0;
 	int mirror;
 	int ret;
-
 	if (err)
 		uptodate = 0;
 
@@ -2461,7 +2487,7 @@
 	sector_t sector;
 	struct extent_map *em;
 	struct block_device *bdev;
-	int ret;
+	int ret = 0;
 	int nr = 0;
 	int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
 	size_t pg_offset = 0;
@@ -2599,6 +2625,7 @@
 			SetPageError(page);
 			if (!parent_locked)
 				unlock_extent(tree, cur, cur + iosize - 1);
+			goto out;
 		}
 		cur = cur + iosize;
 		pg_offset += iosize;
@@ -2609,7 +2636,7 @@
 			SetPageUptodate(page);
 		unlock_page(page);
 	}
-	return 0;
+	return ret;
 }
 
 static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
@@ -3850,8 +3877,11 @@
 		}
 		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
 					      em_len, flags);
-		if (ret)
+		if (ret) {
+			if (ret == 1)
+				ret = 0;
 			goto out_free;
+		}
 	}
 out_free:
 	free_extent_map(em);
@@ -4446,6 +4476,7 @@
 			all_uptodate = 0;
 		}
 	}
+
 	if (all_uptodate) {
 		if (start_i == 0)
 			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -4457,14 +4488,24 @@
 	atomic_set(&eb->io_pages, num_reads);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+
 		if (!PageUptodate(page)) {
+			if (ret) {
+				atomic_dec(&eb->io_pages);
+				unlock_page(page);
+				continue;
+			}
+
 			ClearPageError(page);
 			err = __extent_read_full_page(tree, page,
 						      get_extent, &bio,
 						      mirror_num, &bio_flags,
 						      READ | REQ_META);
-			if (err)
+			if (err) {
 				ret = err;
+				 
+				atomic_dec(&eb->io_pages);
+			}
 		} else {
 			unlock_page(page);
 		}
@@ -4582,7 +4623,7 @@
 		PAGE_CACHE_SHIFT;
 
 	if (i != end_i)
-		return -EINVAL;
+		return 1;
 
 	if (i == 0) {
 		offset = start_offset;
diff -ur a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
--- a/fs/btrfs/extent_io.h	2017-03-23 15:07:57.000000000 +0100
+++ b/fs/btrfs/extent_io.h	2017-03-14 02:45:55.000000000 +0100
@@ -333,6 +333,7 @@
 int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
 			 int mirror_num);
+void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 noinline u64 find_lock_delalloc_range(struct inode *inode,
 				      struct extent_io_tree *tree,
diff -ur a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
--- a/fs/btrfs/extent-tree.c	2017-03-23 15:08:04.000000000 +0100
+++ b/fs/btrfs/extent-tree.c	2017-03-14 02:45:59.000000000 +0100
@@ -42,8 +42,9 @@
 	RESERVE_ALLOC_NO_ACCOUNT = 2,
 };
 
-static int update_block_group(struct btrfs_root *root,
-			      u64 bytenr, u64 num_bytes, int alloc);
+static int update_block_group(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root, u64 bytenr,
+			      u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root,
 				u64 bytenr, u64 num_bytes, u64 parent,
@@ -482,6 +483,23 @@
 	btrfs_put_block_group(block_group);
 }
 
+#ifdef MY_ABC_HERE
+ 
+static noinline void
+syno_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache)
+{
+	struct btrfs_caching_control *caching_ctl;
+
+	caching_ctl = get_caching_control(cache);
+	if (!caching_ctl)
+		return;
+
+	wait_event(caching_ctl->wait, block_group_cache_done(cache));
+
+	put_caching_control(caching_ctl);
+}
+#endif  
+
 static int cache_block_group(struct btrfs_block_group_cache *cache,
 			     int load_cache_only)
 {
@@ -523,6 +541,9 @@
 	if (cache->cached != BTRFS_CACHE_NO) {
 		spin_unlock(&cache->lock);
 		kfree(caching_ctl);
+#ifdef MY_ABC_HERE
+		syno_wait_block_group_cache_progress(cache);
+#endif  
 		return 0;
 	}
 	WARN_ON(cache->caching_ctl);
@@ -582,6 +603,10 @@
 
 	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
 
+#ifdef MY_ABC_HERE
+	syno_wait_block_group_cache_progress(cache);
+#endif  
+
 	return ret;
 }
 
@@ -2783,21 +2808,19 @@
 	struct extent_buffer *leaf;
 
 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
-	if (ret < 0)
+	if (ret) {
+		if (ret > 0)
+			ret = -ENOENT;
 		goto fail;
-	BUG_ON(ret);  
+	}
 
 	leaf = path->nodes[0];
 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(path);
 fail:
-	if (ret) {
-		btrfs_abort_transaction(trans, root, ret);
-		return ret;
-	}
-	return 0;
+	btrfs_release_path(path);
+	return ret;
 
 }
 
@@ -2886,13 +2909,17 @@
 		if (ret)
 			goto out_put;
 
-		ret = btrfs_truncate_free_space_cache(root, trans, inode);
+		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
 		if (ret)
 			goto out_put;
 	}
 
 	spin_lock(&block_group->lock);
 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
+#ifdef MY_ABC_HERE
+		 
+		block_group->fs_info->log_root_recovering ||
+#endif  
 	    !btrfs_test_opt(root, SPACE_CACHE) ||
 	    block_group->delalloc_bytes) {
 		 
@@ -2934,112 +2961,224 @@
 	return ret;
 }
 
-int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root)
+int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root)
 {
-	struct btrfs_block_group_cache *cache;
-	int err = 0;
+	struct btrfs_block_group_cache *cache, *tmp;
+	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_path *path;
-	u64 last = 0;
+
+	if (list_empty(&cur_trans->dirty_bgs) ||
+	    !btrfs_test_opt(root, SPACE_CACHE))
+		return 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
+	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
+				 dirty_list) {
+		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
+			cache_save_setup(cache, trans, path);
+	}
+
+	btrfs_free_path(path);
+	return 0;
+}
+
+int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root)
+{
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_transaction *cur_trans = trans->transaction;
+	int ret = 0;
+	int should_put;
+	struct btrfs_path *path = NULL;
+	LIST_HEAD(dirty);
+	struct list_head *io = &cur_trans->io_bgs;
+	int num_started = 0;
+	int loops = 0;
+
+	spin_lock(&cur_trans->dirty_bgs_lock);
+	if (list_empty(&cur_trans->dirty_bgs)) {
+		spin_unlock(&cur_trans->dirty_bgs_lock);
+		return 0;
+	}
+	list_splice_init(&cur_trans->dirty_bgs, &dirty);
+	spin_unlock(&cur_trans->dirty_bgs_lock);
+
 again:
-	while (1) {
-		cache = btrfs_lookup_first_block_group(root->fs_info, last);
-		while (cache) {
-			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
-				break;
-			cache = next_block_group(root, cache);
-		}
-		if (!cache) {
-			if (last == 0)
-				break;
-			last = 0;
-			continue;
-		}
-		err = cache_save_setup(cache, trans, path);
-		last = cache->key.objectid + cache->key.offset;
-		btrfs_put_block_group(cache);
+	 
+	btrfs_create_pending_block_groups(trans, root);
+
+	if (!path) {
+		path = btrfs_alloc_path();
+		if (!path)
+			return -ENOMEM;
 	}
 
-	while (1) {
-		if (last == 0) {
-			err = btrfs_run_delayed_refs(trans, root,
-						     (unsigned long)-1);
-			if (err)  
-				goto out;
+	mutex_lock(&trans->transaction->cache_write_mutex);
+	while (!list_empty(&dirty)) {
+		cache = list_first_entry(&dirty,
+					 struct btrfs_block_group_cache,
+					 dirty_list);
+		 
+		if (!list_empty(&cache->io_list)) {
+			list_del_init(&cache->io_list);
+			btrfs_wait_cache_io(root, trans, cache,
+					    &cache->io_ctl, path,
+					    cache->key.objectid);
+			btrfs_put_block_group(cache);
 		}
 
-		cache = btrfs_lookup_first_block_group(root->fs_info, last);
-		while (cache) {
-			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
-				btrfs_put_block_group(cache);
-				goto again;
-			}
+		spin_lock(&cur_trans->dirty_bgs_lock);
+		list_del_init(&cache->dirty_list);
+		spin_unlock(&cur_trans->dirty_bgs_lock);
+
+		should_put = 1;
+
+		cache_save_setup(cache, trans, path);
+
+		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
+			cache->io_ctl.inode = NULL;
+			ret = btrfs_write_out_cache(root, trans, cache, path);
+			if (ret == 0 && cache->io_ctl.inode) {
+				num_started++;
+				should_put = 0;
 
-			if (cache->dirty)
-				break;
-			cache = next_block_group(root, cache);
+				list_add_tail(&cache->io_list, io);
+			} else {
+				 
+				ret = 0;
+			}
 		}
-		if (!cache) {
-			if (last == 0)
-				break;
-			last = 0;
-			continue;
+		if (!ret) {
+			ret = write_one_cache_group(trans, root, path, cache);
+			 
+			if (ret == -ENOENT) {
+				ret = 0;
+				spin_lock(&cur_trans->dirty_bgs_lock);
+				if (list_empty(&cache->dirty_list)) {
+					list_add_tail(&cache->dirty_list,
+						      &cur_trans->dirty_bgs);
+					btrfs_get_block_group(cache);
+				}
+				spin_unlock(&cur_trans->dirty_bgs_lock);
+			} else if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+			}
 		}
 
-		if (cache->disk_cache_state == BTRFS_DC_SETUP)
-			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
-		cache->dirty = 0;
-		last = cache->key.objectid + cache->key.offset;
+		if (should_put)
+			btrfs_put_block_group(cache);
 
-		err = write_one_cache_group(trans, root, path, cache);
-		btrfs_put_block_group(cache);
-		if (err)  
-			goto out;
+		if (ret)
+			break;
+
+		mutex_unlock(&trans->transaction->cache_write_mutex);
+		mutex_lock(&trans->transaction->cache_write_mutex);
 	}
+	mutex_unlock(&trans->transaction->cache_write_mutex);
 
-	while (1) {
+	ret = btrfs_run_delayed_refs(trans, root, 0);
+	if (!ret && loops == 0) {
+		loops++;
+		spin_lock(&cur_trans->dirty_bgs_lock);
+		list_splice_init(&cur_trans->dirty_bgs, &dirty);
 		 
-		if (last == 0) {
-			err = btrfs_run_delayed_refs(trans, root,
-						     (unsigned long)-1);
-			if (err)  
-				goto out;
+		if (!list_empty(&dirty)) {
+			spin_unlock(&cur_trans->dirty_bgs_lock);
+			goto again;
 		}
+		spin_unlock(&cur_trans->dirty_bgs_lock);
+	}
 
-		cache = btrfs_lookup_first_block_group(root->fs_info, last);
-		while (cache) {
-			 
-			if (cache->dirty) {
-				btrfs_put_block_group(cache);
-				goto again;
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root)
+{
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_transaction *cur_trans = trans->transaction;
+	int ret = 0;
+	int should_put;
+	struct btrfs_path *path;
+	struct list_head *io = &cur_trans->io_bgs;
+	int num_started = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	spin_lock(&cur_trans->dirty_bgs_lock);
+	while (!list_empty(&cur_trans->dirty_bgs)) {
+		cache = list_first_entry(&cur_trans->dirty_bgs,
+					 struct btrfs_block_group_cache,
+					 dirty_list);
+
+		if (!list_empty(&cache->io_list)) {
+			spin_unlock(&cur_trans->dirty_bgs_lock);
+			list_del_init(&cache->io_list);
+			btrfs_wait_cache_io(root, trans, cache,
+					    &cache->io_ctl, path,
+					    cache->key.objectid);
+			btrfs_put_block_group(cache);
+			spin_lock(&cur_trans->dirty_bgs_lock);
+		}
+
+		list_del_init(&cache->dirty_list);
+		spin_unlock(&cur_trans->dirty_bgs_lock);
+		should_put = 1;
+
+		cache_save_setup(cache, trans, path);
+
+		if (!ret)
+			ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
+
+		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
+			cache->io_ctl.inode = NULL;
+			ret = btrfs_write_out_cache(root, trans, cache, path);
+			if (ret == 0 && cache->io_ctl.inode) {
+				num_started++;
+				should_put = 0;
+				list_add_tail(&cache->io_list, io);
+			} else {
+				 
+				ret = 0;
 			}
-			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-				break;
-			cache = next_block_group(root, cache);
 		}
-		if (!cache) {
-			if (last == 0)
-				break;
-			last = 0;
-			continue;
+		if (!ret) {
+			ret = write_one_cache_group(trans, root, path, cache);
+			 
+			if (ret == -ENOENT) {
+				wait_event(cur_trans->writer_wait,
+				   atomic_read(&cur_trans->num_writers) == 1);
+				ret = write_one_cache_group(trans, root, path,
+							    cache);
+			}
+			if (ret)
+				btrfs_abort_transaction(trans, root, ret);
 		}
 
-		err = btrfs_write_out_cache(root, trans, cache, path);
+		if (should_put)
+			btrfs_put_block_group(cache);
+		spin_lock(&cur_trans->dirty_bgs_lock);
+	}
+	spin_unlock(&cur_trans->dirty_bgs_lock);
 
-		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-			cache->disk_cache_state = BTRFS_DC_WRITTEN;
-		last = cache->key.objectid + cache->key.offset;
+	while (!list_empty(io)) {
+		cache = list_first_entry(io, struct btrfs_block_group_cache,
+					 io_list);
+		list_del_init(&cache->io_list);
+		btrfs_wait_cache_io(root, trans, cache,
+				    &cache->io_ctl, path, cache->key.objectid);
 		btrfs_put_block_group(cache);
 	}
-out:
 
 	btrfs_free_path(path);
-	return err;
+	return ret;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -3128,6 +3267,7 @@
 	found->chunk_alloc = 0;
 	found->flush = 0;
 	init_waitqueue_head(&found->wait);
+	INIT_LIST_HEAD(&found->ro_bgs);
 
 	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
 				    info->space_info_kobj, "%s",
@@ -4761,8 +4901,9 @@
 }
 #endif  
 
-static int update_block_group(struct btrfs_root *root,
-			      u64 bytenr, u64 num_bytes, int alloc)
+static int update_block_group(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root, u64 bytenr,
+			      u64 num_bytes, int alloc)
 {
 	struct btrfs_block_group_cache *cache = NULL;
 	struct btrfs_fs_info *info = root->fs_info;
@@ -4804,7 +4945,6 @@
 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
 			cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-		cache->dirty = 1;
 		old_val = btrfs_block_group_used(&cache->item);
 		num_bytes = min(total, cache->key.offset - byte_in_group);
 		if (alloc) {
@@ -4844,6 +4984,15 @@
 				spin_unlock(&info->unused_bgs_lock);
 			}
 		}
+
+		spin_lock(&trans->transaction->dirty_bgs_lock);
+		if (list_empty(&cache->dirty_list)) {
+			list_add_tail(&cache->dirty_list,
+				      &trans->transaction->dirty_bgs);
+			btrfs_get_block_group(cache);
+		}
+		spin_unlock(&trans->transaction->dirty_bgs_lock);
+
 		btrfs_put_block_group(cache);
 		total -= num_bytes;
 		bytenr += num_bytes;
@@ -5487,7 +5636,7 @@
 			}
 		}
 
-		ret = update_block_group(root, bytenr, num_bytes, 0);
+		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
 		if (ret) {
 			btrfs_abort_transaction(trans, extent_root, ret);
 			goto out;
@@ -6397,7 +6546,7 @@
 	if (ret)
 		return ret;
 
-	ret = update_block_group(root, ins->objectid, ins->offset, 1);
+	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
 	if (ret) {  
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
 			ins->objectid, ins->offset);
@@ -6486,7 +6635,8 @@
 			return ret;
 	}
 
-	ret = update_block_group(root, ins->objectid, root->leafsize, 1);
+	ret = update_block_group(trans, root, ins->objectid, root->leafsize,
+				 1);
 	if (ret) {  
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
 			ins->objectid, ins->offset);
@@ -7502,6 +7652,7 @@
 	    min_allocable_bytes <= sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
 		cache->ro = 1;
+		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
 		ret = 0;
 	}
 out:
@@ -7520,16 +7671,22 @@
 
 	BUG_ON(cache->ro);
 
+again:
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
 
-	alloc_flags = update_block_group_flags(root, cache->flags);
-	if (alloc_flags != cache->flags) {
-		ret = do_chunk_alloc(trans, root, alloc_flags,
-				     CHUNK_ALLOC_FORCE);
-		if (ret < 0)
-			goto out;
+	mutex_lock(&root->fs_info->ro_block_group_mutex);
+	if (trans->transaction->dirty_bg_run) {
+		u64 transid = trans->transid;
+
+		mutex_unlock(&root->fs_info->ro_block_group_mutex);
+		btrfs_end_transaction(trans, root);
+
+		ret = btrfs_wait_for_commit(root, transid);
+		if (ret)
+			return ret;
+		goto again;
 	}
 
 	ret = set_block_group_ro(cache, 0);
@@ -7542,6 +7699,14 @@
 		goto out;
 	ret = set_block_group_ro(cache, 0);
 out:
+	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+		alloc_flags = update_block_group_flags(root, cache->flags);
+		lock_chunks(root->fs_info->chunk_root);
+		check_system_chunk(trans, root, alloc_flags, true);
+		unlock_chunks(root->fs_info->chunk_root);
+	}
+	mutex_unlock(&root->fs_info->ro_block_group_mutex);
+
 	btrfs_end_transaction(trans, root);
 	return ret;
 }
@@ -7554,13 +7719,17 @@
 			      CHUNK_ALLOC_FORCE);
 }
 
-static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
 {
 	struct btrfs_block_group_cache *block_group;
 	u64 free_bytes = 0;
 	int factor;
 
-	list_for_each_entry(block_group, groups_list, list) {
+	if (list_empty(&sinfo->ro_bgs))
+		return 0;
+
+	spin_lock(&sinfo->lock);
+	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
 		spin_lock(&block_group->lock);
 
 		if (!block_group->ro) {
@@ -7581,31 +7750,7 @@
 
 		spin_unlock(&block_group->lock);
 	}
-
-	return free_bytes;
-}
-
-u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
-{
-	int i;
-	u64 free_bytes = 0;
-
-#ifdef MY_ABC_HERE
-	down_read(&sinfo->groups_sem);
-#else
-	spin_lock(&sinfo->lock);
-#endif  
-
-	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
-		if (!list_empty(&sinfo->block_groups[i]))
-			free_bytes += __btrfs_get_ro_block_group_free_space(
-						&sinfo->block_groups[i]);
-
-#ifdef MY_ABC_HERE
-	up_read(&sinfo->groups_sem);
-#else
 	spin_unlock(&sinfo->lock);
-#endif  
 
 	return free_bytes;
 }
@@ -7624,6 +7769,7 @@
 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
 	sinfo->bytes_readonly -= num_bytes;
 	cache->ro = 0;
+	list_del_init(&cache->ro_list);
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
 }
@@ -7946,6 +8092,9 @@
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
 	INIT_LIST_HEAD(&cache->bg_list);
+	INIT_LIST_HEAD(&cache->ro_list);
+	INIT_LIST_HEAD(&cache->dirty_list);
+	INIT_LIST_HEAD(&cache->io_list);
 	btrfs_init_free_space_ctl(cache);
 	atomic_set(&cache->trimming, 0);
 
@@ -8104,7 +8253,7 @@
 
 	while (1) {
 #ifdef MY_ABC_HERE
-		if (info->block_group_hint_root)
+		if (info->block_group_hint_root && info->reada_path_workers)
 			reada_block_group_item(info, hint_path);
 #endif
 
@@ -8126,9 +8275,8 @@
 
 		if (need_clear) {
 			 
-			cache->disk_cache_state = BTRFS_DC_CLEAR;
 			if (btrfs_test_opt(root, SPACE_CACHE))
-				cache->dirty = 1;
+				cache->disk_cache_state = BTRFS_DC_CLEAR;
 		}
 
 		read_extent_buffer(leaf, &cache->item,
@@ -8438,6 +8586,30 @@
 	}
 
 	inode = lookup_free_space_inode(tree_root, block_group, path);
+
+	mutex_lock(&trans->transaction->cache_write_mutex);
+	 
+	spin_lock(&trans->transaction->dirty_bgs_lock);
+	if (!list_empty(&block_group->io_list)) {
+		list_del_init(&block_group->io_list);
+
+		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
+
+		spin_unlock(&trans->transaction->dirty_bgs_lock);
+		btrfs_wait_cache_io(root, trans, block_group,
+				    &block_group->io_ctl, path,
+				    block_group->key.objectid);
+		btrfs_put_block_group(block_group);
+		spin_lock(&trans->transaction->dirty_bgs_lock);
+	}
+
+	if (!list_empty(&block_group->dirty_list)) {
+		list_del_init(&block_group->dirty_list);
+		btrfs_put_block_group(block_group);
+	}
+	spin_unlock(&trans->transaction->dirty_bgs_lock);
+	mutex_unlock(&trans->transaction->cache_write_mutex);
+
 	if (!IS_ERR(inode)) {
 		ret = btrfs_orphan_add(trans, inode);
 		if (ret) {
@@ -8525,9 +8697,18 @@
 		}
 	}
 
+	spin_lock(&trans->transaction->dirty_bgs_lock);
+	if (!list_empty(&block_group->dirty_list)) {
+		WARN_ON(1);
+	}
+	if (!list_empty(&block_group->io_list)) {
+		WARN_ON(1);
+	}
+	spin_unlock(&trans->transaction->dirty_bgs_lock);
 	btrfs_remove_free_space_cache(block_group);
 
 	spin_lock(&block_group->space_info->lock);
+	list_del_init(&block_group->ro_list);
 	block_group->space_info->total_bytes -= block_group->key.offset;
 	block_group->space_info->bytes_readonly -= block_group->key.offset;
 	block_group->space_info->disk_total -= block_group->key.offset * factor;
diff -ur a/fs/btrfs/file.c b/fs/btrfs/file.c
--- a/fs/btrfs/file.c	2017-03-23 15:07:56.000000000 +0100
+++ b/fs/btrfs/file.c	2017-03-14 02:45:53.000000000 +0100
@@ -1479,6 +1479,7 @@
 {
 	struct file *file = iocb->ki_filp;
 	struct iov_iter i;
+	struct inode *inode = file_inode(file);
 	ssize_t written;
 	ssize_t written_buffered;
 	loff_t endbyte;
@@ -1499,7 +1500,10 @@
 		goto out;
 	}
 	endbyte = pos + written_buffered - 1;
-	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
+	err = btrfs_fdatawrite_range(inode, pos, endbyte);
+	if (err)
+		goto out;
+	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
 	if (err)
 		goto out;
 	written += written_buffered;
@@ -1640,10 +1644,7 @@
 	int ret;
 
 	atomic_inc(&BTRFS_I(inode)->sync_writers);
-	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
-	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-			     &BTRFS_I(inode)->runtime_flags))
-		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	ret = btrfs_fdatawrite_range(inode, start, end);
 	atomic_dec(&BTRFS_I(inode)->sync_writers);
 
 	return ret;
@@ -2504,3 +2505,15 @@
 
 	return 0;
 }
+
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+{
+	int ret;
+
+	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			     &BTRFS_I(inode)->runtime_flags))
+		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+
+	return ret;
+}
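
The new btrfs_fdatawrite_range() helper added above kicks writeback a second time when the inode is flagged as having async (compressed) extents, since the first pass can return while the compression workers are still dirtying and submitting pages. A minimal userspace sketch of that "flush twice if async work may follow" pattern; flush_range() and async_pending are invented stand-ins for filemap_fdatawrite_range() and the BTRFS_INODE_HAS_ASYNC_EXTENT bit, not kernel APIs:

#include <stdio.h>
#include <stdbool.h>

static bool async_pending;	/* stands in for BTRFS_INODE_HAS_ASYNC_EXTENT */

static int flush_range(long start, long end)
{
	printf("flushing bytes %ld..%ld\n", start, end);
	return 0;	/* 0 == success, as with filemap_fdatawrite_range() */
}

static int fdatawrite_range_twice(long start, long end)
{
	int ret = flush_range(start, end);

	/* a second pass catches pages dirtied by the async writeback path */
	if (!ret && async_pending)
		ret = flush_range(start, end);
	return ret;
}

int main(void)
{
	async_pending = true;
	return fdatawrite_range_twice(0, 4096);
}
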
diff -ur a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
--- a/fs/btrfs/free-space-cache.c	2017-03-23 15:07:55.000000000 +0100
+++ b/fs/btrfs/free-space-cache.c	2017-03-14 02:45:52.000000000 +0100
@@ -71,7 +71,8 @@
 	}
 
 	mapping_set_gfp_mask(inode->i_mapping,
-			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+			mapping_gfp_mask(inode->i_mapping) &
+			~(GFP_NOFS & ~__GFP_HIGHMEM));
 
 	return inode;
 }
@@ -155,13 +156,13 @@
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
 	key.type = 0;
-
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
 				      sizeof(struct btrfs_free_space_header));
 	if (ret < 0) {
 		btrfs_release_path(path);
 		return ret;
 	}
+
 	leaf = path->nodes[0];
 	header = btrfs_item_ptr(leaf, path->slots[0],
 				struct btrfs_free_space_header);
@@ -209,10 +210,36 @@
 
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 				    struct btrfs_trans_handle *trans,
+				    struct btrfs_block_group_cache *block_group,
 				    struct inode *inode)
 {
 	loff_t oldsize;
 	int ret = 0;
+	struct btrfs_path *path = btrfs_alloc_path();
+	bool locked = false;
+
+	if (!path) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (block_group) {
+		locked = true;
+		mutex_lock(&trans->transaction->cache_write_mutex);
+		if (!list_empty(&block_group->io_list)) {
+			list_del_init(&block_group->io_list);
+
+			btrfs_wait_cache_io(root, trans, block_group,
+					    &block_group->io_ctl, path,
+					    block_group->key.objectid);
+			btrfs_put_block_group(block_group);
+		}
+
+		spin_lock(&block_group->lock);
+		block_group->disk_cache_state = BTRFS_DC_CLEAR;
+		spin_unlock(&block_group->lock);
+	}
+	btrfs_free_path(path);
 
 	oldsize = i_size_read(inode);
 	btrfs_i_size_write(inode, 0);
@@ -220,12 +247,14 @@
 
 	ret = btrfs_truncate_inode_items(trans, root, inode,
 					 0, BTRFS_EXTENT_DATA_KEY);
-	if (ret) {
-		btrfs_abort_transaction(trans, root, ret);
-		return ret;
-	}
+	if (ret)
+		goto fail;
 
 	ret = btrfs_update_inode(trans, root, inode);
+
+fail:
+	if (locked)
+		mutex_unlock(&trans->transaction->cache_write_mutex);
 	if (ret)
 		btrfs_abort_transaction(trans, root, ret);
 
@@ -251,18 +280,7 @@
 	return 0;
 }
 
-struct io_ctl {
-	void *cur, *orig;
-	struct page *page;
-	struct page **pages;
-	struct btrfs_root *root;
-	unsigned long size;
-	int index;
-	int num_pages;
-	unsigned check_crcs:1;
-};
-
-static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
+static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 		       struct btrfs_root *root, int write)
 {
 	int num_pages;
@@ -278,7 +296,7 @@
 	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
 		return -ENOSPC;
 
-	memset(io_ctl, 0, sizeof(struct io_ctl));
+	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
 
 	io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
 	if (!io_ctl->pages)
@@ -287,36 +305,37 @@
 	io_ctl->num_pages = num_pages;
 	io_ctl->root = root;
 	io_ctl->check_crcs = check_crcs;
+	io_ctl->inode = inode;
 
 	return 0;
 }
 
-static void io_ctl_free(struct io_ctl *io_ctl)
+static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
 {
 	kfree(io_ctl->pages);
+	io_ctl->pages = NULL;
 }
 
-static void io_ctl_unmap_page(struct io_ctl *io_ctl)
+static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
 {
 	if (io_ctl->cur) {
-		kunmap(io_ctl->page);
 		io_ctl->cur = NULL;
 		io_ctl->orig = NULL;
 	}
 }
 
-static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
+static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
 {
 	ASSERT(io_ctl->index < io_ctl->num_pages);
 	io_ctl->page = io_ctl->pages[io_ctl->index++];
-	io_ctl->cur = kmap(io_ctl->page);
+	io_ctl->cur = page_address(io_ctl->page);
 	io_ctl->orig = io_ctl->cur;
 	io_ctl->size = PAGE_CACHE_SIZE;
 	if (clear)
 		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
 }
 
-static void io_ctl_drop_pages(struct io_ctl *io_ctl)
+static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
 {
 	int i;
 
@@ -331,7 +350,7 @@
 	}
 }
 
-static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
+static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 				int uptodate)
 {
 	struct page *page;
@@ -365,7 +384,7 @@
 	return 0;
 }
 
-static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
+static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 {
 	__le64 *val;
 
@@ -384,7 +403,7 @@
 	io_ctl->cur += sizeof(u64);
 }
 
-static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
+static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 {
 	__le64 *gen;
 
@@ -409,7 +428,7 @@
 	return 0;
 }
 
-static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
+static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
 {
 	u32 *tmp;
 	u32 crc = ~(u32)0;
@@ -427,13 +446,12 @@
 			      PAGE_CACHE_SIZE - offset);
 	btrfs_csum_final(crc, (char *)&crc);
 	io_ctl_unmap_page(io_ctl);
-	tmp = kmap(io_ctl->pages[0]);
+	tmp = page_address(io_ctl->pages[0]);
 	tmp += index;
 	*tmp = crc;
-	kunmap(io_ctl->pages[0]);
 }
 
-static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
+static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
 {
 	u32 *tmp, val;
 	u32 crc = ~(u32)0;
@@ -447,10 +465,9 @@
 	if (index == 0)
 		offset = sizeof(u32) * io_ctl->num_pages;
 
-	tmp = kmap(io_ctl->pages[0]);
+	tmp = page_address(io_ctl->pages[0]);
 	tmp += index;
 	val = *tmp;
-	kunmap(io_ctl->pages[0]);
 
 	io_ctl_map_page(io_ctl, 0);
 	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
@@ -466,7 +483,7 @@
 	return 0;
 }
 
-static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
+static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
 			    void *bitmap)
 {
 	struct btrfs_free_space_entry *entry;
@@ -494,7 +511,7 @@
 	return 0;
 }
 
-static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
+static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
 {
 	if (!io_ctl->cur)
 		return -ENOSPC;
@@ -513,7 +530,7 @@
 	return 0;
 }
 
-static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
+static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
 {
 	 
 	if (io_ctl->cur != io_ctl->orig)
@@ -527,7 +544,7 @@
 	}
 }
 
-static int io_ctl_read_entry(struct io_ctl *io_ctl,
+static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
 			    struct btrfs_free_space *entry, u8 *type)
 {
 	struct btrfs_free_space_entry *e;
@@ -554,7 +571,7 @@
 	return 0;
 }
 
-static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
+static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
 			      struct btrfs_free_space *entry)
 {
 	int ret;
@@ -604,7 +621,7 @@
 {
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
-	struct io_ctl io_ctl;
+	struct btrfs_io_ctl io_ctl;
 	struct btrfs_key key;
 	struct btrfs_free_space *e, *n;
 	struct list_head bitmaps;
@@ -825,7 +842,7 @@
 }
 
 static noinline_for_stack
-int write_cache_extent_entries(struct io_ctl *io_ctl,
+int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
 			      struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_block_group_cache *block_group,
 			      int *entries, int *bitmaps,
@@ -833,6 +850,7 @@
 {
 	int ret;
 	struct btrfs_free_cluster *cluster = NULL;
+	struct btrfs_free_cluster *cluster_locked = NULL;
 	struct rb_node *node = rb_first(&ctl->free_space_offset);
 	struct btrfs_trim_range *trim_entry;
 
@@ -843,6 +861,8 @@
 	}
 
 	if (!node && cluster) {
+		cluster_locked = cluster;
+		spin_lock(&cluster_locked->lock);
 		node = rb_first(&cluster->root);
 		cluster = NULL;
 	}
@@ -865,9 +885,15 @@
 		node = rb_next(node);
 		if (!node && cluster) {
 			node = rb_first(&cluster->root);
+			cluster_locked = cluster;
+			spin_lock(&cluster_locked->lock);
 			cluster = NULL;
 		}
 	}
+	if (cluster_locked) {
+		spin_unlock(&cluster_locked->lock);
+		cluster_locked = NULL;
+	}
 
 	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
 		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
@@ -879,6 +905,8 @@
 
 	return 0;
 fail:
+	if (cluster_locked)
+		spin_unlock(&cluster_locked->lock);
 	return -ENOSPC;
 }
 
@@ -940,7 +968,7 @@
 static noinline_for_stack int
 write_pinned_extent_entries(struct btrfs_root *root,
 			    struct btrfs_block_group_cache *block_group,
-			    struct io_ctl *io_ctl,
+			    struct btrfs_io_ctl *io_ctl,
 			    int *entries)
 {
 	u64 start, extent_start, extent_end, len;
@@ -982,7 +1010,7 @@
 }
 
 static noinline_for_stack int
-write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
+write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
 {
 	struct list_head *pos, *n;
 	int ret;
@@ -1014,10 +1042,7 @@
 }
 
 static void noinline_for_stack
-cleanup_write_cache_enospc(struct inode *inode,
-			   struct io_ctl *io_ctl,
-			   struct extent_state **cached_state,
-			   struct list_head *bitmap_list)
+cleanup_bitmap_list(struct list_head *bitmap_list)
 {
 	struct list_head *pos, *n;
 
@@ -1026,29 +1051,95 @@
 			list_entry(pos, struct btrfs_free_space, list);
 		list_del_init(&entry->list);
 	}
+}
+
+static void noinline_for_stack
+cleanup_write_cache_enospc(struct inode *inode,
+			   struct btrfs_io_ctl *io_ctl,
+			   struct extent_state **cached_state,
+			   struct list_head *bitmap_list)
+{
 	io_ctl_drop_pages(io_ctl);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			     i_size_read(inode) - 1, cached_state,
 			     GFP_NOFS);
 }
 
+int btrfs_wait_cache_io(struct btrfs_root *root,
+			struct btrfs_trans_handle *trans,
+			struct btrfs_block_group_cache *block_group,
+			struct btrfs_io_ctl *io_ctl,
+			struct btrfs_path *path, u64 offset)
+{
+	int ret;
+	struct inode *inode = io_ctl->inode;
+
+	if (!inode)
+		return 0;
+
+	root = root->fs_info->tree_root;
+
+	ret = flush_dirty_cache(inode);
+	if (ret)
+		goto out;
+
+	ret = update_cache_item(trans, root, inode, path, offset,
+				io_ctl->entries, io_ctl->bitmaps);
+out:
+	io_ctl_free(io_ctl);
+	if (ret) {
+		invalidate_inode_pages2(inode->i_mapping);
+		BTRFS_I(inode)->generation = 0;
+		if (block_group) {
+#ifdef DEBUG
+			btrfs_err(root->fs_info,
+				"failed to write free space cache for block group %llu",
+				block_group->key.objectid);
+#endif
+		}
+	}
+	btrfs_update_inode(trans, root, inode);
+
+	if (block_group) {
+		 
+		spin_lock(&trans->transaction->dirty_bgs_lock);
+
+		spin_lock(&block_group->lock);
+
+		if (!ret && list_empty(&block_group->dirty_list))
+			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+		else if (ret)
+			block_group->disk_cache_state = BTRFS_DC_ERROR;
+
+		spin_unlock(&block_group->lock);
+		spin_unlock(&trans->transaction->dirty_bgs_lock);
+		io_ctl->inode = NULL;
+		iput(inode);
+	}
+
+	return ret;
+
+}
+
 static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 				   struct btrfs_free_space_ctl *ctl,
 				   struct btrfs_block_group_cache *block_group,
+				   struct btrfs_io_ctl *io_ctl,
 				   struct btrfs_trans_handle *trans,
 				   struct btrfs_path *path, u64 offset)
 {
 	struct extent_state *cached_state = NULL;
-	struct io_ctl io_ctl;
 	LIST_HEAD(bitmap_list);
 	int entries = 0;
 	int bitmaps = 0;
 	int ret;
+	int must_iput = 0;
 
 	if (!i_size_read(inode))
 		return -1;
 
-	ret = io_ctl_init(&io_ctl, inode, root, 1);
+	WARN_ON(io_ctl->pages);
+	ret = io_ctl_init(io_ctl, inode, root, 1);
 	if (ret)
 		return -1;
 
@@ -1061,42 +1152,41 @@
 			up_write(&block_group->data_rwsem);
 			BTRFS_I(inode)->generation = 0;
 			ret = 0;
+			must_iput = 1;
 			goto out;
 		}
 		spin_unlock(&block_group->lock);
 	}
 
-	io_ctl_prepare_pages(&io_ctl, inode, 0);
+	io_ctl_prepare_pages(io_ctl, inode, 0);
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state);
 
-	io_ctl_set_generation(&io_ctl, trans->transid);
+	io_ctl_set_generation(io_ctl, trans->transid);
 
 	mutex_lock(&ctl->cache_writeout_mutex);
 	 
-	ret = write_cache_extent_entries(&io_ctl, ctl,
+	spin_lock(&ctl->tree_lock);
+	ret = write_cache_extent_entries(io_ctl, ctl,
 					 block_group, &entries, &bitmaps,
 					 &bitmap_list);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	if (ret)
+		goto out_nospc_locked;
 
-	ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
+	if (ret)
+		goto out_nospc_locked;
 
-	ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+	ret = write_bitmap_entries(io_ctl, &bitmap_list);
+	spin_unlock(&ctl->tree_lock);
 	mutex_unlock(&ctl->cache_writeout_mutex);
 	if (ret)
 		goto out_nospc;
 
-	io_ctl_zero_remaining_pages(&io_ctl);
+	io_ctl_zero_remaining_pages(io_ctl);
 
-	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
 				0, i_size_read(inode), &cached_state);
 	if (ret)
 		goto out_nospc;
@@ -1104,28 +1194,39 @@
 	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
 		up_write(&block_group->data_rwsem);
 	 
-	io_ctl_drop_pages(&io_ctl);
+	io_ctl_drop_pages(io_ctl);
 
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 
-	ret = flush_dirty_cache(inode);
+	io_ctl->entries = entries;
+	io_ctl->bitmaps = bitmaps;
+
+	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
 	if (ret)
 		goto out;
 
-	ret = update_cache_item(trans, root, inode, path, offset,
-				entries, bitmaps);
+	return 0;
+
 out:
-	io_ctl_free(&io_ctl);
+	io_ctl->inode = NULL;
+	io_ctl_free(io_ctl);
 	if (ret) {
 		invalidate_inode_pages2(inode->i_mapping);
 		BTRFS_I(inode)->generation = 0;
 	}
 	btrfs_update_inode(trans, root, inode);
+	if (must_iput)
+		iput(inode);
 	return ret;
 
+out_nospc_locked:
+	cleanup_bitmap_list(&bitmap_list);
+	spin_unlock(&ctl->tree_lock);
+	mutex_unlock(&ctl->cache_writeout_mutex);
+
 out_nospc:
-	cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
+	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
 
 	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
 		up_write(&block_group->data_rwsem);
@@ -1149,33 +1250,29 @@
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
-
-	if (block_group->delalloc_bytes) {
-		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
 	spin_unlock(&block_group->lock);
 
 	inode = lookup_free_space_inode(root, block_group, path);
 	if (IS_ERR(inode))
 		return 0;
 
-	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
+				      &block_group->io_ctl, trans,
 				      path, block_group->key.objectid);
 	if (ret) {
-		spin_lock(&block_group->lock);
-		block_group->disk_cache_state = BTRFS_DC_ERROR;
-		spin_unlock(&block_group->lock);
-		ret = 0;
 #ifdef DEBUG
 		btrfs_err(root->fs_info,
 			"failed to write free space cache for block group %llu",
 			block_group->key.objectid);
 #endif
+		spin_lock(&block_group->lock);
+		block_group->disk_cache_state = BTRFS_DC_ERROR;
+		spin_unlock(&block_group->lock);
+
+		block_group->io_ctl.inode = NULL;
+		iput(inode);
 	}
 
-	iput(inode);
 	return ret;
 }
 
@@ -2448,15 +2545,17 @@
 		     u64 cont1_bytes, u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct btrfs_free_space *entry;
+	struct btrfs_free_space *entry = NULL;
 	int ret = -ENOSPC;
 	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
-	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
-	if (entry->offset != bitmap_offset) {
+	if (!list_empty(bitmaps))
+		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+
+	if (!entry || entry->offset != bitmap_offset) {
 		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
 		if (entry && list_empty(&entry->list))
 			list_add(&entry->list, bitmaps);
@@ -2948,11 +3047,14 @@
 {
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	int ret;
+	struct btrfs_io_ctl io_ctl;
 
 	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
 		return 0;
 
-	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
+	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
+				      trans, path, 0) ||
+		btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
 	if (ret) {
 		btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
diff -ur a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
--- a/fs/btrfs/free-space-cache.h	2017-03-23 15:07:55.000000000 +0100
+++ b/fs/btrfs/free-space-cache.h	2017-03-14 02:45:53.000000000 +0100
@@ -35,6 +35,8 @@
 			   struct btrfs_free_space *info);
 };
 
+struct btrfs_io_ctl;
+
 struct inode *lookup_free_space_inode(struct btrfs_root *root,
 				      struct btrfs_block_group_cache
 				      *block_group, struct btrfs_path *path);
@@ -47,14 +49,19 @@
 				       struct btrfs_block_rsv *rsv);
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 				    struct btrfs_trans_handle *trans,
+				    struct btrfs_block_group_cache *block_group,
 				    struct inode *inode);
 int load_free_space_cache(struct btrfs_fs_info *fs_info,
 			  struct btrfs_block_group_cache *block_group);
+int btrfs_wait_cache_io(struct btrfs_root *root,
+			struct btrfs_trans_handle *trans,
+			struct btrfs_block_group_cache *block_group,
+			struct btrfs_io_ctl *io_ctl,
+			struct btrfs_path *path, u64 offset);
 int btrfs_write_out_cache(struct btrfs_root *root,
 			  struct btrfs_trans_handle *trans,
 			  struct btrfs_block_group_cache *block_group,
 			  struct btrfs_path *path);
-
 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
 				    struct btrfs_path *path);
 int create_free_ino_inode(struct btrfs_root *root,
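
These header changes expose the split writeout model introduced in free-space-cache.c above: __btrfs_write_out_cache() now only starts the cache write and parks its state in a struct btrfs_io_ctl, while btrfs_wait_cache_io() completes it later, so the caches of many block groups can have I/O in flight at once. A rough userspace sketch of that start/wait split; struct demo_io_ctl and both helper names are invented for illustration, not the kernel interfaces:

#include <stdio.h>

struct demo_io_ctl {
	int entries;
	int bitmaps;
	int in_flight;
};

static int start_cache_write(struct demo_io_ctl *io, int entries, int bitmaps)
{
	io->entries = entries;
	io->bitmaps = bitmaps;
	io->in_flight = 1;
	printf("started write: %d entries, %d bitmaps\n", entries, bitmaps);
	return 0;
}

static int wait_cache_write(struct demo_io_ctl *io)
{
	if (!io->in_flight)
		return 0;	/* nothing was started for this group */
	io->in_flight = 0;
	printf("finished write: %d entries, %d bitmaps\n",
	       io->entries, io->bitmaps);
	return 0;
}

int main(void)
{
	struct demo_io_ctl a = { 0 }, b = { 0 };

	/* start both writes, then wait for both: the I/O overlaps */
	start_cache_write(&a, 10, 1);
	start_cache_write(&b, 7, 0);
	wait_cache_write(&a);
	wait_cache_write(&b);
	return 0;
}
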
diff -ur a/fs/btrfs/inode.c b/fs/btrfs/inode.c
--- a/fs/btrfs/inode.c	2017-03-23 15:08:05.000000000 +0100
+++ b/fs/btrfs/inode.c	2017-03-14 02:46:01.000000000 +0100
@@ -1204,8 +1204,14 @@
 		num_bytes = 0;
 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 
-		if (found_key.objectid > ino ||
-		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
+		if (found_key.objectid > ino)
+			break;
+		if (WARN_ON_ONCE(found_key.objectid < ino) ||
+		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
+			path->slots[0]++;
+			goto next_slot;
+		}
+		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
 		    found_key.offset > end)
 			break;
 
@@ -1256,8 +1262,27 @@
 					goto out_check;
 			}
 			 
+#ifdef MY_ABC_HERE
+			if (csum_exist_in_range(root, disk_bytenr, num_bytes)) {
+				if (!nolock) {
+					btrfs_end_write_no_snapshoting(root);
+				}
+				goto out_check;
+			}
+#else
 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
 				goto out_check;
+#endif  
+#ifdef MY_ABC_HERE
+			if (test_bit(BTRFS_INODE_IN_SYNO_DEFRAG, &BTRFS_I(inode)->runtime_flags) &&
+				test_range_bit(&BTRFS_I(inode)->io_tree, found_key.offset,
+				        extent_end - 1, EXTENT_DEFRAG, 1, NULL)) {
+				if (!nolock) {
+					btrfs_end_write_no_snapshoting(root);
+				}
+				goto out_check;
+			}
+#endif  
 			nocow = 1;
 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 			extent_end = found_key.offset +
@@ -2495,6 +2520,10 @@
 		goto out;
 	}
 
+	btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
+				     ordered_extent->file_offset +
+				     ordered_extent->len - 1);
+
 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
 		truncated = true;
 		logical_len = ordered_extent->truncated_len;
@@ -4415,7 +4444,10 @@
 		goto no_delete;
 	}
 	 
-	btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	if (!special_file(inode->i_mode))
+		btrfs_wait_ordered_range(inode, 0, (u64)-1);
+
+	btrfs_free_io_failure_record(inode, 0, (u64)-1);
 
 	if (root->fs_info->log_root_recovering) {
 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
@@ -6375,9 +6407,12 @@
 			btrfs_put_ordered_extent(ordered);
 		} else {
 			 
-			ret = filemap_write_and_wait_range(inode->i_mapping,
-							   lockstart,
-							   lockend);
+			ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
+			if (ret)
+				break;
+			ret = filemap_fdatawait_range(inode->i_mapping,
+						      lockstart,
+						      lockend);
 			if (ret)
 				break;
 
@@ -7068,14 +7103,23 @@
 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct extent_io_tree *tree;
+	struct inode *inode = page->mapping->host;
+	int ret;
 
 	if (current->flags & PF_MEMALLOC) {
 		redirty_page_for_writepage(wbc, page);
 		unlock_page(page);
 		return 0;
 	}
+
+	if (!igrab(inode)) {
+		redirty_page_for_writepage(wbc, page);
+		return AOP_WRITEPAGE_ACTIVATE;
+	}
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+	btrfs_add_delayed_iput(inode);
+	return ret;
 }
 
 static int btrfs_writepages(struct address_space *mapping,
@@ -8081,9 +8125,9 @@
 #endif
 	 
 #if defined(MY_ABC_HERE)
-	trans = btrfs_start_transaction(root, 5 + credit_for_syno);
+	trans = btrfs_start_transaction(root, 7 + credit_for_syno);
 #else
-	trans = btrfs_start_transaction(root, 5);
+	trans = btrfs_start_transaction(root, 7);
 #endif  
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
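
Among the inode.c hunks above, btrfs_writepage() now pins the inode with igrab() before writing the page and drops the reference through btrfs_add_delayed_iput(), so the inode cannot be evicted while the page is under writeback; when the grab fails, the page is redirtied and left for a later pass. A small userspace sketch of that pin/unpin discipline, with every name invented for the demo:

#include <stdio.h>
#include <stdbool.h>

struct demo_inode {
	int refcount;
	bool evicted;
};

static bool demo_igrab(struct demo_inode *inode)
{
	if (inode->evicted)
		return false;	/* too late: caller backs off and retries later */
	inode->refcount++;
	return true;
}

static void demo_iput(struct demo_inode *inode)
{
	if (--inode->refcount == 0)
		inode->evicted = true;
}

static void demo_writepage(struct demo_inode *inode)
{
	if (!demo_igrab(inode)) {
		printf("inode going away, page stays dirty\n");
		return;
	}
	printf("writing page\n");
	demo_iput(inode);	/* stands in for the delayed iput */
}

int main(void)
{
	struct demo_inode inode = { .refcount = 1 };

	demo_writepage(&inode);
	demo_iput(&inode);	/* last user drops it */
	demo_writepage(&inode);	/* now the grab fails */
	return 0;
}
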
diff -ur a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
--- a/fs/btrfs/inode-map.c	2017-03-23 15:07:53.000000000 +0100
+++ b/fs/btrfs/inode-map.c	2017-03-14 02:45:51.000000000 +0100
@@ -271,7 +271,7 @@
 		__btrfs_add_free_space(ctl, info->offset, count);
 free:
 		rb_erase(&info->offset_index, rbroot);
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 	}
 }
 
@@ -456,7 +456,7 @@
 	}
 
 	if (i_size_read(inode) > 0) {
-		ret = btrfs_truncate_free_space_cache(root, trans, inode);
+		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
 		if (ret) {
 			if (ret != -ENOSPC)
 				btrfs_abort_transaction(trans, root, ret);
diff -ur a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
--- a/fs/btrfs/ioctl.c	2017-03-23 15:08:01.000000000 +0100
+++ b/fs/btrfs/ioctl.c	2017-03-14 02:45:58.000000000 +0100
@@ -75,8 +75,9 @@
 #endif
 
 static int btrfs_clone(struct inode *src, struct inode *inode,
-#ifdef MY_ABC_HERE
-		       u64 off, u64 olen, u64 olen_aligned, u64 destoff, u64 *reserved);
+#if defined(MY_ABC_HERE)
+		       u64 off, u64 olen, u64 olen_aligned, u64 destoff,
+		       u64 *reserved);
 #else
 		       u64 off, u64 olen, u64 olen_aligned, u64 destoff);
 #endif
@@ -944,6 +945,199 @@
 	return em;
 }
 
+#ifdef MY_ABC_HERE
+ 
+static int defrag_check_extent_usage(struct inode *inode,
+			        struct btrfs_ioctl_defrag_range_args *range,
+			        struct ulist *disko_ulist, u64 start, u64 *endoff)
+{
+	int ret = 0;
+	int extent_rewrite = 0;
+	int slot;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct ulist_node *unode;
+	struct btrfs_path *path = NULL;
+	struct btrfs_file_extent_item *item;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+	struct btrfs_trans_handle *trans;
+	u8 type;
+	u64 extent_item_use = 0;
+	u32 syno_ratio_denom = 3;  
+	u32 syno_ratio_nom = 2;
+	u32 syno_thresh = 8 * 1024 * 1024;  
+	u64 extent_disko = 0, extent_diskl = 0, extent_datao = 0;
+	u64 num_bytes;
+	u64 search_end = 0;
+	u32 nritems;
+
+	if (range->syno_ratio_denom != 0 && range->syno_ratio_nom != 0) {
+		syno_ratio_denom = range->syno_ratio_denom;
+		syno_ratio_nom = range->syno_ratio_nom;
+	}
+	if (range->syno_thresh != 0)
+		syno_thresh = (u32)range->syno_thresh * 4096;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		extent_rewrite = -ENOMEM;
+		goto out;
+	}
+
+	path->reada = 1;
+	path->leave_spinning = 1;
+
+	key.objectid = btrfs_ino(inode);
+	key.type = BTRFS_EXTENT_DATA_KEY;
+	key.offset = start;
+
+again:
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0) {
+		extent_rewrite = ret;
+		goto out;
+	}
+
+	if (key.offset == start && ret > 0 && path->slots[0] > 0) {
+		btrfs_item_key_to_cpu(path->nodes[0], &key,
+				      path->slots[0] - 1);
+		if (key.type == BTRFS_EXTENT_DATA_KEY)
+			path->slots[0]--;
+	}
+	nritems = btrfs_header_nritems(path->nodes[0]);
+	if (path->slots[0] >= nritems) {
+		ret = btrfs_next_leaf(root, path);
+		if (ret < 0) {
+			extent_rewrite = ret;
+			goto out;
+		}
+		if (ret > 0) {
+			*endoff = start + extent_diskl - 1;
+			goto out;
+		}
+	}
+	leaf = path->nodes[0];
+	slot = path->slots[0];
+
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+	if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
+	    key.objectid != btrfs_ino(inode)) {
+		*endoff = (u64)-1;  
+		goto out;
+	}
+
+	if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) {
+		btrfs_release_path(path);
+		key.offset++;
+		goto again;
+	}
+	item = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+	type = btrfs_file_extent_type(leaf, item);
+	extent_disko = btrfs_file_extent_disk_bytenr(leaf, item);
+	extent_diskl = btrfs_file_extent_disk_num_bytes(leaf, item);
+	extent_datao = btrfs_file_extent_offset(leaf, item);
+	num_bytes = btrfs_file_extent_num_bytes(leaf, item);
+
+	if (type == BTRFS_FILE_EXTENT_INLINE) {
+		*endoff = (u64)-1;  
+		goto out;
+	}
+
+	*endoff = key.offset + num_bytes - 1;
+	if (extent_disko == 0) {
+		goto out;
+	}
+
+	unode = ulist_search(disko_ulist, extent_disko);
+	if (unode) {
+		btrfs_free_path(path);
+		return unode->aux;
+	}
+
+	if (btrfs_file_extent_compression(leaf, item) ||
+	    btrfs_file_extent_encryption(leaf, item) ||
+	    btrfs_file_extent_other_encoding(leaf, item) ||
+	    btrfs_extent_readonly(root, extent_disko))
+		goto add_list;
+
+	btrfs_release_path(path);
+
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans))
+		goto add_list;
+
+	ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
+				    key.offset - extent_datao, extent_disko);
+	btrfs_end_transaction(trans, root);
+	if (ret)
+		goto add_list;
+
+	extent_item_use = num_bytes;
+	search_end = key.offset + extent_diskl - extent_datao;
+	key.offset += num_bytes;
+	while (1) {
+		u64 disko, datal;
+
+		path->leave_spinning = 1;
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		if (ret < 0) {
+			extent_rewrite = ret;
+			goto out;
+		}
+		nritems = btrfs_header_nritems(path->nodes[0]);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0) {
+				extent_rewrite = ret;
+				goto out;
+			}
+			if (ret > 0)
+				break;
+		}
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
+		    key.objectid != btrfs_ino(inode))
+			break;
+		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
+			if (key.offset > search_end)
+				break;
+			item = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+			type = btrfs_file_extent_type(leaf, item);
+			if (type == BTRFS_FILE_EXTENT_INLINE)
+				goto next;
+			disko = btrfs_file_extent_disk_bytenr(leaf, item);
+			datal = btrfs_file_extent_num_bytes(leaf, item);
+			 
+			if (disko == 0)
+				goto next;
+			 
+			if (disko < extent_disko || disko >= extent_disko + extent_diskl)
+				goto next;
+			if (type == BTRFS_FILE_EXTENT_PREALLOC)
+				goto add_list;
+			extent_item_use += datal;
+		}
+next:
+		btrfs_release_path(path);
+		key.offset++;
+	}
+	btrfs_release_path(path);
+	if (extent_item_use * syno_ratio_denom <= extent_diskl * syno_ratio_nom ||
+		extent_diskl >= extent_item_use + syno_thresh)
+		extent_rewrite = 1;
+add_list:
+	 
+	if (ulist_add_lru_adjust(disko_ulist, extent_disko, extent_rewrite, GFP_NOFS) &&
+		disko_ulist->nnodes > ULIST_NODES_MAX)
+		ulist_remove_first(disko_ulist);
+out:
+	btrfs_free_path(path);
+	return extent_rewrite;
+}
+#endif  
+
 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
 {
 	struct extent_map *next;
@@ -963,12 +1157,28 @@
 
 static int should_defrag_range(struct inode *inode, u64 start, int thresh,
 			       u64 *last_len, u64 *skip, u64 *defrag_end,
+#ifdef MY_ABC_HERE
+			       int compress,
+			       struct btrfs_ioctl_defrag_range_args *range,
+			       struct ulist *disko_ulist)
+#else
 			       int compress)
+#endif  
 {
 	struct extent_map *em;
 	int ret = 1;
 	bool next_mergeable = true;
 
+#ifdef MY_ABC_HERE
+	if (range->flags & BTRFS_DEFRAG_RANGE_SYNO_DEFRAG) {
+		u64 endoff = 0;
+		ret = defrag_check_extent_usage(inode, range,
+				        disko_ulist, start, &endoff);
+		*defrag_end = *skip = endoff;
+		return ret;
+	}
+#endif  
+
 	if (start < *defrag_end)
 		return 1;
 
@@ -1159,6 +1369,12 @@
 #else
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 #endif
+#ifdef MY_ABC_HERE
+	u64 last_rec_pos = 0;
+	u64 one_tenth_isize = i_size_read(inode) / 10;
+	int should_defrag_range_ret = 0;
+	struct ulist *disko_ulist = NULL;
+#endif  
 	int extent_thresh = range->extent_thresh;
 	unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
 	unsigned long cluster = max_cluster;
@@ -1168,6 +1384,17 @@
 	if (isize == 0)
 		return 0;
 
+#ifdef MY_ABC_HERE
+	if (range->flags & BTRFS_DEFRAG_RANGE_SYNO_DEFRAG) {
+		printk(KERN_WARNING"[syno defrag] root:%llu ino:%llu "
+		        "start:%llu len:%llu thresh:%u dem:%u nom:%u\n",
+		        root->objectid, btrfs_ino(inode),
+		        range->start, range->len,
+		        range->syno_thresh, range->syno_ratio_denom,
+		        range->syno_ratio_nom);
+	}
+	i = 0;  
+#endif  
 	if (range->start >= isize)
 		return -EINVAL;
 
@@ -1190,6 +1417,15 @@
 		ra = &file->f_ra;
 	}
 
+#ifdef MY_ABC_HERE
+	if (range->flags & BTRFS_DEFRAG_RANGE_SYNO_DEFRAG) {
+		disko_ulist = ulist_alloc(GFP_NOFS);
+		if (!disko_ulist) {
+			ret = -ENOMEM;
+			goto out_ra;
+		}
+	}
+#endif  
 	pages = kmalloc_array(max_cluster, sizeof(struct page *),
 			GFP_NOFS);
 	if (!pages) {
@@ -1222,6 +1458,11 @@
 	if (i < inode->i_mapping->writeback_index)
 		inode->i_mapping->writeback_index = i;
 
+#ifdef MY_ABC_HERE
+	if (range->flags & BTRFS_DEFRAG_RANGE_SYNO_DEFRAG) {
+		set_bit(BTRFS_INODE_IN_SYNO_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+	}
+#endif  
 	while (i <= last_index && defrag_count < max_to_defrag &&
 	       (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
 		PAGE_CACHE_SHIFT)) {
@@ -1235,11 +1476,32 @@
 			break;
 		}
 
+#ifdef MY_ABC_HERE
+		if (range->flags & BTRFS_DEFRAG_RANGE_SYNO_DEFRAG &&
+			((u64)i << PAGE_CACHE_SHIFT) - last_rec_pos >= one_tenth_isize) {
+			printk(KERN_NOTICE"[syno defrag status] root:%llu ino:%llu pos:%llu\n",
+			       root->objectid, btrfs_ino(inode), (u64)i << PAGE_CACHE_SHIFT);
+			last_rec_pos = (u64)i << PAGE_CACHE_SHIFT;
+		}
+		should_defrag_range_ret = should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+						 extent_thresh, &last_len, &skip,
+						 &defrag_end, range->flags & BTRFS_DEFRAG_RANGE_COMPRESS,
+						 range, disko_ulist);
+		if (should_defrag_range_ret < 0) {
+			ret = should_defrag_range_ret;
+			goto out_ra;
+		}
+		if (!should_defrag_range_ret) {
+			unsigned long next;
+			if (skip == (u64) -1)
+				break;
+#else
 		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
 					 extent_thresh, &last_len, &skip,
 					 &defrag_end, range->flags &
 					 BTRFS_DEFRAG_RANGE_COMPRESS)) {
 			unsigned long next;
+#endif  
 			 
 			next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 			i = max(i + 1, next);
@@ -1304,6 +1566,14 @@
 		}
 	}
 
+#ifdef MY_ABC_HERE
+	if (range->flags & BTRFS_DEFRAG_RANGE_SYNO_DEFRAG) {
+		printk(KERN_NOTICE"[syno defrag] root:%llu, ino:%llu wait flush\n",
+		       root->objectid, btrfs_ino(inode));
+		btrfs_wait_ordered_range(inode, 0, (u64)-1);
+		clear_bit(BTRFS_INODE_IN_SYNO_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+	} else
+#endif  
 	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
 		filemap_flush(inode->i_mapping);
 		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
@@ -1335,6 +1605,14 @@
 		BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE;
 		mutex_unlock(&inode->i_mutex);
 	}
+#ifdef MY_ABC_HERE
+	if (range->flags & BTRFS_DEFRAG_RANGE_SYNO_DEFRAG) {
+		printk(KERN_WARNING"[syno defrag] finish root:%llu ino:%llu end_pos:%lu "
+		       "ret: %d\n", root->objectid, btrfs_ino(inode), i, ret);
+		ulist_free(disko_ulist);
+		clear_bit(BTRFS_INODE_IN_SYNO_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+	}
+#endif  
 	if (!file)
 		kfree(ra);
 	kfree(pages);
@@ -2277,7 +2555,7 @@
 			"Attempt to delete subvolume %llu during send",
 			dest->root_key.objectid);
 		err = -EPERM;
-		goto out_dput;
+		goto out_unlock_inode;
 	}
 
 	err = d_invalidate(dentry);
@@ -2374,6 +2652,7 @@
 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
 		spin_unlock(&dest->root_item_lock);
 	}
+out_unlock_inode:
 	mutex_unlock(&inode->i_mutex);
 	if (!err) {
 		shrink_dcache_sb(root->fs_info->sb);
@@ -2777,7 +3056,7 @@
 
 	ret = btrfs_cmp_data(src, loff, dst, dst_loff, len);
 	if (ret == 0)
-#ifdef MY_ABC_HERE
+#if defined(MY_ABC_HERE)
 		ret = btrfs_clone(src, dst, loff, len, len, dst_loff, NULL);
 #else
 		ret = btrfs_clone(src, dst, loff, len, len, dst_loff);
@@ -3004,7 +3283,7 @@
 }
 
 static int btrfs_clone(struct inode *src, struct inode *inode,
-#ifdef MY_ABC_HERE
+#if defined(MY_ABC_HERE)
 		       const u64 off, const u64 olen, const u64 olen_aligned,
 		       const u64 destoff, u64 *reserved_size)
 #else
@@ -3441,6 +3720,8 @@
 	 
 	if (off == 0 && olen == 0 && destoff == 0) {
 		reserve_size = inode_get_bytes(src) + BTRFS_I(src)->delalloc_bytes;
+	} else {
+		reserve_size = olen;
 	}
 	if (root->fs_info->quota_enabled) {
 		ret = btrfs_qgroup_reserve(root, reserve_size);
@@ -3488,7 +3769,7 @@
 		lock_extent_range(inode, destoff, len);
 	}
 
-#ifdef MY_ABC_HERE
+#if defined(MY_ABC_HERE)
 	ret = btrfs_clone(src, inode, off, olen, len, destoff, &reserve_size);
 #else
 	ret = btrfs_clone(src, inode, off, olen, len, destoff);
@@ -5123,7 +5404,8 @@
 
 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
 			disko = btrfs_file_extent_disk_bytenr(leaf, fi);
-			if (disko && ulist_add_lru_adjust(disko_ulist, disko, GFP_NOFS)) {
+			btrfs_set_path_blocking(path);
+			if (disko && ulist_add_lru_adjust(disko_ulist, disko, 0, GFP_NOFS)) {
 				compressed_size += btrfs_file_extent_disk_num_bytes(leaf, fi);
 				size += btrfs_file_extent_num_bytes(leaf, fi);
 				if (disko_ulist->nnodes > ULIST_NODES_MAX)
@@ -5159,6 +5441,59 @@
 }
 #endif  
 
+#ifdef MY_ABC_HERE
+static int btrfs_ioctl_snapshot_size_query(struct file *file, void __user *argp)
+{
+	int i;
+	struct btrfs_ioctl_snapshot_size_query_args snap_args;
+	struct btrfs_ioctl_snapshot_size_query_args *user_args;
+	struct ulist *roots;
+	int ret;
+
+	user_args = (struct btrfs_ioctl_snapshot_size_query_args __user *)argp;
+
+	if (copy_from_user(&snap_args, argp, sizeof(snap_args)))
+		return -EFAULT;
+
+	if (snap_args.snap_count == 0)
+		return -EINVAL;
+
+	snap_args.snap_id = memdup_user(user_args->snap_id, sizeof(u64)*snap_args.snap_count);
+	if (IS_ERR(snap_args.snap_id))
+		return PTR_ERR(snap_args.snap_id);
+
+	roots = ulist_alloc(GFP_NOFS);
+	if (!roots) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < snap_args.snap_count; ++i) {
+		ret = ulist_add(roots, snap_args.snap_id[i], 0, GFP_KERNEL);
+		if (ret < 0)
+			goto out;
+		if (ret == 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+	ret = btrfs_snapshot_size_query(file, &snap_args, roots,
+					    btrfs_find_shared_root);
+
+	if (copy_to_user(&user_args->calc_size, &snap_args.calc_size, sizeof(snap_args.calc_size))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (ret > 0)
+		ret = 0;
+out:
+	kfree(snap_args.snap_id);
+	ulist_free(roots);
+	return ret;
+}
+#endif  
+
 long btrfs_ioctl(struct file *file, unsigned int
 		cmd, unsigned long arg)
 {
@@ -5305,6 +5640,10 @@
 	case BTRFS_IOC_COMPR_CTL:
 		return btrfs_ioctl_compr_ctl(file, argp);
 #endif
+#ifdef MY_ABC_HERE
+	case BTRFS_IOC_SNAPSHOT_SIZE_QUERY:
+		return btrfs_ioctl_snapshot_size_query(file, argp);
+#endif  
 	}
 
 	return -ENOTTY;
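
For reference, defrag_check_extent_usage() above marks an extent for rewrite when the bytes still referenced fall to at most syno_ratio_nom/syno_ratio_denom of the on-disk extent (2/3 with the defaults in the hunk), or when at least syno_thresh bytes of it (8 MiB by default) are no longer referenced. A standalone check of that arithmetic; the function name and sample values below are illustrative only:

#include <stdio.h>
#include <stdint.h>

static int should_rewrite(uint64_t used, uint64_t disk_len,
			  uint32_t denom, uint32_t nom, uint64_t thresh)
{
	/* same test as the hunk: used*denom <= disk_len*nom, or thresh slack */
	return used * denom <= disk_len * nom || disk_len >= used + thresh;
}

int main(void)
{
	uint64_t mib = 1024 * 1024;

	/* defaults from the patch: 2/3 ratio, 8 MiB threshold */
	printf("%d\n", should_rewrite(7 * mib, 12 * mib, 3, 2, 8 * mib));	/* 1: 7/12 is under 2/3 */
	printf("%d\n", should_rewrite(11 * mib, 12 * mib, 3, 2, 8 * mib));	/* 0: still densely used */
	return 0;
}
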
diff -ur a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
--- a/fs/btrfs/ordered-data.c	2017-03-23 15:07:54.000000000 +0100
+++ b/fs/btrfs/ordered-data.c	2017-03-14 02:45:52.000000000 +0100
@@ -686,17 +686,10 @@
 			orig_end = INT_LIMIT(loff_t);
 	}
 
-	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+	ret = btrfs_fdatawrite_range(inode, start, orig_end);
 	if (ret)
 		return ret;
-	 
-	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-		     &BTRFS_I(inode)->runtime_flags)) {
-		ret = filemap_fdatawrite_range(inode->i_mapping, start,
-					       orig_end);
-		if (ret)
-			return ret;
-	}
+
 	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 	if (ret)
 		return ret;
diff -ur a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
--- a/fs/btrfs/relocation.c	2017-03-23 15:08:00.000000000 +0100
+++ b/fs/btrfs/relocation.c	2017-03-14 02:45:56.000000000 +0100
@@ -3152,7 +3152,9 @@
 }
 
 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
-				    struct inode *inode, u64 ino)
+				    struct btrfs_block_group_cache *block_group,
+				    struct inode *inode,
+				    u64 ino)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = fs_info->tree_root;
@@ -3185,7 +3187,7 @@
 		goto out;
 	}
 
-	ret = btrfs_truncate_free_space_cache(root, trans, inode);
+	ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);
 
 	btrfs_end_transaction(trans, root);
 	btrfs_btree_balance_dirty(root);
@@ -3223,6 +3225,7 @@
 
 	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
 		ret = delete_block_group_cache(rc->extent_root->fs_info,
+					       rc->block_group,
 					       NULL, ref_objectid);
 		if (ret != -ENOENT)
 			return ret;
@@ -3914,7 +3917,7 @@
 	btrfs_free_path(path);
 
 	if (!IS_ERR(inode))
-		ret = delete_block_group_cache(fs_info, inode, 0);
+		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
 	else
 		ret = PTR_ERR(inode);
 
diff -ur a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
--- a/fs/btrfs/scrub.c	2017-03-23 15:07:56.000000000 +0100
+++ b/fs/btrfs/scrub.c	2017-03-14 02:45:53.000000000 +0100
@@ -1116,7 +1116,6 @@
 		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
 					     have_csum, csum, generation,
 					     csum_size);
-
 	return;
 }
 
diff -ur a/fs/btrfs/send.c b/fs/btrfs/send.c
--- a/fs/btrfs/send.c	2017-03-23 15:08:01.000000000 +0100
+++ b/fs/btrfs/send.c	2017-03-14 02:45:58.000000000 +0100
@@ -114,6 +114,10 @@
 #ifdef MY_ABC_HERE
 	u32 subvol_flags;
 #endif
+#ifdef MY_ABC_HERE
+	u64 skip_cmd_count;
+	u64 current_cmd_pos;
+#endif  
 
 	struct list_head new_refs;
 	struct list_head deleted_refs;
@@ -584,6 +588,15 @@
 	u32 crc;
 
 	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+
+#ifdef MY_ABC_HERE
+	if (sctx->current_cmd_pos < sctx->skip_cmd_count && (le16_to_cpu(hdr->cmd) != BTRFS_SEND_C_SUBVOL) && (le16_to_cpu(hdr->cmd) != BTRFS_SEND_C_SNAPSHOT)) {
+		sctx->current_cmd_pos++;
+		sctx->send_size = 0;
+		return 0;
+	}
+#endif  
+
 	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
 	hdr->crc = 0;
 
@@ -1257,7 +1270,14 @@
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
-	BUG_ON(ret);
+	if (ret) {
+		 
+		btrfs_err(root->fs_info,
+			  "Found empty symlink inode %llu at root %llu",
+			  ino, root->root_key.objectid);
+		ret = -EIO;
+		goto out;
+	}
 
 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
 			struct btrfs_file_extent_item);
@@ -2106,8 +2126,9 @@
 }
 
 #ifdef MY_ABC_HERE
-static void write_calculate_size(struct send_ctx *sctx)
+static int write_calculate_size(struct send_ctx *sctx)
 {
+	int ret = 0;
 	struct timeval now;
 	unsigned long val;
 
@@ -2117,9 +2138,10 @@
 	val += ((now.tv_usec - sctx->write_timeval.tv_usec) / 1000);
 	if (val > 800) {
 		snprintf(sctx->send_buf, sctx->send_max_size, "About:%llu\n", sctx->total_data_size);
-		write_buf(sctx->send_filp, sctx->send_buf, strlen(sctx->send_buf), &sctx->send_off);
+		ret = write_buf(sctx->send_filp, sctx->send_buf, strlen(sctx->send_buf), &sctx->send_off);
 		sctx->write_timeval = now;
 	}
+	return ret;
 }
 #endif  
 
@@ -2131,8 +2153,7 @@
 #ifdef MY_ABC_HERE
 	if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 		sctx->total_data_size += sizeof(struct btrfs_cmd_header) + sizeof(struct fs_path);
-		write_calculate_size(sctx);
-		return 0;
+		return write_calculate_size(sctx);
 	}
 #endif
 
@@ -3054,14 +3075,17 @@
 	list_for_each_entry(cur, &pm->update_refs, list) {
 #ifdef MY_ABC_HERE
 		 
+		u64 gen;
 		ret = get_inode_info(sctx->send_root, cur->dir, NULL,
-			     NULL , NULL, NULL, NULL, NULL);
-		if (ret == -ENOENT) {
+			     &gen , NULL, NULL, NULL, NULL);
+		if (ret < 0 && ret != -ENOENT) {
+			goto out;
+		}
+

+		if (ret == -ENOENT || gen != cur->dir_gen) {
 			ret = 0;
 			continue;
 		}
-		if (ret < 0)
-			goto out;
 #else
 		if (cur->dir == rmdir_ino)
 			continue;
@@ -3356,8 +3380,7 @@
 #ifdef MY_ABC_HERE
 	if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 		sctx->total_data_size += sizeof(struct btrfs_cmd_header) + sizeof(struct fs_path);
-		write_calculate_size(sctx);
-		return 0;
+		return write_calculate_size(sctx);
 	}
 #endif
 
@@ -4246,7 +4269,11 @@
 	return ret;
 }
 
+#ifdef MY_ABC_HERE
+static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len, bool onlyCalculateSize)
+#else
 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
+#endif  
 {
 	struct btrfs_root *root = sctx->send_root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4278,19 +4305,52 @@
 
 	last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
 
+#ifdef MY_ABC_HERE
+	if (!onlyCalculateSize) {
+#endif  
 	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
 	file_ra_state_init(&sctx->ra, inode->i_mapping);
+#ifdef MY_ABC_HERE
+#else
 	btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
 		       last_index - index + 1);
+#endif  
+#ifdef MY_ABC_HERE
+	}
+#endif  
 
 	while (index <= last_index) {
 		unsigned cur_len = min_t(unsigned, len,
 					 PAGE_CACHE_SIZE - pg_offset);
+#ifdef MY_ABC_HERE
+		if (!onlyCalculateSize) {
+#endif  
+#ifdef MY_ABC_HERE
+		page = find_lock_page(inode->i_mapping, index);
+		if (!page) {
+			page_cache_sync_readahead(inode->i_mapping,
+						  &sctx->ra, NULL, index,
+						  last_index + 1 - index);
+
+			page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+			if (unlikely(page == NULL)) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+
+		if (PageReadahead(page)) {
+			page_cache_async_readahead(inode->i_mapping,
+						   &sctx->ra, NULL, page, index,
+						   last_index + 1 - index);
+		}
+#else
 		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 		if (!page) {
 			ret = -ENOMEM;
 			break;
 		}
+#endif  
 
 		if (!PageUptodate(page)) {
 			btrfs_readpage(NULL, page);
@@ -4308,6 +4368,9 @@
 		kunmap(page);
 		unlock_page(page);
 		page_cache_release(page);
+#ifdef MY_ABC_HERE
+		}
+#endif  
 		index++;
 		pg_offset = 0;
 		len -= cur_len;
@@ -4330,7 +4393,15 @@
 
 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
 
+#ifdef MY_ABC_HERE
+	if (sctx->current_cmd_pos < sctx->skip_cmd_count) {
+		num_read = fill_read_buf(sctx, offset, len, true);
+	} else {
+		num_read = fill_read_buf(sctx, offset, len, false);
+	}
+#else
 	num_read = fill_read_buf(sctx, offset, len);
+#endif  
 	if (num_read <= 0) {
 		if (num_read < 0)
 			ret = num_read;
@@ -4341,6 +4412,12 @@
 	if (ret < 0)
 		goto out;
 
+#ifdef MY_ABC_HERE
+	if (sctx->current_cmd_pos < sctx->skip_cmd_count) {
+		goto send_cmd_label;
+	}
+#endif  
+
 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
 	if (ret < 0)
 		goto out;
@@ -4349,6 +4426,9 @@
 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
 	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
 
+#ifdef MY_ABC_HERE
+send_cmd_label:
+#endif  
 	ret = send_cmd(sctx);
 
 tlv_put_failure:
@@ -4461,8 +4541,7 @@
 #ifdef MY_ABC_HERE
 	if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 		sctx->total_data_size += end - offset;
-		write_calculate_size(sctx);
-		return 0;
+		return write_calculate_size(sctx);
 	}
 #endif
 
@@ -4529,10 +4608,10 @@
 	if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 		if (clone_root && IS_ALIGNED(offset + len, bs)) {
 			sctx->total_data_size += sizeof(struct btrfs_cmd_header) + sizeof(struct fs_path);
-			write_calculate_size(sctx);
+			ret = write_calculate_size(sctx);
 		} else if (offset < sctx->cur_inode_size) {
 			sctx->total_data_size += len;
-			write_calculate_size(sctx);
+			ret = write_calculate_size(sctx);
 		}
 		goto out;
 	}
@@ -4955,7 +5034,10 @@
 #ifdef MY_ABC_HERE
 			if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 				sctx->total_data_size += sizeof(archive_bit_le32);
-				write_calculate_size(sctx);
+				ret = write_calculate_size(sctx);
+				if (ret < 0) {
+					goto out;
+				}
 			} else {
 #endif  
 			ret = send_set_xattr(sctx, p, XATTR_SYNO_PREFIX XATTR_SYNO_ARCHIVE_BIT,
@@ -5213,7 +5295,10 @@
 			if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 				if (S_ISREG(sctx->cur_inode_mode)) {
 					sctx->total_data_size += sctx->cur_inode_size;
-					write_calculate_size(sctx);
+					ret = write_calculate_size(sctx);
+					if (ret < 0) {
+						goto out;
+					}
 				}
 				 
 			} else {
@@ -5254,8 +5339,7 @@
 #ifdef MY_ABC_HERE
 	if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 		sctx->total_data_size += sizeof(struct btrfs_cmd_header) + sizeof(struct fs_path);
-		write_calculate_size(sctx);
-		return 0;
+		return write_calculate_size(sctx);
 	}
 #endif
 
@@ -5282,8 +5366,7 @@
 #ifdef MY_ABC_HERE
 	if (sctx->phase == SEND_PHASE_COMPUTE_DATA_SIZE) {
 		sctx->total_data_size += sizeof(struct btrfs_cmd_header) + sizeof(struct fs_path);
-		write_calculate_size(sctx);
-		return 0;
+		return write_calculate_size(sctx);
 	}
 #endif
 
@@ -5622,9 +5705,15 @@
 	sctx->send_root = send_root;
 #ifdef MY_ABC_HERE
 	sctx->subvol_flags = BTRFS_I(file_inode(mnt_file))->flags;
+#endif
+#ifdef MY_ABC_HERE
 	sctx->total_data_size = arg->total_data_size;
 	do_gettimeofday(&sctx->write_timeval);
-#endif
+#endif  
+#ifdef MY_ABC_HERE
+	sctx->skip_cmd_count = arg->skip_cmd_count;
+	sctx->current_cmd_pos = 0;
+#endif  
 	 
 	if (btrfs_root_dead(sctx->send_root)) {
 		ret = -EPERM;
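
The skip_cmd_count plumbing added to send.c above makes a send stream resumable: commands are still generated in order, but the first skip_cmd_count of them (other than the SUBVOL/SNAPSHOT header commands) are counted and dropped before reaching the output, and fill_read_buf() avoids reading their file data at all. A toy userspace sketch of that count-and-skip behaviour, with invented names:

#include <stdio.h>

struct demo_ctx {
	unsigned long skip_cmd_count;
	unsigned long current_cmd_pos;
};

static void emit_cmd(struct demo_ctx *ctx, const char *cmd)
{
	if (ctx->current_cmd_pos < ctx->skip_cmd_count) {
		ctx->current_cmd_pos++;	/* count it, but write nothing */
		return;
	}
	printf("cmd: %s\n", cmd);
}

int main(void)
{
	struct demo_ctx ctx = { .skip_cmd_count = 2 };

	emit_cmd(&ctx, "mkfile foo");
	emit_cmd(&ctx, "write foo 0..4k");
	emit_cmd(&ctx, "write foo 4k..8k");	/* only this one is emitted */
	return 0;
}
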
diff -ur a/fs/btrfs/super.c b/fs/btrfs/super.c
--- a/fs/btrfs/super.c	2017-03-23 15:07:57.000000000 +0100
+++ b/fs/btrfs/super.c	2017-03-14 02:45:54.000000000 +0100
@@ -833,6 +833,15 @@
 	if (IS_ERR(new_root))
 		return ERR_CAST(new_root);
 
+	if (!(sb->s_flags & MS_RDONLY)) {
+		int ret;
+		down_read(&fs_info->cleanup_work_sem);
+		ret = btrfs_orphan_cleanup(new_root);
+		up_read(&fs_info->cleanup_work_sem);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
 	dir_id = btrfs_root_dirid(&new_root->root_item);
 setup_root:
 	location.objectid = dir_id;
diff -ur a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
--- a/fs/btrfs/transaction.c	2017-03-23 15:07:56.000000000 +0100
+++ b/fs/btrfs/transaction.c	2017-03-14 02:45:53.000000000 +0100
@@ -165,6 +165,7 @@
 	atomic_set(&cur_trans->use_count, 2);
 	atomic_set(&cur_trans->pending_ordered, 0);
 	cur_trans->start_time = get_seconds();
+	cur_trans->dirty_bg_run = 0;
 
 	cur_trans->delayed_refs.href_root = RB_ROOT;
 	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
@@ -187,6 +188,10 @@
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	INIT_LIST_HEAD(&cur_trans->pending_chunks);
 	INIT_LIST_HEAD(&cur_trans->switch_commits);
+	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
+	INIT_LIST_HEAD(&cur_trans->io_bgs);
+	mutex_init(&cur_trans->cache_write_mutex);
+	spin_lock_init(&cur_trans->dirty_bgs_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
 			     fs_info->btree_inode->i_mapping);
@@ -372,11 +377,8 @@
 		}
 	} while (ret == -EBUSY);
 
-	if (ret < 0) {
-		 
-		BUG_ON(type == TRANS_JOIN_NOLOCK);
+	if (ret < 0)
 		goto join_fail;
-	}
 
 	cur_trans = root->fs_info->running_transaction;
 
@@ -852,7 +854,6 @@
 	struct btrfs_root *tree_root = root->fs_info->tree_root;
 
 	old_root_used = btrfs_root_used(&root->root_item);
-	btrfs_write_dirty_block_groups(trans, root);
 
 	while (1) {
 		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
@@ -868,9 +869,6 @@
 			return ret;
 
 		old_root_used = btrfs_root_used(&root->root_item);
-		ret = btrfs_write_dirty_block_groups(trans, root);
-		if (ret)
-			return ret;
 	}
 
 	return 0;
@@ -880,14 +878,12 @@
 					 struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
+	struct list_head *io_bgs = &trans->transaction->io_bgs;
 	struct list_head *next;
 	struct extent_buffer *eb;
 	int ret;
 
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-	if (ret)
-		return ret;
-
 	eb = btrfs_lock_root_node(fs_info->tree_root);
 	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
 			      0, &eb);
@@ -911,14 +907,19 @@
 	if (ret)
 		return ret;
 
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	ret = btrfs_setup_space_cache(trans, root);
 	if (ret)
 		return ret;
 
+	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	if (ret)
+		return ret;
+again:
 	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
 		next = fs_info->dirty_cowonly_roots.next;
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
+		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
 
 		if (root != fs_info->extent_root)
 			list_add_tail(&root->dirty_list,
@@ -926,8 +927,23 @@
 		ret = update_cowonly_root(trans, root);
 		if (ret)
 			return ret;
+		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+		if (ret)
+			return ret;
+	}
+
+	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
+		ret = btrfs_write_dirty_block_groups(trans, root);
+		if (ret)
+			return ret;
+		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+		if (ret)
+			return ret;
 	}
 
+	if (!list_empty(&fs_info->dirty_cowonly_roots))
+		goto again;
+
 	list_add_tail(&fs_info->extent_root->dirty_list,
 		      &trans->transaction->switch_commits);
 	btrfs_after_dev_replace_commit(fs_info);
@@ -1471,7 +1487,7 @@
 	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
 		return btrfs_start_delalloc_roots(fs_info, 1, -1);
 #ifdef MY_ABC_HERE
-	else if (fs_info->delalloc_inodes_nr > fs_info->flushoncommit_threshold)
+	else if (fs_info->flushoncommit_threshold && fs_info->delalloc_inodes_nr > fs_info->flushoncommit_threshold)
 		return btrfs_start_delalloc_roots(fs_info, 1, -1);
 #endif
 	return 0;
@@ -1482,7 +1498,7 @@
 	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
 		btrfs_wait_ordered_roots(fs_info, -1);
 #ifdef MY_ABC_HERE
-	else if (fs_info->ordered_extent_nr > fs_info->flushoncommit_threshold) {
+	else if (fs_info->flushoncommit_threshold && fs_info->ordered_extent_nr > fs_info->flushoncommit_threshold) {
 		btrfs_wait_ordered_roots(fs_info, -1);
 	}
 #endif
@@ -1539,6 +1555,24 @@
 #ifdef MY_ABC_HERE
 	trans->pending_snap_rm = false;
 #endif  
+	if (!cur_trans->dirty_bg_run) {
+		int run_it = 0;
+
+		mutex_lock(&root->fs_info->ro_block_group_mutex);
+		if (!cur_trans->dirty_bg_run) {
+			run_it = 1;
+			cur_trans->dirty_bg_run = 1;
+		}
+		mutex_unlock(&root->fs_info->ro_block_group_mutex);
+
+		if (run_it)
+			ret = btrfs_start_dirty_block_groups(trans, root);
+	}
+	if (ret) {
+		btrfs_end_transaction(trans, root);
+		return ret;
+	}
+
 	spin_lock(&root->fs_info->trans_lock);
 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
 		spin_unlock(&root->fs_info->trans_lock);
@@ -1677,6 +1711,8 @@
 	switch_commit_roots(cur_trans, root->fs_info);
 
 	assert_qgroups_uptodate(trans);
+	ASSERT(list_empty(&cur_trans->dirty_bgs));
+	ASSERT(list_empty(&cur_trans->io_bgs));
 	update_super_roots(root);
 
 	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
diff -ur a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
--- a/fs/btrfs/transaction.h	2017-03-23 15:07:56.000000000 +0100
+++ b/fs/btrfs/transaction.h	2017-03-14 02:45:53.000000000 +0100
@@ -37,11 +37,17 @@
 	struct list_head pending_snapshots;
 	struct list_head pending_chunks;
 	struct list_head switch_commits;
+	struct list_head dirty_bgs;
+	struct list_head io_bgs;
+
+	struct mutex cache_write_mutex;
+	spinlock_t dirty_bgs_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
 #ifdef MY_ABC_HERE
 	bool clear_full;
 #endif
+	int dirty_bg_run;
 };
 
 #define __TRANS_FREEZABLE	(1U << 0)
diff -ur a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
--- a/fs/btrfs/tree-log.c	2017-03-23 15:07:58.000000000 +0100
+++ b/fs/btrfs/tree-log.c	2017-03-14 02:45:54.000000000 +0100
@@ -1549,9 +1549,6 @@
 	char *name;
 	struct inode *inode;
 	struct btrfs_key location;
-#ifdef MY_ABC_HERE
-	struct btrfs_key log_location;
-#endif  
 
 again:
 	eb = path->nodes[0];
@@ -1586,15 +1583,6 @@
 						     dir_key->offset,
 						     name, name_len, 0);
 		}
-#ifdef MY_ABC_HERE
-		if (log_di && !IS_ERR(log_di)) {
-			btrfs_dir_item_key_to_cpu(eb, di, &location);
-			btrfs_dir_item_key_to_cpu(log_path->nodes[0], di, &log_location);
-			if (log_location.objectid != location.objectid) {
-				log_di = NULL;
-			}
-		}
-#endif  
 		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
 			btrfs_dir_item_key_to_cpu(eb, di, &location);
 			btrfs_release_path(path);
diff -ur a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
--- a/fs/btrfs/ulist.c	2017-03-23 15:07:52.000000000 +0100
+++ b/fs/btrfs/ulist.c	2017-03-14 02:45:49.000000000 +0100
@@ -150,8 +150,8 @@
 	return node;
 }
 
-#if defined(MY_ABC_HERE)
-int ulist_add_lru_adjust(struct ulist *ulist, u64 val, gfp_t gfp_mask)
+#if defined(MY_ABC_HERE) || defined(MY_ABC_HERE)
+int ulist_add_lru_adjust(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
 {
 	int ret;
 	struct ulist_node *node;
@@ -167,6 +167,7 @@
 		return -ENOMEM;
 
 	node->val = val;
+	node->aux = aux;
 #ifdef CONFIG_BTRFS_DEBUG
 	node->seqnum = ulist->nnodes;
 #endif
@@ -192,4 +193,24 @@
 	ulist->nnodes--;
 	kfree(node);
 }
-#endif
+#endif  
+
+#if defined(MY_ABC_HERE) || \
+    defined(MY_ABC_HERE)
+struct ulist_node * ulist_search(struct ulist *ulist, u64 val)
+{
+	struct rb_node *n = ulist->root.rb_node;
+	struct ulist_node *u = NULL;
+
+	while (n) {
+		u = rb_entry(n, struct ulist_node, rb_node);
+		if (u->val < val)
+			n = n->rb_right;
+		else if (u->val > val)
+			n = n->rb_left;
+		else
+			return u;
+	}
+	return NULL;
+}
+#endif  
diff -ur a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
--- a/fs/btrfs/ulist.h	2017-03-23 15:07:56.000000000 +0100
+++ b/fs/btrfs/ulist.h	2017-03-14 02:45:54.000000000 +0100
@@ -47,10 +47,14 @@
 
 #define ULIST_ITER_INIT(uiter) ((uiter)->cur_list = NULL)
 
-#if defined(MY_ABC_HERE)
-#define ULIST_NODES_MAX 16384  
-int ulist_add_lru_adjust(struct ulist *ulist, u64 val, gfp_t gfp_mask);
+#if defined(MY_ABC_HERE) || defined(MY_ABC_HERE)
+#define ULIST_NODES_MAX 65536  
+int ulist_add_lru_adjust(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
 void ulist_remove_first(struct ulist *ulist);
 #endif
+#if defined(MY_ABC_HERE) || \
+    defined(MY_ABC_HERE)
+struct ulist_node * ulist_search(struct ulist *ulist, u64 val);
+#endif  
 
 #endif
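
The ulist changes above add a value-keyed lookup (ulist_search()) and an aux payload to the LRU-bounded ulist, which the defrag path uses to cache its per-extent rewrite verdict. A self-contained sketch of the same lookup shape built on a plain binary search tree; the node layout and helpers are invented for the demo, while the kernel code walks the ulist's existing rb-tree:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_node {
	uint64_t val;
	uint64_t aux;	/* cached decision for this value */
	struct demo_node *left, *right;
};

static struct demo_node *demo_search(struct demo_node *n, uint64_t val)
{
	while (n) {
		if (n->val < val)
			n = n->right;
		else if (n->val > val)
			n = n->left;
		else
			return n;
	}
	return NULL;
}

static struct demo_node *demo_insert(struct demo_node **n, uint64_t val, uint64_t aux)
{
	while (*n) {
		if ((*n)->val < val)
			n = &(*n)->right;
		else if ((*n)->val > val)
			n = &(*n)->left;
		else
			return *n;	/* already present, keep cached aux */
	}
	*n = calloc(1, sizeof(**n));
	if (!*n)
		exit(1);
	(*n)->val = val;
	(*n)->aux = aux;
	return *n;
}

int main(void)
{
	struct demo_node *root = NULL;
	struct demo_node *hit;

	demo_insert(&root, 4096, 1);	/* extent at 4096: rewrite */
	demo_insert(&root, 8192, 0);	/* extent at 8192: keep */

	hit = demo_search(root, 8192);
	printf("%llu -> %llu\n",
	       (unsigned long long)hit->val, (unsigned long long)hit->aux);
	return 0;
}
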
diff -ur a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
--- a/fs/btrfs/volumes.c	2017-03-23 15:08:02.000000000 +0100
+++ b/fs/btrfs/volumes.c	2017-03-14 02:45:58.000000000 +0100
@@ -4194,6 +4194,7 @@
 {
 	u64 chunk_offset;
 
+	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
 	chunk_offset = find_next_chunk(extent_root->fs_info);
 	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
 }
diff -ur a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
--- a/fs/btrfs/xattr.c	2017-03-23 15:07:54.000000000 +0100
+++ b/fs/btrfs/xattr.c	2017-03-14 02:45:51.000000000 +0100
@@ -92,7 +92,12 @@
 	}
 
 	if (flags & XATTR_REPLACE) {
-		ASSERT(mutex_is_locked(&inode->i_mutex));
+		if (!mutex_is_locked(&inode->i_mutex)) {
+			pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
+			       "mutex_is_locked(&inode->i_mutex)", __FILE__,
+			       __LINE__);
+			BUG();
+		}
 		di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
 					name, name_len, 0);
 		if (!di) {
@@ -117,7 +122,11 @@
 	} else if (ret == -EEXIST) {
 		ret = 0;
 		di = btrfs_match_dir_item_name(root, path, name, name_len);
-		ASSERT(di);  
+		if (!di) {
+			pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
+			       "di", __FILE__, __LINE__);
+			BUG();
+		}
 	} else if (ret) {
 		goto out;
 	}
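The two xattr.c hunks above open-code the check, presumably so it still fires when CONFIG_BTRFS_ASSERT is disabled and ASSERT() compiles away. A hedged sketch of an equivalent helper macro (the name SYNO_BUG_ON_FALSE is hypothetical; the patch deliberately keeps the checks inline):

/* Hypothetical helper equivalent to the open-coded checks above: log the
 * failed expression with its location and crash, independent of whether
 * CONFIG_BTRFS_ASSERT is set. */
#define SYNO_BUG_ON_FALSE(expr)						\
	do {								\
		if (!(expr)) {						\
			pr_err("BTRFS: assertion failed: %s, file: %s, line: %d", \
			       #expr, __FILE__, __LINE__);		\
			BUG();						\
		}							\
	} while (0)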
diff -ur a/fs/buffer.c b/fs/buffer.c
--- a/fs/buffer.c	2017-03-23 15:07:51.000000000 +0100
+++ b/fs/buffer.c	2017-03-14 02:45:48.000000000 +0100
@@ -91,13 +91,23 @@
 {
 	char b[BDEVNAME_SIZE];
 #ifdef MY_ABC_HERE
-	if (printk_ratelimit()) {
-#endif  
+	static unsigned long long b_blocknr_last = 0;
+
+	if (b_blocknr_last == (unsigned long long)bh->b_blocknr) {
+		printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
+			bdevname(bh->b_bdev, b),
+			b_blocknr_last);
+	} else {
+		b_blocknr_last = (unsigned long long)bh->b_blocknr;
+		printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block in range %Lu + 0-2(%d)\n",
+			bdevname(bh->b_bdev, b),
+			(b_blocknr_last >> CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT) << CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT,
+			CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT);
+	}
+#else
 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
 			bdevname(bh->b_bdev, b),
 			(unsigned long long)bh->b_blocknr);
-#ifdef MY_ABC_HERE
-	}
 #endif  
 }
 
@@ -128,7 +138,11 @@
 	} else {
 		if (!quiet_error(bh)) {
 			buffer_io_error(bh);
+#ifdef MY_ABC_HERE
+			printk_ratelimited(KERN_WARNING "lost page write due to "
+#else
 			printk(KERN_WARNING "lost page write due to "
+#endif  
 					"I/O error on %s\n",
 				       bdevname(bh->b_bdev, b));
 		}
@@ -275,7 +289,11 @@
 	} else {
 		if (!quiet_error(bh)) {
 			buffer_io_error(bh);
+#ifdef MY_ABC_HERE
+			printk_ratelimited(KERN_WARNING "lost page write due to "
+#else
 			printk(KERN_WARNING "lost page write due to "
+#endif  
 					"I/O error on %s\n",
 			       bdevname(bh->b_bdev, b));
 		}
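Both buffer.c hunks above switch to printk_ratelimited(), and buffer_io_error() additionally coalesces neighbouring failing blocks into one "range" message by masking off the low CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT bits of the block number. A minimal sketch of that rounding (the helper name is hypothetical):

/* Sketch of the block-range coalescing used in buffer_io_error() above:
 * round the block number down to a 2^shift aligned base so bursts of
 * errors on neighbouring blocks share a single log line. */
static inline unsigned long long io_error_range_base(unsigned long long blocknr,
						     unsigned int shift)
{
	return (blocknr >> shift) << shift;	/* e.g. shift = 2: 0, 4, 8, ... */
}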
diff -ur a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
--- a/fs/cifs/cifs_debug.c	2017-03-23 15:08:04.000000000 +0100
+++ b/fs/cifs/cifs_debug.c	2017-03-14 02:46:01.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 /*
  *   fs/cifs_debug.c
  *
@@ -67,8 +70,11 @@
 
 	vaf.fmt = fmt;
 	vaf.va = &args;
-
+#ifdef MY_ABC_HERE
+	printk_ratelimited(KERN_ERR "CIFS VFS: %pV", &vaf);
+#else
 	printk(KERN_ERR "CIFS VFS: %pV", &vaf);
+#endif /* MY_ABC_HERE */
 
 	va_end(args);
 }
diff -ur a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
--- a/fs/cifs/cifsencrypt.c	2017-03-23 15:08:03.000000000 +0100
+++ b/fs/cifs/cifsencrypt.c	2017-03-14 02:46:00.000000000 +0100
@@ -590,7 +590,7 @@
 
 	ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
 	if (!ses->auth_key.response) {
-		rc = ENOMEM;
+		rc = -ENOMEM;
 		ses->auth_key.len = 0;
 		goto setup_ntlmv2_rsp_ret;
 	}
diff -ur a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
--- a/fs/cifs/cifssmb.c	2017-03-23 15:08:11.000000000 +0100
+++ b/fs/cifs/cifssmb.c	2017-03-14 02:46:06.000000000 +0100
@@ -1229,11 +1229,10 @@
 }
 
 static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+discard_remaining_data(struct TCP_Server_Info *server)
 {
 	unsigned int rfclen = get_rfc1002_length(server->smallbuf);
 	int remaining = rfclen + 4 - server->total_read;
-	struct cifs_readdata *rdata = mid->callback_data;
 
 	while (remaining > 0) {
 		int length;
@@ -1247,10 +1246,20 @@
 		remaining -= length;
 	}
 
-	dequeue_mid(mid, rdata->result);
 	return 0;
 }
 
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	int length;
+	struct cifs_readdata *rdata = mid->callback_data;
+
+	length = discard_remaining_data(server);
+	dequeue_mid(mid, rdata->result);
+	return length;
+}
+
 int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
@@ -1274,6 +1283,12 @@
 		return length;
 	server->total_read += length;
 
+	if (server->ops->is_status_pending &&
+	    server->ops->is_status_pending(buf, server, 0)) {
+		discard_remaining_data(server);
+		return -1;
+	}
+
 	rdata->result = server->ops->map_error(buf, false);
 	if (rdata->result != 0) {
 		cifs_dbg(FYI, "%s: server returned error %d\n",
diff -ur a/fs/cifs/sess.c b/fs/cifs/sess.c
--- a/fs/cifs/sess.c	2017-03-23 15:08:05.000000000 +0100
+++ b/fs/cifs/sess.c	2017-03-14 02:46:02.000000000 +0100
@@ -410,19 +410,25 @@
 	sec_blob->LmChallengeResponse.MaximumLength = 0;
 
 	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
-	rc = setup_ntlmv2_rsp(ses, nls_cp);
-	if (rc) {
-		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
-		goto setup_ntlmv2_ret;
-	}
-	memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-	tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+	if (ses->user_name != NULL) {
+		rc = setup_ntlmv2_rsp(ses, nls_cp);
+		if (rc) {
+			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+			goto setup_ntlmv2_ret;
+		}
+		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
 
-	sec_blob->NtChallengeResponse.Length =
-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-	sec_blob->NtChallengeResponse.MaximumLength =
-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+		sec_blob->NtChallengeResponse.Length =
+				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+		sec_blob->NtChallengeResponse.MaximumLength =
+				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+	} else {
+		 
+		sec_blob->NtChallengeResponse.Length = 0;
+		sec_blob->NtChallengeResponse.MaximumLength = 0;
+	}
 
 	if (ses->domainName == NULL) {
 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
diff -ur a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
--- a/fs/cifs/smb2ops.c	2017-03-23 15:08:04.000000000 +0100
+++ b/fs/cifs/smb2ops.c	2017-03-14 02:46:00.000000000 +0100
@@ -48,9 +48,13 @@
 		break;
 	default:
 		server->echoes = true;
-		server->oplocks = true;
+		if (enable_oplocks) {
+			server->oplocks = true;
+			server->oplock_credits = 1;
+		} else
+			server->oplocks = false;
+
 		server->echo_credits = 1;
-		server->oplock_credits = 1;
 	}
 	server->credits -= server->echo_credits + server->oplock_credits;
 	return 0;
diff -ur a/fs/compat.c b/fs/compat.c
--- a/fs/compat.c	2017-03-23 15:07:51.000000000 +0100
+++ b/fs/compat.c	2017-03-14 02:45:48.000000000 +0100
@@ -1444,7 +1444,8 @@
 				goto drop_write;
 		} else if (inode->i_op->syno_bypass_is_synoacl) {
 			 
-			error = inode->i_op->syno_bypass_is_synoacl(path.dentry, 0, -EPERM);
+			error = inode->i_op->syno_bypass_is_synoacl(path.dentry,
+					                BYPASS_SYNOACL_SYNOUTIME, -EPERM);
 			if (error)
 				goto drop_write;
 		} else {
diff -ur a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
--- a/fs/configfs/configfs_internal.h	2017-03-23 15:08:52.000000000 +0100
+++ b/fs/configfs/configfs_internal.h	2017-03-14 02:46:45.000000000 +0100
@@ -33,6 +33,16 @@
 	return dentry->d_inode;
 }
 
+static inline bool d_really_is_positive(const struct dentry *dentry)
+{
+	return dentry->d_inode != NULL;
+}
+
+static inline bool d_really_is_negative(const struct dentry *dentry)
+{
+	return dentry->d_inode == NULL;
+}
+
 static inline void inode_lock(struct inode *inode)
 {
 	mutex_lock(&inode->i_mutex);
diff -ur a/fs/configfs/dir.c b/fs/configfs/dir.c
--- a/fs/configfs/dir.c	2017-03-23 15:08:54.000000000 +0100
+++ b/fs/configfs/dir.c	2017-03-14 02:46:47.000000000 +0100
@@ -380,8 +380,8 @@
 	list_del_init(&sd->s_sibling);
 	spin_unlock(&configfs_dirent_lock);
 	configfs_put(sd);
-	if (d->d_inode)
-		simple_rmdir(parent->d_inode,d);
+	if (d_really_is_positive(d))
+		simple_rmdir(d_inode(parent),d);
 
 	pr_debug(" o %s removing done (%d)\n",d->d_name.name, d->d_count);
 
@@ -492,7 +492,7 @@
  * If there is an error, the caller will reset the flags via
  * configfs_detach_rollback().
  */
-static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex)
+static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait)
 {
 	struct configfs_dirent *parent_sd = dentry->d_fsdata;
 	struct configfs_dirent *sd;
@@ -513,8 +513,8 @@
 		if (sd->s_type & CONFIGFS_USET_DEFAULT) {
 			/* Abort if racing with mkdir() */
 			if (sd->s_type & CONFIGFS_USET_IN_MKDIR) {
-				if (wait_mutex)
-					*wait_mutex = &sd->s_dentry->d_inode->i_mutex;
+				if (wait)
+					*wait = dget(sd->s_dentry);
 				return -EAGAIN;
 			}
 
@@ -522,7 +522,7 @@
 			 * Yup, recursive.  If there's a problem, blame
 			 * deep nesting of default_groups
 			 */
-			ret = configfs_detach_prep(sd->s_dentry, wait_mutex);
+			ret = configfs_detach_prep(sd->s_dentry, wait);
 			if (!ret)
 				continue;
 		} else
@@ -636,7 +636,7 @@
 		inode_lock(d_inode(child));
 
 		configfs_detach_group(sd->s_element);
-		child->d_inode->i_flags |= S_DEAD;
+		d_inode(child)->i_flags |= S_DEAD;
 		dont_mount(child);
 
 		inode_unlock(d_inode(child));
@@ -681,7 +681,7 @@
 			sd = child->d_fsdata;
 			sd->s_type |= CONFIGFS_USET_DEFAULT;
 		} else {
-			BUG_ON(child->d_inode);
+			BUG_ON(d_inode(child));
 			d_drop(child);
 			dput(child);
 		}
@@ -825,7 +825,7 @@
 			 */
 			inode_lock(d_inode(dentry));
 			configfs_remove_dir(item);
-			dentry->d_inode->i_flags |= S_DEAD;
+			d_inode(dentry)->i_flags |= S_DEAD;
 			dont_mount(dentry);
 			inode_unlock(d_inode(dentry));
 			d_delete(dentry);
@@ -868,7 +868,7 @@
 		ret = populate_groups(to_config_group(item));
 		if (ret) {
 			configfs_detach_item(item);
-			dentry->d_inode->i_flags |= S_DEAD;
+			d_inode(dentry)->i_flags |= S_DEAD;
 			dont_mount(dentry);
 		}
 		configfs_adjust_dir_dirent_depth_after_populate(sd);
@@ -1454,7 +1454,7 @@
 	 * the new link is temporarily attached
 	 */
 	do {
-		struct mutex *wait_mutex;
+		struct dentry *wait;
 
 		mutex_lock(&configfs_symlink_mutex);
 		spin_lock(&configfs_dirent_lock);
@@ -1465,7 +1465,7 @@
 		 */
 		ret = sd->s_dependent_count ? -EBUSY : 0;
 		if (!ret) {
-			ret = configfs_detach_prep(dentry, &wait_mutex);
+			ret = configfs_detach_prep(dentry, &wait);
 			if (ret)
 				configfs_detach_rollback(dentry);
 		}
@@ -1479,8 +1479,9 @@
 			}
 
 			/* Wait until the racing operation terminates */
-			mutex_lock(wait_mutex);
-			mutex_unlock(wait_mutex);
+			inode_lock(d_inode(wait));
+			inode_unlock(d_inode(wait));
+			dput(wait);
 		}
 	} while (ret == -EAGAIN);
 
@@ -1552,7 +1553,7 @@
 
 	new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
 	if (!IS_ERR(new_dentry)) {
-		if (!new_dentry->d_inode) {
+		if (d_really_is_negative(new_dentry)) {
 			error = config_item_set_name(item, "%s", new_name);
 			if (!error) {
 				d_add(new_dentry, NULL);
@@ -1678,7 +1679,7 @@
 				spin_lock(&configfs_dirent_lock);
 				dentry = next->s_dentry;
 				if (dentry)
-					inode = dentry->d_inode;
+					inode = d_inode(dentry);
 				if (inode)
 					ino = inode->i_ino;
 				spin_unlock(&configfs_dirent_lock);
@@ -1887,7 +1888,7 @@
 		err = configfs_attach_group(sd->s_element, &group->cg_item,
 					    dentry);
 		if (err) {
-			BUG_ON(dentry->d_inode);
+			BUG_ON(d_inode(dentry));
 			d_drop(dentry);
 			dput(dentry);
 		} else {
@@ -1929,7 +1930,7 @@
 	spin_unlock(&configfs_dirent_lock);
 	mutex_unlock(&configfs_symlink_mutex);
 	configfs_detach_group(&group->cg_item);
-	dentry->d_inode->i_flags |= S_DEAD;
+	d_inode(dentry)->i_flags |= S_DEAD;
 	dont_mount(dentry);
 	inode_unlock(d_inode(dentry));
 
diff -ur a/fs/configfs/file.c b/fs/configfs/file.c
--- a/fs/configfs/file.c	2017-03-23 15:08:53.000000000 +0100
+++ b/fs/configfs/file.c	2017-03-14 02:46:45.000000000 +0100
@@ -371,8 +371,6 @@
 
 	len = simple_write_to_buffer(buffer->bin_buffer,
 			buffer->bin_buffer_size, ppos, buf, count);
-	if (len > 0)
-		*ppos += len;
 out:
 	mutex_unlock(&buffer->mutex);
 	return len;
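The removed lines above double-advanced the file offset: simple_write_to_buffer() already updates *ppos by the number of bytes it copied, so adding len again corrupted subsequent writes. A hedged illustration (demo_bin_write is a hypothetical wrapper, not configfs code):

/* Hypothetical wrapper showing the contract relied on above:
 * simple_write_to_buffer() copies from userspace and advances *ppos itself,
 * so the caller must not bump the offset a second time. */
static ssize_t demo_bin_write(void *dst, size_t dst_size, loff_t *ppos,
			      const char __user *buf, size_t count)
{
	ssize_t len = simple_write_to_buffer(dst, dst_size, ppos, buf, count);

	/* on success *ppos has already moved forward by 'len' */
	return len;
}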
diff -ur a/fs/configfs/inode.c b/fs/configfs/inode.c
--- a/fs/configfs/inode.c	2017-03-23 15:08:53.000000000 +0100
+++ b/fs/configfs/inode.c	2017-03-14 02:46:45.000000000 +0100
@@ -62,7 +62,7 @@
 
 int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
 {
-	struct inode * inode = dentry->d_inode;
+	struct inode * inode = d_inode(dentry);
 	struct configfs_dirent * sd = dentry->d_fsdata;
 	struct iattr * sd_iattr;
 	unsigned int ia_valid = iattr->ia_valid;
@@ -195,7 +195,7 @@
 	if (!dentry)
 		return -ENOENT;
 
-	if (dentry->d_inode)
+	if (d_really_is_positive(dentry))
 		return -EEXIST;
 
 	sd = dentry->d_fsdata;
@@ -203,8 +203,7 @@
 	if (!inode)
 		return -ENOMEM;
 
-	p_inode = dentry->d_parent->d_inode;
-	p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
+	p_inode = d_inode(dentry->d_parent);
 	p_inode->i_mtime = p_inode->i_ctime = current_fs_time(p_inode->i_sb);
 	configfs_set_inode_lock_class(sd, inode);
 
@@ -253,11 +252,11 @@
 
 	if (dentry) {
 		spin_lock(&dentry->d_lock);
-		if (!(d_unhashed(dentry) && dentry->d_inode)) {
+		if (!d_unhashed(dentry) && d_really_is_positive(dentry)) {
 			dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
-			simple_unlink(parent->d_inode, dentry);
+			simple_unlink(d_inode(parent), dentry);
 		} else
 			spin_unlock(&dentry->d_lock);
 	}
@@ -268,7 +267,7 @@
 	struct configfs_dirent * sd;
 	struct configfs_dirent * parent_sd = dir->d_fsdata;
 
-	if (dir->d_inode == NULL)
+	if (d_really_is_negative(dir))
 		/* no inode means this hasn't been made visible yet */
 		return;
 
diff -ur a/fs/coredump.c b/fs/coredump.c
--- a/fs/coredump.c	2017-03-23 15:07:46.000000000 +0100
+++ b/fs/coredump.c	2017-03-14 02:45:43.000000000 +0100
@@ -491,10 +491,10 @@
 	const struct cred *old_cred;
 	struct cred *cred;
 	int retval = 0;
-	int flag = 0;
 	int ispipe;
 	struct files_struct *displaced;
-	bool need_nonrelative = false;
+	/* require nonrelative corefile path and be extra careful */
+	bool need_suid_safe = false;
 	bool core_dumped = false;
 	static atomic_t core_dump_count = ATOMIC_INIT(0);
 	struct coredump_params cprm = {
@@ -528,9 +528,8 @@
 	 */
 	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
 		/* Setuid core dump mode */
-		flag = O_EXCL;		/* Stop rewrite attacks */
 		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
-		need_nonrelative = true;
+		need_suid_safe = true;
 	}
 
 	retval = coredump_wait(siginfo->si_signo, &core_state);
@@ -611,7 +610,7 @@
 		if (cprm.limit < binfmt->min_coredump)
 			goto fail_unlock;
 
-		if (need_nonrelative && cn.corename[0] != '/') {
+		if (need_suid_safe && cn.corename[0] != '/') {
 			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
 				"to fully qualified path!\n",
 				task_tgid_vnr(current), current->comm);
@@ -619,8 +618,35 @@
 			goto fail_unlock;
 		}
 
+		/*
+		 * Unlink the file if it exists unless this is a SUID
+		 * binary - in that case, we're running around with root
+		 * privs and don't want to unlink another user's coredump.
+		 */
+		if (!need_suid_safe) {
+			mm_segment_t old_fs;
+
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			/*
+			 * If it doesn't exist, that's fine. If there's some
+			 * other problem, we'll catch it at the filp_open().
+			 */
+			(void) sys_unlink((const char __user *)cn.corename);
+			set_fs(old_fs);
+		}
+
+		/*
+		 * There is a race between unlinking and creating the
+		 * file, but if that causes an EEXIST here, that's
+		 * fine - another process raced with us while creating
+		 * the corefile, and the other process won. To userspace,
+		 * what matters is that at least one of the two processes
+		 * writes its coredump successfully, not which one.
+		 */
 		cprm.file = filp_open(cn.corename,
-				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+				 O_CREAT | 2 | O_NOFOLLOW |
+				 O_LARGEFILE | O_EXCL,
 				 0600);
 		if (IS_ERR(cprm.file))
 			goto fail_unlock;
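The new block above implements the usual replace-then-create-exclusively idiom: drop any stale core file first (only when not dumping a suid binary), then always open with O_EXCL so a racing creator makes the open fail instead of letting the dump reuse a file it does not own. A minimal userspace analogue of the same pattern (illustrative only, not kernel code):

/* Illustrative userspace analogue of the unlink + O_EXCL pattern above:
 * remove any stale file, then create exclusively so a concurrent creator
 * causes open() to fail with EEXIST rather than the old file being reused. */
#include <fcntl.h>
#include <unistd.h>

static int create_fresh(const char *path)
{
	(void)unlink(path);	/* ENOENT is fine here */
	return open(path, O_CREAT | O_EXCL | O_WRONLY | O_NOFOLLOW, 0600);
}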
diff -ur a/fs/dcache.c b/fs/dcache.c
--- a/fs/dcache.c	2017-03-23 15:07:48.000000000 +0100
+++ b/fs/dcache.c	2017-03-14 02:45:44.000000000 +0100
@@ -352,6 +352,9 @@
 		return;
 	}
 
+	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+		goto kill_it;
+
 	if (dentry->d_flags & DCACHE_OP_DELETE) {
 		if (dentry->d_op->d_delete(dentry))
 			goto kill_it;
@@ -750,13 +753,13 @@
 
 		if (!locked && read_seqretry(&rename_lock, seq))
 			goto rename_retry;
-		next = child->d_child.next;
-		while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+		 
+		do {
+			next = child->d_child.next;
 			if (next == &this_parent->d_subdirs)
 				goto ascend;
 			child = list_entry(next, struct dentry, d_child);
-			next = next->next;
-		}
+		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
 		rcu_read_unlock();
 		goto resume;
 	}
@@ -844,13 +847,13 @@
 
 		if (!locked && read_seqretry(&rename_lock, seq))
 			goto rename_retry;
-		next = child->d_child.next;
-		while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+		 
+		do {
+			next = child->d_child.next;
 			if (next == &this_parent->d_subdirs)
 				goto ascend;
 			child = list_entry(next, struct dentry, d_child);
-			next = next->next;
-		}
+		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
 		rcu_read_unlock();
 		goto resume;
 	}
@@ -1885,6 +1888,8 @@
 	struct dentry *dentry = path->dentry;
 	struct vfsmount *vfsmnt = path->mnt;
 	struct mount *mnt = real_mount(vfsmnt);
+	char *orig_buffer = *buffer;
+	int orig_len = *buflen;
 	bool slash = false;
 	int error = 0;
 
@@ -1893,6 +1898,14 @@
 
 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
 			 
+			if (dentry != vfsmnt->mnt_root) {
+				*buffer = orig_buffer;
+				*buflen = orig_len;
+				slash = false;
+				error = 3;
+				goto global_root;
+			}
+			 
 			if (!mnt_has_parent(mnt))
 				goto global_root;
 			dentry = mnt->mnt_mountpoint;
@@ -1920,12 +1933,6 @@
 	return error;
 
 global_root:
-	 
-	if (IS_ROOT(dentry) &&
-	    (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
-		WARN(1, "Root dentry has weird name <%.*s>\n",
-		     (int) dentry->d_name.len, dentry->d_name.name);
-	}
 	if (!slash)
 		error = prepend(buffer, buflen, "/", 1);
 	if (!error)
@@ -2237,13 +2244,13 @@
 
 		if (!locked && read_seqretry(&rename_lock, seq))
 			goto rename_retry;
-		next = child->d_child.next;
-		while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+		 
+		do {
+			next = child->d_child.next;
 			if (next == &this_parent->d_subdirs)
 				goto ascend;
 			child = list_entry(next, struct dentry, d_child);
-			next = next->next;
-		}
+		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
 		rcu_read_unlock();
 		goto resume;
 	}
diff -ur a/fs/devpts/inode.c b/fs/devpts/inode.c
--- a/fs/devpts/inode.c	2017-03-23 15:09:09.000000000 +0100
+++ b/fs/devpts/inode.c	2017-03-14 02:47:01.000000000 +0100
@@ -565,6 +565,26 @@
 	mutex_unlock(&allocated_ptys_lock);
 }
 
+/*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+
+void devpts_add_ref(struct inode *ptmx_inode)
+{
+	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+	atomic_inc(&sb->s_active);
+	ihold(ptmx_inode);
+}
+
+void devpts_del_ref(struct inode *ptmx_inode)
+{
+	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+	iput(ptmx_inode);
+	deactivate_super(sb);
+}
+
 /**
  * devpts_pty_new -- create a new inode in /dev/pts/
  * @ptmx_inode: inode of the master
diff -ur a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
--- a/fs/dlm/lowcomms.c	2017-03-23 15:08:34.000000000 +0100
+++ b/fs/dlm/lowcomms.c	2017-03-14 02:46:27.000000000 +0100
@@ -411,7 +411,7 @@
 }
 
 /* Data available on socket or listen socket received a connect */
-static void lowcomms_data_ready(struct sock *sk, int count_unused)
+static void lowcomms_data_ready(struct sock *sk)
 {
 	struct connection *con = sock2con(sk);
 	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
diff -ur a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
--- a/fs/ecryptfs/crypto.c	2017-03-23 15:09:50.000000000 +0100
+++ b/fs/ecryptfs/crypto.c	2017-03-14 02:47:45.000000000 +0100
@@ -1408,6 +1408,10 @@
 {
 	struct ecryptfs_crypt_stat *crypt_stat =
 		&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
+#ifdef MY_ABC_HERE
+	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
+		&ecryptfs_superblock_to_private(ecryptfs_inode->i_sb)->mount_crypt_stat;
+#endif  
 	unsigned int order;
 	char *virt;
 	size_t virt_len;
@@ -1443,12 +1447,24 @@
 		       __func__, rc);
 		goto out_free;
 	}
+#ifdef MY_ABC_HERE
+	if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED) {
+		rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt, virt_len);
+		if (!rc)
+			rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, ECRYPTFS_SIZE_AND_MARKER_BYTES);
+	}
+	else {
+#endif  
 	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
 		rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
 						      size);
 	else
 		rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
 							 virt_len);
+
+#ifdef MY_ABC_HERE
+	}
+#endif  
 	if (rc) {
 #ifdef MY_ABC_HERE
 		if (-EDQUOT != rc && -ENOSPC != rc)
@@ -1591,7 +1607,37 @@
 	u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES];
 	u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES;
 	int rc;
+#ifdef MY_ABC_HERE
+	char *page_virt = NULL;
+	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
+		&ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
+	if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED) {
+		page_virt = kmem_cache_alloc(ecryptfs_header_cache, GFP_USER);
+		if (!page_virt) {
+			rc = -ENOMEM;
+			printk(KERN_ERR "%s: Unable to allocate page_virt\n",
+			       __func__);
+			goto out;
+		}
 
+		rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
+					     ECRYPTFS_XATTR_NAME, page_virt,
+					     PAGE_CACHE_SIZE);
+		if (rc < ((int)ECRYPTFS_SIZE_AND_MARKER_BYTES)) {
+			rc = rc >= 0 ? -EINVAL : rc;
+			goto out;
+		}
+		rc = ecryptfs_validate_marker(page_virt+ECRYPTFS_FILE_SIZE_BYTES);
+		if (!rc)
+			ecryptfs_i_size_init(page_virt, inode);
+out:
+		if (page_virt) {
+			memset(page_virt, 0, PAGE_CACHE_SIZE);
+			kmem_cache_free(ecryptfs_header_cache, page_virt);
+		}
+		return rc;
+	}
+#endif  
 	rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
 				     ECRYPTFS_XATTR_NAME, file_size,
 				     ECRYPTFS_SIZE_AND_MARKER_BYTES);
diff -ur a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
--- a/fs/ecryptfs/ecryptfs_kernel.h	2017-03-23 15:09:48.000000000 +0100
+++ b/fs/ecryptfs/ecryptfs_kernel.h	2017-03-14 02:47:44.000000000 +0100
@@ -261,6 +261,10 @@
 #define ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK   0x00000020
 #define ECRYPTFS_GLOBAL_ENCFN_USE_FEK          0x00000040
 #define ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY    0x00000080
+
+#ifdef MY_ABC_HERE
+#define ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED    0x80000000
+#endif  
 	u32 flags;
 	struct list_head global_auth_tok_list;
 	struct mutex global_auth_tok_list_mutex;
diff -ur a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
--- a/fs/ecryptfs/inode.c	2017-03-23 15:09:48.000000000 +0100
+++ b/fs/ecryptfs/inode.c	2017-03-14 02:47:44.000000000 +0100
@@ -253,6 +253,10 @@
 static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
 {
 	struct ecryptfs_crypt_stat *crypt_stat;
+#ifdef MY_ABC_HERE
+	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
+		&ecryptfs_superblock_to_private(dentry->d_sb)->mount_crypt_stat;
+#endif  
 	int rc;
 
 	rc = ecryptfs_get_lower_file(dentry, inode);
@@ -269,6 +273,16 @@
 	if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
 		ecryptfs_set_default_sizes(crypt_stat);
 
+#ifdef MY_ABC_HERE
+	if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED) {
+		rc = ecryptfs_read_and_validate_xattr_region(dentry, inode);
+		if (rc) {
+			rc = ecryptfs_read_and_validate_header_region(inode);
+		}
+		ecryptfs_put_lower_file(inode);
+	}
+	else {
+#endif  
 	rc = ecryptfs_read_and_validate_header_region(inode);
 	ecryptfs_put_lower_file(inode);
 	if (rc) {
@@ -276,7 +290,10 @@
 		if (!rc)
 			crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
 	}
-
+#ifdef MY_ABC_HERE
+	}
+#endif  
+	 
 	return 0;
 }
 
diff -ur a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
--- a/fs/ecryptfs/main.c	2017-03-23 15:09:48.000000000 +0100
+++ b/fs/ecryptfs/main.c	2017-03-14 02:47:44.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
  
 #include <linux/dcache.h>
 #include <linux/file.h>
@@ -114,6 +117,9 @@
        ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes,
        ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only,
        ecryptfs_opt_check_dev_ruid,
+#ifdef MY_ABC_HERE
+       ecryptfs_opt_no_fast_lookup,
+#endif  
        ecryptfs_opt_err };
 
 static const match_table_t tokens = {
@@ -131,6 +137,9 @@
 	{ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"},
 	{ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"},
 	{ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"},
+#ifdef MY_ABC_HERE
+	{ecryptfs_opt_no_fast_lookup, "no_fast_lookup"},
+#endif  
 	{ecryptfs_opt_err, NULL}
 };
 
@@ -169,6 +178,9 @@
 	       sizeof(struct ecryptfs_mount_crypt_stat));
 	INIT_LIST_HEAD(&mount_crypt_stat->global_auth_tok_list);
 	mutex_init(&mount_crypt_stat->global_auth_tok_list_mutex);
+#ifdef MY_ABC_HERE
+	mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED;
+#endif  
 	mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED;
 }
 
@@ -308,6 +320,11 @@
 		case ecryptfs_opt_check_dev_ruid:
 			*check_ruid = 1;
 			break;
+#ifdef MY_ABC_HERE
+		case ecryptfs_opt_no_fast_lookup:
+			mount_crypt_stat->flags &= (~ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED);
+			break;
+#endif  
 		case ecryptfs_opt_err:
 		default:
 			printk(KERN_WARNING
diff -ur a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
--- a/fs/ecryptfs/mmap.c	2017-03-23 15:09:47.000000000 +0100
+++ b/fs/ecryptfs/mmap.c	2017-03-14 02:47:43.000000000 +0100
@@ -361,9 +361,22 @@
 int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
 {
 	struct ecryptfs_crypt_stat *crypt_stat;
-
+#ifdef MY_ABC_HERE
+	int rc = -1;
+	struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
+		&ecryptfs_superblock_to_private(ecryptfs_inode->i_sb)->mount_crypt_stat;
+#endif  
 	crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
 	BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
+#ifdef MY_ABC_HERE
+	if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED) {
+		rc = ecryptfs_write_inode_size_to_xattr(ecryptfs_inode);
+		if (rc) {
+			return rc;
+		}
+		return ecryptfs_write_inode_size_to_header(ecryptfs_inode);
+	}
+#endif  
 	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
 		return ecryptfs_write_inode_size_to_xattr(ecryptfs_inode);
 	else
diff -ur a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
--- a/fs/ecryptfs/read_write.c	2017-03-23 15:09:47.000000000 +0100
+++ b/fs/ecryptfs/read_write.c	2017-03-14 02:47:43.000000000 +0100
@@ -6,7 +6,7 @@
 #include <linux/pagemap.h>
 #include "ecryptfs_kernel.h"
 
-#ifdef MY_DEF_HERE
+#ifdef MY_ABC_HERE
 #include <linux/fsnotify.h>
 
 static ssize_t ecryptfs_kernel_write(struct file *file, const char *buf, size_t count, loff_t pos)
@@ -48,7 +48,7 @@
 	lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
 	if (!lower_file)
 		return -EIO;
-#ifdef MY_DEF_HERE
+#ifdef MY_ABC_HERE
 	rc = ecryptfs_kernel_write(lower_file, data, size, offset);
 #else
 	rc = kernel_write(lower_file, data, size, offset);
diff -ur a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
--- a/fs/ecryptfs/super.c	2017-03-23 15:09:48.000000000 +0100
+++ b/fs/ecryptfs/super.c	2017-03-14 02:47:43.000000000 +0100
@@ -111,7 +111,10 @@
 		seq_printf(m, ",ecryptfs_unlink_sigs");
 	if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY)
 		seq_printf(m, ",ecryptfs_mount_auth_tok_only");
-
+#ifdef MY_ABC_HERE
+	if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_FAST_LOOKUP_ENABLED))
+		seq_printf(m, ",no_fast_lookup");
+#endif  
 	return 0;
 }
 
diff -ur a/fs/efivarfs/file.c b/fs/efivarfs/file.c
--- a/fs/efivarfs/file.c	2017-03-23 15:09:00.000000000 +0100
+++ b/fs/efivarfs/file.c	2017-03-14 02:46:53.000000000 +0100
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
+#include <linux/mount.h>
 
 #include "internal.h"
 
@@ -108,9 +109,78 @@
 	return size;
 }
 
+static int
+efivarfs_ioc_getxflags(struct file *file, void __user *arg)
+{
+	struct inode *inode = file->f_mapping->host;
+	unsigned int i_flags;
+	unsigned int flags = 0;
+
+	i_flags = inode->i_flags;
+	if (i_flags & S_IMMUTABLE)
+		flags |= FS_IMMUTABLE_FL;
+
+	if (copy_to_user(arg, &flags, sizeof(flags)))
+		return -EFAULT;
+	return 0;
+}
+
+static int
+efivarfs_ioc_setxflags(struct file *file, void __user *arg)
+{
+	struct inode *inode = file->f_mapping->host;
+	unsigned int flags;
+	unsigned int i_flags = 0;
+	int error;
+
+	if (!inode_owner_or_capable(inode))
+		return -EACCES;
+
+	if (copy_from_user(&flags, arg, sizeof(flags)))
+		return -EFAULT;
+
+	if (flags & ~FS_IMMUTABLE_FL)
+		return -EOPNOTSUPP;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+
+	if (flags & FS_IMMUTABLE_FL)
+		i_flags |= S_IMMUTABLE;
+
+	error = mnt_want_write_file(file);
+	if (error)
+		return error;
+
+	mutex_lock(&inode->i_mutex);
+	inode->i_flags &= ~S_IMMUTABLE;
+	inode->i_flags |= i_flags;
+	mutex_unlock(&inode->i_mutex);
+
+	mnt_drop_write_file(file);
+
+	return 0;
+}
+
+long
+efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
+{
+	void __user *arg = (void __user *)p;
+
+	switch (cmd) {
+	case FS_IOC_GETFLAGS:
+		return efivarfs_ioc_getxflags(file, arg);
+	case FS_IOC_SETFLAGS:
+		return efivarfs_ioc_setxflags(file, arg);
+	}
+
+	return -ENOTTY;
+}
+
 const struct file_operations efivarfs_file_operations = {
 	.open	= simple_open,
 	.read	= efivarfs_file_read,
 	.write	= efivarfs_file_write,
 	.llseek	= no_llseek,
+	.unlocked_ioctl = efivarfs_file_ioctl,
 };
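The new unlocked_ioctl handler exposes only FS_IMMUTABLE_FL, which lets chattr-style tools toggle write protection on individual EFI variables. A minimal userspace caller sketch (illustrative; error handling reduced to the return codes):

/* Illustrative userspace caller for the ioctls handled above: clear the
 * immutable bit on an efivarfs file so it can be written or deleted. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int efivar_make_mutable(int fd)
{
	unsigned int flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return -1;
	flags &= ~FS_IMMUTABLE_FL;
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}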
diff -ur a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
--- a/fs/efivarfs/inode.c	2017-03-23 15:09:00.000000000 +0100
+++ b/fs/efivarfs/inode.c	2017-03-14 02:46:53.000000000 +0100
@@ -15,7 +15,8 @@
 #include "internal.h"
 
 struct inode *efivarfs_get_inode(struct super_block *sb,
-				const struct inode *dir, int mode, dev_t dev)
+				const struct inode *dir, int mode,
+				dev_t dev, bool is_removable)
 {
 	struct inode *inode = new_inode(sb);
 
@@ -23,6 +24,7 @@
 		inode->i_ino = get_next_ino();
 		inode->i_mode = mode;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
 		switch (mode & S_IFMT) {
 		case S_IFREG:
 			inode->i_fop = &efivarfs_file_operations;
@@ -102,22 +104,17 @@
 static int efivarfs_create(struct inode *dir, struct dentry *dentry,
 			  umode_t mode, bool excl)
 {
-	struct inode *inode;
+	struct inode *inode = NULL;
 	struct efivar_entry *var;
 	int namelen, i = 0, err = 0;
+	bool is_removable = false;
 
 	if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
 		return -EINVAL;
 
-	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
-	if (!inode)
-		return -ENOMEM;
-
 	var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
-	if (!var) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!var)
+		return -ENOMEM;
 
 	/* length of the variable name itself: remove GUID and separator */
 	namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
@@ -125,6 +122,16 @@
 	efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
 			&var->var.VendorGuid);
 
+	if (efivar_variable_is_removable(var->var.VendorGuid,
+					 dentry->d_name.name, namelen))
+		is_removable = true;
+
+	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
+	if (!inode) {
+		err = -ENOMEM;
+		goto out;
+	}
+
 	for (i = 0; i < namelen; i++)
 		var->var.VariableName[i] = dentry->d_name.name[i];
 
@@ -138,7 +145,8 @@
 out:
 	if (err) {
 		kfree(var);
-		iput(inode);
+		if (inode)
+			iput(inode);
 	}
 	return err;
 }
diff -ur a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
--- a/fs/efivarfs/internal.h	2017-03-23 15:09:00.000000000 +0100
+++ b/fs/efivarfs/internal.h	2017-03-14 02:46:53.000000000 +0100
@@ -15,7 +15,8 @@
 extern const struct inode_operations efivarfs_dir_inode_operations;
 extern bool efivarfs_valid_name(const char *str, int len);
 extern struct inode *efivarfs_get_inode(struct super_block *sb,
-			const struct inode *dir, int mode, dev_t dev);
+			const struct inode *dir, int mode, dev_t dev,
+			bool is_removable);
 
 extern struct list_head efivarfs_list;
 
diff -ur a/fs/efivarfs/super.c b/fs/efivarfs/super.c
--- a/fs/efivarfs/super.c	2017-03-23 15:09:00.000000000 +0100
+++ b/fs/efivarfs/super.c	2017-03-14 02:46:53.000000000 +0100
@@ -119,8 +119,9 @@
 	struct dentry *dentry, *root = sb->s_root;
 	unsigned long size = 0;
 	char *name;
-	int len, i;
+	int len;
 	int err = -ENOMEM;
+	bool is_removable = false;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
@@ -129,15 +130,17 @@
 	memcpy(entry->var.VariableName, name16, name_size);
 	memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
 
-	len = ucs2_strlen(entry->var.VariableName);
+	len = ucs2_utf8size(entry->var.VariableName);
 
 	/* name, plus '-', plus GUID, plus NUL*/
 	name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
 	if (!name)
 		goto fail;
 
-	for (i = 0; i < len; i++)
-		name[i] = entry->var.VariableName[i] & 0xFF;
+	ucs2_as_utf8(name, entry->var.VariableName, len);
+
+	if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+		is_removable = true;
 
 	name[len] = '-';
 
@@ -145,7 +148,8 @@
 
 	name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
 
-	inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0);
+	inode = efivarfs_get_inode(sb, root->d_inode, S_IFREG | 0644, 0,
+				   is_removable);
 	if (!inode)
 		goto fail_name;
 
@@ -201,7 +205,7 @@
 	sb->s_d_op		= &efivarfs_d_ops;
 	sb->s_time_gran         = 1;
 
-	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
 	if (!inode)
 		return -ENOMEM;
 	inode->i_op = &efivarfs_dir_inode_operations;
diff -ur a/fs/ext4/ext4.h b/fs/ext4/ext4.h
--- a/fs/ext4/ext4.h	2017-03-23 15:08:33.000000000 +0100
+++ b/fs/ext4/ext4.h	2017-03-14 02:46:26.000000000 +0100
@@ -15,6 +15,7 @@
 #include <linux/seqlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
+#include <linux/version.h>
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
@@ -586,17 +587,26 @@
 
 static inline __le32 ext4_encode_extra_time(struct timespec *time)
 {
-       return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
-			   (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
-                          ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
+	u32 extra = sizeof(time->tv_sec) > 4 ?
+		((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
+	return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
 }
 
 static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
 {
-       if (sizeof(time->tv_sec) > 4)
-	       time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
-			       << 32;
-       time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+	if (unlikely(sizeof(time->tv_sec) > 4 &&
+			(extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
+		 
+		u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
+		if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
+			extra_bits = 0;
+		time->tv_sec += extra_bits << 32;
+#else
+		time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
+#endif
+	}
+	time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
 }
 
 #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode)			       \
@@ -682,6 +692,12 @@
 
 #include "extents_status.h"
 
+enum {
+	I_DATA_SEM_NORMAL = 0,
+	I_DATA_SEM_OTHER,
+	I_DATA_SEM_QUOTA,
+};
+
 struct ext4_inode_info {
 	__le32	i_data[15];	 
 	__u32	i_dtime;
@@ -704,6 +720,8 @@
 	struct inode vfs_inode;
 	struct jbd2_inode *jinode;
 
+	spinlock_t i_raw_lock;	 
+
 	struct timespec i_crtime;
 
 	struct list_head i_prealloc_list;
@@ -1056,6 +1074,7 @@
 #ifdef MY_ABC_HERE
 	int s_new_error_fs_event_flag;
 	char *s_mount_path;
+	unsigned long s_last_notify_time;
 #endif
 #ifdef CONFIG_SYNO_EXT4_CREATE_TIME_BIG_ENDIAN_SWAP
 	int s_swap_create_time;
@@ -1063,6 +1082,11 @@
 
 	struct workqueue_struct *dio_unwritten_wq;
 
+#ifdef MY_ABC_HERE
+	atomic_t reada_group_desc_threads;  
+	struct workqueue_struct *group_desc_readahead_wq;
+#endif  
+
 	struct timer_list s_err_report;
 
 	struct ext4_li_request *s_li_request;
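In the timestamp hunk above, the epoch bits stored beside the nanoseconds extend a signed 32-bit tv_sec in units of 2^32 seconds. A simplified illustration of the decode, mirroring the >= 4.20 branch and ignoring the pre-4.20 compatibility special case for epoch value 3 with a negative tv_sec; the helper name is hypothetical:

/* Simplified decode of the on-disk extra-time epoch bits: each unit adds
 * 2^32 seconds on top of the 32-bit tv_sec, pushing timestamps past 2038. */
static inline long long decode_extra_sec(int disk_sec, unsigned int epoch_bits)
{
	return (long long)disk_sec + ((long long)(epoch_bits & 0x3) << 32);
}
/* e.g. disk_sec = 0, epoch_bits = 1  ->  4294967296 s after 1970 (year 2106) */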
diff -ur a/fs/ext4/extents.c b/fs/ext4/extents.c
--- a/fs/ext4/extents.c	2017-03-23 15:08:35.000000000 +0100
+++ b/fs/ext4/extents.c	2017-03-14 02:46:28.000000000 +0100
@@ -363,7 +363,7 @@
 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
 	ext4_lblk_t last = lblock + len - 1;
 
-	if (lblock > last)
+	if (len == 0 || lblock > last)
 		return 0;
 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
 }
@@ -2847,6 +2847,9 @@
 				err = -EIO;
 				break;
 			}
+			/* Yield here to deal with large extent trees.
+			 * Should be a no-op if we did IO above. */
+			cond_resched();
 			if (WARN_ON(i + 1 > depth)) {
 				err = -EIO;
 				break;
diff -ur a/fs/ext4/indirect.c b/fs/ext4/indirect.c
--- a/fs/ext4/indirect.c	2017-03-23 15:08:30.000000000 +0100
+++ b/fs/ext4/indirect.c	2017-03-14 02:46:24.000000000 +0100
@@ -577,7 +577,7 @@
 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
 		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
 				 "non-extent mapped inodes with bigalloc");
-		return -ENOSPC;
+		return -EUCLEAN;
 	}
 
 	goal = ext4_find_goal(inode, map->m_lblk, partial);
diff -ur a/fs/ext4/inode.c b/fs/ext4/inode.c
--- a/fs/ext4/inode.c	2017-03-23 15:08:33.000000000 +0100
+++ b/fs/ext4/inode.c	2017-03-14 02:46:26.000000000 +0100
@@ -768,7 +768,8 @@
 		ext4_journal_stop(handle);
 		goto retry_grab;
 	}
-	wait_on_page_writeback(page);
+	 
+	wait_for_stable_page(page);
 
 	if (ext4_should_dioread_nolock(inode))
 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
@@ -1015,7 +1016,7 @@
 static void ext4_da_page_release_reservation(struct page *page,
 					     unsigned long offset)
 {
-	int to_release = 0;
+	int to_release = 0, contiguous_blks = 0;
 	struct buffer_head *head, *bh;
 	unsigned int curr_off = 0;
 	struct inode *inode = page->mapping->host;
@@ -1030,14 +1031,23 @@
 
 		if ((offset <= curr_off) && (buffer_delay(bh))) {
 			to_release++;
+			contiguous_blks++;
 			clear_buffer_delay(bh);
+		} else if (contiguous_blks) {
+			lblk = page->index <<
+			       (PAGE_CACHE_SHIFT - inode->i_blkbits);
+			lblk += (curr_off >> inode->i_blkbits) -
+				contiguous_blks;
+			ext4_es_remove_extent(inode, lblk, contiguous_blks);
+			contiguous_blks = 0;
 		}
 		curr_off = next_off;
 	} while ((bh = bh->b_this_page) != head);
 
-	if (to_release) {
+	if (contiguous_blks) {
 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-		ext4_es_remove_extent(inode, lblk, to_release);
+		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+		ext4_es_remove_extent(inode, lblk, contiguous_blks);
 	}
 
 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
@@ -1541,17 +1551,27 @@
 				       NULL, bget_one);
 	}
 	 
+	get_page(page);
 	unlock_page(page);
 
 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
 				    ext4_writepage_trans_blocks(inode));
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
-		goto out;
+		put_page(page);
+		goto out_no_pagelock;
 	}
-
 	BUG_ON(!ext4_handle_valid(handle));
 
+	lock_page(page);
+	put_page(page);
+	if (page->mapping != mapping) {
+		 
+		ext4_journal_stop(handle);
+		ret = 0;
+		goto out;
+	}
+
 	if (inline_data) {
 		ret = ext4_journal_get_write_access(handle, inode_bh);
 
@@ -1576,6 +1596,8 @@
 				       NULL, bput_one);
 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 out:
+	unlock_page(page);
+out_no_pagelock:
 	brelse(inode_bh);
 	return ret;
 }
@@ -1820,9 +1842,15 @@
 					    needed_blocks);
 		if (IS_ERR(handle)) {
 			ret = PTR_ERR(handle);
+#ifdef MY_ABC_HERE
+		if (printk_ratelimit()) {
+#endif  
 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
 			       "%ld pages, ino %lu; err %d", __func__,
 				wbc->nr_to_write, inode->i_ino, ret);
+#ifdef MY_ABC_HERE
+		}
+#endif  
 			blk_finish_plug(&plug);
 			goto out_writepages;
 		}
@@ -1973,7 +2001,7 @@
 		goto retry_grab;
 	}
 	 
-	wait_on_page_writeback(page);
+	wait_for_stable_page(page);
 
 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
 	if (ret < 0) {
@@ -3298,11 +3326,14 @@
 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct buffer_head *bh = iloc->bh;
+	struct super_block *sb = inode->i_sb;
 	int err = 0, rc, block;
-	int need_datasync = 0;
+	int need_datasync = 0, set_large_file = 0;
 	uid_t i_uid;
 	gid_t i_gid;
 
+	spin_lock(&ei->i_raw_lock);
+
 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
 
@@ -3356,8 +3387,10 @@
 	}
 #endif
 
-	if (ext4_inode_blocks_set(handle, raw_inode, ei))
+	if (ext4_inode_blocks_set(handle, raw_inode, ei)) {
+		spin_unlock(&ei->i_raw_lock);
 		goto out_brelse;
+	}
 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
@@ -3370,22 +3403,11 @@
 		need_datasync = 1;
 	}
 	if (ei->i_disksize > 0x7fffffffULL) {
-		struct super_block *sb = inode->i_sb;
 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
 				EXT4_SB(sb)->s_es->s_rev_level ==
-				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
-			 
-			err = ext4_journal_get_write_access(handle,
-					EXT4_SB(sb)->s_sbh);
-			if (err)
-				goto out_brelse;
-			ext4_update_dynamic_rev(sb);
-			EXT4_SET_RO_COMPAT_FEATURE(sb,
-					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
-			ext4_handle_sync(handle);
-			err = ext4_handle_dirty_super(handle, sb);
-		}
+		    cpu_to_le32(EXT4_GOOD_OLD_REV))
+			set_large_file = 1;
 	}
 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
@@ -3414,12 +3436,23 @@
 
 	ext4_inode_csum_set(inode, raw_inode, ei);
 
+	spin_unlock(&ei->i_raw_lock);
+
 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
 	if (!err)
 		err = rc;
 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
-
+	if (set_large_file) {
+		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+		if (err)
+			goto out_brelse;
+		ext4_update_dynamic_rev(sb);
+		EXT4_SET_RO_COMPAT_FEATURE(sb,
+					   EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
+		ext4_handle_sync(handle);
+		err = ext4_handle_dirty_super(handle, sb);
+	}
 	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
 out_brelse:
 	brelse(bh);
@@ -3835,6 +3868,8 @@
 	might_sleep();
 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
+	if (err)
+		return err;
 	if (ext4_handle_valid(handle) &&
 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
@@ -3859,9 +3894,7 @@
 			}
 		}
 	}
-	if (!err)
-		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
-	return err;
+	return ext4_mark_iloc_dirty(handle, inode, &iloc);
 }
 
 void ext4_dirty_inode(struct inode *inode, int flags)
diff -ur a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
--- a/fs/ext4/mballoc.c	2017-03-23 15:08:31.000000000 +0100
+++ b/fs/ext4/mballoc.c	2017-03-14 02:46:25.000000000 +0100
@@ -3807,14 +3807,8 @@
 	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
 		struct ext4_free_data *new_entry;
 		 
-	retry:
-		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-		if (!new_entry) {
-			 
-			cond_resched();
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
-			goto retry;
-		}
+		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+				GFP_NOFS|__GFP_NOFAIL);
 		new_entry->efd_start_cluster = bit;
 		new_entry->efd_group = block_group;
 		new_entry->efd_count = count_clusters;
diff -ur a/fs/ext4/migrate.c b/fs/ext4/migrate.c
--- a/fs/ext4/migrate.c	2017-03-23 15:08:27.000000000 +0100
+++ b/fs/ext4/migrate.c	2017-03-14 02:46:22.000000000 +0100
@@ -616,6 +616,7 @@
 	struct ext4_inode_info		*ei = EXT4_I(inode);
 	struct ext4_extent		*ex;
 	unsigned int			i, len;
+	ext4_lblk_t			start, end;
 	ext4_fsblk_t			blk;
 	handle_t			*handle;
 	int				ret;
@@ -629,6 +630,14 @@
 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
 		return -EOPNOTSUPP;
 
+	/*
+	 * In order to get correct extent info, force all delayed allocation
+	 * blocks to be allocated, otherwise delayed allocation blocks may not
+	 * be reflected and bypass the checks on extent header.
+	 */
+	if (test_opt(inode->i_sb, DELALLOC))
+		ext4_alloc_da_blocks(inode);
+
 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -646,11 +655,13 @@
 		goto errout;
 	}
 	if (eh->eh_entries == 0)
-		blk = len = 0;
+		blk = len = start = end = 0;
 	else {
 		len = le16_to_cpu(ex->ee_len);
 		blk = ext4_ext_pblock(ex);
-		if (len > EXT4_NDIR_BLOCKS) {
+		start = le32_to_cpu(ex->ee_block);
+		end = start + len - 1;
+		if (end >= EXT4_NDIR_BLOCKS) {
 			ret = -EOPNOTSUPP;
 			goto errout;
 		}
@@ -658,7 +669,7 @@
 
 	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
 	memset(ei->i_data, 0, sizeof(ei->i_data));
-	for (i=0; i < len; i++)
+	for (i = start; i <= end; i++)
 		ei->i_data[i] = cpu_to_le32(blk++);
 	ext4_mark_inode_dirty(handle, inode);
 errout:
diff -ur a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
--- a/fs/ext4/move_extent.c	2017-03-23 15:08:31.000000000 +0100
+++ b/fs/ext4/move_extent.c	2017-03-14 02:46:24.000000000 +0100
@@ -154,10 +154,10 @@
 {
 	if (first < second) {
 		down_write(&EXT4_I(first)->i_data_sem);
-		down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+		down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
 	} else {
 		down_write(&EXT4_I(second)->i_data_sem);
-		down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+		down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
 
 	}
 }
@@ -1117,6 +1117,13 @@
 		return -EINVAL;
 	}
 
+	if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
+		ext4_debug("ext4 move extent: The argument files should "
+			"not be quota files [ino:orig %lu, donor %lu]\n",
+			orig_inode->i_ino, donor_inode->i_ino);
+		return -EBUSY;
+	}
+
 	/* Ext4 move extent supports only extent based file */
 	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
 		ext4_debug("ext4 move extent: orig file is not extents "
diff -ur a/fs/ext4/namei.c b/fs/ext4/namei.c
--- a/fs/ext4/namei.c	2017-03-23 15:08:32.000000000 +0100
+++ b/fs/ext4/namei.c	2017-03-14 02:46:25.000000000 +0100
@@ -29,6 +29,7 @@
 #define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
 
 #ifdef MY_ABC_HERE
+extern struct kmem_cache *ext4_syno_caseless_cachep;
  
 static unsigned char ext4_utf8_namei_buf[UNICODE_UTF8_BUFSIZE];
 extern spinlock_t ext4_namei_buf_lock;   
@@ -44,15 +45,28 @@
 
 static int ext4_dentry_hash(const struct dentry *dentry, const struct inode *inode, struct qstr *this)
 {
+	char *hash_buf;
 	unsigned int upperlen;
 
-	spin_lock(&ext4_namei_buf_lock);
+	if (NAME_MAX < this->len) {
+		goto static_buf;
+	}
 
-	upperlen = syno_utf8_toupper(ext4_utf8_namei_buf,this->name,
-									  UNICODE_UTF8_BUFSIZE - 1 , this->len, NULL);
+	hash_buf = kmem_cache_alloc(ext4_syno_caseless_cachep, GFP_NOFS);
+	if (NULL == hash_buf) {
+		goto static_buf;
+	}
 
-	this->hash = ext4_strhash(ext4_utf8_namei_buf, upperlen);
+	upperlen = syno_utf8_toupper(hash_buf, this->name, NAME_MAX, this->len, NULL);
+	this->hash = ext4_strhash(hash_buf, upperlen);
+	kmem_cache_free(ext4_syno_caseless_cachep, hash_buf);
 
+	return 0;
+
+static_buf:
+	spin_lock(&ext4_namei_buf_lock);
+	upperlen = syno_utf8_toupper(ext4_utf8_namei_buf, this->name, UNICODE_UTF8_BUFSIZE - 1 , this->len, NULL);
+	this->hash = ext4_strhash(ext4_utf8_namei_buf, upperlen);
 	spin_unlock(&ext4_namei_buf_lock);
 
 	return 0;
@@ -124,6 +138,9 @@
 	struct buffer_head *bh;
 	struct ext4_dir_entry *dirent;
 	int err = 0, is_dx_block = 0;
+#ifdef MY_ABC_HERE
+	static unsigned long block_last = 0;
+#endif  
 
 	bh = ext4_bread(NULL, inode, block, 0, &err);
 	if (!bh) {
@@ -132,10 +149,26 @@
 					       "Directory hole found");
 			return ERR_PTR(-EIO);
 		}
+#ifdef MY_ABC_HERE
+		if (block_last == (unsigned long)block) {
+			__ext4_warning(inode->i_sb, __func__, line,
+			       "error reading directory block "
+			       "(ino %lu, block %lu)", inode->i_ino,
+			       block_last);
+		} else {
+			block_last = (unsigned long)block;
+			__ext4_warning(inode->i_sb, __func__, line,
+			       "error reading directory block "
+			       "(ino %lu, block in range %lu + 0-2(%d))", inode->i_ino,
+			       (block_last >> CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT) << CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT,
+			       CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT);
+		}
+#else
 		__ext4_warning(inode->i_sb, __func__, line,
 			       "error reading directory block "
 			       "(ino %lu, block %lu)", inode->i_ino,
 			       (unsigned long) block);
+#endif  
 		return ERR_PTR(err);
 	}
 	dirent = (struct ext4_dir_entry *) bh->b_data;
diff -ur a/fs/ext4/page-io.c b/fs/ext4/page-io.c
--- a/fs/ext4/page-io.c	2017-03-23 15:08:27.000000000 +0100
+++ b/fs/ext4/page-io.c	2017-03-14 02:46:21.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 /*
  * linux/fs/ext4/page-io.c
  *
@@ -214,9 +217,24 @@
 static void buffer_io_error(struct buffer_head *bh)
 {
 	char b[BDEVNAME_SIZE];
+#ifdef MY_ABC_HERE
+	static unsigned long long b_blocknr_last = 0;
+	if (b_blocknr_last == (unsigned long long)bh->b_blocknr) {
+		printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
+			bdevname(bh->b_bdev, b),
+			b_blocknr_last);
+	} else {
+		b_blocknr_last = (unsigned long long)bh->b_blocknr;
+		printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block in range %llu + 0-2(%d)\n",
+			bdevname(bh->b_bdev, b),
+			(b_blocknr_last >> CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT) << CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT,
+			CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT);
+	}
+#else
 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
 			bdevname(bh->b_bdev, b),
 			(unsigned long long)bh->b_blocknr);
+#endif /* MY_ABC_HERE */
 }
 
 static void ext4_end_bio(struct bio *bio, int error)
@@ -226,6 +244,9 @@
 	int i;
 	int blocksize;
 	sector_t bi_sector = bio->bi_sector;
+#ifdef MY_ABC_HERE
+	static unsigned long long i_blkbits_last = 0;
+#endif /* MY_ABC_HERE */
 
 	BUG_ON(!io_end);
 	inode = io_end->inode;
@@ -277,6 +298,26 @@
 
 	if (error) {
 		io_end->flag |= EXT4_IO_END_ERROR;
+#ifdef MY_ABC_HERE
+		if (i_blkbits_last == (unsigned long long)bi_sector >> (inode->i_blkbits - 9)) {
+			ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+					"(offset %llu size %ld starting block %llu)",
+					inode->i_ino,
+					(unsigned long long) io_end->offset,
+					(long) io_end->size,
+					i_blkbits_last);
+		} else {
+			i_blkbits_last = (unsigned long long)bi_sector >> (inode->i_blkbits - 9);
+			ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+					"(offset %llu size %ld starting block in range %llu + 0-2(%d))",
+					inode->i_ino,
+					(unsigned long long) io_end->offset,
+					(long) io_end->size,
+					(unsigned long long)
+					(i_blkbits_last >> CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT) << CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT,
+					CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT);
+		}
+#else
 		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
 			     "(offset %llu size %ld starting block %llu)",
 			     inode->i_ino,
@@ -284,6 +325,7 @@
 			     (long) io_end->size,
 			     (unsigned long long)
 			     bi_sector >> (inode->i_blkbits - 9));
+#endif /* MY_ABC_HERE */
 	}
 
 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
diff -ur a/fs/ext4/resize.c b/fs/ext4/resize.c
--- a/fs/ext4/resize.c	2017-03-23 15:08:29.000000000 +0100
+++ b/fs/ext4/resize.c	2017-03-14 02:46:23.000000000 +0100
@@ -157,7 +157,7 @@
 	if (flex_gd == NULL)
 		goto out3;
 
-	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
 		goto out2;
 	flex_gd->count = flexbg_size;
 
@@ -869,13 +869,8 @@
 	return err;
 }
 
-#ifdef MY_ABC_HERE
-static void update_backups(struct super_block *sb, int blk_off, char *data,
-			   int size, int meta_bg, int group_num)
-#else
-static void update_backups(struct super_block *sb, int blk_off, char *data,
+static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
 			   int size, int meta_bg)
-#endif  
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_group_t last;
@@ -883,11 +878,7 @@
 	unsigned three = 1;
 	unsigned five = 5;
 	unsigned seven = 7;
-#ifdef MY_ABC_HERE
-	ext4_group_t group = meta_bg ? group_num : 0;
-#else
 	ext4_group_t group = 0;
-#endif  
 	int rest = sb->s_blocksize - size;
 	handle_t *handle;
 	int err = 0, err2;
@@ -903,7 +894,7 @@
 		group = ext4_list_backups(sb, &three, &five, &seven);
 		last = sbi->s_groups_count;
 	} else {
-		group = ext4_meta_bg_first_group(sb, group) + 1;
+		group = ext4_get_group_number(sb, blk_off) + 1;
 		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
 	}
 
@@ -1229,7 +1220,7 @@
 				EXT4_FEATURE_INCOMPAT_META_BG);
 		sector_t old_gdb = 0;
 
-#ifdef MY_ABC_HERE
+#ifdef CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX
 		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
 			       sizeof(struct ext4_super_block), 0, 0);
 #else
@@ -1242,7 +1233,7 @@
 			gdb_bh = sbi->s_group_desc[gdb_num];
 			if (old_gdb == gdb_bh->b_blocknr)
 				continue;
-#ifdef MY_ABC_HERE
+#ifdef CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX
 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
 				       gdb_bh->b_size, meta_bg, group);
 #else
@@ -1426,7 +1417,7 @@
 		if (test_opt(sb, DEBUG))
 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
 			       "blocks\n", ext4_blocks_count(es));
-#ifdef MY_ABC_HERE
+#ifdef CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX
 		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
 			       (char *)es, sizeof(struct ext4_super_block), 0, 0);
 #else
diff -ur a/fs/ext4/super.c b/fs/ext4/super.c
--- a/fs/ext4/super.c	2017-03-23 15:08:35.000000000 +0100
+++ b/fs/ext4/super.c	2017-03-14 02:46:28.000000000 +0100
@@ -70,6 +70,7 @@
 static int ext4_reserve_clusters(struct ext4_sb_info *, ext4_fsblk_t);
 
 #ifdef MY_ABC_HERE
+struct kmem_cache *ext4_syno_caseless_cachep;
 extern struct dentry_operations ext4_dentry_operations;
 
 spinlock_t ext4_namei_buf_lock;   
@@ -366,8 +367,11 @@
 	 
 	if ((0 != strcmp(es->s_last_mounted, "/"))
 			&& (0 == sbi->s_new_error_fs_event_flag)
+			&& (sbi->s_last_notify_time == 0 ||
+			    time_after(jiffies, sbi->s_last_notify_time + 24*60*60*HZ))
 			&& (es->s_syno_hash_magic == cpu_to_le32(SYNO_HASH_MAGIC))) {
 		sbi->s_new_error_fs_event_flag = 1;
+		sbi->s_last_notify_time = jiffies;
 		SYNOExt4GetDSMVersion(es->s_volume_name, szDsmVersion);
 		if ('\0' != szDsmVersion[0]) {
 			SynoAutoErrorFsReport(szDsmVersion ,(unsigned int)es->s_error_count);
@@ -427,9 +431,13 @@
 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
 		sb->s_flags |= MS_RDONLY;
 	}
-	if (test_opt(sb, ERRORS_PANIC))
+	if (test_opt(sb, ERRORS_PANIC)) {
+		if (EXT4_SB(sb)->s_journal &&
+		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
+			return;
 		panic("EXT4-fs (device %s): panic forced after error\n",
 			sb->s_id);
+	}
 }
 
 void __ext4_error(struct super_block *sb, const char *function,
@@ -441,8 +449,13 @@
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
+#ifdef MY_ABC_HERE
+	printk_ratelimited(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+	       sb->s_id, function, line, current->comm, &vaf);
+#else
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
 	       sb->s_id, function, line, current->comm, &vaf);
+#endif  
 	va_end(args);
 	save_error_info(sb, function, line);
 
@@ -464,15 +477,29 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 	if (block)
+#ifdef MY_ABC_HERE
+		printk_ratelimited(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+		       "inode #%lu: block %llu: comm %s: %pV\n",
+		       inode->i_sb->s_id, function, line, inode->i_ino,
+		       block, current->comm, &vaf);
+#else
 		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
 		       "inode #%lu: block %llu: comm %s: %pV\n",
 		       inode->i_sb->s_id, function, line, inode->i_ino,
 		       block, current->comm, &vaf);
+#endif  
 	else
+#ifdef MY_ABC_HERE
+		printk_ratelimited(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+		       "inode #%lu: comm %s: %pV\n",
+		       inode->i_sb->s_id, function, line, inode->i_ino,
+		       current->comm, &vaf);
+#else
 		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
 		       "inode #%lu: comm %s: %pV\n",
 		       inode->i_sb->s_id, function, line, inode->i_ino,
 		       current->comm, &vaf);
+#endif  
 	va_end(args);
 
 	ext4_handle_error(inode->i_sb);
@@ -557,8 +584,13 @@
 		return;
 
 	errstr = ext4_decode_error(sb, errno, nbuf);
+#ifdef MY_ABC_HERE
+	printk_ratelimited(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
+	       sb->s_id, function, line, errstr);
+#else
 	printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
 	       sb->s_id, function, line, errstr);
+#endif  
 	save_error_info(sb, function, line);
 
 	ext4_handle_error(sb);
@@ -585,8 +617,12 @@
 			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
 		save_error_info(sb, function, line);
 	}
-	if (test_opt(sb, ERRORS_PANIC))
+	if (test_opt(sb, ERRORS_PANIC)) {
+		if (EXT4_SB(sb)->s_journal &&
+		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
+			return;
 		panic("EXT4-fs panic from previous error\n");
+	}
 }
 
 void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
@@ -610,8 +646,13 @@
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
+#ifdef MY_ABC_HERE
+	printk_ratelimited(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
+	       sb->s_id, function, line, &vaf);
+#else
 	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
 	       sb->s_id, function, line, &vaf);
+#endif  
 	va_end(args);
 }
 
@@ -738,6 +779,11 @@
 
 	flush_workqueue(sbi->dio_unwritten_wq);
 	destroy_workqueue(sbi->dio_unwritten_wq);
+#ifdef MY_ABC_HERE
+	if (sbi->group_desc_readahead_wq) {
+		destroy_workqueue(sbi->group_desc_readahead_wq);
+	}
+#endif  
 
 	if (sbi->s_journal) {
 		err = jbd2_journal_destroy(sbi->s_journal);
@@ -790,6 +836,7 @@
 		dump_orphan_list(sb, sbi);
 	J_ASSERT(list_empty(&sbi->s_orphan));
 
+	sync_blockdev(sb->s_bdev);
 	invalidate_bdev(sb->s_bdev);
 	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
 		 
@@ -820,6 +867,7 @@
 		return NULL;
 
 	ei->vfs_inode.i_version = 1;
+	spin_lock_init(&ei->i_raw_lock);
 	INIT_LIST_HEAD(&ei->i_prealloc_list);
 	spin_lock_init(&ei->i_prealloc_lock);
 	ext4_es_init_tree(&ei->i_es_tree);
@@ -3229,6 +3277,74 @@
 	return 0;
 }
 
+#ifdef MY_ABC_HERE
+struct group_desc_reada_arg {
+	struct super_block *sb;
+	ext4_fsblk_t logical_sb_block;
+	unsigned int db_count;
+	unsigned int current_nr;
+};
+
+struct group_desc_reada {
+	struct super_block *sb;
+	ext4_fsblk_t logical_sb_block;
+	unsigned int nr;
+	struct work_struct work;
+};
+
+static void group_desc_reada_start(struct work_struct *work)
+{
+	struct group_desc_reada *group_desc_reada;
+	struct ext4_sb_info *sbi;
+	struct buffer_head *bh = NULL;
+	ext4_fsblk_t block;
+
+	group_desc_reada = container_of(work, struct group_desc_reada, work);
+	sbi = EXT4_SB(group_desc_reada->sb);
+	block = descriptor_loc(group_desc_reada->sb, group_desc_reada->logical_sb_block, group_desc_reada->nr);
+	bh = sb_bread(group_desc_reada->sb, block);
+	if (bh != NULL) {
+		brelse(bh);
+	}
+	kfree(group_desc_reada);
+	atomic_dec(&sbi->reada_group_desc_threads);
+}
+
+#define READA_GROUP_DESC_THREAD_MAX 30
+ 
+static void reada_group_desc_block(struct group_desc_reada_arg* group_desc_reada_arg)
+{
+	struct ext4_sb_info *sbi;
+	struct group_desc_reada *group_desc_reada;
+	int i;
+
+	if (group_desc_reada_arg->current_nr >= group_desc_reada_arg->db_count) {
+		return;
+	}
+
+	sbi = EXT4_SB(group_desc_reada_arg->sb);
+	for (i = 0; i + atomic_read(&sbi->reada_group_desc_threads) < READA_GROUP_DESC_THREAD_MAX; i++) {
+		group_desc_reada = kmalloc(sizeof(struct group_desc_reada), GFP_NOFS);
+		if (!group_desc_reada) {
+			return;
+		}
+
+		group_desc_reada->sb = group_desc_reada_arg->sb;
+		group_desc_reada->logical_sb_block = group_desc_reada_arg->logical_sb_block;
+		group_desc_reada->nr = group_desc_reada_arg->current_nr;
+
+		INIT_WORK(&group_desc_reada->work, group_desc_reada_start);
+		queue_work(sbi->group_desc_readahead_wq, &group_desc_reada->work);
+		atomic_inc(&sbi->reada_group_desc_threads);
+
+		group_desc_reada_arg->current_nr++;
+		if (group_desc_reada_arg->current_nr >= group_desc_reada_arg->db_count) {
+			return;
+		}
+	}
+}
+#endif  
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
 	char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -3253,6 +3369,9 @@
 	int err = 0;
 	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
 	ext4_group_t first_not_zeroed;
+#ifdef MY_ABC_HERE
+	struct group_desc_reada_arg group_desc_reada_arg;
+#endif  
 
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi)
@@ -3688,7 +3807,23 @@
 
 	bgl_lock_init(sbi->s_blockgroup_lock);
 
+#ifdef MY_ABC_HERE
+	EXT4_SB(sb)->group_desc_readahead_wq =
+		alloc_workqueue("ext4-group-desc-readahead", WQ_MEM_RECLAIM | WQ_UNBOUND, min_t(unsigned long, num_online_cpus() + 2, 8));
+
+	group_desc_reada_arg.sb = sb;
+	group_desc_reada_arg.logical_sb_block = logical_sb_block;
+	group_desc_reada_arg.db_count = db_count;
+	group_desc_reada_arg.current_nr = 0;
+	atomic_set(&sbi->reada_group_desc_threads, 0);
+#endif  
+
 	for (i = 0; i < db_count; i++) {
+#ifdef MY_ABC_HERE
+		if (EXT4_SB(sb)->group_desc_readahead_wq) {
+			reada_group_desc_block(&group_desc_reada_arg);
+		}
+#endif  
 		block = descriptor_loc(sb, logical_sb_block, i);
 		sbi->s_group_desc[i] = sb_bread(sb, block);
 		if (!sbi->s_group_desc[i]) {
@@ -4055,6 +4190,11 @@
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
+#ifdef MY_ABC_HERE
+	if (EXT4_SB(sb)->group_desc_readahead_wq) {
+		destroy_workqueue(EXT4_SB(sb)->group_desc_readahead_wq);
+	}
+#endif  
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
 	ext4_kvfree(sbi->s_group_desc);
@@ -4321,8 +4461,14 @@
 		return error;
 	if (buffer_write_io_error(sbh)) {
 		 
+#ifdef MY_ABC_HERE
+		if (printk_ratelimit()) {
+#endif  
 		ext4_msg(sb, KERN_ERR, "previous I/O error to "
 		       "superblock detected");
+#ifdef MY_ABC_HERE
+		}
+#endif  
 		clear_buffer_write_io_error(sbh);
 		set_buffer_uptodate(sbh);
 	}
@@ -4812,6 +4958,14 @@
 					EXT4_SB(sb)->s_jquota_fmt, type);
 }
 
+static void lockdep_set_quota_inode(struct inode *inode, int subclass)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	(void) ei;	 
+	lockdep_set_subclass(&ei->i_data_sem, subclass);
+}
+
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 			 struct path *path)
 {
@@ -4840,8 +4994,12 @@
 		if (err)
 			return err;
 	}
-
-	return dquot_quota_on(sb, type, format_id, path);
+	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
+	err = dquot_quota_on(sb, type, format_id, path);
+	if (err)
+		lockdep_set_quota_inode(path->dentry->d_inode,
+					     I_DATA_SEM_NORMAL);
+	return err;
 }
 
 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@@ -4866,8 +5024,11 @@
 	}
 
 	qf_inode->i_flags |= S_NOQUOTA;
+	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
 	err = dquot_enable(qf_inode, type, format_id, flags);
 	iput(qf_inode);
+	if (err)
+		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
 
 	return err;
 }
@@ -5197,12 +5358,24 @@
 		goto out1;
 	register_as_ext3();
 	register_as_ext2();
+
+#ifdef MY_ABC_HERE
+	ext4_syno_caseless_cachep = kmem_cache_create("ext4_syno_caseless",
+			(NAME_MAX+1)*2, 0,
+			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!ext4_syno_caseless_cachep)
+		goto out;
+#endif
 	err = register_filesystem(&ext4_fs_type);
 	if (err)
 		goto out;
 
 	return 0;
 out:
+
+#ifdef MY_ABC_HERE
+	kmem_cache_destroy(ext4_syno_caseless_cachep);
+#endif
 	unregister_as_ext2();
 	unregister_as_ext3();
 	destroy_inodecache();
@@ -5229,6 +5402,9 @@
 static void __exit ext4_exit_fs(void)
 {
 	ext4_destroy_lazyinit_thread();
+#ifdef MY_ABC_HERE
+	kmem_cache_destroy(ext4_syno_caseless_cachep);
+#endif
 	unregister_as_ext2();
 	unregister_as_ext3();
 	unregister_filesystem(&ext4_fs_type);
diff -ur a/fs/fcntl.c b/fs/fcntl.c
--- a/fs/fcntl.c	2017-03-23 15:07:46.000000000 +0100
+++ b/fs/fcntl.c	2017-03-14 02:45:43.000000000 +0100
@@ -59,6 +59,8 @@
 	{S2_SYNO_ACL_IS_OWNER_GROUP, 0}, 
 	{S2_SYNO_ACL_IS_OWNER_GROUP, 1}, 
 #endif  
+	{S2_SMB_SPARSE, 1},				 
+	{S2_SMB_SPARSE, 0},				 
 };
 
 #ifdef MY_ABC_HERE
@@ -81,6 +83,8 @@
 	NEED_FS_ACL_SUPPORT,                                            
 	PROTECT_BY_ACL | NEED_INODE_ACL_SUPPORT | NEED_FS_ACL_SUPPORT,  
 	PROTECT_BY_ACL | NEED_INODE_ACL_SUPPORT | NEED_FS_ACL_SUPPORT,  
+	PROTECT_BY_ACL,                  
+	PROTECT_BY_ACL,                  
 };
 
 const int rgSynoArAclMask[] = {
@@ -102,6 +106,8 @@
 	ACL_MASK_NONE,         
 	MAY_GET_OWNER_SHIP,    
 	MAY_GET_OWNER_SHIP,    
+	MAY_WRITE_ATTR,        
+	MAY_WRITE_ATTR,        
 };
 
 struct syno_archive_permission_mapping {
@@ -114,6 +120,7 @@
 	{S2_SMB_ARCHIVE, MAY_WRITE_ATTR},
 	{S2_SMB_HIDDEN, MAY_WRITE_ATTR},
 	{S2_SMB_SYSTEM, MAY_WRITE_ATTR},
+	{S2_SMB_SPARSE, MAY_WRITE_ATTR},
 
 	{S2_SMB_READONLY, MAY_WRITE_ATTR},
 	{S2_SYNO_ACL_IS_OWNER_GROUP, MAY_GET_OWNER_SHIP},
@@ -190,7 +197,8 @@
 			goto unlock;
 		}
 	} else if (inode->i_op->syno_bypass_is_synoacl) {
-		err = inode->i_op->syno_bypass_is_synoacl(dentry, 0, -EPERM);
+		err = inode->i_op->syno_bypass_is_synoacl(dentry,
+				       BYPASS_SYNOACL_SYNOARCHIVE_OVERWRITE, -EPERM);
 		if (err) {
 			goto unlock;
 		}
@@ -202,7 +210,8 @@
 	}
 	if (ALL_SYNO_ACL_ARCHIVE & flags) {
 		if (inode->i_op->syno_bypass_is_synoacl) {
-			err = inode->i_op->syno_bypass_is_synoacl(dentry, 0, -EOPNOTSUPP);
+			err = inode->i_op->syno_bypass_is_synoacl(dentry,
+					        BYPASS_SYNOACL_SYNOARCHIVE_OVERWRITE_ACL, -EOPNOTSUPP);
 			if (err) {
 				goto unlock;
 			}
diff -ur a/fs/fhandle.c b/fs/fhandle.c
--- a/fs/fhandle.c	2017-03-23 15:07:50.000000000 +0100
+++ b/fs/fhandle.c	2017-03-14 02:45:47.000000000 +0100
@@ -195,8 +195,9 @@
 		goto out_err;
 	}
 	/* copy the full handle */
-	if (copy_from_user(handle, ufh,
-			   sizeof(struct file_handle) +
+	*handle = f_handle;
+	if (copy_from_user(&handle->f_handle,
+			   &ufh->f_handle,
 			   f_handle.handle_bytes)) {
 		retval = -EFAULT;
 		goto out_handle;
diff -ur a/fs/file_table.c b/fs/file_table.c
--- a/fs/file_table.c	2017-03-23 15:07:46.000000000 +0100
+++ b/fs/file_table.c	2017-03-14 02:45:43.000000000 +0100
@@ -210,18 +210,15 @@
 	mntput(mnt);
 }
 
-static DEFINE_SPINLOCK(delayed_fput_lock);
-static LIST_HEAD(delayed_fput_list);
+static LLIST_HEAD(delayed_fput_list);
 static void delayed_fput(struct work_struct *unused)
 {
-	LIST_HEAD(head);
-	spin_lock_irq(&delayed_fput_lock);
-	list_splice_init(&delayed_fput_list, &head);
-	spin_unlock_irq(&delayed_fput_lock);
-	while (!list_empty(&head)) {
-		struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
-		list_del_init(&f->f_u.fu_list);
-		__fput(f);
+	struct llist_node *node = llist_del_all(&delayed_fput_list);
+	struct llist_node *next;
+
+	for (; node; node = next) {
+		next = llist_next(node);
+		__fput(llist_entry(node, struct file, f_u.fu_llist));
 	}
 }
 
@@ -241,7 +238,6 @@
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		struct task_struct *task = current;
-		unsigned long flags;
 
 		file_sb_list_del(file);
 		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
@@ -249,10 +245,9 @@
 			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
 				return;
 		}
-		spin_lock_irqsave(&delayed_fput_lock, flags);
-		list_add(&file->f_u.fu_list, &delayed_fput_list);
-		schedule_work(&delayed_fput_work);
-		spin_unlock_irqrestore(&delayed_fput_lock, flags);
+
+		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
+			schedule_work(&delayed_fput_work);
 	}
 }
 
@@ -316,10 +311,6 @@
 	}
 }
 
-#ifdef CONFIG_AUFS_FHSM
-EXPORT_SYMBOL(file_sb_list_del);
-#endif  
-
 #ifdef CONFIG_SMP
 
 #define do_file_list_for_each_entry(__sb, __file)		\
diff -ur a/fs/fscache/netfs.c b/fs/fscache/netfs.c
--- a/fs/fscache/netfs.c	2017-03-23 15:08:36.000000000 +0100
+++ b/fs/fscache/netfs.c	2017-03-14 02:46:29.000000000 +0100
@@ -45,9 +45,6 @@
 	netfs->primary_index->parent		= &fscache_fsdef_index;
 	netfs->primary_index->netfs_data	= netfs;
 
-	atomic_inc(&netfs->primary_index->parent->usage);
-	atomic_inc(&netfs->primary_index->parent->n_children);
-
 	spin_lock_init(&netfs->primary_index->lock);
 	INIT_HLIST_HEAD(&netfs->primary_index->backing_objects);
 
@@ -60,6 +57,9 @@
 			goto already_registered;
 	}
 
+	atomic_inc(&netfs->primary_index->parent->usage);
+	atomic_inc(&netfs->primary_index->parent->n_children);
+
 	list_add(&netfs->link, &fscache_netfs_list);
 	ret = 0;
 
@@ -70,8 +70,7 @@
 	up_write(&fscache_addremove_sem);
 
 	if (ret < 0) {
-		netfs->primary_index->parent = NULL;
-		__fscache_cookie_put(netfs->primary_index);
+		kmem_cache_free(fscache_cookie_jar, netfs->primary_index);
 		netfs->primary_index = NULL;
 	}
 
diff -ur a/fs/fuse/dir.c b/fs/fuse/dir.c
--- a/fs/fuse/dir.c	2017-03-23 15:09:52.000000000 +0100
+++ b/fs/fuse/dir.c	2017-03-14 02:47:47.000000000 +0100
@@ -14,6 +14,11 @@
 #include "../synoacl_int.h"
 #endif  
 
+#ifdef MY_ABC_HERE
+#include "../ntfs/time.h"
+#include "../ntfs/endian.h"
+#endif  
+
 #if defined(MY_ABC_HERE) || defined(MY_ABC_HERE)
 #include <linux/xattr.h>
 #endif  
@@ -1868,8 +1873,15 @@
 	return err;
 }
 
+#if defined(MY_ABC_HERE) || defined(MY_ABC_HERE)
+#define SZ_FS_NTFS	"ntfs"
+#define IS_NTFS_FS(inode) (inode->i_sb->s_subtype && !strcmp(SZ_FS_NTFS, inode->i_sb->s_subtype))
+#endif  
 #ifdef MY_ABC_HERE
-static int fuse_syno_getattr(struct dentry *dentry, struct kstat *stat, int stat_flag)
+#define XATTR_NTFS_CREATE_TIME "ntfs_crtime"
+#endif  
+#ifdef MY_ABC_HERE
+static int fuse_syno_glusterfs_getattr(struct dentry *dentry, struct kstat *stat, int stat_flag)
 {
 	int err = 0;
 	struct inode *dir;
@@ -1878,25 +1890,8 @@
 	struct inode *inode;
 	struct qstr name;
 
-	if (!dentry->d_parent || !dentry->d_parent->d_inode) {
-		printk(KERN_WARNING"fuse syno getattr null entry\n");
-		return -EINVAL;
-	}
 	dir = dentry->d_parent->d_inode;
 
-	if (!IS_GLUSTER_FS(dentry->d_inode)) {
-#ifdef MY_ABC_HERE
-			stat->syno_create_time = dentry->d_inode->i_create_time;
-#endif  
-#ifdef MY_ABC_HERE
-			stat->syno_archive_bit = dentry->d_inode->i_archive_bit;
-#endif  
-#ifdef MY_ABC_HERE
-			stat->syno_archive_version = dentry->d_inode->i_archive_version;
-#endif  
-		return 0;
-	}
-
 	if (stat_flag & (SYNOST_ALL | SYNOST_IS_CASELESS)) {
 		synostat = kmalloc(FUSE_SYNOSTAT_SIZE, GFP_KERNEL);
 		memset(synostat, 0, FUSE_SYNOSTAT_SIZE);
@@ -1928,6 +1923,55 @@
 	iput(inode);
 	return err;
 }
+
+static int fuse_syno_ntfs_getattr(struct dentry *dentry, struct kstat *stat, int stat_flag)
+{
+#ifdef MY_ABC_HERE
+	int size = 0;
+	sle64 time_le = 0;
+	s64 time_s = 0;
+
+	size = fuse_getxattr(dentry, XATTR_SYSTEM_PREFIX XATTR_NTFS_CREATE_TIME, &time_s, sizeof(time_s));
+	if (size == sizeof(time_s)) {
+		time_le = cpu_to_sle64(time_s);
+		stat->syno_create_time = ntfs2utc(time_le);
+	} else {
+		memset(&stat->syno_create_time, 0, sizeof(stat->syno_create_time));
+	}
+#endif  
+#ifdef MY_ABC_HERE
+	stat->syno_archive_bit = dentry->d_inode->i_archive_bit;
+#endif  
+#ifdef MY_ABC_HERE
+	stat->syno_archive_version = dentry->d_inode->i_archive_version;
+#endif  
+	return 0;
+}
+
+static int fuse_syno_getattr(struct dentry *dentry, struct kstat *stat, int stat_flag)
+{
+	if (!dentry->d_parent || !dentry->d_parent->d_inode) {
+		printk(KERN_WARNING "fuse syno getattr null entry\n");
+		return -EINVAL;
+	}
+
+	if (IS_GLUSTER_FS(dentry->d_inode)) {
+		return fuse_syno_glusterfs_getattr(dentry, stat, stat_flag);
+	} else if (IS_NTFS_FS(dentry->d_inode)) {
+		return fuse_syno_ntfs_getattr(dentry, stat, stat_flag);
+	}
+
+#ifdef MY_ABC_HERE
+	stat->syno_create_time = dentry->d_inode->i_create_time;
+#endif  
+#ifdef MY_ABC_HERE
+	stat->syno_archive_bit = dentry->d_inode->i_archive_bit;
+#endif  
+#ifdef MY_ABC_HERE
+	stat->syno_archive_version = dentry->d_inode->i_archive_version;
+#endif  
+	return 0;
+}
 #endif  
 
 #ifdef MY_ABC_HERE
@@ -1944,19 +1988,38 @@
 #endif  
 
 #ifdef MY_ABC_HERE
-static int fuse_syno_set_create_time(struct dentry *dentry, struct timespec* time)
+static int fuse_syno_set_glusterfs_create_time(struct dentry *dentry, struct timespec* time)
 {
 	struct syno_gf_xattr_crtime time_le;
-	struct inode *inode = dentry->d_inode;
 
-	if (!IS_GLUSTER_FS(inode)) {
-		return -EOPNOTSUPP;
-	}
 	time_le.sec = cpu_to_le64(time->tv_sec);
 	time_le.nsec = cpu_to_le32(time->tv_nsec);
 
 	return fuse_setxattr(dentry, XATTR_SYNO_PREFIX XATTR_SYNO_CREATE_TIME, &time_le, sizeof(time_le), 0);
 }
+
+static int fuse_syno_set_ntfs_create_time(struct dentry *dentry, struct timespec* time)
+{
+	sle64 time_le;
+	s64 time_s;
+
+	time_le = utc2ntfs(*time);
+	time_s = sle64_to_cpu(time_le);
+
+	return fuse_setxattr(dentry, XATTR_SYSTEM_PREFIX XATTR_NTFS_CREATE_TIME, &time_s, sizeof(time_s), 0);
+}
+
+static int fuse_syno_set_create_time(struct dentry *dentry, struct timespec* time)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (IS_GLUSTER_FS(inode)) {
+		return fuse_syno_set_glusterfs_create_time(dentry, time);
+	} else if (IS_NTFS_FS(inode)) {
+		return fuse_syno_set_ntfs_create_time(dentry, time);
+	}
+	return -EOPNOTSUPP;
+}
 #endif  
 
 #ifdef MY_ABC_HERE
@@ -2065,6 +2128,11 @@
 	if (IS_GLUSTER_FS(dentry->d_inode)) {
 		return 0;
 	}
+	if (cmd == BYPASS_SYNOACL_SYNOARCHIVE_OVERWRITE) {
+		if (inode_owner_or_capable(dentry->d_inode)) {
+			return 0;
+		}
+	}
 	return reterror;
 }
 
diff -ur a/fs/fuse/file.c b/fs/fuse/file.c
--- a/fs/fuse/file.c	2017-03-23 15:09:52.000000000 +0100
+++ b/fs/fuse/file.c	2017-03-14 02:47:47.000000000 +0100
@@ -1048,6 +1048,7 @@
 
 		mark_page_accessed(page);
 
+		iov_iter_advance(ii, tmp);
 		if (!tmp) {
 			unlock_page(page);
 			page_cache_release(page);
@@ -1060,7 +1061,6 @@
 		req->page_descs[req->num_pages].length = tmp;
 		req->num_pages++;
 
-		iov_iter_advance(ii, tmp);
 		count += tmp;
 		pos += tmp;
 		offset += tmp;
diff -ur a/fs/fuse/inode.c b/fs/fuse/inode.c
--- a/fs/fuse/inode.c	2017-03-23 15:09:51.000000000 +0100
+++ b/fs/fuse/inode.c	2017-03-14 02:47:47.000000000 +0100
@@ -1063,6 +1063,7 @@
 		goto err_fput;
 
 	fuse_conn_init(fc);
+	fc->release = fuse_free_conn;
 
 	fc->dev = sb->s_dev;
 	fc->sb = sb;
@@ -1076,7 +1077,6 @@
 		fc->dont_mask = 1;
 	sb->s_flags |= MS_POSIXACL;
 
-	fc->release = fuse_free_conn;
 	fc->flags = d.flags;
 	fc->user_id = d.user_id;
 	fc->group_id = d.group_id;
diff -ur a/fs/hfs/bnode.c b/fs/hfs/bnode.c
--- a/fs/hfs/bnode.c	2017-03-23 15:08:20.000000000 +0100
+++ b/fs/hfs/bnode.c	2017-03-14 02:46:15.000000000 +0100
@@ -288,7 +288,6 @@
 			page_cache_release(page);
 			goto fail;
 		}
-		page_cache_release(page);
 		node->page[i] = page;
 	}
 
@@ -398,11 +397,11 @@
 
 void hfs_bnode_free(struct hfs_bnode *node)
 {
-	//int i;
+	int i;
 
-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
-	//	if (node->page[i])
-	//		page_cache_release(node->page[i]);
+	for (i = 0; i < node->tree->pages_per_bnode; i++)
+		if (node->page[i])
+			page_cache_release(node->page[i]);
 	kfree(node);
 }
 
diff -ur a/fs/hfs/brec.c b/fs/hfs/brec.c
--- a/fs/hfs/brec.c	2017-03-23 15:08:20.000000000 +0100
+++ b/fs/hfs/brec.c	2017-03-14 02:46:15.000000000 +0100
@@ -131,13 +131,16 @@
 	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
 	hfs_bnode_dump(node);
 
-	if (new_node) {
-		/* update parent key if we inserted a key
-		 * at the start of the first node
-		 */
-		if (!rec && new_node != node)
-			hfs_brec_update_parent(fd);
+	/*
+	 * update parent key if we inserted a key
+	 * at the start of the node and it is not the new node
+	 */
+	if (!rec && new_node != node) {
+		hfs_bnode_read_key(node, fd->search_key, data_off + size);
+		hfs_brec_update_parent(fd);
+	}
 
+	if (new_node) {
 		hfs_bnode_put(fd->bnode);
 		if (!new_node->parent) {
 			hfs_btree_inc_height(tree);
@@ -166,9 +169,6 @@
 		goto again;
 	}
 
-	if (!rec)
-		hfs_brec_update_parent(fd);
-
 	return 0;
 }
 
@@ -366,6 +366,8 @@
 	if (IS_ERR(parent))
 		return PTR_ERR(parent);
 	__hfs_brec_find(parent, fd);
+	if (fd->record < 0)
+		return -ENOENT;
 	hfs_bnode_dump(parent);
 	rec = fd->record;
 
diff -ur a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
--- a/fs/hfsplus/bnode.c	2017-03-23 15:08:59.000000000 +0100
+++ b/fs/hfsplus/bnode.c	2017-03-14 02:46:52.000000000 +0100
@@ -458,10 +458,6 @@
 			page_cache_release(page);
 			goto fail;
 		}
-#ifdef MY_ABC_HERE
-#else
-		page_cache_release(page);
-#endif  
 		node->page[i] = page;
 	}
 
@@ -572,20 +568,11 @@
 
 void hfs_bnode_free(struct hfs_bnode *node)
 {
-#if 0
 	int i;
 
 	for (i = 0; i < node->tree->pages_per_bnode; i++)
 		if (node->page[i])
 			page_cache_release(node->page[i]);
-#endif
-#ifdef MY_ABC_HERE
-	int i;
-
-	for (i = 0; i < node->tree->pages_per_bnode; i++)
-		if (node->page[i])
-			page_cache_release(node->page[i]);
-#endif  
 	kfree(node);
 }
 
diff -ur a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
--- a/fs/hostfs/hostfs_kern.c	2017-03-23 15:09:29.000000000 +0100
+++ b/fs/hostfs/hostfs_kern.c	2017-03-14 02:47:24.000000000 +0100
@@ -711,15 +711,13 @@
 
 	init_special_inode(inode, mode, dev);
 	err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
-	if (!err)
+	if (err)
 		goto out_free;
 
 	err = read_name(inode, name);
 	__putname(name);
 	if (err)
 		goto out_put;
-	if (err)
-		goto out_put;
 
 	d_instantiate(dentry, inode);
 	return 0;
diff -ur a/fs/hpfs/namei.c b/fs/hpfs/namei.c
--- a/fs/hpfs/namei.c	2017-03-23 15:09:54.000000000 +0100
+++ b/fs/hpfs/namei.c	2017-03-14 02:47:50.000000000 +0100
@@ -8,6 +8,17 @@
 #include <linux/sched.h>
 #include "hpfs_fn.h"
 
+static void hpfs_update_directory_times(struct inode *dir)
+{
+	time_t t = get_seconds();
+	if (t == dir->i_mtime.tv_sec &&
+	    t == dir->i_ctime.tv_sec)
+		return;
+	dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
+	dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
+	hpfs_write_inode_nolock(dir);
+}
+
 static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	const unsigned char *name = dentry->d_name.name;
@@ -99,6 +110,7 @@
 		result->i_mode = mode | S_IFDIR;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -187,6 +199,7 @@
 		result->i_mode = mode | S_IFREG;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -262,6 +275,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	brelse(bh);
 	hpfs_unlock(dir->i_sb);
@@ -340,6 +354,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -423,6 +438,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -477,6 +494,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -595,7 +614,7 @@
 		goto end1;
 	}
 
-	end:
+end:
 	hpfs_i(i)->i_parent_dir = new_dir->i_ino;
 	if (S_ISDIR(i->i_mode)) {
 		inc_nlink(new_dir);
@@ -610,6 +629,10 @@
 		brelse(bh);
 	}
 end1:
+	if (!err) {
+		hpfs_update_directory_times(old_dir);
+		hpfs_update_directory_times(new_dir);
+	}
 	hpfs_unlock(i->i_sb);
 	return err;
 }
diff -ur a/fs/hpfs/super.c b/fs/hpfs/super.c
--- a/fs/hpfs/super.c	2017-03-23 15:09:54.000000000 +0100
+++ b/fs/hpfs/super.c	2017-03-14 02:47:50.000000000 +0100
@@ -52,17 +52,20 @@
 }
 
 /* Filesystem error... */
-static char err_buf[1024];
-
 void hpfs_error(struct super_block *s, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	pr_err("filesystem error: %pV", &vaf);
+
 	va_end(args);
 
-	printk("HPFS: filesystem error: %s", err_buf);
 	if (!hpfs_sb(s)->sb_was_error) {
 		if (hpfs_sb(s)->sb_err == 2) {
 			printk("; crashing the system because you wanted it\n");
diff -ur a/fs/inode.c b/fs/inode.c
--- a/fs/inode.c	2017-03-23 15:07:50.000000000 +0100
+++ b/fs/inode.c	2017-03-14 02:45:46.000000000 +0100
@@ -1095,6 +1095,9 @@
 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
 			     struct timespec now)
 {
+#ifdef MY_ABC_HERE
+	long relatime_period = 1;
+#endif  
 
 	if (!(mnt->mnt_flags & MNT_RELATIME))
 		return 1;
@@ -1105,8 +1108,16 @@
 	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
 		return 1;
 
+#ifdef MY_ABC_HERE
+	if (inode->i_sb->relatime_period > 0)
+		relatime_period = inode->i_sb->relatime_period;
+
+	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= relatime_period*24*60*60)
+		return 1;
+#else
 	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
 		return 1;
+#endif  
 	 
 	return 0;
 }
@@ -1135,7 +1146,7 @@
 EXPORT_SYMBOL(update_time);
 #endif  
 
-void touch_atime(struct path *path)
+void touch_atime(const struct path *path)
 {
 	struct vfsmount *mnt = path->mnt;
 	struct inode *inode = path->dentry->d_inode;
@@ -1220,8 +1231,8 @@
 		error = security_inode_killpriv(dentry);
 	if (!error && killsuid)
 		error = __remove_suid(dentry, killsuid);
-	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-		inode->i_flags |= S_NOSEC;
+	if (!error)
+		inode_has_no_xattr(inode);
 
 	return error;
 }
diff -ur a/fs/internal.h b/fs/internal.h
--- a/fs/internal.h	2017-03-23 15:07:50.000000000 +0100
+++ b/fs/internal.h	2017-03-14 02:45:47.000000000 +0100
@@ -45,6 +45,9 @@
  * namei.c
  */
 extern int __inode_permission(struct inode *, int);
+extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+			   const char *, unsigned int, struct path *);
 
 /*
  * namespace.c
diff -ur a/fs/isofs/rock.c b/fs/isofs/rock.c
--- a/fs/isofs/rock.c	2017-03-23 15:07:51.000000000 +0100
+++ b/fs/isofs/rock.c	2017-03-14 02:45:48.000000000 +0100
@@ -203,6 +203,8 @@
 	int retnamlen = 0;
 	int truncate = 0;
 	int ret = 0;
+	char *p;
+	int len;
 
 	if (!ISOFS_SB(inode->i_sb)->s_rock)
 		return 0;
@@ -267,12 +269,17 @@
 					rr->u.NM.flags);
 				break;
 			}
-			if ((strlen(retname) + rr->len - 5) >= 254) {
+			len = rr->len - 5;
+			if (retnamlen + len >= 254) {
 				truncate = 1;
 				break;
 			}
-			strncat(retname, rr->u.NM.name, rr->len - 5);
-			retnamlen += rr->len - 5;
+			p = memchr(rr->u.NM.name, '\0', len);
+			if (unlikely(p))
+				len = p - rr->u.NM.name;
+			memcpy(retname + retnamlen, rr->u.NM.name, len);
+			retnamlen += len;
+			retname[retnamlen] = '\0';
 			break;
 		case SIG('R', 'E'):
 			kfree(rs.buffer);
diff -ur a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
--- a/fs/jbd2/checkpoint.c	2017-03-23 15:08:26.000000000 +0100
+++ b/fs/jbd2/checkpoint.c	2017-03-14 02:46:20.000000000 +0100
@@ -440,7 +440,7 @@
 	unsigned long	blocknr;
 
 	if (is_journal_aborted(journal))
-		return 1;
+		return -EIO;
 
 	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
 		return 1;
@@ -455,10 +455,9 @@
 	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
 	 */
 	if (journal->j_flags & JBD2_BARRIER)
-		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
 
-	__jbd2_update_log_tail(journal, first_tid, blocknr);
-	return 0;
+	return __jbd2_update_log_tail(journal, first_tid, blocknr);
 }
 
 /* Checkpoint list management */
@@ -467,20 +466,18 @@
  * journal_clean_one_cp_list
  *
  * Find all the written-back checkpoint buffers in the given list and
- * release them.
+ * release them. If 'destroy' is set, clean all buffers unconditionally.
  *
- * Called with the journal locked.
  * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
+ * Returns 1 if we freed the transaction, 0 otherwise.
  */
-
-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
 {
 	struct journal_head *last_jh;
 	struct journal_head *next_jh = jh;
-	int ret, freed = 0;
+	int ret;
+	int freed = 0;
 
-	*released = 0;
 	if (!jh)
 		return 0;
 
@@ -488,14 +485,15 @@
 	do {
 		jh = next_jh;
 		next_jh = jh->b_cpnext;
-		ret = __try_to_free_cp_buf(jh);
-		if (ret) {
-			freed++;
-			if (ret == 2) {
-				*released = 1;
-				return freed;
-			}
-		}
+		if (!destroy)
+			ret = __try_to_free_cp_buf(jh);
+		else
+			ret = __jbd2_journal_remove_checkpoint(jh) + 1;
+		if (!ret)
+			return freed;
+		if (ret == 2)
+			return 1;
+		freed = 1;
 		/*
 		 * This function only frees up some memory
 		 * if possible so we dont have an obligation
@@ -513,50 +511,74 @@
  * journal_clean_checkpoint_list
  *
  * Find all the written-back checkpoint buffers in the journal and release them.
+ * If 'destroy' is set, release all buffers unconditionally.
  *
- * Called with the journal locked.
  * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
  */
-
-int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
 {
 	transaction_t *transaction, *last_transaction, *next_transaction;
-	int ret = 0;
-	int released;
+	int ret;
 
 	transaction = journal->j_checkpoint_transactions;
 	if (!transaction)
-		goto out;
+		return;
 
 	last_transaction = transaction->t_cpprev;
 	next_transaction = transaction;
 	do {
 		transaction = next_transaction;
 		next_transaction = transaction->t_cpnext;
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_list, &released);
+		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
+						destroy);
 		/*
 		 * This function only frees up some memory if possible so we
 		 * dont have an obligation to finish processing. Bail out if
 		 * preemption requested:
 		 */
 		if (need_resched())
-			goto out;
-		if (released)
+			return;
+		if (ret)
 			continue;
 		/*
 		 * It is essential that we are as careful as in the case of
 		 * t_checkpoint_list with removing the buffer from the list as
 		 * we can possibly see not yet submitted buffers on io_list
 		 */
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list, &released);
+		ret = journal_clean_one_cp_list(transaction->
+				t_checkpoint_io_list, destroy);
 		if (need_resched())
-			goto out;
+			return;
+		/*
+		 * Stop scanning if we couldn't free the transaction. This
+		 * avoids pointless scanning of transactions which still
+		 * weren't checkpointed.
+		 */
+		if (!ret)
+			return;
 	} while (transaction != last_transaction);
-out:
-	return ret;
+}
+
+/*
+ * Remove buffers from all checkpoint lists as journal is aborted and we just
+ * need to free memory
+ */
+void jbd2_journal_destroy_checkpoint(journal_t *journal)
+{
+	/*
+	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
+	 * early due to a need of rescheduling.
+	 */
+	while (1) {
+		spin_lock(&journal->j_list_lock);
+		if (!journal->j_checkpoint_transactions) {
+			spin_unlock(&journal->j_list_lock);
+			break;
+		}
+		__jbd2_journal_clean_checkpoint_list(journal, true);
+		spin_unlock(&journal->j_list_lock);
+		cond_resched();
+	}
 }
 
 /*
diff -ur a/fs/jbd2/commit.c b/fs/jbd2/commit.c
--- a/fs/jbd2/commit.c	2017-03-23 15:08:26.000000000 +0100
+++ b/fs/jbd2/commit.c	2017-03-14 02:46:21.000000000 +0100
@@ -504,7 +504,7 @@
 	 * frees some memory
 	 */
 	spin_lock(&journal->j_list_lock);
-	__jbd2_journal_clean_checkpoint_list(journal);
+	__jbd2_journal_clean_checkpoint_list(journal, false);
 	spin_unlock(&journal->j_list_lock);
 
 	jbd_debug(3, "JBD2: commit phase 1\n");
diff -ur a/fs/jbd2/journal.c b/fs/jbd2/journal.c
--- a/fs/jbd2/journal.c	2017-03-23 15:08:29.000000000 +0100
+++ b/fs/jbd2/journal.c	2017-03-14 02:46:22.000000000 +0100
@@ -869,9 +869,10 @@
  *
  * Requires j_checkpoint_mutex
  */
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 {
 	unsigned long freed;
+	int ret;
 
 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
 
@@ -881,7 +882,10 @@
 	 * space and if we lose sb update during power failure we'd replay
 	 * old transaction with possibly newly overwritten data.
 	 */
-	jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+	if (ret)
+		goto out;
+
 	write_lock(&journal->j_state_lock);
 	freed = block - journal->j_tail;
 	if (block < journal->j_tail)
@@ -897,6 +901,9 @@
 	journal->j_tail_sequence = tid;
 	journal->j_tail = block;
 	write_unlock(&journal->j_state_lock);
+
+out:
+	return ret;
 }
 
 /*
@@ -1315,7 +1322,7 @@
 	return jbd2_journal_start_thread(journal);
 }
 
-static void jbd2_write_superblock(journal_t *journal, int write_op)
+static int jbd2_write_superblock(journal_t *journal, int write_op)
 {
 	struct buffer_head *bh = journal->j_sb_buffer;
 	journal_superblock_t *sb = journal->j_superblock;
@@ -1354,7 +1361,10 @@
 		printk(KERN_ERR "JBD2: Error %d detected when updating "
 		       "journal superblock for %s.\n", ret,
 		       journal->j_devname);
+		jbd2_journal_abort(journal, ret);
 	}
+
+	return ret;
 }
 
 /**
@@ -1367,10 +1377,11 @@
  * Update a journal's superblock information about log tail and write it to
  * disk, waiting for the IO to complete.
  */
-void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
 				     unsigned long tail_block, int write_op)
 {
 	journal_superblock_t *sb = journal->j_superblock;
+	int ret;
 
 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
 	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
@@ -1379,23 +1390,29 @@
 	sb->s_sequence = cpu_to_be32(tail_tid);
 	sb->s_start    = cpu_to_be32(tail_block);
 
-	jbd2_write_superblock(journal, write_op);
+	ret = jbd2_write_superblock(journal, write_op);
+	if (ret)
+		goto out;
 
 	/* Log is no longer empty */
 	write_lock(&journal->j_state_lock);
 	WARN_ON(!sb->s_sequence);
 	journal->j_flags &= ~JBD2_FLUSHED;
 	write_unlock(&journal->j_state_lock);
+
+out:
+	return ret;
 }
 
 /**
  * jbd2_mark_journal_empty() - Mark on disk journal as empty.
  * @journal: The journal to update.
+ * @write_op: With which operation should we write the journal sb
  *
  * Update a journal's dynamic superblock fields to show that journal is empty.
  * Write updated superblock to disk waiting for IO to complete.
  */
-static void jbd2_mark_journal_empty(journal_t *journal)
+static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
 {
 	journal_superblock_t *sb = journal->j_superblock;
 
@@ -1413,7 +1430,7 @@
 	sb->s_start    = cpu_to_be32(0);
 	read_unlock(&journal->j_state_lock);
 
-	jbd2_write_superblock(journal, WRITE_FUA);
+	jbd2_write_superblock(journal, write_op);
 
 	/* Log is no longer empty */
 	write_lock(&journal->j_state_lock);
@@ -1666,8 +1683,17 @@
 	while (journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
 		mutex_lock(&journal->j_checkpoint_mutex);
-		jbd2_log_do_checkpoint(journal);
+		err = jbd2_log_do_checkpoint(journal);
 		mutex_unlock(&journal->j_checkpoint_mutex);
+		/*
+		 * If checkpointing failed, just free the buffers to avoid
+		 * looping forever
+		 */
+		if (err) {
+			jbd2_journal_destroy_checkpoint(journal);
+			spin_lock(&journal->j_list_lock);
+			break;
+		}
 		spin_lock(&journal->j_list_lock);
 	}
 
@@ -1679,7 +1705,13 @@
 	if (journal->j_sb_buffer) {
 		if (!is_journal_aborted(journal)) {
 			mutex_lock(&journal->j_checkpoint_mutex);
-			jbd2_mark_journal_empty(journal);
+
+			write_lock(&journal->j_state_lock);
+			journal->j_tail_sequence =
+				++journal->j_transaction_sequence;
+			write_unlock(&journal->j_state_lock);
+
+			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
 			mutex_unlock(&journal->j_checkpoint_mutex);
 		} else
 			err = -EIO;
@@ -1919,14 +1951,21 @@
 		return -EIO;
 
 	mutex_lock(&journal->j_checkpoint_mutex);
-	jbd2_cleanup_journal_tail(journal);
+	if (!err) {
+		err = jbd2_cleanup_journal_tail(journal);
+		if (err < 0) {
+			mutex_unlock(&journal->j_checkpoint_mutex);
+			goto out;
+		}
+		err = 0;
+	}
 
 	/* Finally, mark the journal as really needing no recovery.
 	 * This sets s_start==0 in the underlying superblock, which is
 	 * the magic code for a fully-recovered superblock.  Any future
 	 * commits of data to the journal will restore the current
 	 * s_start value. */
-	jbd2_mark_journal_empty(journal);
+	jbd2_mark_journal_empty(journal, WRITE_FUA);
 	mutex_unlock(&journal->j_checkpoint_mutex);
 	write_lock(&journal->j_state_lock);
 	J_ASSERT(!journal->j_running_transaction);
@@ -1935,7 +1974,8 @@
 	J_ASSERT(journal->j_head == journal->j_tail);
 	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
 	write_unlock(&journal->j_state_lock);
-	return 0;
+out:
+	return err;
 }
 
 /**
@@ -1971,7 +2011,7 @@
 	if (write) {
 		/* Lock to make assertions happy... */
 		mutex_lock(&journal->j_checkpoint_mutex);
-		jbd2_mark_journal_empty(journal);
+		jbd2_mark_journal_empty(journal, WRITE_FUA);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	}
 
@@ -2022,8 +2062,12 @@
 
 	__jbd2_journal_abort_hard(journal);
 
-	if (errno)
+	if (errno) {
 		jbd2_journal_update_sb_errno(journal);
+		write_lock(&journal->j_state_lock);
+		journal->j_flags |= JBD2_REC_ERR;
+		write_unlock(&journal->j_state_lock);
+	}
 }
 
 /**
diff -ur a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
--- a/fs/jbd2/transaction.c	2017-03-23 15:08:29.000000000 +0100
+++ b/fs/jbd2/transaction.c	2017-03-14 02:46:22.000000000 +0100
@@ -1928,6 +1928,7 @@
 
 		if (!buffer_dirty(bh)) {
 			/* bdflush has written it.  We can drop it now */
+			__jbd2_journal_remove_checkpoint(jh);
 			goto zap_buffer;
 		}
 
@@ -1957,6 +1958,7 @@
 				/* The orphan record's transaction has
 				 * committed.  We can cleanse this buffer */
 				clear_buffer_jbddirty(bh);
+				__jbd2_journal_remove_checkpoint(jh);
 				goto zap_buffer;
 			}
 		}
diff -ur a/fs/jffs2/build.c b/fs/jffs2/build.c
--- a/fs/jffs2/build.c	2017-03-23 15:08:22.000000000 +0100
+++ b/fs/jffs2/build.c	2017-03-14 02:46:17.000000000 +0100
@@ -48,7 +48,8 @@
 	     ic = next_inode(&i, ic, (c)))
 
 static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
-				    struct jffs2_inode_cache *ic)
+				    struct jffs2_inode_cache *ic,
+				    int *dir_hardlinks)
 {
 	struct jffs2_full_dirent *fd;
 
@@ -67,19 +68,21 @@
 			dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
 				  fd->name, fd->ino, ic->ino);
 			jffs2_mark_node_obsolete(c, fd->raw);
+			/* Clear the ic/raw union so it doesn't cause problems later. */
+			fd->ic = NULL;
 			continue;
 		}
 
+		/* From this point, fd->raw is no longer used so we can set fd->ic */
+		fd->ic = child_ic;
+		child_ic->pino_nlink++;
+		/* If we appear (at this stage) to have hard-linked directories,
+		 * set a flag to trigger a scan later */
 		if (fd->type == DT_DIR) {
-			if (child_ic->pino_nlink) {
-				JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
-					    fd->name, fd->ino, ic->ino);
-				/* TODO: What do we do about it? */
-			} else {
-				child_ic->pino_nlink = ic->ino;
-			}
-		} else
-			child_ic->pino_nlink++;
+			child_ic->flags |= INO_FLAGS_IS_DIR;
+			if (child_ic->pino_nlink > 1)
+				*dir_hardlinks = 1;
+		}
 
 		dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
 		/* Can't free scan_dents so far. We might need them in pass 2 */
@@ -93,8 +96,7 @@
 */
 static int jffs2_build_filesystem(struct jffs2_sb_info *c)
 {
-	int ret;
-	int i;
+	int ret, i, dir_hardlinks = 0;
 	struct jffs2_inode_cache *ic;
 	struct jffs2_full_dirent *fd;
 	struct jffs2_full_dirent *dead_fds = NULL;
@@ -118,7 +120,7 @@
 	/* Now scan the directory tree, increasing nlink according to every dirent found. */
 	for_each_inode(i, c, ic) {
 		if (ic->scan_dents) {
-			jffs2_build_inode_pass1(c, ic);
+			jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
 			cond_resched();
 		}
 	}
@@ -154,6 +156,20 @@
 	}
 
 	dbg_fsbuild("pass 2a complete\n");
+
+	if (dir_hardlinks) {
+		/* If we detected directory hardlinks earlier, *hopefully*
+		 * they are gone now because some of the links were from
+		 * dead directories which still had some old dirents lying
+		 * around and not yet garbage-collected, but which have
+		 * been discarded above. So clear the pino_nlink field
+		 * in each directory, so that the final scan below can
+		 * print appropriate warnings. */
+		for_each_inode(i, c, ic) {
+			if (ic->flags & INO_FLAGS_IS_DIR)
+				ic->pino_nlink = 0;
+		}
+	}
 	dbg_fsbuild("freeing temporary data structures\n");
 
 	/* Finally, we can scan again and free the dirent structs */
@@ -161,6 +177,33 @@
 		while(ic->scan_dents) {
 			fd = ic->scan_dents;
 			ic->scan_dents = fd->next;
+			/* We do use the pino_nlink field to count nlink of
+			 * directories during fs build, so set it to the
+			 * parent ino# now. Now that there's hopefully only
+			 * one. */
+			if (fd->type == DT_DIR) {
+				if (!fd->ic) {
+					/* We'll have complained about it and marked the coresponding
+					/* We'll have complained about it and marked the corresponding
+					continue;
+				}
+
+				/* We *have* to have set this in jffs2_build_inode_pass1() */
+				BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
+
+				/* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
+				 * is set. Otherwise, we know this should never trigger anyway, so
+				 * we don't do the check. And ic->pino_nlink still contains the nlink
+				 * value (which is 1). */
+				if (dir_hardlinks && fd->ic->pino_nlink) {
+					JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
+						    fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
+					/* Should we unlink it from its previous parent? */
+				}
+
+				/* For directories, ic->pino_nlink holds that parent inode # */
+				fd->ic->pino_nlink = ic->ino;
+			}
 			jffs2_free_full_dirent(fd);
 		}
 		ic->scan_dents = NULL;
@@ -239,11 +282,7 @@
 
 			/* Reduce nlink of the child. If it's now zero, stick it on the
 			   dead_fds list to be cleaned up later. Else just free the fd */
-
-			if (fd->type == DT_DIR)
-				child_ic->pino_nlink = 0;
-			else
-				child_ic->pino_nlink--;
+			child_ic->pino_nlink--;
 
 			if (!child_ic->pino_nlink) {
 				dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
diff -ur a/fs/jffs2/file.c b/fs/jffs2/file.c
--- a/fs/jffs2/file.c	2017-03-23 15:08:22.000000000 +0100
+++ b/fs/jffs2/file.c	2017-03-14 02:46:17.000000000 +0100
@@ -137,39 +137,33 @@
 	struct page *pg;
 	struct inode *inode = mapping->host;
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
-	struct jffs2_raw_inode ri;
-	uint32_t alloc_len = 0;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
 	int ret = 0;
 
-	jffs2_dbg(1, "%s()\n", __func__);
-
-	if (pageofs > inode->i_size) {
-		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
-					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
-		if (ret)
-			return ret;
-	}
-
-	mutex_lock(&f->sem);
 	pg = grab_cache_page_write_begin(mapping, index, flags);
-	if (!pg) {
-		if (alloc_len)
-			jffs2_complete_reservation(c);
-		mutex_unlock(&f->sem);
+	if (!pg)
 		return -ENOMEM;
-	}
 	*pagep = pg;
 
-	if (alloc_len) {
+	jffs2_dbg(1, "%s()\n", __func__);
+
+	if (pageofs > inode->i_size) {
 		/* Make new hole frag from old EOF to new page */
+		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+		struct jffs2_raw_inode ri;
 		struct jffs2_full_dnode *fn;
+		uint32_t alloc_len;
 
 		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
 			  (unsigned int)inode->i_size, pageofs);
 
+		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+		if (ret)
+			goto out_page;
+
+		mutex_lock(&f->sem);
 		memset(&ri, 0, sizeof(ri));
 
 		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -196,6 +190,7 @@
 		if (IS_ERR(fn)) {
 			ret = PTR_ERR(fn);
 			jffs2_complete_reservation(c);
+			mutex_unlock(&f->sem);
 			goto out_page;
 		}
 		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -210,10 +205,12 @@
 			jffs2_mark_node_obsolete(c, fn->raw);
 			jffs2_free_full_dnode(fn);
 			jffs2_complete_reservation(c);
+			mutex_unlock(&f->sem);
 			goto out_page;
 		}
 		jffs2_complete_reservation(c);
 		inode->i_size = pageofs;
+		mutex_unlock(&f->sem);
 	}
 
 	/*
@@ -222,18 +219,18 @@
 	 * case of a short-copy.
 	 */
 	if (!PageUptodate(pg)) {
+		mutex_lock(&f->sem);
 		ret = jffs2_do_readpage_nolock(inode, pg);
+		mutex_unlock(&f->sem);
 		if (ret)
 			goto out_page;
 	}
-	mutex_unlock(&f->sem);
 	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
 	return ret;
 
 out_page:
 	unlock_page(pg);
 	page_cache_release(pg);
-	mutex_unlock(&f->sem);
 	return ret;
 }
 
diff -ur a/fs/jffs2/gc.c b/fs/jffs2/gc.c
--- a/fs/jffs2/gc.c	2017-03-23 15:08:23.000000000 +0100
+++ b/fs/jffs2/gc.c	2017-03-14 02:46:17.000000000 +0100
@@ -1296,14 +1296,17 @@
 		BUG_ON(start > orig_start);
 	}
 
-	/* First, use readpage() to read the appropriate page into the page cache */
-	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
-	 *    triggered garbage collection in the first place?
-	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
-	 *    page OK. We'll actually write it out again in commit_write, which is a little
-	 *    suboptimal, but at least we're correct.
-	 */
+	/* The rules state that we must obtain the page lock *before* f->sem, so
+	 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
+	 * actually going to *change* so we're safe; we only allow reading.
+	 *
+	 * It is important to note that jffs2_write_begin() will ensure that its
+	 * page is marked Uptodate before allocating space. That means that if we
+	 * end up here trying to GC the *same* page that jffs2_write_begin() is
+	 * trying to write out, read_cache_page() will not deadlock. */
+	mutex_unlock(&f->sem);
 	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
+	mutex_lock(&f->sem);
 
 	if (IS_ERR(pg_ptr)) {
 		pr_warn("read_cache_page() returned error: %ld\n",
diff -ur a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
--- a/fs/jffs2/nodelist.h	2017-03-23 15:08:24.000000000 +0100
+++ b/fs/jffs2/nodelist.h	2017-03-14 02:46:18.000000000 +0100
@@ -194,6 +194,7 @@
 #define INO_STATE_CLEARING	6	/* In clear_inode() */
 
 #define INO_FLAGS_XATTR_CHECKED	0x01	/* has no duplicate xattr_ref */
+#define INO_FLAGS_IS_DIR	0x02	/* is a directory */
 
 #define RAWNODE_CLASS_INODE_CACHE	0
 #define RAWNODE_CLASS_XATTR_DATUM	1
@@ -249,7 +250,10 @@
 
 struct jffs2_full_dirent
 {
-	struct jffs2_raw_node_ref *raw;
+	union {
+		struct jffs2_raw_node_ref *raw;
+		struct jffs2_inode_cache *ic; /* Just during part of build */
+	};
 	struct jffs2_full_dirent *next;
 	uint32_t version;
 	uint32_t ino; /* == zero for unlink */
diff -ur a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
--- a/fs/jffs2/README.Locking	2016-10-20 04:32:10.000000000 +0200
+++ b/fs/jffs2/README.Locking	2016-07-29 05:48:09.000000000 +0200
@@ -2,10 +2,6 @@
 	JFFS2 LOCKING DOCUMENTATION
 	---------------------------
 
-At least theoretically, JFFS2 does not require the Big Kernel Lock
-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
-code. It has its own locking, as described below.
-
 This document attempts to describe the existing locking rules for
 JFFS2. It is not expected to remain perfectly up to date, but ought to
 be fairly close.
@@ -69,6 +65,7 @@
 	   any f->sem held.
 	2. Never attempt to lock two file mutexes in one thread.
 	   No ordering rules have been made for doing so.
+	3. Never lock a page cache page with f->sem held.
 
 
 	erase_completion_lock spinlock
diff -ur a/fs/lockd/host.c b/fs/lockd/host.c
--- a/fs/lockd/host.c	2017-03-23 15:09:08.000000000 +0100
+++ b/fs/lockd/host.c	2017-03-14 02:47:01.000000000 +0100
@@ -116,7 +116,7 @@
 		atomic_inc(&nsm->sm_count);
 	else {
 		host = NULL;
-		nsm = nsm_get_handle(ni->sap, ni->salen,
+		nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
 					ni->hostname, ni->hostname_len);
 		if (unlikely(nsm == NULL)) {
 			dprintk("lockd: %s failed; no nsm handle\n",
@@ -534,17 +534,18 @@
 
 /**
  * nlm_host_rebooted - Release all resources held by rebooted host
+ * @net:  network namespace
  * @info: pointer to decoded results of NLM_SM_NOTIFY call
  *
  * We were notified that the specified host has rebooted.  Release
  * all resources held by that peer.
  */
-void nlm_host_rebooted(const struct nlm_reboot *info)
+void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
 {
 	struct nsm_handle *nsm;
 	struct nlm_host	*host;
 
-	nsm = nsm_reboot_lookup(info);
+	nsm = nsm_reboot_lookup(net, info);
 	if (unlikely(nsm == NULL))
 		return;
 
diff -ur a/fs/lockd/mon.c b/fs/lockd/mon.c
--- a/fs/lockd/mon.c	2017-03-23 15:09:08.000000000 +0100
+++ b/fs/lockd/mon.c	2017-03-14 02:47:01.000000000 +0100
@@ -51,7 +51,6 @@
 };
 
 static const struct rpc_program	nsm_program;
-static				LIST_HEAD(nsm_handles);
 static				DEFINE_SPINLOCK(nsm_lock);
 
 /*
@@ -259,33 +258,35 @@
 	}
 }
 
-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
-					      const size_t len)
+static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
+					const char *hostname, const size_t len)
 {
 	struct nsm_handle *nsm;
 
-	list_for_each_entry(nsm, &nsm_handles, sm_link)
+	list_for_each_entry(nsm, nsm_handles, sm_link)
 		if (strlen(nsm->sm_name) == len &&
 		    memcmp(nsm->sm_name, hostname, len) == 0)
 			return nsm;
 	return NULL;
 }
 
-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
+static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
+					const struct sockaddr *sap)
 {
 	struct nsm_handle *nsm;
 
-	list_for_each_entry(nsm, &nsm_handles, sm_link)
+	list_for_each_entry(nsm, nsm_handles, sm_link)
 		if (rpc_cmp_addr(nsm_addr(nsm), sap))
 			return nsm;
 	return NULL;
 }
 
-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
+static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
+					const struct nsm_private *priv)
 {
 	struct nsm_handle *nsm;
 
-	list_for_each_entry(nsm, &nsm_handles, sm_link)
+	list_for_each_entry(nsm, nsm_handles, sm_link)
 		if (memcmp(nsm->sm_priv.data, priv->data,
 					sizeof(priv->data)) == 0)
 			return nsm;
@@ -350,6 +351,7 @@
 
 /**
  * nsm_get_handle - Find or create a cached nsm_handle
+ * @net: network namespace
  * @sap: pointer to socket address of handle to find
  * @salen: length of socket address
  * @hostname: pointer to C string containing hostname to find
@@ -362,11 +364,13 @@
  * @hostname cannot be found in the handle cache.  Returns NULL if
  * an error occurs.
  */
-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+struct nsm_handle *nsm_get_handle(const struct net *net,
+				  const struct sockaddr *sap,
 				  const size_t salen, const char *hostname,
 				  const size_t hostname_len)
 {
 	struct nsm_handle *cached, *new = NULL;
+	struct lockd_net *ln = net_generic(net, lockd_net_id);
 
 	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
 		if (printk_ratelimit()) {
@@ -381,9 +385,10 @@
 	spin_lock(&nsm_lock);
 
 	if (nsm_use_hostnames && hostname != NULL)
-		cached = nsm_lookup_hostname(hostname, hostname_len);
+		cached = nsm_lookup_hostname(&ln->nsm_handles,
+					hostname, hostname_len);
 	else
-		cached = nsm_lookup_addr(sap);
+		cached = nsm_lookup_addr(&ln->nsm_handles, sap);
 
 	if (cached != NULL) {
 		atomic_inc(&cached->sm_count);
@@ -397,7 +402,7 @@
 	}
 
 	if (new != NULL) {
-		list_add(&new->sm_link, &nsm_handles);
+		list_add(&new->sm_link, &ln->nsm_handles);
 		spin_unlock(&nsm_lock);
 		dprintk("lockd: created nsm_handle for %s (%s)\n",
 				new->sm_name, new->sm_addrbuf);
@@ -414,19 +419,22 @@
 
 /**
  * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
+ * @net:  network namespace
  * @info: pointer to NLMPROC_SM_NOTIFY arguments
  *
  * Returns a matching nsm_handle if found in the nsm cache. The returned
  * nsm_handle's reference count is bumped. Otherwise returns NULL if some
  * error occurred.
  */
-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
+struct nsm_handle *nsm_reboot_lookup(const struct net *net,
+				const struct nlm_reboot *info)
 {
 	struct nsm_handle *cached;
+	struct lockd_net *ln = net_generic(net, lockd_net_id);
 
 	spin_lock(&nsm_lock);
 
-	cached = nsm_lookup_priv(&info->priv);
+	cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
 	if (unlikely(cached == NULL)) {
 		spin_unlock(&nsm_lock);
 		dprintk("lockd: never saw rebooted peer '%.*s' before\n",
diff -ur a/fs/lockd/netns.h b/fs/lockd/netns.h
--- a/fs/lockd/netns.h	2017-03-23 15:09:08.000000000 +0100
+++ b/fs/lockd/netns.h	2017-03-14 02:47:01.000000000 +0100
@@ -16,6 +16,7 @@
 	spinlock_t nsm_clnt_lock;
 	unsigned int nsm_users;
 	struct rpc_clnt *nsm_clnt;
+	struct list_head nsm_handles;
 };
 
 extern int lockd_net_id;
diff -ur a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
--- a/fs/lockd/svc4proc.c	2017-03-23 15:09:08.000000000 +0100
+++ b/fs/lockd/svc4proc.c	2017-03-14 02:47:01.000000000 +0100
@@ -421,7 +421,7 @@
 		return rpc_system_err;
 	}
 
-	nlm_host_rebooted(argp);
+	nlm_host_rebooted(SVC_NET(rqstp), argp);
 	return rpc_success;
 }
 
diff -ur a/fs/lockd/svc.c b/fs/lockd/svc.c
--- a/fs/lockd/svc.c	2017-03-23 15:09:08.000000000 +0100
+++ b/fs/lockd/svc.c	2017-03-14 02:47:01.000000000 +0100
@@ -581,6 +581,7 @@
 	INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
 	INIT_LIST_HEAD(&ln->grace_list);
 	spin_lock_init(&ln->nsm_clnt_lock);
+	INIT_LIST_HEAD(&ln->nsm_handles);
 	return 0;
 }
 
diff -ur a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
--- a/fs/lockd/svcproc.c	2017-03-23 15:09:09.000000000 +0100
+++ b/fs/lockd/svcproc.c	2017-03-14 02:47:01.000000000 +0100
@@ -464,7 +464,7 @@
 		return rpc_system_err;
 	}
 
-	nlm_host_rebooted(argp);
+	nlm_host_rebooted(SVC_NET(rqstp), argp);
 	return rpc_success;
 }
 
diff -ur a/fs/locks.c b/fs/locks.c
--- a/fs/locks.c	2017-03-23 15:07:50.000000000 +0100
+++ b/fs/locks.c	2017-03-14 02:45:46.000000000 +0100
@@ -1852,7 +1852,6 @@
 		goto out;
 	}
 
-again:
 	error = flock_to_posix_lock(filp, file_lock, &flock);
 	if (error)
 		goto out;
@@ -1883,19 +1882,22 @@
 	 * Attempt to detect a close/fcntl race and recover by
 	 * releasing the lock that was just acquired.
 	 */
-	/*
-	 * we need that spin_lock here - it prevents reordering between
-	 * update of inode->i_flock and check for it done in close().
-	 * rcu_read_lock() wouldn't do.
-	 */
-	spin_lock(&current->files->file_lock);
-	f = fcheck(fd);
-	spin_unlock(&current->files->file_lock);
-	if (!error && f != filp && flock.l_type != F_UNLCK) {
-		flock.l_type = F_UNLCK;
-		goto again;
+	if (!error && file_lock->fl_type != F_UNLCK) {
+		/*
+		 * We need that spin_lock here - it prevents reordering between
+		 * update of inode->i_flock and check for it done in
+		 * close(). rcu_read_lock() wouldn't do.
+		 */
+		spin_lock(&current->files->file_lock);
+		f = fcheck(fd);
+		spin_unlock(&current->files->file_lock);
+		if (f != filp) {
+			file_lock->fl_type = F_UNLCK;
+			error = do_lock_file_wait(filp, cmd, file_lock);
+			WARN_ON_ONCE(error);
+			error = -EBADF;
+		}
 	}
-
 out:
 	locks_free_lock(file_lock);
 	return error;
@@ -1970,7 +1972,6 @@
 		goto out;
 	}
 
-again:
 	error = flock64_to_posix_lock(filp, file_lock, &flock);
 	if (error)
 		goto out;
@@ -2001,14 +2002,22 @@
 	 * Attempt to detect a close/fcntl race and recover by
 	 * releasing the lock that was just acquired.
 	 */
-	spin_lock(&current->files->file_lock);
-	f = fcheck(fd);
-	spin_unlock(&current->files->file_lock);
-	if (!error && f != filp && flock.l_type != F_UNLCK) {
-		flock.l_type = F_UNLCK;
-		goto again;
+	if (!error && file_lock->fl_type != F_UNLCK) {
+		/*
+		 * We need that spin_lock here - it prevents reordering between
+		 * update of inode->i_flock and check for it done in
+		 * close(). rcu_read_lock() wouldn't do.
+		 */
+		spin_lock(&current->files->file_lock);
+		f = fcheck(fd);
+		spin_unlock(&current->files->file_lock);
+		if (f != filp) {
+			file_lock->fl_type = F_UNLCK;
+			error = do_lock_file_wait(filp, cmd, file_lock);
+			WARN_ON_ONCE(error);
+			error = -EBADF;
+		}
 	}
-
 out:
 	locks_free_lock(file_lock);
 	return error;
diff -ur a/fs/namei.c b/fs/namei.c
--- a/fs/namei.c	2017-03-23 15:07:53.000000000 +0100
+++ b/fs/namei.c	2017-03-14 02:45:50.000000000 +0100
@@ -34,6 +34,9 @@
 #endif  
 #include "internal.h"
 #include "mount.h"
+#ifdef MY_ABC_HERE
+extern struct rw_semaphore namespace_sem;
+#endif  
 
 #ifdef MY_ABC_HERE
 int syno_utf8chr_to_utf16chr(u_int16_t *p, const u_int8_t *s, int n);
@@ -540,6 +543,16 @@
 }
 EXPORT_SYMBOL(path_put);
 
+static bool path_connected(const struct path *path)
+{
+	struct vfsmount *mnt = path->mnt;
+
+	if (mnt->mnt_root == mnt->mnt_sb->s_root)
+		return true;
+
+	return is_subdir(path->dentry, mnt->mnt_root);
+}
+
 static inline void lock_rcu_walk(void)
 {
 	br_read_lock(&vfsmount_lock);
@@ -1159,6 +1172,8 @@
 				goto failed;
 			nd->path.dentry = parent;
 			nd->seq = seq;
+			if (unlikely(!path_connected(&nd->path)))
+				goto failed;
 			break;
 		}
 		if (!follow_up_rcu(&nd->path))
@@ -1211,14 +1226,9 @@
 }
 
 #ifdef MY_ABC_HERE
-static void follow_mount(struct nameidata *nd)
-#else
-static void follow_mount(struct path *path)
-#endif  
+static void follow_mount_with_nameidata(struct nameidata *nd)
 {
-#ifdef MY_ABC_HERE
 	struct path *path = &nd->path;
-#endif  
 	while (d_mountpoint(path->dentry)) {
 		struct vfsmount *mounted = lookup_mnt(path);
 		if (!mounted)
@@ -1227,13 +1237,25 @@
 		mntput(path->mnt);
 		path->mnt = mounted;
 		path->dentry = dget(mounted->mnt_root);
-#ifdef MY_ABC_HERE
 		nd->flags |= LOOKUP_MOUNTED;
+	}
+}
 #endif  
+
+static void follow_mount(struct path *path)
+{
+	while (d_mountpoint(path->dentry)) {
+		struct vfsmount *mounted = lookup_mnt(path);
+		if (!mounted)
+			break;
+		dput(path->dentry);
+		mntput(path->mnt);
+		path->mnt = mounted;
+		path->dentry = dget(mounted->mnt_root);
 	}
 }
 
-static void follow_dotdot(struct nameidata *nd)
+static int follow_dotdot(struct nameidata *nd)
 {
 	set_root(nd);
 
@@ -1248,17 +1270,22 @@
 			 
 			nd->path.dentry = dget_parent(nd->path.dentry);
 			dput(old);
+			if (unlikely(!path_connected(&nd->path))) {
+				path_put(&nd->path);
+				return -ENOENT;
+			}
 			break;
 		}
 		if (!follow_up(&nd->path))
 			break;
 	}
 #ifdef MY_ABC_HERE
-	follow_mount(nd);
+	follow_mount_with_nameidata(nd);
 #else
 	follow_mount(&nd->path);
 #endif  
 	nd->inode = nd->path.dentry->d_inode;
+	return 0;
 }
 
 static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
@@ -1499,7 +1526,7 @@
 			if (follow_dotdot_rcu(nd))
 				return -ECHILD;
 		} else
-			follow_dotdot(nd);
+			return follow_dotdot(nd);
 	}
 	return 0;
 }
@@ -2274,6 +2301,146 @@
 	return s;
 }
 
+static int
+mountpoint_last(struct nameidata *nd, struct path *path)
+{
+	int error = 0;
+	struct dentry *dentry;
+	struct dentry *dir = nd->path.dentry;
+
+	if (nd->flags & LOOKUP_RCU) {
+		if (unlazy_walk(nd, NULL)) {
+			error = -ECHILD;
+			goto out;
+		}
+	}
+
+	nd->flags &= ~LOOKUP_PARENT;
+
+	if (unlikely(nd->last_type != LAST_NORM)) {
+		error = handle_dots(nd, nd->last_type);
+		if (error)
+			goto out;
+		dentry = dget(nd->path.dentry);
+		goto done;
+	}
+
+	mutex_lock(&dir->d_inode->i_mutex);
+	dentry = d_lookup(dir, &nd->last);
+	if (!dentry) {
+		 
+		dentry = d_alloc(dir, &nd->last);
+		if (!dentry) {
+			error = -ENOMEM;
+			mutex_unlock(&dir->d_inode->i_mutex);
+			goto out;
+		}
+		dentry = lookup_real(dir->d_inode, dentry, nd->flags);
+		error = PTR_ERR(dentry);
+		if (IS_ERR(dentry)) {
+			mutex_unlock(&dir->d_inode->i_mutex);
+			goto out;
+		}
+	}
+	mutex_unlock(&dir->d_inode->i_mutex);
+
+done:
+	if (!dentry->d_inode) {
+		error = -ENOENT;
+		dput(dentry);
+		goto out;
+	}
+	path->dentry = dentry;
+	path->mnt = mntget(nd->path.mnt);
+	if (should_follow_link(dentry->d_inode, nd->flags & LOOKUP_FOLLOW))
+		return 1;
+	follow_mount(path);
+	error = 0;
+out:
+	terminate_walk(nd);
+	return error;
+}
+
+static int
+path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
+{
+	struct file *base = NULL;
+	struct nameidata nd;
+	int err;
+
+	err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
+	if (unlikely(err))
+		return err;
+
+	current->total_link_count = 0;
+	err = link_path_walk(name, &nd);
+	if (err)
+		goto out;
+
+	err = mountpoint_last(&nd, path);
+	while (err > 0) {
+		void *cookie;
+		struct path link = *path;
+		err = may_follow_link(&link, &nd);
+		if (unlikely(err))
+			break;
+		nd.flags |= LOOKUP_PARENT;
+		err = follow_link(&link, &nd, &cookie);
+		if (err)
+			break;
+		err = mountpoint_last(&nd, path);
+		put_link(&nd, &link, cookie);
+	}
+out:
+	if (base)
+		fput(base);
+
+	if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT))
+		path_put(&nd.root);
+
+	return err;
+}
+
+static int
+filename_mountpoint(int dfd, struct filename *s, struct path *path,
+			unsigned int flags)
+{
+#ifdef MY_ABC_HERE
+	int error = path_mountpoint(dfd, s->name, path, flags);
+#else
+	int error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
+	if (unlikely(error == -ECHILD))
+		error = path_mountpoint(dfd, s->name, path, flags);
+#endif  
+	if (unlikely(error == -ESTALE))
+		error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
+	if (likely(!error))
+		audit_inode(s, path->dentry, 0);
+	return error;
+}
+
+int
+user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
+			struct path *path)
+{
+	struct filename *s = getname(name);
+	int error;
+	if (IS_ERR(s))
+		return PTR_ERR(s);
+	error = filename_mountpoint(dfd, s, path, flags);
+	putname(s);
+	return error;
+}
+
+int
+kern_path_mountpoint(int dfd, const char *name, struct path *path,
+			unsigned int flags)
+{
+	struct filename s = {.name = name};
+	return filename_mountpoint(dfd, &s, path, flags);
+}
+EXPORT_SYMBOL(kern_path_mountpoint);
+
 static inline int check_sticky(struct inode *dir, struct inode *inode)
 {
 	kuid_t fsuid = current_fsuid();
@@ -2913,6 +3080,10 @@
 			goto exit_fput;
 	}
 out:
+	if (unlikely(error > 0)) {
+		WARN_ON(1);
+		error = -EINVAL;
+	}
 	if (got_write)
 		mnt_drop_write(nd->path.mnt);
 	path_put(&save_parent);
@@ -3886,6 +4057,7 @@
 		return NULL;
 	}
 
+	down_read(&namespace_sem);
 	list_for_each(list_head, &mnt_space->list) {
 		struct mount *mnt = list_entry(list_head, struct mount, mnt_list);
 		struct synotify_rename_path *rename_path = NULL;
@@ -3925,6 +4097,7 @@
 			}
 		}
 	}  
+	up_read(&namespace_sem);
 
 	return head;
 }
diff -ur a/fs/namespace.c b/fs/namespace.c
--- a/fs/namespace.c	2017-03-23 15:07:52.000000000 +0100
+++ b/fs/namespace.c	2017-03-14 02:45:49.000000000 +0100
@@ -17,6 +17,9 @@
 #include <linux/uaccess.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
+#ifdef MY_ABC_HERE
+#include <linux/string.h>
+#endif  
 #include "pnode.h"
 #include "internal.h"
 
@@ -49,7 +52,12 @@
 static struct list_head *mount_hashtable __read_mostly;
 static struct list_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
+#ifdef MY_ABC_HERE
+ 
+struct rw_semaphore namespace_sem;
+#else
 static struct rw_semaphore namespace_sem;
+#endif  
 
 struct kobject *fs_kobj;
 EXPORT_SYMBOL_GPL(fs_kobj);
@@ -1071,7 +1079,7 @@
 	if (!(flags & UMOUNT_NOFOLLOW))
 		lookup_flags |= LOOKUP_FOLLOW;
 
-	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
+	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
 	if (retval)
 		goto out;
 	mnt = real_mount(path.mnt);
@@ -1455,12 +1463,76 @@
 	return error;
 }
 
+#ifdef MY_ABC_HERE
+struct syno_mnt_options {
+	long relatime_period;
+};
+
+static void option_erase(char *str) {
+	char *next = strchr(str, ',');
+
+	if (next) {
+		next++;
+		while (*next) {
+			*str = *next;
+			str++;
+			next++;
+		}
+	}
+	while (*str) {
+		*str = '\0';
+		str++;
+	}
+}
+
+enum {
+	Opt_relatime_period,
+	Opt_err,
+};
+
+static struct option_table {
+	int token;
+	const char *name;
+	const char *pattern;
+} tokens[] = {
+	{Opt_relatime_period, "relatime_period", "relatime_period=%ld"},
+	{Opt_err, NULL, NULL},
+};
+
+static int syno_parse_options(struct syno_mnt_options *options, char *data) {
+	struct option_table *p;
+	char *str;
+
+	if (!data)
+		return 0;
+
+	for (p = tokens; p->token != Opt_err; p++) {
+		while (NULL != (str = strstr(data, p->name))) {
+			switch (p->token) {
+				case Opt_relatime_period:
+					if (1 != sscanf(str, p->pattern, &(options->relatime_period))
+						|| options->relatime_period <= 0 || options->relatime_period > 365*10)
+						return -EINVAL;
+					break;
+				default:
+					break;
+			}
+			option_erase(str);
+		}
+	}
+	return 0;
+}
+#endif  
+
 static int do_remount(struct path *path, int flags, int mnt_flags,
 		      void *data)
 {
 	int err;
 	struct super_block *sb = path->mnt->mnt_sb;
 	struct mount *mnt = real_mount(path->mnt);
+#ifdef MY_ABC_HERE
+	struct syno_mnt_options options;
+#endif  
 
 	if (!check_mnt(mnt))
 		return -EINVAL;
@@ -1468,6 +1540,13 @@
 	if (path->dentry != path->mnt->mnt_root)
 		return -EINVAL;
 
+#ifdef MY_ABC_HERE
+	memset(&options, 0, sizeof(struct syno_mnt_options));
+	err = syno_parse_options(&options, data);
+	if (err)
+		return -EINVAL;
+#endif  
+
 	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
 	    !(mnt_flags & MNT_READONLY)) {
 		return -EPERM;
@@ -1512,6 +1591,10 @@
 		mnt->mnt.mnt_flags = mnt_flags;
 		br_write_unlock(&vfsmount_lock);
 	}
+#ifdef MY_ABC_HERE
+	if (options.relatime_period > 0)
+		sb->relatime_period = options.relatime_period;
+#endif  
 	up_write(&sb->s_umount);
 	if (!err) {
 		br_write_lock(&vfsmount_lock);
@@ -1661,6 +1744,9 @@
 	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
 	struct vfsmount *mnt;
 	int err;
+#ifdef MY_ABC_HERE
+	struct syno_mnt_options options;
+#endif  
 
 	if (!fstype)
 		return -EINVAL;
@@ -1681,6 +1767,13 @@
 		}
 	}
 
+#ifdef MY_ABC_HERE
+	memset(&options, 0, sizeof(struct syno_mnt_options));
+	err = syno_parse_options(&options, data);
+	if (err)
+		return -EINVAL;
+#endif  
+
 	mnt = vfs_kern_mount(type, flags, name, data);
 	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
 	    !mnt->mnt_sb->s_subtype)
@@ -1698,6 +1791,11 @@
 	}
 #endif
 
+#ifdef MY_ABC_HERE
+	if (options.relatime_period > 0)
+		mnt->mnt_sb->relatime_period = options.relatime_period;
+#endif  
+
 	if (err)
 		mntput(mnt);
 	return err;
@@ -1974,6 +2072,7 @@
 	else
 		retval = do_new_mount(&path, type_page, flags, mnt_flags,
 				      dev_name, data_page);
+
 #ifdef MY_ABC_HERE
 	up_read(&s_reshape_mount_key);
 #endif  
diff -ur a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
--- a/fs/ncpfs/ncp_fs_sb.h	2017-03-23 15:08:02.000000000 +0100
+++ b/fs/ncpfs/ncp_fs_sb.h	2017-03-14 02:45:58.000000000 +0100
@@ -111,7 +111,7 @@
 
 	spinlock_t requests_lock;	/* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */
 
-	void (*data_ready)(struct sock* sk, int len);
+	void (*data_ready)(struct sock* sk);
 	void (*error_report)(struct sock* sk);
 	void (*write_space)(struct sock* sk);	/* STREAM mode only */
 	struct {
@@ -153,7 +153,7 @@
 extern void ncpdgram_rcv_proc(struct work_struct *work);
 extern void ncpdgram_timeout_proc(struct work_struct *work);
 extern void ncpdgram_timeout_call(unsigned long server);
-extern void ncp_tcp_data_ready(struct sock* sk, int len);
+extern void ncp_tcp_data_ready(struct sock* sk);
 extern void ncp_tcp_write_space(struct sock* sk);
 extern void ncp_tcp_error_report(struct sock* sk);
 
diff -ur a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
--- a/fs/ncpfs/sock.c	2017-03-23 15:08:02.000000000 +0100
+++ b/fs/ncpfs/sock.c	2017-03-14 02:45:59.000000000 +0100
@@ -95,11 +95,11 @@
 		kfree(req);
 }
 
-void ncp_tcp_data_ready(struct sock *sk, int len)
+void ncp_tcp_data_ready(struct sock *sk)
 {
 	struct ncp_server *server = sk->sk_user_data;
 
-	server->data_ready(sk, len);
+	server->data_ready(sk);
 	schedule_work(&server->rcv.tq);
 }
 
diff -ur a/fs/nfs/inode.c b/fs/nfs/inode.c
--- a/fs/nfs/inode.c	2017-03-23 15:09:23.000000000 +0100
+++ b/fs/nfs/inode.c	2017-03-14 02:47:17.000000000 +0100
@@ -1501,7 +1501,11 @@
 			nfsi->attrtimeo_timestamp = now;
 		}
 	}
-	invalid &= ~NFS_INO_INVALID_ATTR;
+
+	/* Don't declare attrcache up to date if there were no attrs! */
+	if (fattr->valid != 0)
+		invalid &= ~NFS_INO_INVALID_ATTR;
+
 	/* Don't invalidate the data if we were to blame */
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
 				|| S_ISLNK(inode->i_mode)))
diff -ur a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
--- a/fs/nfs/nfs3xdr.c	2017-03-23 15:09:24.000000000 +0100
+++ b/fs/nfs/nfs3xdr.c	2017-03-14 02:47:18.000000000 +0100
@@ -1340,7 +1340,7 @@
 	if (args->npages != 0)
 		xdr_write_pages(xdr, args->pages, 0, args->len);
 	else
-		xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
+		xdr_reserve_space(xdr, args->len);
 
 	error = nfsacl_encode(xdr->buf, base, args->inode,
 			    (args->mask & NFS_ACL) ?
diff -ur a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
--- a/fs/nfs/nfs4client.c	2017-03-23 15:09:22.000000000 +0100
+++ b/fs/nfs/nfs4client.c	2017-03-14 02:47:16.000000000 +0100
@@ -32,7 +32,7 @@
 		return ret;
 	idr_preload(GFP_KERNEL);
 	spin_lock(&nn->nfs_client_lock);
-	ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
+	ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
 	if (ret >= 0)
 		clp->cl_cb_ident = ret;
 	spin_unlock(&nn->nfs_client_lock);
diff -ur a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
--- a/fs/nfs/nfs4proc.c	2017-03-23 15:09:28.000000000 +0100
+++ b/fs/nfs/nfs4proc.c	2017-03-14 02:47:20.000000000 +0100
@@ -1003,6 +1003,7 @@
 	 * Protect the call to nfs4_state_set_mode_locked and
 	 * serialise the stateid update
 	 */
+	spin_lock(&state->owner->so_lock);
 	write_seqlock(&state->seqlock);
 	if (deleg_stateid != NULL) {
 		nfs4_stateid_copy(&state->stateid, deleg_stateid);
@@ -1011,7 +1012,6 @@
 	if (open_stateid != NULL)
 		nfs_set_open_stateid_locked(state, open_stateid, fmode);
 	write_sequnlock(&state->seqlock);
-	spin_lock(&state->owner->so_lock);
 	update_open_stateflags(state, fmode);
 	spin_unlock(&state->owner->so_lock);
 }
@@ -2040,7 +2040,7 @@
 	if (status != 0)
 		goto err_opendata_put;
 
-	if ((opendata->o_arg.open_flags & O_EXCL) &&
+	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
 		nfs4_exclusive_attrset(opendata, sattr);
 
diff -ur a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
--- a/fs/nfs/nfs4state.c	2017-03-23 15:09:25.000000000 +0100
+++ b/fs/nfs/nfs4state.c	2017-03-14 02:47:18.000000000 +0100
@@ -1450,6 +1450,8 @@
 				}
 				spin_unlock(&state->state_lock);
 				nfs4_put_open_state(state);
+				clear_bit(NFS_STATE_RECLAIM_NOGRACE,
+					&state->flags);
 				spin_lock(&sp->so_lock);
 				goto restart;
 			}
diff -ur a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
--- a/fs/nfs/pagelist.c	2017-03-23 15:09:19.000000000 +0100
+++ b/fs/nfs/pagelist.c	2017-03-14 02:47:13.000000000 +0100
@@ -60,8 +60,8 @@
 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
 {
 	spin_lock(&hdr->lock);
-	if (pos < hdr->io_start + hdr->good_bytes) {
-		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
+	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
+	    || pos < hdr->io_start + hdr->good_bytes) {
 		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
 		hdr->good_bytes = pos - hdr->io_start;
 		hdr->error = error;
diff -ur a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
--- a/fs/nfsd/vfs.c	2017-03-23 15:08:39.000000000 +0100
+++ b/fs/nfsd/vfs.c	2017-03-14 02:46:32.000000000 +0100
@@ -1682,6 +1682,9 @@
 {
 	struct dentry	*dentry, *rdentry;
 	struct inode	*dirp;
+#ifdef MY_ABC_HERE
+	struct inode *inode = NULL;
+#endif  
 	__be32		err;
 	int		host_err;
 
@@ -1711,6 +1714,13 @@
 		goto out;
 	}
 
+#ifdef MY_ABC_HERE
+	inode = rdentry->d_inode;
+	if (inode) {
+		ihold(inode);
+	}
+#endif  
+
 	if (!type)
 		type = rdentry->d_inode->i_mode & S_IFMT;
 
@@ -1726,6 +1736,12 @@
 out_put:
 	dput(rdentry);
 
+#ifdef MY_ABC_HERE
+	fh_unlock(fhp);
+	if (inode) {
+		iput(inode);
+	}
+#endif  
 out_nfserr:
 	err = nfserrno(host_err);
 out:
@@ -1746,6 +1762,31 @@
 	int		full;
 };
 
+#ifdef MY_ABC_HERE
+const struct {
+	char * name;
+	int len;
+} hidden_files[] = {
+	{"@eaDir", 6},
+	{"@tmp", 4},
+	{"@sharebin", 9},
+	{".AppleDesktop", 13},
+};
+
+static int is_hidden_file(const char *name, int namlen) {
+	 
+	int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(hidden_files); i++) {
+		if (namlen == hidden_files[i].len
+			&& !strncmp(name, hidden_files[i].name, namlen)) {
+			return 1;
+		}
+	}
+	return 0;
+}
+#endif
+
 static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen,
 				 loff_t offset, u64 ino, unsigned int d_type)
 {
@@ -1769,8 +1810,13 @@
 	return 0;
 }
 
+#ifdef MY_ABC_HERE
+static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
+					struct readdir_cd *cdp, loff_t *offsetp, int hide_hidden_file)
+#else
 static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
 					struct readdir_cd *cdp, loff_t *offsetp)
+#endif
 {
 	struct readdir_data buf;
 	struct buffered_dirent *de;
@@ -1812,12 +1858,23 @@
 		while (size > 0) {
 			offset = de->offset;
 
+#ifdef MY_ABC_HERE
+			if (!hide_hidden_file || !is_hidden_file(de->name, de->namlen)) {
+				if (func(cdp, de->name, de->namlen, de->offset,
+					 de->ino, de->d_type))
+					break;
+
+				if (cdp->err != nfs_ok)
+					break;
+			}
+#else
 			if (func(cdp, de->name, de->namlen, de->offset,
 				 de->ino, de->d_type))
 				break;
 
 			if (cdp->err != nfs_ok)
 				break;
+#endif
 
 			reclen = ALIGN(sizeof(*de) + de->namlen,
 					   sizeof(u64));
@@ -1848,6 +1905,13 @@
 	struct file	*file;
 	loff_t		offset = *offsetp;
 	int             may_flags = NFSD_MAY_READ;
+#ifdef MY_ABC_HERE
+	char ex_path_buf[SYNO_MOUNT_PATH_LEN] = {0};
+	char path_buf[SYNO_MOUNT_PATH_LEN] = {0};
+	char *ex_path = NULL;
+	char *path = NULL;
+	int hide_hidden_file = 0;
+#endif
 
 	if (rqstp->rq_vers > 2)
 		may_flags |= NFSD_MAY_64BIT_COOKIE;
@@ -1862,7 +1926,17 @@
 		goto out_close;
 	}
 
+#ifdef MY_ABC_HERE
+	ex_path = d_path(&fhp->fh_export->ex_path, ex_path_buf, sizeof(ex_path_buf));
+	path = d_path(&file->f_path, path_buf, sizeof(path_buf));
+	if (!IS_ERR(ex_path) && !IS_ERR(path)) {
+		hide_hidden_file = !strcmp(ex_path, path);
+	}
+
+	err = nfsd_buffered_readdir(file, func, cdp, offsetp, hide_hidden_file);
+#else
 	err = nfsd_buffered_readdir(file, func, cdp, offsetp);
+#endif
 
 	if (err == nfserr_eof || err == nfserr_toosmall)
 		err = nfs_ok;  
diff -ur a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
--- a/fs/nilfs2/btree.c	2017-03-23 15:09:37.000000000 +0100
+++ b/fs/nilfs2/btree.c	2017-03-14 02:47:31.000000000 +0100
@@ -388,7 +388,7 @@
 	nchildren = nilfs_btree_node_get_nchildren(node);
 
 	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
-		     level > NILFS_BTREE_LEVEL_MAX ||
+		     level >= NILFS_BTREE_LEVEL_MAX ||
 		     nchildren < 0 ||
 		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
 		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
diff -ur a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
--- a/fs/notify/fsnotify.c	2017-03-23 15:08:25.000000000 +0100
+++ b/fs/notify/fsnotify.c	2017-03-14 02:46:19.000000000 +0100
@@ -16,6 +16,7 @@
 
 #ifdef MY_ABC_HERE
 #include <linux/nsproxy.h>
+extern struct rw_semaphore namespace_sem;
 #endif
 
 void __fsnotify_inode_delete(struct inode *inode)
@@ -377,6 +378,7 @@
 	if (nsproxy) {
 		struct mnt_namespace *mnt_space = nsproxy->mnt_ns;
 		if (mnt_space) {
+			down_read(&namespace_sem);
 			list_for_each(head, &mnt_space->list) {
 				struct mount *mnt = list_entry(head, struct mount, mnt_list);
 				if (mnt && mnt->mnt.mnt_sb == dentry->d_sb) {
@@ -386,6 +388,7 @@
 					mntput(vfsmnt);
 				}
 			}
+			up_read(&namespace_sem);
 		}
 	}
 
diff -ur a/fs/notify/mark.c b/fs/notify/mark.c
--- a/fs/notify/mark.c	2017-03-23 15:08:24.000000000 +0100
+++ b/fs/notify/mark.c	2017-03-14 02:46:19.000000000 +0100
@@ -304,16 +304,36 @@
 					 unsigned int flags)
 {
 	struct fsnotify_mark *lmark, *mark;
+	LIST_HEAD(to_free);
 
+	/*
+	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
+	 * fsnotify_clear_marks_by_inode() can come along and free marks, even
+	 * ones on our to_free list, so we have to hold mark_mutex even when
+	 * accessing that list. And freeing a mark requires us to drop
+	 * mark_mutex. So we can reliably free only the first mark in the
+	 * list. That's why we first move the marks to be freed to the to_free
+	 * list in one go and then free the marks from to_free one by one.
+	 */
 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-		if (mark->flags & flags) {
-			fsnotify_get_mark(mark);
-			fsnotify_destroy_mark_locked(mark, group);
-			fsnotify_put_mark(mark);
-		}
+		if (mark->flags & flags)
+			list_move(&mark->g_list, &to_free);
 	}
 	mutex_unlock(&group->mark_mutex);
+
+	while (1) {
+		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+		if (list_empty(&to_free)) {
+			mutex_unlock(&group->mark_mutex);
+			break;
+		}
+		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+		fsnotify_get_mark(mark);
+		fsnotify_destroy_mark_locked(mark, group);
+		mutex_unlock(&group->mark_mutex);
+		fsnotify_put_mark(mark);
+	}
 }
 
 /*
diff -ur a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
--- a/fs/ocfs2/cluster/tcp.c	2017-03-23 15:09:20.000000000 +0100
+++ b/fs/ocfs2/cluster/tcp.c	2017-03-14 02:47:14.000000000 +0100
@@ -137,7 +137,7 @@
 static void o2net_sc_connect_completed(struct work_struct *work);
 static void o2net_rx_until_empty(struct work_struct *work);
 static void o2net_shutdown_sc(struct work_struct *work);
-static void o2net_listen_data_ready(struct sock *sk, int bytes);
+static void o2net_listen_data_ready(struct sock *sk);
 static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
@@ -593,9 +593,9 @@
 }
 
 /* see o2net_register_callbacks() */
-static void o2net_data_ready(struct sock *sk, int bytes)
+static void o2net_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 
 	read_lock(&sk->sk_callback_lock);
 	if (sk->sk_user_data) {
@@ -609,7 +609,7 @@
 	}
 	read_unlock(&sk->sk_callback_lock);
 
-	ready(sk, bytes);
+	ready(sk);
 }
 
 /* see o2net_register_callbacks() */
@@ -1946,9 +1946,9 @@
 		cond_resched();
 }
 
-static void o2net_listen_data_ready(struct sock *sk, int bytes)
+static void o2net_listen_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 
 	read_lock(&sk->sk_callback_lock);
 	ready = sk->sk_user_data;
@@ -1962,13 +1962,13 @@
 	 * data_ready.. we only want to queue listen work for our listening
 	 * socket */
 	if (sk->sk_state == TCP_LISTEN) {
-		mlog(ML_TCP, "bytes: %d\n", bytes);
 		queue_work(o2net_wq, &o2net_listen_work);
 	}
 
 out:
 	read_unlock(&sk->sk_callback_lock);
-	ready(sk, bytes);
+	if (ready != NULL)
+		ready(sk);
 }
 
 static int o2net_open_listening_sock(__be32 addr, __be16 port)
@@ -2001,7 +2001,7 @@
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
 	if (ret < 0) {
 		printk(KERN_ERR "o2net: Error %d while binding socket at "
-		       "%pI4:%u\n", ret, &addr, ntohs(port)); 
+		       "%pI4:%u\n", ret, &addr, ntohs(port));
 		goto out;
 	}
 
diff -ur a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
--- a/fs/ocfs2/cluster/tcp_internal.h	2017-03-23 15:09:18.000000000 +0100
+++ b/fs/ocfs2/cluster/tcp_internal.h	2017-03-14 02:47:12.000000000 +0100
@@ -165,7 +165,7 @@
 
 	/* original handlers for the sockets */
 	void			(*sc_state_change)(struct sock *sk);
-	void			(*sc_data_ready)(struct sock *sk, int bytes);
+	void			(*sc_data_ready)(struct sock *sk);
 
 	u32			sc_msg_key;
 	u16			sc_msg_type;
diff -ur a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
--- a/fs/ocfs2/dlm/dlmconvert.c	2017-03-23 15:09:16.000000000 +0100
+++ b/fs/ocfs2/dlm/dlmconvert.c	2017-03-14 02:47:10.000000000 +0100
@@ -262,6 +262,7 @@
 				  struct dlm_lock *lock, int flags, int type)
 {
 	enum dlm_status status;
+	u8 old_owner = res->owner;
 
 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
@@ -287,6 +288,19 @@
 		status = DLM_DENIED;
 		goto bail;
 	}
+
+	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
+		mlog(0, "last convert request returned DLM_RECOVERING, but "
+		     "owner has already queued and sent ast to me. res %.*s, "
+		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
+		     res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     lock->ml.type, lock->ml.convert_type);
+		status = DLM_NORMAL;
+		goto bail;
+	}
+
 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
 	/* move lock to local convert queue */
 	/* do not alter lock refcount.  switching lists. */
@@ -316,11 +330,19 @@
 	spin_lock(&res->spinlock);
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 	lock->convert_pending = 0;
-	/* if it failed, move it back to granted queue */
+	/* if it failed, move it back to the granted queue.
+	 * if the master returned DLM_NORMAL and then went down before
+	 * sending the ast, the lock may already have been moved to the
+	 * granted queue; reset status to DLM_RECOVERING and retry the convert */
 	if (status != DLM_NORMAL) {
 		if (status != DLM_NOTQUEUED)
 			dlm_error(status);
 		dlm_revert_pending_convert(res, lock);
+	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+			(old_owner != res->owner)) {
+		mlog(0, "res %.*s is in recovering or has been recovered.\n",
+				res->lockname.len, res->lockname.name);
+		status = DLM_RECOVERING;
 	}
 bail:
 	spin_unlock(&res->spinlock);
diff -ur a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
--- a/fs/ocfs2/dlm/dlmmaster.c	2017-03-23 15:09:20.000000000 +0100
+++ b/fs/ocfs2/dlm/dlmmaster.c	2017-03-14 02:47:14.000000000 +0100
@@ -718,6 +718,19 @@
 	if (tmpres) {
 		spin_unlock(&dlm->spinlock);
 		spin_lock(&tmpres->spinlock);
+
+		/*
+		 * Right after dlm spinlock was released, dlm_thread could have
+		 * purged the lockres. Check if lockres got unhashed. If so
+		 * start over.
+		 */
+		if (hlist_unhashed(&tmpres->hash_node)) {
+			spin_unlock(&tmpres->spinlock);
+			dlm_lockres_put(tmpres);
+			tmpres = NULL;
+			goto lookup;
+		}
+
 		/* Wait on the thread that is mastering the resource */
 		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
 			__dlm_wait_on_lockres(tmpres);
@@ -2426,6 +2439,11 @@
 	spin_lock(&dlm->master_lock);
 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
 				    namelen, target, dlm->node_num);
+	/* get an extra reference on the mle.
+	 * otherwise the assert_master from the new
+	 * master will destroy this.
+	 */
+	dlm_get_mle_inuse(mle);
 	spin_unlock(&dlm->master_lock);
 	spin_unlock(&dlm->spinlock);
 
@@ -2461,6 +2479,7 @@
 		if (mle_added) {
 			dlm_mle_detach_hb_events(dlm, mle);
 			dlm_put_mle(mle);
+			dlm_put_mle_inuse(mle);
 		} else if (mle) {
 			kmem_cache_free(dlm_mle_cache, mle);
 			mle = NULL;
@@ -2478,17 +2497,6 @@
 	 * ensure that all assert_master work is flushed. */
 	flush_workqueue(dlm->dlm_worker);
 
-	/* get an extra reference on the mle.
-	 * otherwise the assert_master from the new
-	 * master will destroy this.
-	 * also, make sure that all callers of dlm_get_mle
-	 * take both dlm->spinlock and dlm->master_lock */
-	spin_lock(&dlm->spinlock);
-	spin_lock(&dlm->master_lock);
-	dlm_get_mle_inuse(mle);
-	spin_unlock(&dlm->master_lock);
-	spin_unlock(&dlm->spinlock);
-
 	/* notify new node and send all lock state */
 	/* call send_one_lockres with migration flag.
 	 * this serves as notice to the target node that a
@@ -3213,6 +3221,15 @@
 			    mle->new_master != dead_node)
 				continue;
 
+			if (mle->new_master == dead_node && mle->inuse) {
+				mlog(ML_NOTICE, "%s: target %u died during "
+						"migration from %u, the MLE is "
+						"still in use, ignore it!\n",
+						dlm->name, dead_node,
+						mle->master);
+				continue;
+			}
+
 			/* If we have reached this point, this mle needs to be
 			 * removed from the list and freed. */
 			dlm_clean_migration_mle(dlm, mle);
diff -ur a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
--- a/fs/ocfs2/dlm/dlmrecovery.c	2017-03-23 15:09:20.000000000 +0100
+++ b/fs/ocfs2/dlm/dlmrecovery.c	2017-03-14 02:47:13.000000000 +0100
@@ -2017,7 +2017,6 @@
 			dlm_lock_get(lock);
 			if (lock->convert_pending) {
 				/* move converting lock back to granted */
-				BUG_ON(i != DLM_CONVERTING_LIST);
 				mlog(0, "node died with convert pending "
 				     "on %.*s. move back to granted list.\n",
 				     res->lockname.len, res->lockname.name);
@@ -2305,6 +2304,8 @@
 						break;
 					}
 				}
+				dlm_lockres_clear_refmap_bit(dlm, res,
+						dead_node);
 				spin_unlock(&res->spinlock);
 				continue;
 			}
diff -ur a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
--- a/fs/ocfs2/dlmglue.c	2017-03-23 15:09:15.000000000 +0100
+++ b/fs/ocfs2/dlmglue.c	2017-03-14 02:47:07.000000000 +0100
@@ -3967,9 +3967,13 @@
 	osb->dc_work_sequence = osb->dc_wake_sequence;
 
 	processed = osb->blocked_lock_count;
-	while (processed) {
-		BUG_ON(list_empty(&osb->blocked_lock_list));
-
+	/*
+	 * blocked lock processing in this loop might call iput which can
+	 * remove items off osb->blocked_lock_list. Downconvert up to
+	 * 'processed' number of locks, but stop short if we had some
+	 * removed in ocfs2_mark_lockres_freeing when downconverting.
+	 */
+	while (processed && !list_empty(&osb->blocked_lock_list)) {
 		lockres = list_entry(osb->blocked_lock_list.next,
 				     struct ocfs2_lock_res, l_blocked_list);
 		list_del_init(&lockres->l_blocked_list);
diff -ur a/fs/omfs/inode.c b/fs/omfs/inode.c
--- a/fs/omfs/inode.c	2017-03-23 15:09:01.000000000 +0100
+++ b/fs/omfs/inode.c	2017-03-14 02:46:54.000000000 +0100
@@ -361,7 +361,7 @@
 }
 
 enum {
-	Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
+	Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
 };
 
 static const match_table_t tokens = {
@@ -370,6 +370,7 @@
 	{Opt_umask, "umask=%o"},
 	{Opt_dmask, "dmask=%o"},
 	{Opt_fmask, "fmask=%o"},
+	{Opt_err, NULL},
 };
 
 static int parse_options(char *options, struct omfs_sb_info *sbi)
diff -ur a/fs/pipe.c b/fs/pipe.c
--- a/fs/pipe.c	2017-03-23 15:07:48.000000000 +0100
+++ b/fs/pipe.c	2017-03-14 02:45:45.000000000 +0100
@@ -117,25 +117,27 @@
 }
 
 static int
-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
-			int atomic)
+pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
+			size_t *remaining, int atomic)
 {
 	unsigned long copy;
 
-	while (len > 0) {
+	while (*remaining > 0) {
 		while (!iov->iov_len)
 			iov++;
-		copy = min_t(unsigned long, len, iov->iov_len);
+		copy = min_t(unsigned long, *remaining, iov->iov_len);
 
 		if (atomic) {
-			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
+			if (__copy_from_user_inatomic(addr + *offset,
+						      iov->iov_base, copy))
 				return -EFAULT;
 		} else {
-			if (copy_from_user(to, iov->iov_base, copy))
+			if (copy_from_user(addr + *offset,
+					   iov->iov_base, copy))
 				return -EFAULT;
 		}
-		to += copy;
-		len -= copy;
+		*offset += copy;
+		*remaining -= copy;
 		iov->iov_base += copy;
 		iov->iov_len -= copy;
 	}
@@ -143,25 +145,27 @@
 }
 
 static int
-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
-		      int atomic)
+pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
+		      size_t *remaining, int atomic)
 {
 	unsigned long copy;
 
-	while (len > 0) {
+	while (*remaining > 0) {
 		while (!iov->iov_len)
 			iov++;
-		copy = min_t(unsigned long, len, iov->iov_len);
+		copy = min_t(unsigned long, *remaining, iov->iov_len);
 
 		if (atomic) {
-			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
+			if (__copy_to_user_inatomic(iov->iov_base,
+						    addr + *offset, copy))
 				return -EFAULT;
 		} else {
-			if (copy_to_user(iov->iov_base, from, copy))
+			if (copy_to_user(iov->iov_base,
+					 addr + *offset, copy))
 				return -EFAULT;
 		}
-		from += copy;
-		len -= copy;
+		*offset += copy;
+		*remaining -= copy;
 		iov->iov_base += copy;
 		iov->iov_len -= copy;
 	}
@@ -395,8 +399,9 @@
 			struct pipe_buffer *buf = pipe->bufs + curbuf;
 			const struct pipe_buf_operations *ops = buf->ops;
 			void *addr;
-			size_t chars = buf->len;
+			size_t chars = buf->len, remaining;
 			int error, atomic;
+			int offset;
 
 			if (chars > total_len)
 				chars = total_len;
@@ -409,9 +414,12 @@
 			}
 
 			atomic = !iov_fault_in_pages_write(iov, chars);
+			remaining = chars;
+			offset = buf->offset;
 redo:
 			addr = ops->map(pipe, buf, atomic);
-			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
+			error = pipe_iov_copy_to_user(iov, addr, &offset,
+						      &remaining, atomic);
 			ops->unmap(pipe, buf, addr);
 			if (unlikely(error)) {
 				/*
@@ -531,6 +539,7 @@
 		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
 			int error, atomic = 1;
 			void *addr;
+			size_t remaining = chars;
 
 			error = ops->confirm(pipe, buf);
 			if (error)
@@ -539,8 +548,8 @@
 			iov_fault_in_pages_read(iov, chars);
 redo1:
 			addr = ops->map(pipe, buf, atomic);
-			error = pipe_iov_copy_from_user(offset + addr, iov,
-							chars, atomic);
+			error = pipe_iov_copy_from_user(addr, &offset, iov,
+							&remaining, atomic);
 			ops->unmap(pipe, buf, addr);
 			ret = error;
 			do_wakeup = 1;
@@ -575,6 +584,8 @@
 			struct page *page = pipe->tmp_page;
 			char *src;
 			int error, atomic = 1;
+			int offset = 0;
+			size_t remaining;
 
 			if (!page) {
 				page = alloc_page(GFP_HIGHUSER);
@@ -595,14 +606,15 @@
 				chars = total_len;
 
 			iov_fault_in_pages_read(iov, chars);
+			remaining = chars;
 redo2:
 			if (atomic)
 				src = kmap_atomic(page);
 			else
 				src = kmap(page);
 
-			error = pipe_iov_copy_from_user(src, iov, chars,
-							atomic);
+			error = pipe_iov_copy_from_user(src, &offset, iov,
+							&remaining, atomic);
 			if (atomic)
 				kunmap_atomic(src);
 			else
diff -ur a/fs/proc/array.c b/fs/proc/array.c
--- a/fs/proc/array.c	2017-03-23 15:08:31.000000000 +0100
+++ b/fs/proc/array.c	2017-03-14 02:46:24.000000000 +0100
@@ -398,7 +398,7 @@
 
 	state = *get_task_state(task);
 	vsize = eip = esp = 0;
-	permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
+	permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
 	mm = get_task_mm(task);
 	if (mm) {
 		vsize = task_vsize(mm);
diff -ur a/fs/proc/base.c b/fs/proc/base.c
--- a/fs/proc/base.c	2017-03-23 15:08:35.000000000 +0100
+++ b/fs/proc/base.c	2017-03-14 02:46:28.000000000 +0100
@@ -239,7 +239,7 @@
 
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
 {
-	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
+	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 	int res = PTR_ERR(mm);
 	if (mm && !IS_ERR(mm)) {
 		unsigned int nwords = 0;
@@ -268,7 +268,7 @@
 	wchan = get_wchan(task);
 
 	if (lookup_symbol_name(wchan, symname) < 0)
-		if (!ptrace_may_access(task, PTRACE_MODE_READ))
+		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 			return 0;
 		else
 			return sprintf(buffer, "%lu", wchan);
@@ -282,7 +282,7 @@
 	int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
 	if (err)
 		return err;
-	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
 		mutex_unlock(&task->signal->cred_guard_mutex);
 		return -EPERM;
 	}
@@ -556,7 +556,7 @@
 	 */
 	task = get_proc_task(inode);
 	if (task) {
-		allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+		allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
 		put_task_struct(task);
 	}
 	return allowed;
@@ -591,7 +591,7 @@
 		return true;
 	if (in_group_p(pid->pid_gid))
 		return true;
-	return ptrace_may_access(task, PTRACE_MODE_READ);
+	return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
 }
 
 static int proc_pid_permission(struct inode *inode, int mask)
@@ -703,7 +703,7 @@
 	if (!task)
 		return -ESRCH;
 
-	mm = mm_access(task, mode);
+	mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
 	put_task_struct(task);
 
 	if (IS_ERR(mm))
@@ -841,7 +841,8 @@
 	struct mm_struct *mm = file->private_data;
 	unsigned long env_start, env_end;
 
-	if (!mm)
+	/* Ensure the process spawned far enough to have an environment. */
+	if (!mm || !mm->env_end)
 		return 0;
 
 	page = (char *)__get_free_page(GFP_TEMPORARY);
@@ -1762,7 +1763,7 @@
 	if (!task)
 		goto out_notask;
 
-	mm = mm_access(task, PTRACE_MODE_READ);
+	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 	if (IS_ERR_OR_NULL(mm))
 		goto out;
 
@@ -1901,7 +1902,7 @@
 		goto out;
 
 	result = ERR_PTR(-EACCES);
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 		goto out_put_task;
 
 	result = ERR_PTR(-ENOENT);
@@ -1957,7 +1958,7 @@
 		goto out;
 
 	ret = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 		goto out_put_task;
 
 	ret = 0;
@@ -2493,7 +2494,7 @@
 	if (result)
 		return result;
 
-	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
 		result = -EACCES;
 		goto out_unlock;
 	}
diff -ur a/fs/proc/namespaces.c b/fs/proc/namespaces.c
--- a/fs/proc/namespaces.c	2017-03-23 15:08:31.000000000 +0100
+++ b/fs/proc/namespaces.c	2017-03-14 02:46:24.000000000 +0100
@@ -118,7 +118,7 @@
 	if (!task)
 		goto out;
 
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 		goto out_put_task;
 
 	ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns.ns_ops);
@@ -151,7 +151,7 @@
 	if (!task)
 		goto out;
 
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 		goto out_put_task;
 
 	len = -ENOENT;
diff -ur a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c	2017-03-23 15:08:33.000000000 +0100
+++ b/fs/proc/task_mmu.c	2017-03-14 02:46:26.000000000 +0100
@@ -170,7 +170,7 @@
 	if (!priv->task)
 		return ERR_PTR(-ESRCH);
 
-	mm = mm_access(priv->task, PTRACE_MODE_READ);
+	mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
 	if (!mm || IS_ERR(mm))
 		return mm;
 	down_read(&mm->mmap_sem);
@@ -1050,7 +1050,7 @@
 	if (!pm.buffer)
 		goto out_task;
 
-	mm = mm_access(task, PTRACE_MODE_READ);
+	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 	ret = PTR_ERR(mm);
 	if (!mm || IS_ERR(mm))
 		goto out_free;
diff -ur a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
--- a/fs/proc/task_nommu.c	2017-03-23 15:08:31.000000000 +0100
+++ b/fs/proc/task_nommu.c	2017-03-14 02:46:24.000000000 +0100
@@ -230,7 +230,7 @@
 	if (!priv->task)
 		return ERR_PTR(-ESRCH);
 
-	mm = mm_access(priv->task, PTRACE_MODE_READ);
+	mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
 	if (!mm || IS_ERR(mm)) {
 		put_task_struct(priv->task);
 		priv->task = NULL;
diff -ur a/fs/proc_namespace.c b/fs/proc_namespace.c
--- a/fs/proc_namespace.c	2017-03-23 15:07:49.000000000 +0100
+++ b/fs/proc_namespace.c	2017-03-14 02:45:46.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 /*
  * fs/proc_namespace.c - handling of /proc/<pid>/{mounts,mountinfo,mountstats}
  *
@@ -73,6 +76,11 @@
 		if (mnt->mnt_flags & fs_infop->flag)
 			seq_puts(m, fs_infop->str);
 	}
+#ifdef MY_ABC_HERE
+	if ((mnt->mnt_flags & MNT_RELATIME) && mnt->mnt_root->d_sb->relatime_period > 1) {
+		seq_printf(m, ",relatime_period=%ld", mnt->mnt_root->d_sb->relatime_period);
+	}
+#endif /* MY_ABC_HERE */
 }
 
 static inline void mangle(struct seq_file *m, const char *s)
diff -ur a/fs/read_write.c b/fs/read_write.c
--- a/fs/read_write.c	2017-03-23 15:07:50.000000000 +0100
+++ b/fs/read_write.c	2017-03-14 02:45:47.000000000 +0100
@@ -1349,10 +1349,6 @@
 		fsnotify_modify(file);
 		ret = total_written;
 	} else if(rwbytes) {
-#ifdef CONFIG_IA32_EMULATION
-		rwbytes[0]=total_received;
-		rwbytes[1]=total_written;
-#else
 		if (copy_to_user(&rwbytes[0], &total_received, sizeof(size_t)) < 0) {
 			ret = -ENOMEM;
 			goto out;
@@ -1361,7 +1357,6 @@
 			ret = -ENOMEM;
 			goto out;
 		}
-#endif
 	}
 
 out:
diff -ur a/fs/signalfd.c b/fs/signalfd.c
--- a/fs/signalfd.c	2017-03-23 15:07:47.000000000 +0100
+++ b/fs/signalfd.c	2017-03-14 02:45:44.000000000 +0100
@@ -121,8 +121,9 @@
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitly for the right codes here.
 		 */
-		if (kinfo->si_code == BUS_MCEERR_AR ||
-		    kinfo->si_code == BUS_MCEERR_AO)
+		if (kinfo->si_signo == SIGBUS &&
+		    (kinfo->si_code == BUS_MCEERR_AR ||
+		     kinfo->si_code == BUS_MCEERR_AO))
 			err |= __put_user((short) kinfo->si_addr_lsb,
 					  &uinfo->ssi_addr_lsb);
 #endif
diff -ur a/fs/splice.c b/fs/splice.c
--- a/fs/splice.c	2017-03-23 15:07:49.000000000 +0100
+++ b/fs/splice.c	2017-03-14 02:45:45.000000000 +0100
@@ -189,6 +189,9 @@
 	unsigned int spd_pages = spd->nr_pages;
 	int ret, do_wakeup, page_nr;
 
+	if (!spd_pages)
+		return 0;
+
 	ret = 0;
 	do_wakeup = 0;
 	page_nr = 0;
@@ -949,6 +952,7 @@
 
 	splice_from_pipe_begin(sd);
 	do {
+		cond_resched();
 		ret = splice_from_pipe_next(pipe, sd);
 		if (ret > 0)
 			ret = splice_from_pipe_feed(pipe, sd, actor);
@@ -1206,7 +1210,7 @@
 	long ret, bytes;
 	umode_t i_mode;
 	size_t len;
-	int i, flags;
+	int i, flags, more;
 
 	/*
 	 * We require the input being a regular file, as we don't want to
@@ -1249,6 +1253,7 @@
 	 * Don't block on output, we have to drain the direct pipe.
 	 */
 	sd->flags &= ~SPLICE_F_NONBLOCK;
+	more = sd->flags & SPLICE_F_MORE;
 
 	while (len) {
 		size_t read_len;
@@ -1262,6 +1267,15 @@
 		sd->total_len = read_len;
 
 		/*
+		 * If more data is pending, set SPLICE_F_MORE
+		 * If this is the last data and SPLICE_F_MORE was not set
+		 * initially, clear it.
+		 */
+		if (read_len < len)
+			sd->flags |= SPLICE_F_MORE;
+		else if (!more)
+			sd->flags &= ~SPLICE_F_MORE;
+		/*
 		 * NOTE: nonblocking mode only applies to the input. We
 		 * must not do the output in nonblocking mode as then we
 		 * could get stuck data in the internal pipe:
diff -ur a/fs/sysv/inode.c b/fs/sysv/inode.c
--- a/fs/sysv/inode.c	2017-03-23 15:07:59.000000000 +0100
+++ b/fs/sysv/inode.c	2017-03-14 02:45:57.000000000 +0100
@@ -162,14 +162,8 @@
 		inode->i_fop = &sysv_dir_operations;
 		inode->i_mapping->a_ops = &sysv_aops;
 	} else if (S_ISLNK(inode->i_mode)) {
-		if (inode->i_blocks) {
-			inode->i_op = &sysv_symlink_inode_operations;
-			inode->i_mapping->a_ops = &sysv_aops;
-		} else {
-			inode->i_op = &sysv_fast_symlink_inode_operations;
-			nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size,
-				sizeof(SYSV_I(inode)->i_data) - 1);
-		}
+		inode->i_op = &sysv_symlink_inode_operations;
+		inode->i_mapping->a_ops = &sysv_aops;
 	} else
 		init_special_inode(inode, inode->i_mode, rdev);
 }
diff -ur a/fs/udf/inode.c b/fs/udf/inode.c
--- a/fs/udf/inode.c	2017-03-23 15:09:07.000000000 +0100
+++ b/fs/udf/inode.c	2017-03-14 02:47:00.000000000 +0100
@@ -1495,6 +1495,16 @@
 		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
 	}
 
+	/*
+	 * Sanity check length of allocation descriptors and extended attrs to
+	 * avoid integer overflows
+	 */
+	if (iinfo->i_lenEAttr > inode->i_sb->s_blocksize || iinfo->i_lenAlloc > inode->i_sb->s_blocksize)
+		return;
+	/* Now do exact checks */
+	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > inode->i_sb->s_blocksize)
+		return;
+
 	switch (fe->icbTag.fileType) {
 	case ICBTAG_FILE_TYPE_DIRECTORY:
 		inode->i_op = &udf_dir_inode_operations;
@@ -2045,14 +2055,29 @@
 		epos->offset += adsize;
 }
 
+/*
+ * Only 1 indirect extent in a row really makes sense but allow up to 16 in case
+ * someone does some weird stuff.
+ */
+#define UDF_MAX_INDIR_EXTS 16
+
 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
 		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
 {
 	int8_t etype;
+	unsigned int indirections = 0;
 
 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
 	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
 		int block;
+
+		if (++indirections > UDF_MAX_INDIR_EXTS) {
+			udf_err(inode->i_sb,
+				"too many indirect extents in inode %lu\n",
+				inode->i_ino);
+			return -1;
+		}
+
 		epos->block = *eloc;
 		epos->offset = sizeof(struct allocExtDesc);
 		brelse(epos->bh);
diff -ur a/fs/udf/unicode.c b/fs/udf/unicode.c
--- a/fs/udf/unicode.c	2017-03-23 15:09:04.000000000 +0100
+++ b/fs/udf/unicode.c	2017-03-14 02:46:57.000000000 +0100
@@ -132,11 +132,15 @@
 		if (c < 0x80U)
 			utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
 		else if (c < 0x800U) {
+			if (utf_o->u_len > (UDF_NAME_LEN - 4))
+				break;
 			utf_o->u_name[utf_o->u_len++] =
 						(uint8_t)(0xc0 | (c >> 6));
 			utf_o->u_name[utf_o->u_len++] =
 						(uint8_t)(0x80 | (c & 0x3f));
 		} else {
+			if (utf_o->u_len > (UDF_NAME_LEN - 5))
+				break;
 			utf_o->u_name[utf_o->u_len++] =
 						(uint8_t)(0xe0 | (c >> 12));
 			utf_o->u_name[utf_o->u_len++] =
@@ -177,17 +181,22 @@
 static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
 {
 	unsigned c, i, max_val, utf_char;
-	int utf_cnt, u_len;
+	int utf_cnt, u_len, u_ch;
 
 	memset(ocu, 0, sizeof(dstring) * length);
 	ocu[0] = 8;
 	max_val = 0xffU;
+	u_ch = 1;
 
 try_again:
 	u_len = 0U;
 	utf_char = 0U;
 	utf_cnt = 0U;
 	for (i = 0U; i < utf->u_len; i++) {
+		/* Name didn't fit? */
+		if (u_len + 1 + u_ch >= length)
+			return 0;
+
 		c = (uint8_t)utf->u_name[i];
 
 		/* Complete a multi-byte UTF-8 character */
@@ -229,6 +238,7 @@
 			if (max_val == 0xffU) {
 				max_val = 0xffffU;
 				ocu[0] = (uint8_t)0x10U;
+				u_ch = 2;
 				goto try_again;
 			}
 			goto error_out;
@@ -280,7 +290,7 @@
 			c = (c << 8) | ocu[i++];
 
 		len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
-				    UDF_NAME_LEN - utf_o->u_len);
+				    UDF_NAME_LEN - 2 - utf_o->u_len);
 		/* Valid character? */
 		if (len >= 0)
 			utf_o->u_len += len;
@@ -298,15 +308,19 @@
 	int len;
 	unsigned i, max_val;
 	uint16_t uni_char;
-	int u_len;
+	int u_len, u_ch;
 
 	memset(ocu, 0, sizeof(dstring) * length);
 	ocu[0] = 8;
 	max_val = 0xffU;
+	u_ch = 1;
 
 try_again:
 	u_len = 0U;
 	for (i = 0U; i < uni->u_len; i++) {
+		/* Name didn't fit? */
+		if (u_len + 1 + u_ch >= length)
+			return 0;
 		len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
 		if (!len)
 			continue;
@@ -319,6 +333,7 @@
 		if (uni_char > max_val) {
 			max_val = 0xffffU;
 			ocu[0] = (uint8_t)0x10U;
+			u_ch = 2;
 			goto try_again;
 		}
 
diff -ur a/fs/utimes.c b/fs/utimes.c
--- a/fs/utimes.c	2017-03-23 15:07:46.000000000 +0100
+++ b/fs/utimes.c	2017-03-14 02:45:43.000000000 +0100
@@ -235,7 +235,8 @@
 				goto drop_write;
 		} else if (inode->i_op->syno_bypass_is_synoacl) {
 			 
-			error = inode->i_op->syno_bypass_is_synoacl(path.dentry, 0, -EPERM);
+			error = inode->i_op->syno_bypass_is_synoacl(path.dentry,
+					                BYPASS_SYNOACL_SYNOUTIME, -EPERM);
 			if (error)
 				goto drop_write;
 		} else {
diff -ur a/fs/xattr.c b/fs/xattr.c
--- a/fs/xattr.c	2017-03-23 15:07:49.000000000 +0100
+++ b/fs/xattr.c	2017-03-14 02:45:46.000000000 +0100
@@ -38,7 +38,8 @@
 	if (!strcmp(name, SYNO_ACL_XATTR_ACCESS)) {
 		if (inode->i_op->syno_bypass_is_synoacl) {
 			 
-			return inode->i_op->syno_bypass_is_synoacl(dentry, 0, -EOPNOTSUPP);
+			return inode->i_op->syno_bypass_is_synoacl(dentry,
+					        BYPASS_SYNOACL_SYNOACL_XATTR, -EOPNOTSUPP);
 		} else if (MAY_WRITE == mask || MAY_WRITE_PERMISSION == mask) {
 			return synoacl_check_xattr_perm(name, dentry, MAY_WRITE_PERMISSION);
 		} else if (MAY_READ == mask || MAY_READ_PERMISSION == mask) {
@@ -52,7 +53,8 @@
 		if (!strcmp(name, SYNO_ACL_XATTR_INHERIT)) {
 			if (inode->i_op->syno_bypass_is_synoacl) {
 				 
-				return inode->i_op->syno_bypass_is_synoacl(dentry, 0, -EOPNOTSUPP);
+				return inode->i_op->syno_bypass_is_synoacl(dentry,
+						        BYPASS_SYNOACL_SYNOACL_XATTR, -EOPNOTSUPP);
 			} else if (!IS_SYNOACL(dentry)) {
 				return -EOPNOTSUPP;
 			}
@@ -61,7 +63,8 @@
 		if (!strcmp(name, SYNO_ACL_XATTR_PSEUDO_INHERIT_ONLY)) {
 			if (inode->i_op->syno_bypass_is_synoacl) {
 				 
-				return inode->i_op->syno_bypass_is_synoacl(dentry, 0, -EOPNOTSUPP);
+				return inode->i_op->syno_bypass_is_synoacl(dentry,
+						        BYPASS_SYNOACL_SYNOACL_XATTR, -EOPNOTSUPP);
 			}
 			return synoacl_op_perm(dentry, MAY_READ_PERMISSION);
 		}
diff -ur a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
--- a/fs/xfs/xfs_iops.c	2017-03-23 15:09:41.000000000 +0100
+++ b/fs/xfs/xfs_iops.c	2017-03-14 02:47:36.000000000 +0100
@@ -465,9 +465,6 @@
 	ASSERT(tp);
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-	if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
-		mode &= ~S_ISGID;
-
 	ip->i_d.di_mode &= S_IFMT;
 	ip->i_d.di_mode |= mode & ~S_IFMT;
 
@@ -493,15 +490,18 @@
 
 	trace_xfs_setattr(ip);
 
-	if (mp->m_flags & XFS_MOUNT_RDONLY)
-		return XFS_ERROR(EROFS);
+	/* If acls are being inherited, we already have this checked */
+	if (!(flags & XFS_ATTR_NOACL)) {
+		if (mp->m_flags & XFS_MOUNT_RDONLY)
+			return XFS_ERROR(EROFS);
 
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return XFS_ERROR(EIO);
+		if (XFS_FORCED_SHUTDOWN(mp))
+			return XFS_ERROR(EIO);
 
-	error = -inode_change_ok(inode, iattr);
-	if (error)
-		return XFS_ERROR(error);
+		error = -inode_change_ok(inode, iattr);
+		if (error)
+			return XFS_ERROR(error);
+	}
 
 	ASSERT((mask & ATTR_SIZE) == 0);
 
@@ -985,7 +985,8 @@
 	if (bmv->bmv_oflags & BMV_OF_PREALLOC)
 		fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN;
 	else if (bmv->bmv_oflags & BMV_OF_DELALLOC) {
-		fiemap_flags |= FIEMAP_EXTENT_DELALLOC;
+		fiemap_flags |= (FIEMAP_EXTENT_DELALLOC |
+				 FIEMAP_EXTENT_UNKNOWN);
 		physical = 0;   /* no block yet */
 	}
 	if (bmv->bmv_oflags & BMV_OF_LAST)
diff -ur a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
--- a/fs/xfs/xfs_symlink.c	2017-03-23 15:09:40.000000000 +0100
+++ b/fs/xfs/xfs_symlink.c	2017-03-14 02:47:35.000000000 +0100
@@ -271,7 +271,7 @@
 			cur_chunk += sizeof(struct xfs_dsymlink_hdr);
 		}
 
-		memcpy(link + offset, bp->b_addr, byte_cnt);
+		memcpy(link + offset, cur_chunk, byte_cnt);
 
 		pathlen -= byte_cnt;
 		offset += byte_cnt;
diff -ur a/include/acpi/acpixf.h b/include/acpi/acpixf.h
--- a/include/acpi/acpixf.h	2017-03-23 15:02:50.000000000 +0100
+++ b/include/acpi/acpixf.h	2017-03-14 02:40:09.000000000 +0100
@@ -177,7 +177,7 @@
  */
 acpi_status acpi_reallocate_root_table(void);
 
-acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
+acpi_status acpi_find_root_pointer(acpi_physical_address *rsdp_address);
 
 acpi_status acpi_unload_table_id(acpi_owner_id id);
 
diff -ur a/include/acpi/actbl2.h b/include/acpi/actbl2.h
--- a/include/acpi/actbl2.h	2017-03-23 15:02:52.000000000 +0100
+++ b/include/acpi/actbl2.h	2017-03-14 02:40:12.000000000 +0100
@@ -444,8 +444,8 @@
 };
 
 struct acpi_dmar_pci_path {
-	u8 dev;
-	u8 fn;
+	u8 device;
+	u8 function;
 };
 
 /*
diff -ur a/include/acpi/actypes.h b/include/acpi/actypes.h
--- a/include/acpi/actypes.h	2017-03-23 15:02:51.000000000 +0100
+++ b/include/acpi/actypes.h	2017-03-14 02:40:10.000000000 +0100
@@ -511,6 +511,7 @@
 #define ACPI_NO_ACPI_ENABLE             0x10
 #define ACPI_NO_DEVICE_INIT             0x20
 #define ACPI_NO_OBJECT_INIT             0x40
+#define ACPI_NO_FACS_INIT               0x80
 
 /*
  * Initialization state
diff -ur a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
--- a/include/drm/drm_pciids.h	2017-03-23 15:03:53.000000000 +0100
+++ b/include/drm/drm_pciids.h	2017-03-14 02:41:40.000000000 +0100
@@ -172,6 +172,7 @@
 	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff -ur a/include/linux/ata.h b/include/linux/ata.h
--- a/include/linux/ata.h	2017-03-23 15:00:45.000000000 +0100
+++ b/include/linux/ata.h	2017-03-14 02:37:07.000000000 +0100
@@ -365,7 +365,8 @@
 	SATA_PMP_GSCR_9705_GPO = 928,
 	SATA_PMP_GSCR_9705_GPO_EN = 932,        
 	SATA_PMP_GSCR_9705_GPI_POLARITY = 940,  
-	SATA_PMP_GSCR_9705_SATA_BLINK_RATE = 1004,  
+	SATA_PMP_GSCR_9705_SATA_4_BLINK_RATE = 1000,  
+	SATA_PMP_GSCR_9705_SATA_0_TO_3_BLINK_RATE = 1004,  
 #endif  
 
 	SATA_PMP_PSCR_STATUS	= 0,
@@ -437,8 +438,8 @@
 };
 
 enum ata_ioctls {
-	ATA_IOC_GET_IO32	= 0x309,
-	ATA_IOC_SET_IO32	= 0x324,
+	ATA_IOC_GET_IO32	= 0x309,  
+	ATA_IOC_SET_IO32	= 0x324,  
 };
 
 struct ata_bmdma_prd {
diff -ur a/include/linux/blk_types.h b/include/linux/blk_types.h
--- a/include/linux/blk_types.h	2017-03-23 15:01:41.000000000 +0100
+++ b/include/linux/blk_types.h	2017-03-14 02:38:24.000000000 +0100
@@ -84,6 +84,9 @@
 #define BIO_RESET_BITS	13
 #define BIO_OWNS_VEC	13	 
 #ifdef MY_ABC_HERE
+#define BIO_AUTO_REMAP 14	 
+#endif  
+#ifdef MY_ABC_HERE
  
 #define BIO_MD_RETURN_ERROR 15
 #endif  
Only in a/include/linux: compiler-gcc3.h
Only in a/include/linux: compiler-gcc4.h
Only in a/include/linux: compiler-gcc5.h
diff -ur a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
--- a/include/linux/compiler-gcc.h	2017-03-23 15:01:21.000000000 +0100
+++ b/include/linux/compiler-gcc.h	2017-03-14 02:37:58.000000000 +0100
@@ -99,10 +99,116 @@
 #define __maybe_unused			__attribute__((unused))
 #define __always_unused			__attribute__((unused))
 
-#define __gcc_header(x) #x
-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
-#define gcc_header(x) _gcc_header(x)
-#include gcc_header(__GNUC__)
+/* gcc version specific checks */
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION < 30300
+# define __used			__attribute__((__unused__))
+#else
+# define __used			__attribute__((__used__))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+#   error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
+#if GCC_VERSION >= 30400
+#define __must_check		__attribute__((warn_unused_result))
+#endif
+
+#if GCC_VERSION >= 40000
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 &&  GCC_VERSION <= 40101
+#  error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used			__attribute__((__used__))
+#define __compiler_offsetof(a, b)					\
+	__builtin_offsetof(a, b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ * to them will be unlikely.  This means a lot of manual unlikely()s
+ * are unnecessary now for any paths leading to the usual suspects
+ * like BUG(), printk(), panic() etc. [but let's keep them for now for
+ * older compilers]
+ *
+ * Early snapshots of gcc 4.3 don't support this and we can't detect this
+ * in the preprocessor, but we can live with this because they're unreleased.
+ * Maketime probing would be overkill here.
+ *
+ * gcc also has a __attribute__((__hot__)) to move hot functions into
+ * a special section, but I don't see any sense in this right now in
+ * the kernel context
+ */
+#define __cold			__attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable.  This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased.  Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible	__attribute__((externally_visible))
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#endif	/* gcc version >= 40000 specific checks */
 
 #if !defined(__noclone)
 #define __noclone	/* not needed */
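Note on the __cold and unreachable() helpers introduced in the hunk above: the following minimal sketch shows how they are typically combined. It is illustrative only, not part of the patch; the function names are invented and it assumes only panic() plus the macros defined above.

/* Illustrative sketch -- not part of the patch; names are invented. */
static void __cold example_die(const char *why)
{
	/* Any path calling a __cold function is treated as unlikely,
	 * so no explicit unlikely() hint is needed at the call sites. */
	panic("example: %s", why);
}

static int example_parse(int mode)
{
	switch (mode) {
	case 0:
	case 1:
		return mode;
	default:
		example_die("bad mode");
		/* panic() does not return; unreachable() suppresses the
		 * "control reaches end of non-void function" warning. */
		unreachable();
	}
}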
diff -ur a/include/linux/compiler.h b/include/linux/compiler.h
--- a/include/linux/compiler.h	2017-03-23 15:01:32.000000000 +0100
+++ b/include/linux/compiler.h	2017-03-14 02:38:11.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 #ifndef __LINUX_COMPILER_H
 #define __LINUX_COMPILER_H
 
@@ -131,7 +134,7 @@
  */
 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
 #define __trace_if(cond) \
-	if (__builtin_constant_p((cond)) ? !!(cond) :			\
+	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
 	({								\
 		int ______r;						\
 		static struct ftrace_branch_data			\
@@ -179,6 +182,112 @@
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
 
+#ifdef MY_ABC_HERE
+#include <uapi/linux/types.h>
+
+#define __READ_ONCE_SIZE						\
+({									\
+	switch (size) {							\
+	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
+	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
+	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
+	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
+	default:							\
+		barrier();						\
+		__builtin_memcpy((void *)res, (const void *)p, size);	\
+		barrier();						\
+	}								\
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+#endif
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering.  One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
+ * least two memcpy()s: one for the __builtin_memcpy() and then one for
+ * the macro doing the copy of variable - '__u' allocated on the stack.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define __READ_ONCE(x, check)						\
+({									\
+	union { typeof(x) __val; char __c[1]; } __u;			\
+	if (check)							\
+		__read_once_size(&(x), __u.__c, sizeof(x));		\
+	else								\
+		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
+	__u.__val;							\
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+
+#define WRITE_ONCE(x, val) \
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__val = (__force typeof(x)) (val) }; \
+	__write_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
+#endif /* MY_ABC_HERE */
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
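A minimal usage sketch for the READ_ONCE()/WRITE_ONCE() helpers added above, covering their first documented use case (a flag shared between process context and an interrupt handler on the same CPU). The names are invented and this is not part of the patch.

/* Illustrative sketch -- not part of the patch; names are invented. */
static int example_flag;	/* written from IRQ context, polled from process context */

static void example_irq_handler(void)
{
	WRITE_ONCE(example_flag, 1);	/* single store, never torn or elided */
}

static void example_wait(void)
{
	/* Without READ_ONCE() the compiler could hoist the load out of the
	 * loop and spin forever on a stale register value. */
	while (!READ_ONCE(example_flag))
		cpu_relax();
}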
diff -ur a/include/linux/console.h b/include/linux/console.h
--- a/include/linux/console.h	2017-03-23 15:01:48.000000000 +0100
+++ b/include/linux/console.h	2017-03-14 02:38:33.000000000 +0100
@@ -153,6 +153,7 @@
 extern void console_unlock(void);
 extern void console_conditional_schedule(void);
 extern void console_unblank(void);
+extern void console_flush_on_panic(void);
 extern struct tty_driver *console_device(int *);
 extern void console_stop(struct console *);
 extern void console_start(struct console *);
diff -ur a/include/linux/cpu.h b/include/linux/cpu.h
--- a/include/linux/cpu.h	2017-03-23 15:01:19.000000000 +0100
+++ b/include/linux/cpu.h	2017-03-14 02:37:55.000000000 +0100
@@ -1,6 +1,3 @@
-#ifndef MY_ABC_HERE
-#define MY_ABC_HERE
-#endif
  
 #ifndef _LINUX_CPU_H_
 #define _LINUX_CPU_H_
@@ -50,7 +47,7 @@
 
 	CPU_PRI_PERF		= 20,
 	CPU_PRI_MIGRATION	= 10,
-#ifdef MY_DEF_HERE
+#ifdef CONFIG_SYNO_FIX_SMPBOOT_RACE
 	CPU_PRI_SMPBOOT		= 9,
 #endif  
 	 
@@ -109,7 +106,7 @@
 }
 #endif
 
-#ifdef MY_DEF_HERE
+#ifdef CONFIG_SYNO_FIX_SMPBOOT_RACE
 void smpboot_thread_init(void);
 #endif  
 int cpu_up(unsigned int cpu);
@@ -138,7 +135,7 @@
 {
 }
 
-#ifdef MY_DEF_HERE
+#ifdef CONFIG_SYNO_FIX_SMPBOOT_RACE
 static inline void smpboot_thread_init(void)
 {
 }
diff -ur a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
--- a/include/linux/crc-t10dif.h	2017-03-23 15:01:15.000000000 +0100
+++ b/include/linux/crc-t10dif.h	2017-03-14 02:37:50.000000000 +0100
@@ -3,6 +3,10 @@
 
 #include <linux/types.h>
 
+#define CRC_T10DIF_DIGEST_SIZE 2
+#define CRC_T10DIF_BLOCK_SIZE 1
+
+__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
 __u16 crc_t10dif(unsigned char const *, size_t);
 
 #endif
diff -ur a/include/linux/device-mapper.h b/include/linux/device-mapper.h
--- a/include/linux/device-mapper.h	2017-03-23 15:00:56.000000000 +0100
+++ b/include/linux/device-mapper.h	2017-03-14 02:37:21.000000000 +0100
@@ -72,6 +72,11 @@
 
 typedef int (*dm_busy_fn) (struct dm_target *ti);
 
+#ifdef MY_ABC_HERE
+typedef void (*dm_lvinfoset_fn) (struct dm_target *ti);
+typedef sector_t (*dm_lg_sector_get_fn) (sector_t sector, struct dm_target *ti);
+#endif  
+
 void dm_error(const char *message);
 
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
@@ -112,6 +117,10 @@
 	dm_handle_4kn_target_support_fn handle_4kn_target_support;
 #endif  
 	dm_io_hints_fn io_hints;
+#ifdef MY_ABC_HERE
+	dm_lvinfoset_fn lvinfoset;
+	dm_lg_sector_get_fn lg_sector_get;
+#endif  
 
 	struct list_head list;
 };
diff -ur a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
--- a/include/linux/devpts_fs.h	2017-03-23 15:00:49.000000000 +0100
+++ b/include/linux/devpts_fs.h	2017-03-14 02:37:14.000000000 +0100
@@ -19,6 +19,8 @@
 
 int devpts_new_index(struct inode *ptmx_inode);
 void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
 /* mknod in devpts */
 struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
 		void *priv);
@@ -32,6 +34,8 @@
 /* Dummy stubs in the no-pty case */
 static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
 static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
 static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
 		dev_t device, int index, void *priv)
 {
diff -ur a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
--- a/include/linux/dma_remapping.h	2017-03-23 15:00:51.000000000 +0100
+++ b/include/linux/dma_remapping.h	2017-03-14 02:37:16.000000000 +0100
@@ -26,7 +26,6 @@
 struct root_entry;
 
 #ifdef CONFIG_INTEL_IOMMU
-extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
@@ -40,9 +39,6 @@
 {
 	return 0;
 }
-static inline void free_dmar_iommu(struct intel_iommu *iommu)
-{
-}
 #define dmar_disabled	(1)
 #define intel_iommu_enabled (0)
 #endif
diff -ur a/include/linux/dmar.h b/include/linux/dmar.h
--- a/include/linux/dmar.h	2017-03-23 15:00:57.000000000 +0100
+++ b/include/linux/dmar.h	2017-03-14 02:37:24.000000000 +0100
@@ -25,6 +25,8 @@
 #include <linux/types.h>
 #include <linux/msi.h>
 #include <linux/irqreturn.h>
+#include <linux/rwsem.h>
+#include <linux/rcupdate.h>
 
 struct acpi_dmar_header;
 
@@ -33,13 +35,20 @@
 #define DMAR_X2APIC_OPT_OUT	0x2
 
 struct intel_iommu;
+
+struct dmar_dev_scope {
+	struct device __rcu *dev;
+	u8 bus;
+	u8 devfn;
+};
+
 #ifdef CONFIG_DMAR_TABLE
 extern struct acpi_table_header *dmar_tbl;
 struct dmar_drhd_unit {
 	struct list_head list;		/* list of drhd units	*/
 	struct  acpi_dmar_header *hdr;	/* ACPI header		*/
 	u64	reg_base_addr;		/* register base address*/
-	struct	pci_dev **devices; 	/* target device array	*/
+	struct	dmar_dev_scope *devices;/* target device array	*/
 	int	devices_cnt;		/* target device count	*/
 	u16	segment;		/* PCI domain		*/
 	u8	ignored:1; 		/* ignore drhd		*/
@@ -47,29 +56,66 @@
 	struct intel_iommu *iommu;
 };
 
+struct dmar_pci_notify_info {
+	struct pci_dev			*dev;
+	unsigned long			event;
+	int				bus;
+	u16				seg;
+	u16				level;
+	struct acpi_dmar_pci_path	path[];
+}  __attribute__((packed));
+
+extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;
 
 #define for_each_drhd_unit(drhd) \
-	list_for_each_entry(drhd, &dmar_drhd_units, list)
+	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+
+#define for_each_active_drhd_unit(drhd)					\
+	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)		\
+		if (drhd->ignored) {} else
 
 #define for_each_active_iommu(i, drhd)					\
-	list_for_each_entry(drhd, &dmar_drhd_units, list)		\
+	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)		\
 		if (i=drhd->iommu, drhd->ignored) {} else
 
 #define for_each_iommu(i, drhd)						\
-	list_for_each_entry(drhd, &dmar_drhd_units, list)		\
+	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)		\
 		if (i=drhd->iommu, 0) {} else 
 
+static inline bool dmar_rcu_check(void)
+{
+	return rwsem_is_locked(&dmar_global_lock) ||
+	       system_state == SYSTEM_BOOTING;
+}
+
+#define	dmar_rcu_dereference(p)	rcu_dereference_check((p), dmar_rcu_check())
+
+#define	for_each_dev_scope(a, c, p, d)	\
+	for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
+			NULL, (p) < (c)); (p)++)
+
+#define	for_each_active_dev_scope(a, c, p, d)	\
+	for_each_dev_scope((a), (c), (p), (d))	if (!(d)) { continue; } else
+
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
-
+extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
+				struct dmar_dev_scope **devices, u16 segment);
+extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
+extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
+extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+				 void *start, void*end, u16 segment,
+				 struct dmar_dev_scope *devices,
+				 int devices_cnt);
+extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
+				 u16 segment, struct dmar_dev_scope *devices,
+				 int count);
 /* Intel IOMMU detection */
 extern int detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
-
-extern int parse_ioapics_under_ir(void);
-extern int alloc_iommu(struct dmar_drhd_unit *);
 #else
+struct dmar_pci_notify_info;
 static inline int detect_intel_iommu(void)
 {
 	return -ENODEV;
@@ -133,32 +179,9 @@
 
 #ifdef CONFIG_INTEL_IOMMU
 extern int iommu_detected, no_iommu;
-extern struct list_head dmar_rmrr_units;
-struct dmar_rmrr_unit {
-	struct list_head list;		/* list of rmrr units	*/
-	struct acpi_dmar_header *hdr;	/* ACPI header		*/
-	u64	base_address;		/* reserved base address*/
-	u64	end_address;		/* reserved end address */
-	struct pci_dev **devices;	/* target devices */
-	int	devices_cnt;		/* target device count */
-};
-
-#define for_each_rmrr_units(rmrr) \
-	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-
-struct dmar_atsr_unit {
-	struct list_head list;		/* list of ATSR units */
-	struct acpi_dmar_header *hdr;	/* ACPI header */
-	struct pci_dev **devices;	/* target devices */
-	int devices_cnt;		/* target device count */
-	u8 include_all:1;		/* include all ports */
-};
-
-int dmar_parse_rmrr_atsr_dev(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
-				struct pci_dev ***devices, u16 segment);
+extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
 extern int intel_iommu_init(void);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
@@ -170,7 +193,7 @@
 {
 	return 0;
 }
-static inline int dmar_parse_rmrr_atsr_dev(void)
+static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 {
 	return 0;
 }
diff -ur a/include/linux/dm-io.h b/include/linux/dm-io.h
--- a/include/linux/dm-io.h	2017-03-23 15:01:14.000000000 +0100
+++ b/include/linux/dm-io.h	2017-03-14 02:37:47.000000000 +0100
@@ -60,7 +60,7 @@
 
 #ifdef MY_ABC_HERE
 int syno_dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct dm_io_region *region, unsigned long *sync_error_bits);
+		struct dm_io_region *region, unsigned long *sync_error_bits, unsigned long bi_flags);
 #endif  
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	  struct dm_io_region *region, unsigned long *sync_error_bits);
diff -ur a/include/linux/efi.h b/include/linux/efi.h
--- a/include/linux/efi.h	2017-03-23 15:01:32.000000000 +0100
+++ b/include/linux/efi.h	2017-03-14 02:38:11.000000000 +0100
@@ -767,8 +767,10 @@
  * and we use a page for reading/writing.
  */
 
+#define EFI_VAR_NAME_LEN	1024
+
 struct efi_variable {
-	efi_char16_t  VariableName[1024/sizeof(efi_char16_t)];
+	efi_char16_t  VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
 	efi_guid_t    VendorGuid;
 	unsigned long DataSize;
 	__u8          Data[1024];
@@ -830,7 +832,10 @@
 struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
 				       struct list_head *head, bool remove);
 
-bool efivar_validate(struct efi_variable *var, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+		     unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+				  size_t len);
 
 extern struct work_struct efivar_work;
 void efivar_run_worker(void);
diff -ur a/include/linux/enclosure.h b/include/linux/enclosure.h
--- a/include/linux/enclosure.h	2017-03-23 15:01:47.000000000 +0100
+++ b/include/linux/enclosure.h	2017-03-14 02:38:33.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 /*
  * Enclosure Services
  *
@@ -29,7 +32,14 @@
 /* A few generic types ... taken from ses-2 */
 enum enclosure_component_type {
 	ENCLOSURE_COMPONENT_DEVICE = 0x01,
+	ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
+#ifdef MY_DEF_HERE
+	ENCLOSURE_COMPONENT_ENCLOSURE = 0x0E,
+#endif /* MY_DEF_HERE */
+	ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
+	ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
 	ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
+	ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
 };
 
 /* ses-2 common element status */
diff -ur a/include/linux/fs.h b/include/linux/fs.h
--- a/include/linux/fs.h	2017-03-23 15:01:03.000000000 +0100
+++ b/include/linux/fs.h	2017-03-14 02:37:29.000000000 +0100
@@ -12,6 +12,7 @@
 #include <linux/stat.h>
 #include <linux/cache.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/radix-tree.h>
 #include <linux/rbtree.h>
 #include <linux/init.h>
@@ -94,6 +95,13 @@
 
 #define MASK_RDONLY_CHECK (MAY_WRITE|MAY_APPEND|MAY_WRITE_ATTR|MAY_WRITE_EXT_ATTR|MAY_WRITE_PERMISSION|MAY_DEL|MAY_DEL_CHILD|MAY_GET_OWNER_SHIP)
 
+enum bypass_synoacl_type {
+	BYPASS_SYNOACL_SYNOUTIME,
+	BYPASS_SYNOACL_SYNOARCHIVE_OVERWRITE,
+	BYPASS_SYNOACL_SYNOARCHIVE_OVERWRITE_ACL,
+	BYPASS_SYNOACL_SYNOACL_XATTR,
+	BYPASS_SYNOACL_MAX
+};
 #else  
 #define MAY_EXEC		0x00000001
 #define MAY_WRITE		0x00000002
@@ -621,6 +629,7 @@
 	 
 	union {
 		struct list_head	fu_list;
+		struct llist_node	fu_llist;
 		struct rcu_head 	fu_rcuhead;
 	} f_u;
 	struct path		f_path;
@@ -1131,6 +1140,10 @@
 	atomic_long_t s_remove_count;
 
 	int s_readonly_remount;
+#ifdef MY_ABC_HERE
+	 
+	long relatime_period;
+#endif  
 };
 
 #ifdef MY_ABC_HERE
@@ -1478,7 +1491,7 @@
 	S_VERSION = 8,
 };
 
-extern void touch_atime(struct path *);
+extern void touch_atime(const struct path *);
 static inline void file_accessed(struct file *file)
 {
 	if (!(file->f_flags & O_NOATIME))
diff -ur a/include/linux/genhd.h b/include/linux/genhd.h
--- a/include/linux/genhd.h	2017-03-23 15:00:45.000000000 +0100
+++ b/include/linux/genhd.h	2017-03-14 02:37:07.000000000 +0100
@@ -113,6 +113,9 @@
 #endif
 	atomic_t ref;
 	struct rcu_head rcu_head;
+#ifdef MY_ABC_HERE
+	unsigned auto_remap;
+#endif  
 };
 
 #define GENHD_FL_REMOVABLE			1
diff -ur a/include/linux/iio/iio.h b/include/linux/iio/iio.h
--- a/include/linux/iio/iio.h	2017-03-23 15:02:14.000000000 +0100
+++ b/include/linux/iio/iio.h	2017-03-14 02:39:15.000000000 +0100
@@ -567,6 +567,15 @@
 #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
 
 /**
+ * IIO_RAD_TO_DEGREE() - Convert rad to degree
+ * @rad: A value in rad
+ *
+ * Returns the given value converted from rad to degree
+ */
+#define IIO_RAD_TO_DEGREE(rad) \
+	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
+
+/**
  * IIO_G_TO_M_S_2() - Convert g to meter / second**2
  * @g: A value in g
  *
@@ -574,4 +583,12 @@
  */
 #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
 
+/**
+ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
+ * @ms2: A value in meter / second**2
+ *
+ * Returns the given value converted from meter / second**2 to g
+ */
+#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
+
 #endif /* _INDUSTRIAL_IO_H_ */
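As a worked example of the rounding in the new conversion macros: IIO_RAD_TO_DEGREE(3) expands to (3 * 18000000 + 157079) / 314159 = 54157079 / 314159 = 172, which matches 3 rad ≈ 171.9° rounded to the nearest degree. Adding half the divisor before the integer division is what turns truncation into round-to-nearest; IIO_M_S_2_TO_G() applies the same trick with 980665 / 2.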
diff -ur a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
--- a/include/linux/intel-iommu.h	2017-03-23 15:01:30.000000000 +0100
+++ b/include/linux/intel-iommu.h	2017-03-14 02:38:08.000000000 +0100
@@ -288,6 +288,7 @@
 
 struct ir_table {
 	struct irte *base;
+	unsigned long *bitmap;
 };
 #endif
 
@@ -348,8 +349,6 @@
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
-extern int alloc_iommu(struct dmar_drhd_unit *drhd);
-extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void dmar_disable_qi(struct intel_iommu *iommu);
 extern int dmar_reenable_qi(struct intel_iommu *iommu);
diff -ur a/include/linux/interrupt.h b/include/linux/interrupt.h
--- a/include/linux/interrupt.h	2017-03-23 15:01:30.000000000 +0100
+++ b/include/linux/interrupt.h	2017-03-14 02:38:09.000000000 +0100
@@ -11,8 +11,6 @@
 #include <linux/irqnr.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
 #include <linux/hrtimer.h>
 #include <linux/kref.h>
 #include <linux/workqueue.h>
@@ -487,15 +485,6 @@
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
-/* This is the worklist that queues up per-cpu softirq work.
- *
- * send_remote_sendirq() adds work to these lists, and
- * the softirq handler itself dequeues from them.  The queues
- * are protected by disabling local cpu interrupts and they must
- * only be accessed by the local cpu that they are for.
- */
-DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
@@ -503,17 +492,6 @@
 	return this_cpu_read(ksoftirqd);
 }
 
-/* Try to send a softirq to a remote cpu.  If this cannot be done, the
- * work will be queued to the local cpu.
- */
-extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
-
-/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
- * and compute the current cpu, passed in as 'this_cpu'.
- */
-extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
-				  int this_cpu, int softirq);
-
 /* Tasklets --- multithreaded analogue of BHs.
 
    Main feature differing them of generic softirqs: tasklet
diff -ur a/include/linux/iova.h b/include/linux/iova.h
--- a/include/linux/iova.h	2017-03-23 15:00:53.000000000 +0100
+++ b/include/linux/iova.h	2017-03-14 02:37:18.000000000 +0100
@@ -47,5 +47,7 @@
 void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
+struct iova *split_and_remove_iova(struct iova_domain *iovad,
+	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 
 #endif
diff -ur a/include/linux/jbd2.h b/include/linux/jbd2.h
--- a/include/linux/jbd2.h	2017-03-23 15:01:05.000000000 +0100
+++ b/include/linux/jbd2.h	2017-03-14 02:37:34.000000000 +0100
@@ -974,6 +974,7 @@
 #define JBD2_ABORT_ON_SYNCDATA_ERR	0x040	/* Abort the journal on file
 						 * data write error in ordered
 						 * mode */
+#define JBD2_REC_ERR	0x080	/* The errno in the sb has been recorded */
 
 /*
  * Function declarations for the journaling transaction and buffer
@@ -994,15 +995,16 @@
 int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
 int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
 			      unsigned long *block);
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
 void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
 
 /* Commit management */
 extern void jbd2_journal_commit_transaction(journal_t *);
 
 /* Checkpoint list management */
-int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
 int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 /*
@@ -1112,7 +1114,7 @@
 extern int	   jbd2_journal_wipe       (journal_t *, int);
 extern int	   jbd2_journal_skip_recovery	(journal_t *);
 extern void	   jbd2_journal_update_sb_errno(journal_t *);
-extern void	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
+extern int	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
 				unsigned long, int);
 extern void	   __jbd2_journal_abort_hard	(journal_t *);
 extern void	   jbd2_journal_abort      (journal_t *, int);
diff -ur a/include/linux/kernel.h b/include/linux/kernel.h
--- a/include/linux/kernel.h	2017-03-23 15:01:29.000000000 +0100
+++ b/include/linux/kernel.h	2017-03-14 02:38:07.000000000 +0100
@@ -422,7 +422,7 @@
 
 #define do_trace_printk(fmt, args...)					\
 do {									\
-	static const char *trace_printk_fmt				\
+	static const char *trace_printk_fmt __used			\
 		__attribute__((section("__trace_printk_fmt"))) =	\
 		__builtin_constant_p(fmt) ? fmt : NULL;			\
 									\
@@ -444,7 +444,7 @@
 extern int __trace_puts(unsigned long ip, const char *str, int size);
 
 #define trace_puts(str) ({						\
-	static const char *trace_printk_fmt				\
+	static const char *trace_printk_fmt __used			\
 		__attribute__((section("__trace_printk_fmt"))) =	\
 		__builtin_constant_p(str) ? str : NULL;			\
 									\
@@ -459,7 +459,7 @@
 #define ftrace_vprintk(fmt, vargs)					\
 do {									\
 	if (__builtin_constant_p(fmt)) {				\
-		static const char *trace_printk_fmt			\
+		static const char *trace_printk_fmt __used		\
 		  __attribute__((section("__trace_printk_fmt"))) =	\
 			__builtin_constant_p(fmt) ? fmt : NULL;		\
 									\
diff -ur a/include/linux/libata.h b/include/linux/libata.h
--- a/include/linux/libata.h	2017-03-23 15:00:49.000000000 +0100
+++ b/include/linux/libata.h	2017-03-14 02:37:12.000000000 +0100
@@ -191,6 +191,7 @@
 	ATA_LFLAG_SW_ACTIVITY	= (1 << 7),  
 	ATA_LFLAG_NO_LPM	= (1 << 8),  
 	ATA_LFLAG_RST_ONCE	= (1 << 9),  
+	ATA_LFLAG_CHANGED	= (1 << 10),  
 
 	ATA_FLAG_SLAVE_POSS	= (1 << 0),  
 					     
@@ -274,6 +275,8 @@
 
 	ATA_TMOUT_PMP_SRST_WAIT	= 5000,
 
+	ATA_TMOUT_SPURIOUS_PHY	= 10000,
+
 	BUS_UNKNOWN		= 0,
 	BUS_DMA			= 1,
 	BUS_IDLE		= 2,
@@ -384,6 +387,7 @@
 #ifdef MY_DEF_HERE
 	ATA_HORKAGE_NOWCACHE	= (1 << 22),	 
 #endif  
+	ATA_HORKAGE_NOTRIM = (1 << 24),		 
 
 	ATA_DMA_MASK_ATA	= (1 << 0),	 
 	ATA_DMA_MASK_ATAPI	= (1 << 1),	 
@@ -434,6 +438,9 @@
 #ifdef MY_DEF_HERE
 	SYNO_STATUS_IS_MV9235		= 1 << 3,
 #endif  
+#ifdef MY_DEF_HERE
+	SYNO_STATUS_IS_SIL			= 1 << 4,
+#endif  
 	 
 #ifdef MY_DEF_HERE
 	SYNO_STATUS_IS_SIL3x26		= 1 << 0,
@@ -660,7 +667,7 @@
 	union {
 		u16		id[ATA_ID_WORDS];  
 		u32		gscr[SATA_PMP_GSCR_DWORDS];  
-	};
+	} ____cacheline_aligned;
 
 	u8			devslp_timing[ATA_LOG_DEVSLP_SIZE];
 
@@ -732,6 +739,8 @@
 
 	struct ata_device	device[ATA_MAX_DEVICES];
 
+	unsigned long		last_lpm_change;  
+
 #if defined(MY_DEF_HERE) || \
 	defined(MY_DEF_HERE) || \
 	defined(MY_DEF_HERE) || \
@@ -965,9 +974,6 @@
 #ifdef MY_DEF_HERE
 extern struct device_attribute dev_attr_syno_wcache;
 #endif  
-#ifdef CONFIG_SYNO_SATA_DISK_SERIAL
-extern struct device_attribute dev_attr_syno_disk_serial;
-#endif  
 #ifdef MY_DEF_HERE
 extern struct device_attribute dev_attr_syno_diskname_trans;
 #endif  
@@ -1108,6 +1114,7 @@
 extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
 extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
 extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
 
 extern int ata_cable_40wire(struct ata_port *ap);
 extern int ata_cable_80wire(struct ata_port *ap);
@@ -1247,7 +1254,8 @@
 #ifdef MY_DEF_HERE
 #define IS_SYNO_PMP_GSCR_9705_CONFIG(tf) (SATA_PMP_GSCR_9705_GPO_EN == ((tf->hob_feature << 8) | tf->feature) || \
 										  SATA_PMP_GSCR_9705_GPI_POLARITY == ((tf->hob_feature << 8) | tf->feature) || \
-										  SATA_PMP_GSCR_9705_SATA_BLINK_RATE == ((tf->hob_feature << 8) | tf->feature))
+										  SATA_PMP_GSCR_9705_SATA_0_TO_3_BLINK_RATE == ((tf->hob_feature << 8) | tf->feature) || \
+										  SATA_PMP_GSCR_9705_SATA_4_BLINK_RATE == ((tf->hob_feature << 8) | tf->feature))
 
 #define IS_SYNO_PMP_WRITE_CMD(tf) (ATA_CMD_PMP_WRITE == tf->command && \
 								  (SATA_PMP_GSCR_3XXX_GPIO == tf->feature || \
diff -ur a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
--- a/include/linux/lockd/lockd.h	2017-03-23 15:02:19.000000000 +0100
+++ b/include/linux/lockd/lockd.h	2017-03-14 02:39:21.000000000 +0100
@@ -236,7 +236,8 @@
 struct nlm_host * nlm_get_host(struct nlm_host *);
 void		  nlm_shutdown_hosts(void);
 void		  nlm_shutdown_hosts_net(struct net *net);
-void		  nlm_host_rebooted(const struct nlm_reboot *);
+void		  nlm_host_rebooted(const struct net *net,
+					const struct nlm_reboot *);
 
 /*
  * Host monitoring
@@ -244,11 +245,13 @@
 int		  nsm_monitor(const struct nlm_host *host);
 void		  nsm_unmonitor(const struct nlm_host *host);
 
-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+struct nsm_handle *nsm_get_handle(const struct net *net,
+					const struct sockaddr *sap,
 					const size_t salen,
 					const char *hostname,
 					const size_t hostname_len);
-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
+struct nsm_handle *nsm_reboot_lookup(const struct net *net,
+					const struct nlm_reboot *info);
 void		  nsm_release(struct nsm_handle *nsm);
 
 /*
diff -ur a/include/linux/mbus.h b/include/linux/mbus.h
--- a/include/linux/mbus.h	2017-03-23 15:00:51.000000000 +0100
+++ b/include/linux/mbus.h	2017-03-14 02:37:16.000000000 +0100
@@ -67,6 +67,6 @@
 int mvebu_mbus_del_window(phys_addr_t base, size_t size);
 int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
 		    size_t mbus_size, phys_addr_t sdram_phys_base,
-		    size_t sdram_size);
+		    size_t sdram_size, int is_coherent);
 
 #endif /* __LINUX_MBUS_H */
diff -ur a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h	2017-03-23 15:01:49.000000000 +0100
+++ b/include/linux/mm.h	2017-03-14 02:38:34.000000000 +0100
@@ -1387,6 +1387,7 @@
 #define FOLL_HWPOISON	0x100	 
 #define FOLL_NUMA	0x200	 
 #define FOLL_MIGRATION	0x400	 
+#define FOLL_COW	0x4000	 
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
diff -ur a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
--- a/include/linux/mod_devicetable.h	2017-03-23 15:00:53.000000000 +0100
+++ b/include/linux/mod_devicetable.h	2017-03-14 02:37:18.000000000 +0100
@@ -394,6 +394,7 @@
 /*
  * For Hyper-V devices we use the device guid as the id.
  */
+#define vmbus_device_id hv_vmbus_device_id
 struct hv_vmbus_device_id {
 	__u8 guid[16];
 	kernel_ulong_t driver_data;	/* Data private to the driver */
@@ -544,6 +545,11 @@
  * See documentation of "x86_match_cpu" for details.
  */
 
+/*
+ * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
+ * Although gcc seems to ignore this error, clang fails without this define.
+ */
+#define x86cpu_device_id x86_cpu_id
 struct x86_cpu_id {
 	__u16 vendor;
 	__u16 family;
@@ -571,6 +577,7 @@
 #define MEI_CL_MODULE_PREFIX "mei:"
 #define MEI_CL_NAME_SIZE 32
 
+#define mei_device_id mei_cl_device_id
 struct mei_cl_device_id {
 	char name[MEI_CL_NAME_SIZE];
 	kernel_ulong_t driver_info;
diff -ur a/include/linux/module.h b/include/linux/module.h
--- a/include/linux/module.h	2017-03-23 15:01:40.000000000 +0100
+++ b/include/linux/module.h	2017-03-14 02:38:22.000000000 +0100
@@ -97,6 +97,11 @@
 /* For userspace: you can also call me... */
 #define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
 
+/* Soft module dependencies. See man modprobe.d for details.
+ * Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz")
+ */
+#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
+
 /*
  * The following license idents are currently accepted as indicating free
  * software modules
@@ -220,6 +225,12 @@
 	unsigned long decs;
 } __attribute((aligned(2 * sizeof(unsigned long))));
 
+struct mod_kallsyms {
+	Elf_Sym *symtab;
+	unsigned int num_symtab;
+	char *strtab;
+};
+
 struct module
 {
 	enum module_state state;
@@ -308,14 +319,9 @@
 #endif
 
 #ifdef CONFIG_KALLSYMS
-	/*
-	 * We keep the symbol and string tables for kallsyms.
-	 * The core_* fields below are temporary, loader-only (they
-	 * could really be discarded after module init).
-	 */
-	Elf_Sym *symtab, *core_symtab;
-	unsigned int num_symtab, core_num_syms;
-	char *strtab, *core_strtab;
+	/* Protected by RCU and/or module_mutex: use rcu_dereference() */
+	struct mod_kallsyms *kallsyms;
+	struct mod_kallsyms core_kallsyms;
 
 	/* Section attributes */
 	struct module_sect_attrs *sect_attrs;
diff -ur a/include/linux/namei.h b/include/linux/namei.h
--- a/include/linux/namei.h	2017-03-23 15:00:44.000000000 +0100
+++ b/include/linux/namei.h	2017-03-14 02:37:06.000000000 +0100
@@ -72,8 +72,7 @@
 extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
 extern void done_path_create(struct path *, struct dentry *);
 extern struct dentry *kern_path_locked(const char *, struct path *);
-extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
-			   const char *, unsigned int, struct path *);
+extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
 
 #ifdef MY_ABC_HERE
 extern struct dentry *lookup_hash(struct nameidata *nd);
diff -ur a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
--- a/include/linux/nfs_fs.h	2017-03-23 15:01:18.000000000 +0100
+++ b/include/linux/nfs_fs.h	2017-03-14 02:37:53.000000000 +0100
@@ -577,9 +577,7 @@
 
 static inline loff_t nfs_size_to_loff_t(__u64 size)
 {
-	if (size > (__u64) OFFSET_MAX - 1)
-		return OFFSET_MAX - 1;
-	return (loff_t) size;
+	return min_t(u64, size, OFFSET_MAX);
 }
 
 static inline ino_t
diff -ur a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
--- a/include/linux/nfs_xdr.h	2017-03-23 15:01:15.000000000 +0100
+++ b/include/linux/nfs_xdr.h	2017-03-14 02:37:48.000000000 +0100
@@ -1093,7 +1093,7 @@
 	struct pnfs_commit_bucket *buckets;
 };
 
-#define NFS4_EXCHANGE_ID_LEN	(48)
+#define NFS4_EXCHANGE_ID_LEN	(127)
 struct nfs41_exchange_id_args {
 	struct nfs_client		*client;
 	nfs4_verifier			*verifier;
diff -ur a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
--- a/include/linux/nilfs2_fs.h	2017-03-23 15:01:00.000000000 +0100
+++ b/include/linux/nilfs2_fs.h	2017-03-14 02:37:28.000000000 +0100
@@ -456,7 +456,7 @@
 /* level */
 #define NILFS_BTREE_LEVEL_DATA          0
 #define NILFS_BTREE_LEVEL_NODE_MIN      (NILFS_BTREE_LEVEL_DATA + 1)
-#define NILFS_BTREE_LEVEL_MAX           14
+#define NILFS_BTREE_LEVEL_MAX           14	/* Max level (exclusive) */
 
 /**
  * struct nilfs_palloc_group_desc - block group descriptor
diff -ur a/include/linux/pci.h b/include/linux/pci.h
--- a/include/linux/pci.h	2017-03-23 15:01:14.000000000 +0100
+++ b/include/linux/pci.h	2017-03-14 02:37:45.000000000 +0100
@@ -336,6 +336,7 @@
 	unsigned int	__aer_firmware_first:1;
 	unsigned int	broken_intx_masking:1;
 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
+	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
 	pci_dev_flags_t dev_flags;
 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
 
diff -ur a/include/linux/poison.h b/include/linux/poison.h
--- a/include/linux/poison.h	2017-03-23 15:01:20.000000000 +0100
+++ b/include/linux/poison.h	2017-03-14 02:37:56.000000000 +0100
@@ -19,8 +19,8 @@
  * under normal circumstances, used to verify that nobody uses
  * non-initialized list entries.
  */
-#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
 
 /********** include/linux/timer.h **********/
 /*
diff -ur a/include/linux/ptrace.h b/include/linux/ptrace.h
--- a/include/linux/ptrace.h	2017-03-23 15:00:59.000000000 +0100
+++ b/include/linux/ptrace.h	2017-03-14 02:37:26.000000000 +0100
@@ -56,7 +56,29 @@
 #define PTRACE_MODE_READ	0x01
 #define PTRACE_MODE_ATTACH	0x02
 #define PTRACE_MODE_NOAUDIT	0x04
-/* Returns true on success, false on denial. */
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
+ * be set in @mode to specify whether the access was requested through
+ * a filesystem syscall (should use effective capabilities and fsuid
+ * of the caller) or through an explicit syscall such as
+ * process_vm_writev or ptrace (and should use the real credentials).
+ */
 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
 
 static inline int ptrace_reparented(struct task_struct *child)
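To illustrate the new credential flags, here is a hedged sketch of the two call patterns the kernel-doc above distinguishes; the function is hypothetical and not part of the patch.

/* Illustrative sketch -- not part of the patch; names are invented. */
static int example_check_access(struct task_struct *task, bool via_procfs)
{
	/* A /proc-style read acts on behalf of a filesystem access and is
	 * judged by fsuid/effective caps (FSCREDS); an explicit
	 * introspection syscall uses the real credentials (REALCREDS). */
	unsigned int mode = via_procfs ? PTRACE_MODE_READ_FSCREDS
				       : PTRACE_MODE_READ_REALCREDS;

	return ptrace_may_access(task, mode) ? 0 : -EACCES;
}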
diff -ur a/include/linux/radix-tree.h b/include/linux/radix-tree.h
--- a/include/linux/radix-tree.h	2017-03-23 15:01:37.000000000 +0100
+++ b/include/linux/radix-tree.h	2017-03-14 02:38:19.000000000 +0100
@@ -322,12 +322,28 @@
 			     struct radix_tree_iter *iter, unsigned flags);
 
 /**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter:	iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true.  If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+	iter->next_index = iter->index;
+	return NULL;
+}
+
+/**
  * radix_tree_chunk_size - get current chunk size
  *
  * @iter:	pointer to radix tree iterator
  * Returns:	current chunk size
  */
-static __always_inline unsigned
+static __always_inline long
 radix_tree_chunk_size(struct radix_tree_iter *iter)
 {
 	return iter->next_index - iter->index;
@@ -361,9 +377,9 @@
 			return slot + offset + 1;
 		}
 	} else {
-		unsigned size = radix_tree_chunk_size(iter) - 1;
+		long size = radix_tree_chunk_size(iter);
 
-		while (size--) {
+		while (--size > 0) {
 			slot++;
 			iter->index++;
 			if (likely(*slot))
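The comment on radix_tree_iter_retry() above describes a race seen by RCU-only lookups; a minimal sketch of the intended idiom follows. It is illustrative only, not part of the patch, and the handling of each entry is a placeholder.

/* Illustrative sketch -- not part of the patch. */
static void example_scan(struct radix_tree_root *root)
{
	void **slot;
	struct radix_tree_iter iter;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			/* Raced with a concurrent modification: redo chunk. */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* ... use entry ... */
	}
	rcu_read_unlock();
}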
diff -ur a/include/linux/raid/libmd-report.h b/include/linux/raid/libmd-report.h
--- a/include/linux/raid/libmd-report.h	2017-03-23 15:02:08.000000000 +0100
+++ b/include/linux/raid/libmd-report.h	2017-03-14 02:39:09.000000000 +0100
@@ -17,4 +17,10 @@
 								struct block_device *bdev, const char *szFuncName);
 
 #endif  
+
+#ifdef MY_ABC_HERE
+extern int (*funcSYNOSendAutoRemapLVMEvent)(const char*, unsigned long long, unsigned int);
+extern int (*funcSYNOSendAutoRemapRaidEvent)(unsigned int, unsigned long long, unsigned int);
+#endif  
+
 #endif  
diff -ur a/include/linux/random.h b/include/linux/random.h
--- a/include/linux/random.h	2017-03-23 15:01:17.000000000 +0100
+++ b/include/linux/random.h	2017-03-14 02:37:53.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 /*
  * include/linux/random.h
  *
@@ -32,6 +35,25 @@
 u32 prandom_u32_state(struct rnd_state *);
 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
 
+#ifdef MY_ABC_HERE
+/**
+ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+ * @ep_ro: right open interval endpoint
+ *
+ * Returns a pseudo-random number that is in interval [0, ep_ro). Note
+ * that the result depends on PRNG being well distributed in [0, ~0U]
+ * u32 space. Here we use maximally equidistributed combined Tausworthe
+ * generator, that is, prandom_u32(). This is useful when requesting a
+ * random index of an array containing ep_ro elements, for example.
+ *
+ * Returns: pseudo-random number in interval [0, ep_ro)
+ */
+static inline u32 prandom_u32_max(u32 ep_ro)
+{
+	return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+}
+#endif /* MY_ABC_HERE */
+
 /*
  * Handle minimum values for seeds
  */
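A small usage sketch for prandom_u32_max() as documented above (picking a random index into an array); the names are invented and this is not part of the patch.

/* Illustrative sketch -- not part of the patch; names are invented. */
static const char *example_pick_name(const char * const *names, u32 count)
{
	/* Scales prandom_u32() into [0, count) with a multiply and a
	 * shift, as described in the kernel-doc above. */
	return names[prandom_u32_max(count)];
}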
diff -ur a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h	2017-03-23 15:01:30.000000000 +0100
+++ b/include/linux/sched.h	2017-03-14 02:38:08.000000000 +0100
@@ -498,6 +498,7 @@
 	unsigned long mq_bytes;	 
 #endif
 	unsigned long locked_shm;  
+	unsigned long unix_inflight;	 
 
 #ifdef CONFIG_KEYS
 	struct key *uid_keyring;	 
@@ -1812,15 +1813,15 @@
 	return p->exit_signal >= 0;
 }
 
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline bool has_group_leader_pid(struct task_struct *p)
 {
-	return p->pid == p->tgid;
+	return task_pid(p) == p->signal->leader_pid;
 }
 
 static inline
-int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
 {
-	return p1->tgid == p2->tgid;
+	return p1->signal == p2->signal;
 }
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
diff -ur a/include/linux/security.h b/include/linux/security.h
--- a/include/linux/security.h	2017-03-23 15:01:48.000000000 +0100
+++ b/include/linux/security.h	2017-03-14 02:38:32.000000000 +0100
@@ -2393,7 +2393,7 @@
 				      unsigned long arg4,
 				      unsigned long arg5)
 {
-	return cap_task_prctl(option, arg2, arg3, arg3, arg5);
+	return cap_task_prctl(option, arg2, arg3, arg4, arg5);
 }
 
 static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff -ur a/include/linux/signal.h b/include/linux/signal.h
--- a/include/linux/signal.h	2017-03-23 15:01:17.000000000 +0100
+++ b/include/linux/signal.h	2017-03-14 02:37:52.000000000 +0100
@@ -247,7 +247,6 @@
 extern void set_current_blocked(sigset_t *);
 extern void __set_current_blocked(const sigset_t *);
 extern int show_unhandled_signals;
-extern int sigsuspend(sigset_t *);
 
 struct sigaction {
 #ifndef __ARCH_HAS_IRIX_SIGACTION
diff -ur a/include/linux/skbuff.h b/include/linux/skbuff.h
--- a/include/linux/skbuff.h	2017-03-23 15:01:27.000000000 +0100
+++ b/include/linux/skbuff.h	2017-03-14 02:38:05.000000000 +0100
@@ -1521,6 +1521,9 @@
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
+	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		 skb_checksum_start_offset(skb) < 0)
+		skb->ip_summed = CHECKSUM_NONE;
 }
 
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
diff -ur a/include/linux/smp.h b/include/linux/smp.h
--- a/include/linux/smp.h	2017-03-23 15:01:15.000000000 +0100
+++ b/include/linux/smp.h	2017-03-14 02:37:49.000000000 +0100
@@ -11,7 +11,6 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
-#include <linux/irqflags.h>
 
 extern void cpu_idle(void);
 
@@ -29,6 +28,29 @@
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 			     int wait);
 
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+		void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		smp_call_func_t func, void *info, bool wait,
+		gfp_t gfp_flags);
+
+int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -74,9 +96,6 @@
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait);
 
-void __smp_call_function_single(int cpuid, struct call_single_data *data,
-				int wait);
-
 int smp_call_function_any(const struct cpumask *mask,
 			  smp_call_func_t func, void *info, int wait);
 
@@ -95,27 +114,6 @@
 #endif
 
 /*
- * Call a function on all processors
- */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
-
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
-
-/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -140,46 +138,6 @@
 #define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
 
-static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	func(info);
-	local_irq_restore(flags);
-	return 0;
-}
-
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
-	do {						\
-		if (cpumask_test_cpu(0, (mask))) {	\
-			local_irq_disable();		\
-			(func)(info);			\
-			local_irq_enable();		\
-		}					\
-	} while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
-	do {							\
-		void *__info = (info);				\
-		preempt_disable();				\
-		if ((cond_func)(0, __info)) {			\
-			local_irq_disable();			\
-			(func)(__info);				\
-			local_irq_enable();			\
-		}						\
-		preempt_enable();				\
-	} while (0)
-
 static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()			do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
@@ -228,6 +186,9 @@
  */
 extern void arch_disable_smp_support(void);
 
+extern void arch_enable_nonboot_cpus_begin(void);
+extern void arch_enable_nonboot_cpus_end(void);
+
 void smp_setup_processor_id(void);
 
 #endif /* __LINUX_SMP_H */
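A hedged sketch of the on_each_cpu_cond() interface declared above (run a function only on CPUs selected by a predicate); the functions are invented and this is not part of the patch.

/* Illustrative sketch -- not part of the patch; names are invented. */
static bool example_cpu_selected(int cpu, void *info)
{
	return cpumask_test_cpu(cpu, (const struct cpumask *)info);
}

static void example_per_cpu_work(void *info)
{
	/* Runs on each selected CPU with interrupts disabled. */
}

static void example_run_on(const struct cpumask *mask)
{
	on_each_cpu_cond(example_cpu_selected, example_per_cpu_work,
			 (void *)mask, true, GFP_KERNEL);
}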
diff -ur a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
--- a/include/linux/sunrpc/svcsock.h	2017-03-23 15:02:32.000000000 +0100
+++ b/include/linux/sunrpc/svcsock.h	2017-03-14 02:39:42.000000000 +0100
@@ -22,7 +22,7 @@
 
 	/* We keep the old state_change and data_ready CB's here */
 	void			(*sk_ostate)(struct sock *);
-	void			(*sk_odata)(struct sock *, int bytes);
+	void			(*sk_odata)(struct sock *);
 	void			(*sk_owspace)(struct sock *);
 
 	/* private TCP part */
Only in b/include/linux: synobios.h.
diff -ur a/include/linux/synolib.h b/include/linux/synolib.h
--- a/include/linux/synolib.h	2017-03-23 15:01:29.000000000 +0100
+++ b/include/linux/synolib.h	2017-03-14 02:38:07.000000000 +0100
@@ -43,9 +43,21 @@
 #ifdef MY_DEF_HERE
 #define SATA_REMAP_MAX  32
 #define SATA_REMAP_NOT_INIT 0xff
-extern unsigned char g_syno_sata_remap[SATA_REMAP_MAX];
+extern int g_syno_sata_remap[SATA_REMAP_MAX];
 extern int g_use_sata_remap;
 int syno_get_remap_idx(int origin_idx);
 #endif  
 
+#ifdef MY_DEF_HERE
+#include <linux/pci.h>
+
+#define PCI_ADDR_LEN_MAX 9
+#define PCI_ADDR_NUM_MAX CONFIG_SYNO_MAX_PCI_SLOT
+ 
+#define M2SATA_START_IDX 300
+extern char gszPciAddrList[PCI_ADDR_NUM_MAX][PCI_ADDR_LEN_MAX];
+extern int gPciAddrNum;
+extern int gPciDeferStart;
+void syno_insert_sata_index_remap(unsigned int idx, unsigned int num, unsigned int id_start);
+#endif  
 #endif  
diff -ur a/include/linux/synosata.h b/include/linux/synosata.h
--- a/include/linux/synosata.h	2017-03-23 15:00:55.000000000 +0100
+++ b/include/linux/synosata.h	2017-03-14 02:37:20.000000000 +0100
@@ -335,6 +335,8 @@
 			ret = 4;
 		} else if (IS_SYNOLOGY_RX1214(syno_uniq) || IS_SYNOLOGY_RX1217(syno_uniq) || IS_SYNOLOGY_DX1215(syno_uniq)) {
 			ret = 3;
+		} else if (IS_SYNOLOGY_DX517(syno_uniq)) {
+			ret = 5;
 		} else {
 			printk("%s not synology device", __FUNCTION__);
 			ret = 5;
diff -ur a/include/linux/syscalls.h b/include/linux/syscalls.h
--- a/include/linux/syscalls.h	2017-03-23 15:01:01.000000000 +0100
+++ b/include/linux/syscalls.h	2017-03-14 02:37:29.000000000 +0100
@@ -488,7 +488,7 @@
 asmlinkage long sys_lchown(const char __user *filename,
 				uid_t user, gid_t group);
 asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 asmlinkage long sys_chown16(const char __user *filename,
 				old_uid_t user, old_gid_t group);
 asmlinkage long sys_lchown16(const char __user *filename,
diff -ur a/include/linux/tracepoint.h b/include/linux/tracepoint.h
--- a/include/linux/tracepoint.h	2017-03-23 15:01:04.000000000 +0100
+++ b/include/linux/tracepoint.h	2017-03-14 02:37:33.000000000 +0100
@@ -14,8 +14,11 @@
  * See the file COPYING for more details.
  */
 
+#include <linux/smp.h>
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
 #include <linux/rcupdate.h>
 #include <linux/static_key.h>
 
@@ -259,15 +262,19 @@
  * "void *__data, proto" as the callback prototype.
  */
 #define DECLARE_TRACE_NOARGS(name)					\
-		__DECLARE_TRACE(name, void, , 1, void *__data, __data)
+	__DECLARE_TRACE(name, void, ,					\
+			cpu_online(raw_smp_processor_id()),		\
+			void *__data, __data)
 
 #define DECLARE_TRACE(name, proto, args)				\
-		__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1,	\
-				PARAMS(void *__data, proto),		\
-				PARAMS(__data, args))
+	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
+			cpu_online(raw_smp_processor_id()),		\
+			PARAMS(void *__data, proto),			\
+			PARAMS(__data, args))
 
 #define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
-	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
+			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
 			PARAMS(void *__data, proto),			\
 			PARAMS(__data, args))
 
diff -ur a/include/linux/types.h b/include/linux/types.h
--- a/include/linux/types.h	2017-03-23 15:01:43.000000000 +0100
+++ b/include/linux/types.h	2017-03-14 02:38:27.000000000 +0100
@@ -35,7 +35,7 @@
 
 typedef unsigned long		uintptr_t;
 
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 /* This is defined by include/asm-{arch}/posix_types.h */
 typedef __kernel_old_uid_t	old_uid_t;
 typedef __kernel_old_gid_t	old_gid_t;
diff -ur a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
--- a/include/linux/ucs2_string.h	2017-03-23 15:01:30.000000000 +0100
+++ b/include/linux/ucs2_string.h	2017-03-14 02:38:09.000000000 +0100
@@ -11,4 +11,8 @@
 unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
 int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
 
+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+			   unsigned long maxlength);
+
 #endif /* _LINUX_UCS2_STRING_H_ */
diff -ur a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
--- a/include/linux/usb/quirks.h	2017-03-23 15:02:10.000000000 +0100
+++ b/include/linux/usb/quirks.h	2017-03-14 02:39:12.000000000 +0100
@@ -33,4 +33,7 @@
 /* device generates spurious wakeup, ignore remote wakeup capability */
 #define USB_QUIRK_IGNORE_REMOTE_WAKEUP	0x00000200
 
+/* device can't handle Link Power Management */
+#define USB_QUIRK_NO_LPM			BIT(10)
+
 #endif /* __LINUX_USB_QUIRKS_H */
diff -ur a/include/net/af_unix.h b/include/net/af_unix.h
--- a/include/net/af_unix.h	2017-03-23 15:03:34.000000000 +0100
+++ b/include/net/af_unix.h	2017-03-14 02:41:15.000000000 +0100
@@ -6,8 +6,8 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-extern void unix_inflight(struct file *fp);
-extern void unix_notinflight(struct file *fp);
+extern void unix_inflight(struct user_struct *user, struct file *fp);
+extern void unix_notinflight(struct user_struct *user, struct file *fp);
 extern void unix_gc(void);
 extern void wait_for_unix_gc(void);
 extern struct sock *unix_get_socket(struct file *filp);
@@ -62,8 +62,13 @@
 #define UNIX_GC_CANDIDATE	0
 #define UNIX_GC_MAYBE_CYCLE	1
 	struct socket_wq	peer_wq;
+	wait_queue_t		peer_wake;
 };
-#define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+static inline struct unix_sock *unix_sk(struct sock *sk)
+{
+	return (struct unix_sock *)sk;
+}
 
 #define peer_wait peer_wq.wait
 
diff -ur a/include/net/inet_common.h b/include/net/inet_common.h
--- a/include/net/inet_common.h	2017-03-23 15:03:40.000000000 +0100
+++ b/include/net/inet_common.h	2017-03-14 02:41:24.000000000 +0100
@@ -40,7 +40,8 @@
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
-	sk_release_kernel(sk);
+	if (sk)
+		sk_release_kernel(sk);
 }
 
 #endif
diff -ur a/include/net/ip6_fib.h b/include/net/ip6_fib.h
--- a/include/net/ip6_fib.h	2017-03-23 15:03:37.000000000 +0100
+++ b/include/net/ip6_fib.h	2017-03-14 02:41:19.000000000 +0100
@@ -300,7 +300,7 @@
 						struct nl_info *info);
 
 extern void			fib6_run_gc(unsigned long expires,
-					    struct net *net);
+					    struct net *net, bool force);
 
 extern void			fib6_gc_cleanup(void);
 
diff -ur a/include/net/ip.h b/include/net/ip.h
--- a/include/net/ip.h	2017-03-23 15:03:38.000000000 +0100
+++ b/include/net/ip.h	2017-03-14 02:41:21.000000000 +0100
@@ -141,6 +141,7 @@
 }
 
 /* datagram.c */
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 extern int		ip4_datagram_connect(struct sock *sk, 
 					     struct sockaddr *uaddr, int addr_len);
 
diff -ur a/include/net/ipv6.h b/include/net/ipv6.h
--- a/include/net/ipv6.h	2017-03-23 15:03:40.000000000 +0100
+++ b/include/net/ipv6.h	2017-03-14 02:41:23.000000000 +0100
@@ -432,6 +432,7 @@
 	u32 user;
 	const struct in6_addr *src;
 	const struct in6_addr *dst;
+	int iif;
 	u8 ecn;
 };
 
diff -ur a/include/net/netns/sctp.h b/include/net/netns/sctp.h
--- a/include/net/netns/sctp.h	2017-03-23 15:03:47.000000000 +0100
+++ b/include/net/netns/sctp.h	2017-03-14 02:41:34.000000000 +0100
@@ -31,6 +31,7 @@
 	struct list_head addr_waitq;
 	struct timer_list addr_wq_timer;
 	struct list_head auto_asconf_splist;
+	/* Lock that protects both addr_waitq and auto_asconf_splist */
 	spinlock_t addr_wq_lock;
 
 	/* Lock that protects the local_addr_list writers */
diff -ur a/include/net/scm.h b/include/net/scm.h
--- a/include/net/scm.h	2017-03-23 15:03:36.000000000 +0100
+++ b/include/net/scm.h	2017-03-14 02:41:18.000000000 +0100
@@ -21,6 +21,7 @@
 struct scm_fp_list {
 	short			count;
 	short			max;
+	struct user_struct	*user;
 	struct file		*fp[SCM_MAX_FD];
 };
 
diff -ur a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
--- a/include/net/sctp/sctp.h	2017-03-23 15:03:47.000000000 +0100
+++ b/include/net/sctp/sctp.h	2017-03-14 02:41:34.000000000 +0100
@@ -125,7 +125,7 @@
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
-void sctp_data_ready(struct sock *sk, int len);
+void sctp_data_ready(struct sock *sk);
 unsigned int sctp_poll(struct file *file, struct socket *sock,
 		poll_table *wait);
 void sctp_sock_rfree(struct sk_buff *skb);
diff -ur a/include/net/sctp/structs.h b/include/net/sctp/structs.h
--- a/include/net/sctp/structs.h	2017-03-23 15:03:49.000000000 +0100
+++ b/include/net/sctp/structs.h	2017-03-14 02:41:35.000000000 +0100
@@ -226,6 +226,10 @@
 	atomic_t pd_mode;
 	/* Receive to here while partial delivery is in effect. */
 	struct sk_buff_head pd_lobby;
+
+	/* These must be the last fields, as they will be skipped on copies,
+	 * like on accept and peeloff operations
+	 */
 	struct list_head auto_asconf_list;
 	int do_auto_asconf;
 };
diff -ur a/include/net/sock.h b/include/net/sock.h
--- a/include/net/sock.h	2017-03-23 15:03:40.000000000 +0100
+++ b/include/net/sock.h	2017-03-14 02:41:23.000000000 +0100
@@ -352,6 +352,7 @@
 				sk_no_check  : 2,
 				sk_userlocks : 4,
 				sk_protocol  : 8,
+#define SK_PROTOCOL_MAX U8_MAX
 				sk_type      : 16;
 	kmemcheck_bitfield_end(flags);
 	int			sk_wmem_queued;
@@ -395,7 +396,7 @@
 	u32			sk_classid;
 	struct cg_proto		*sk_cgrp;
 	void			(*sk_state_change)(struct sock *sk);
-	void			(*sk_data_ready)(struct sock *sk, int bytes);
+	void			(*sk_data_ready)(struct sock *sk);
 	void			(*sk_write_space)(struct sock *sk);
 	void			(*sk_error_report)(struct sock *sk);
 	int			(*sk_backlog_rcv)(struct sock *sk,
@@ -672,6 +673,8 @@
 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
 };
 
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
 {
 	nsk->sk_flags = osk->sk_flags;
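
The sk_data_ready change above drops the byte-count argument: call sites are reduced to sk->sk_data_ready(sk), and callbacks inspect the receive queue themselves. A minimal sketch of a callback under the new signature (example_data_ready is an invented name, not from the tree):

static void example_data_ready(struct sock *sk)
{
	/* no length hint any more: look at the queue directly */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		wake_up_interruptible(sk_sleep(sk));
}
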
diff -ur a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
--- a/include/scsi/scsi_device.h	2017-03-23 15:04:11.000000000 +0100
+++ b/include/scsi/scsi_device.h	2017-03-14 02:42:01.000000000 +0100
@@ -77,6 +77,9 @@
 #ifdef MY_ABC_HERE
 	char syno_disk_name[BDEVNAME_SIZE];		 
 #endif  
+#ifdef MY_ABC_HERE
+	unsigned char auto_remap;
+#endif  
 
 	unsigned int manufacturer;	 
 	unsigned sector_size;	 
@@ -156,6 +159,7 @@
 	unsigned long   idle;    
 	unsigned char	spindown;
 	unsigned char   nospindown;
+	unsigned char   do_standby_syncing;
 #endif  
 
 	struct device		sdev_gendev,
@@ -187,7 +191,7 @@
 
 	unsigned long		sdev_data[0];
 #ifdef MY_ABC_HERE
-#define SERIAL_NUM_SIZE	20	 
+#define SERIAL_NUM_SIZE	36	 
 	char syno_disk_serial[SERIAL_NUM_SIZE + 1];
 #endif  
 } __attribute__((aligned(sizeof(unsigned long))));
diff -ur a/include/scsi/scsi.h b/include/scsi/scsi.h
--- a/include/scsi/scsi.h	2017-03-23 15:04:12.000000000 +0100
+++ b/include/scsi/scsi.h	2017-03-14 02:42:03.000000000 +0100
@@ -460,4 +460,10 @@
 #endif  
 #endif  
 
+#ifdef MY_ABC_HERE
+#define SYNO_DESCRIPTOR_RESERVED_INDEX 3  
+#define SYNO_NCQ_FAKE_UNC 0x01  
+#define SYNO_SCSI_SECT_SIZE 512
+#endif  
+
 #endif  
diff -ur a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
--- a/include/scsi/scsi_host.h	2017-03-23 15:04:12.000000000 +0100
+++ b/include/scsi/scsi_host.h	2017-03-14 02:42:02.000000000 +0100
@@ -294,6 +294,11 @@
 
 	struct device *dma_dev;
 
+#ifdef MY_DEF_HERE
+	 
+	int isCacheSSD;
+#endif
+
 	unsigned long hostdata[0]   
 		__attribute__ ((aligned (sizeof(unsigned long))));
 };
diff -ur a/include/sound/emu10k1.h b/include/sound/emu10k1.h
--- a/include/sound/emu10k1.h	2017-03-23 15:04:02.000000000 +0100
+++ b/include/sound/emu10k1.h	2017-03-14 02:41:51.000000000 +0100
@@ -40,7 +40,8 @@
 
 #define EMUPAGESIZE     4096
 #define MAXREQVOICES    8
-#define MAXPAGES        8192
+#define MAXPAGES0       4096	/* 32 bit mode */
+#define MAXPAGES1       8192	/* 31 bit mode */
 #define RESERVED        0
 #define NUM_MIDI        16
 #define NUM_G           64              /* use all channels */
@@ -49,8 +50,7 @@
 
 /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
 #define EMU10K1_DMA_MASK	0x7fffffffUL	/* 31bit */
-#define AUDIGY_DMA_MASK		0x7fffffffUL	/* 31bit FIXME - 32 should work? */
-						/* See ALSA bug #1276 - rlrevell */
+#define AUDIGY_DMA_MASK		0xffffffffUL	/* 32bit mode */
 
 #define TMEMSIZE        256*1024
 #define TMEMSIZEREG     4
@@ -463,8 +463,11 @@
 
 #define MAPB			0x0d		/* Cache map B						*/
 
-#define MAP_PTE_MASK		0xffffe000	/* The 19 MSBs of the PTE indexed by the PTI		*/
-#define MAP_PTI_MASK		0x00001fff	/* The 13 bit index to one of the 8192 PTE dwords      	*/
+#define MAP_PTE_MASK0		0xfffff000	/* The 20 MSBs of the PTE indexed by the PTI		*/
+#define MAP_PTI_MASK0		0x00000fff	/* The 12 bit index to one of the 4096 PTE dwords      	*/
+
+#define MAP_PTE_MASK1		0xffffe000	/* The 19 MSBs of the PTE indexed by the PTI		*/
+#define MAP_PTI_MASK1		0x00001fff	/* The 13 bit index to one of the 8192 PTE dwords      	*/
 
 /* 0x0e, 0x0f: Not used */
 
@@ -1697,6 +1700,7 @@
 	unsigned short model;			/* subsystem id */
 	unsigned int card_type;			/* EMU10K1_CARD_* */
 	unsigned int ecard_ctrl;		/* ecard control bits */
+	unsigned int address_mode;		/* address mode */
 	unsigned long dma_mask;			/* PCI DMA mask */
 	unsigned int delay_pcm_irq;		/* in samples */
 	int max_cache_pages;			/* max memory size / PAGE_SIZE */
diff -ur a/include/sound/wm8904.h b/include/sound/wm8904.h
--- a/include/sound/wm8904.h	2017-03-23 15:03:56.000000000 +0100
+++ b/include/sound/wm8904.h	2017-03-14 02:41:45.000000000 +0100
@@ -118,7 +118,7 @@
 #define WM8904_MIC_REGS  2
 #define WM8904_GPIO_REGS 4
 #define WM8904_DRC_REGS  4
-#define WM8904_EQ_REGS   25
+#define WM8904_EQ_REGS   24
 
 /**
  * DRC configurations are specified with a label and a set of register
Only in b/include/trace/events: iommu.h.
diff -ur a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
--- a/include/uapi/linux/btrfs.h	2017-03-23 15:03:02.000000000 +0100
+++ b/include/uapi/linux/btrfs.h	2017-03-14 02:40:27.000000000 +0100
@@ -272,6 +272,9 @@
 
 #define BTRFS_DEFRAG_RANGE_COMPRESS 1
 #define BTRFS_DEFRAG_RANGE_START_IO 2
+#ifdef MY_ABC_HERE
+#define BTRFS_DEFRAG_RANGE_SYNO_DEFRAG (1ULL << 2)
+#endif  
 
 #define BTRFS_SAME_DATA_DIFFERS	1
  
@@ -430,7 +433,16 @@
 	__u8 ruuid[BTRFS_UUID_SIZE];
 };
 #endif  
- 
+
+#ifdef MY_ABC_HERE
+struct btrfs_ioctl_snapshot_size_query_args {
+	__u64 snap_count;
+	__s64 fd;
+	__u64 __user *snap_id;
+	__u64 calc_size;
+};
+#endif  
+
 #define BTRFS_SEND_FLAG_NO_FILE_DATA		0x1
 
 #define BTRFS_SEND_FLAG_OMIT_STREAM_HEADER	0x2
@@ -461,9 +473,23 @@
 	__u64 flags;			 
 #ifdef MY_ABC_HERE
 	__u64 total_data_size;    
-	int g_verbose;
+	__u32 g_verbose;
 #endif
+#ifdef MY_ABC_HERE
+	__u64 skip_cmd_count;
+#endif  
+
+#if defined(MY_ABC_HERE) && defined(MY_ABC_HERE)
+	__u32 reserved_u32;
+	__u64 reserved[1];		 
+#elif defined(MY_ABC_HERE)
+	__u32 reserved_u32;
+	__u64 reserved[2];		 
+#elif defined(MY_ABC_HERE)
+	__u64 reserved[3];		 
+#else
 	__u64 reserved[4];		 
+#endif  
 };
 
 #ifdef MY_ABC_HERE
@@ -617,6 +643,11 @@
 				   struct btrfs_ioctl_feature_flags[3])
 
 #ifdef MY_ABC_HERE
+#define BTRFS_IOC_SNAPSHOT_SIZE_QUERY _IOWR(BTRFS_IOCTL_MAGIC, 247, \
+				   struct btrfs_ioctl_snapshot_size_query_args)
+#endif  
+
+#ifdef MY_ABC_HERE
 #define BTRFS_IOC_COMPR_CTL _IOR(BTRFS_IOCTL_MAGIC, 248, \
 									struct btrfs_ioctl_compr_ctl_args)
 #endif  
diff -ur a/include/uapi/linux/const.h b/include/uapi/linux/const.h
--- a/include/uapi/linux/const.h	2017-03-23 15:03:06.000000000 +0100
+++ b/include/uapi/linux/const.h	2017-03-14 02:40:34.000000000 +0100
@@ -21,4 +21,7 @@
 #define _AT(T,X)	((T)(X))
 #endif
 
+#define _BITUL(x)	(_AC(1,UL) << (x))
+#define _BITULL(x)	(_AC(1,ULL) << (x))
+
 #endif /* !(_LINUX_CONST_H) */
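
The new _BITUL()/_BITULL() helpers give UAPI headers a standard way to define single-bit constants with an explicit unsigned type, avoiding signed-shift surprises at bits 31/63. Illustrative use (the flag names below are invented):

#include <linux/const.h>

#define EXAMPLE_FLAG_LOW	_BITUL(3)	/* (1UL  << 3), i.e. 0x8 */
#define EXAMPLE_FLAG_HIGH	_BITULL(33)	/* (1ULL << 33), well-defined even on 32-bit */
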
diff -ur a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
--- a/include/uapi/linux/stat.h	2017-03-23 15:02:55.000000000 +0100
+++ b/include/uapi/linux/stat.h	2017-03-14 02:40:18.000000000 +0100
@@ -87,12 +87,13 @@
 #define S2_SYNO_ACL_SUPPORT  				(1<<9)	 
 #define ALL_SYNO_ACL_ARCHIVE	(S2_SMB_READONLY|S2_SYNO_ACL_INHERIT|S2_SYNO_ACL_IS_OWNER_GROUP|S2_SYNO_ACL_EXIST|S2_SYNO_ACL_SUPPORT)
 #endif  
+#define S2_SMB_SPARSE						(1<<10)	 
 #define ALL_IARCHIVE (S2_IARCHIVE|S3_IARCHIVE)	 
 #define ALL_SYNO_ARCHIVE (S2_IARCHIVE|S2_SMB_ARCHIVE|S3_IARCHIVE)	 
-#ifdef MY_ABC_HERE 
-#define ALL_ARCHIVE_BIT (S2_IARCHIVE|S2_SMB_ARCHIVE|S2_SMB_HIDDEN|S2_SMB_SYSTEM|S3_IARCHIVE|ALL_SYNO_ACL_ARCHIVE)
+#ifdef MY_ABC_HERE
+#define ALL_ARCHIVE_BIT (S2_IARCHIVE|S2_SMB_ARCHIVE|S2_SMB_HIDDEN|S2_SMB_SYSTEM|S3_IARCHIVE|ALL_SYNO_ACL_ARCHIVE|S2_SMB_SPARSE)
 #else
-#define ALL_ARCHIVE_BIT (S2_IARCHIVE|S2_SMB_ARCHIVE|S2_SMB_HIDDEN|S2_SMB_SYSTEM|S3_IARCHIVE)
+#define ALL_ARCHIVE_BIT (S2_IARCHIVE|S2_SMB_ARCHIVE|S2_SMB_HIDDEN|S2_SMB_SYSTEM|S3_IARCHIVE|S2_SMB_SPARSE)
 #endif  
 
 #endif  
diff -ur a/include/uapi/linux/syno.h b/include/uapi/linux/syno.h
--- a/include/uapi/linux/syno.h	2017-03-23 15:02:55.000000000 +0100
+++ b/include/uapi/linux/syno.h	2017-03-14 02:40:17.000000000 +0100
@@ -26,7 +26,8 @@
 #ifdef MY_ABC_HERE
 #if defined (F_CLEAR_ARCHIVE) || defined (F_SETSMB_ARCHIVE) || defined (F_SETSMB_HIDDEN) || \
 	defined (F_SETSMB_SYSTEM) || defined (F_CLRSMB_ARCHIVE) || defined (F_CLRSMB_HIDDEN) || \
-	defined (F_CLRSMB_SYSTEM) || defined (F_CLEAR_S3_ARCHIVE)
+	defined (F_CLRSMB_SYSTEM) || defined (F_CLEAR_S3_ARCHIVE) || \
+	defined (F_SETSMB_SPARSE) || defined (F_CLRSMB_SPARSE)
 #error "Samba archive bit redefine."
 #endif
 
@@ -50,7 +51,7 @@
 #define F_CLRSMB_SYSTEM             (SYNO_FCNTL_BASE + 6)
 #define F_CLEAR_S3_ARCHIVE          (SYNO_FCNTL_BASE + 7)
 
-#ifdef MY_ABC_HERE 
+#ifdef MY_ABC_HERE
 #define F_CLRSMB_READONLY           (SYNO_FCNTL_BASE + 8)
 #define F_SETSMB_READONLY           (SYNO_FCNTL_BASE + 9)
 #define F_CLRACL_INHERIT            (SYNO_FCNTL_BASE + 10)
@@ -61,9 +62,14 @@
 #define F_SETACL_SUPPORT            (SYNO_FCNTL_BASE + 15)
 #define F_CLRACL_OWNER_IS_GROUP     (SYNO_FCNTL_BASE + 16)
 #define F_SETACL_OWNER_IS_GROUP     (SYNO_FCNTL_BASE + 17)
-#define SYNO_FCNTL_LAST             F_SETACL_OWNER_IS_GROUP
+#define F_SETSMB_SPARSE				(SYNO_FCNTL_BASE + 18)
+#define F_CLRSMB_SPARSE				(SYNO_FCNTL_BASE + 19)
+#define SYNO_FCNTL_LAST             F_CLRSMB_SPARSE
 #else
-#define SYNO_FCNTL_LAST             F_CLEAR_S3_ARCHIVE
+#define F_SETSMB_SPARSE				(SYNO_FCNTL_BASE + 8)
+#define F_CLRSMB_SPARSE				(SYNO_FCNTL_BASE + 9)
+
+#define SYNO_FCNTL_LAST             F_CLRSMB_SPARSE
 #endif  
 
 #endif  
diff -ur a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
--- a/include/uapi/linux/usbdevice_fs.h	2017-03-23 15:03:04.000000000 +0100
+++ b/include/uapi/linux/usbdevice_fs.h	2017-03-14 02:40:31.000000000 +0100
@@ -125,11 +125,12 @@
 	char port [127];	/* e.g. port 3 connects to device 27 */
 };
 
-/* Device capability flags */
+/* System and bus capability flags */
 #define USBDEVFS_CAP_ZERO_PACKET		0x01
 #define USBDEVFS_CAP_BULK_CONTINUATION		0x02
 #define USBDEVFS_CAP_NO_PACKET_SIZE_LIM		0x04
 #define USBDEVFS_CAP_BULK_SCATTER_GATHER	0x08
+#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT	0x10
 
 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */
 
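
User space can probe for the new USBDEVFS_CAP_REAP_AFTER_DISCONNECT bit through the USBDEVFS_GET_CAPABILITIES ioctl from the same header; a rough sketch (fd is an already-open usbfs device node, error handling omitted):

__u32 caps = 0;
int keep_reaping = 0;

if (ioctl(fd, USBDEVFS_GET_CAPABILITIES, &caps) == 0 &&
    (caps & USBDEVFS_CAP_REAP_AFTER_DISCONNECT)) {
	/* completed URBs remain reapable after the device is unplugged */
	keep_reaping = 1;
}
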
diff -ur a/include/uapi/scsi/scsi.h b/include/uapi/scsi/scsi.h
--- a/include/uapi/scsi/scsi.h	2017-03-23 15:04:12.000000000 +0100
+++ b/include/uapi/scsi/scsi.h	2017-03-14 02:42:03.000000000 +0100
@@ -460,4 +460,10 @@
 #endif  
 #endif  
 
+#ifdef MY_ABC_HERE
+#define SYNO_DESCRIPTOR_RESERVED_INDEX 3  
+#define SYNO_NCQ_FAKE_UNC 0x01  
+#define SYNO_SCSI_SECT_SIZE 512
+#endif  
+
 #endif  
diff -ur a/include/xen/interface/sched.h b/include/xen/interface/sched.h
--- a/include/xen/interface/sched.h	2017-03-23 15:02:39.000000000 +0100
+++ b/include/xen/interface/sched.h	2017-03-14 02:39:53.000000000 +0100
@@ -107,5 +107,13 @@
 #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+/*
+ * Domain asked to perform 'soft reset' for it. The expected behavior is to
+ * reset internal Xen state for the domain returning it to the point where it
+ * was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This will allow the domain to start over and set up all Xen specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
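
A guest requests the new reason the same way as the existing shutdown codes, via SCHEDOP_shutdown; roughly, using the usual Linux hypercall wrapper (a sketch, not part of this patch):

struct sched_shutdown r = { .reason = SHUTDOWN_soft_reset };

if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
	pr_warn("soft reset not supported by this hypervisor\n");
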
diff -ur a/init/main.c b/init/main.c
--- a/init/main.c	2017-03-23 15:00:22.000000000 +0100
+++ b/init/main.c	2017-03-14 02:36:46.000000000 +0100
@@ -1,6 +1,3 @@
-#ifndef MY_ABC_HERE
-#define MY_ABC_HERE
-#endif
  
 #define DEBUG		 
 
@@ -301,7 +298,7 @@
 	int pid;
 
 	rcu_scheduler_starting();
-#ifdef MY_DEF_HERE
+#ifdef CONFIG_SYNO_FIX_SMPBOOT_RACE
 	smpboot_thread_init();
 #endif  
 	 
diff -ur a/ipc/mqueue.c b/ipc/mqueue.c
--- a/ipc/mqueue.c	2017-03-23 14:16:14.000000000 +0100
+++ b/ipc/mqueue.c	2017-03-14 01:54:24.000000000 +0100
@@ -143,7 +143,6 @@
 		if (!leaf)
 			return -ENOMEM;
 		INIT_LIST_HEAD(&leaf->msg_list);
-		info->qsize += sizeof(*leaf);
 	}
 	leaf->priority = msg->m_type;
 	rb_link_node(&leaf->rb_node, parent, p);
@@ -188,7 +187,6 @@
 			     "lazy leaf delete!\n");
 		rb_erase(&leaf->rb_node, &info->msg_tree);
 		if (info->node_cache) {
-			info->qsize -= sizeof(*leaf);
 			kfree(leaf);
 		} else {
 			info->node_cache = leaf;
@@ -201,7 +199,6 @@
 		if (list_empty(&leaf->msg_list)) {
 			rb_erase(&leaf->rb_node, &info->msg_tree);
 			if (info->node_cache) {
-				info->qsize -= sizeof(*leaf);
 				kfree(leaf);
 			} else {
 				info->node_cache = leaf;
@@ -1025,7 +1022,6 @@
 		/* Save our speculative allocation into the cache */
 		INIT_LIST_HEAD(&new_leaf->msg_list);
 		info->node_cache = new_leaf;
-		info->qsize += sizeof(*new_leaf);
 		new_leaf = NULL;
 	} else {
 		kfree(new_leaf);
@@ -1132,7 +1128,6 @@
 		/* Save our speculative allocation into the cache */
 		INIT_LIST_HEAD(&new_leaf->msg_list);
 		info->node_cache = new_leaf;
-		info->qsize += sizeof(*new_leaf);
 	} else {
 		kfree(new_leaf);
 	}
diff -ur a/ipc/msg.c b/ipc/msg.c
--- a/ipc/msg.c	2017-03-23 14:16:14.000000000 +0100
+++ b/ipc/msg.c	2017-03-14 01:54:23.000000000 +0100
@@ -202,13 +202,6 @@
 		return retval;
 	}
 
-	/* ipc_addid() locks msq upon success. */
-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-	if (id < 0) {
-		ipc_rcu_putref(msq, msg_rcu_free);
-		return id;
-	}
-
 	msq->q_stime = msq->q_rtime = 0;
 	msq->q_ctime = get_seconds();
 	msq->q_cbytes = msq->q_qnum = 0;
@@ -218,6 +211,13 @@
 	INIT_LIST_HEAD(&msq->q_receivers);
 	INIT_LIST_HEAD(&msq->q_senders);
 
+	/* ipc_addid() locks msq upon success. */
+	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+	if (id < 0) {
+		ipc_rcu_putref(msq, msg_rcu_free);
+		return id;
+	}
+
 	ipc_unlock_object(&msq->q_perm);
 	rcu_read_unlock();
 
diff -ur a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c	2017-03-23 14:16:15.000000000 +0100
+++ b/ipc/sem.c	2017-03-14 01:54:24.000000000 +0100
@@ -252,6 +252,16 @@
 }
 
 /*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
+/*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
  * New simple ops cannot start, because simple ops first check
@@ -274,6 +284,7 @@
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -325,8 +336,13 @@
 
 		/* Then check that the global lock is free */
 		if (!spin_is_locked(&sma->sem_perm.lock)) {
-			/* spin_is_locked() is not a memory barrier */
-			smp_mb();
+			/*
+			 * We need a memory barrier with acquire semantics,
+			 * otherwise we can race with another thread that does:
+			 *	complex_count++;
+			 *	spin_unlock(sem_perm.lock);
+			 */
+			ipc_smp_acquire__after_spin_is_unlocked();
 
 			/* Now repeat the test of complex_count:
 			 * It can't change anymore until we drop sem->lock.
@@ -2043,17 +2059,28 @@
 		rcu_read_lock();
 		un = list_entry_rcu(ulp->list_proc.next,
 				    struct sem_undo, list_proc);
-		if (&un->list_proc == &ulp->list_proc)
-			semid = -1;
-		 else
-			semid = un->semid;
+		if (&un->list_proc == &ulp->list_proc) {
+			/*
+			 * We must wait for freeary() before freeing this ulp,
+			 * in case we raced with last sem_undo. There is a small
+			 * possibility where we exit while freeary() didn't
+			 * finish unlocking sem_undo_list.
+			 */
+			spin_unlock_wait(&ulp->lock);
+			rcu_read_unlock();
+			break;
+		}
+		spin_lock(&ulp->lock);
+		semid = un->semid;
+		spin_unlock(&ulp->lock);
 
+		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (semid == -1) {
 			rcu_read_unlock();
-			break;
+			continue;
 		}
 
-		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
 		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (IS_ERR(sma)) {
 			rcu_read_unlock();
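
The new ipc_smp_acquire__after_spin_is_unlocked() pairs with the spin_unlock() on the complex-op side; without acquire semantics the lock-free fast path could observe sem_perm.lock as free and still read a stale complex_count. Schematically (an illustration of the comment above, not code from the patch):

	CPU 0 (complex op)			CPU 1 (simple-op fast path)
	complex_count++;
	spin_unlock(&sma->sem_perm.lock);
						spin_lock(&sem->lock);
						if (!spin_is_locked(&sma->sem_perm.lock)) {
							ipc_smp_acquire__after_spin_is_unlocked();
							/* complex_count re-check is now ordered */
						}
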
diff -ur a/ipc/shm.c b/ipc/shm.c
--- a/ipc/shm.c	2017-03-23 14:16:13.000000000 +0100
+++ b/ipc/shm.c	2017-03-14 01:54:23.000000000 +0100
@@ -542,12 +542,6 @@
 	if (IS_ERR(file))
 		goto no_file;
 
-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-	if (id < 0) {
-		error = id;
-		goto no_id;
-	}
-
 	shp->shm_cprid = task_tgid_vnr(current);
 	shp->shm_lprid = 0;
 	shp->shm_atim = shp->shm_dtim = 0;
@@ -557,6 +551,12 @@
 	shp->shm_file = file;
 	shp->shm_creator = current;
 
+	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+	if (id < 0) {
+		error = id;
+		goto no_id;
+	}
+
 	/*
 	 * shmid gets reported as "inode#" in /proc/pid/maps.
 	 * proc-ps tools use this. Changing this will break them.
diff -ur a/ipc/util.c b/ipc/util.c
--- a/ipc/util.c	2017-03-23 14:16:14.000000000 +0100
+++ b/ipc/util.c	2017-03-14 01:54:23.000000000 +0100
@@ -292,6 +292,10 @@
 	rcu_read_lock();
 	spin_lock(&new->lock);
 
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;
+
 	id = idr_alloc(&ids->ipcs_idr, new,
 		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
 		       GFP_NOWAIT);
@@ -304,10 +308,6 @@
 
 	ids->in_use++;
 
-	current_euid_egid(&euid, &egid);
-	new->cuid = new->uid = euid;
-	new->gid = new->cgid = egid;
-
 	if (next_id < 0) {
 		new->seq = ids->seq++;
 		if (ids->seq > ids->seq_max)
diff -ur a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c	2017-03-23 15:00:06.000000000 +0100
+++ b/kernel/cpu.c	2017-03-14 02:36:32.000000000 +0100
@@ -1,6 +1,3 @@
-#ifndef MY_ABC_HERE
-#define MY_ABC_HERE
-#endif
  
 #include <linux/proc_fs.h>
 #include <linux/smp.h>
@@ -247,7 +244,7 @@
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		 
-#ifdef MY_DEF_HERE
+#ifdef CONFIG_SYNO_FIX_SMPBOOT_RACE
 		 
 #else  
 		smpboot_unpark_threads(cpu);
@@ -293,7 +290,7 @@
 EXPORT_SYMBOL(cpu_down);
 #endif  
 
-#ifdef MY_DEF_HERE
+#ifdef CONFIG_SYNO_FIX_SMPBOOT_RACE
  
 static int smpboot_thread_call(struct notifier_block *nfb,
 			       unsigned long action, void *hcpu)
@@ -362,7 +359,7 @@
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
-#ifdef MY_DEF_HERE
+#ifdef CONFIG_SYNO_FIX_SMPBOOT_RACE
 	 
 #else  
 	 
diff -ur a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c	2017-03-23 15:00:23.000000000 +0100
+++ b/kernel/events/core.c	2017-03-14 02:36:47.000000000 +0100
@@ -2950,7 +2950,7 @@
 
 	/* Reuse ptrace permission checks for now. */
 	err = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
 		goto errout;
 
 	return task;
@@ -4005,12 +4005,20 @@
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -5165,7 +5173,7 @@
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
@@ -5643,6 +5651,10 @@
 {
 	void *record = data->raw->data;
 
+	/* only top level events have filters set */
+	if (event->parent)
+		event = event->parent;
+
 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
 		return 1;
 	return 0;
diff -ur a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c	2017-03-23 14:59:58.000000000 +0100
+++ b/kernel/fork.c	2017-03-14 02:36:26.000000000 +0100
@@ -1577,8 +1577,15 @@
 		return -EINVAL;
 	 
 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
-		 
-		if (atomic_read(&current->mm->mm_users) > 1)
+		if (!thread_group_empty(current))
+			return -EINVAL;
+	}
+	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
+		if (atomic_read(&current->sighand->count) > 1)
+			return -EINVAL;
+	}
+	if (unshare_flags & CLONE_VM) {
+		if (!current_is_single_threaded())
 			return -EINVAL;
 	}
 
@@ -1632,12 +1639,12 @@
 	if (unshare_flags & CLONE_NEWPID)
 		unshare_flags |= CLONE_THREAD;
 	 
-	if (unshare_flags & CLONE_THREAD)
-		unshare_flags |= CLONE_VM;
-	 
 	if (unshare_flags & CLONE_VM)
 		unshare_flags |= CLONE_SIGHAND;
 	 
+	if (unshare_flags & CLONE_SIGHAND)
+		unshare_flags |= CLONE_THREAD;
+	 
 	if (unshare_flags & CLONE_NEWNS)
 		unshare_flags |= CLONE_FS;
 
diff -ur a/kernel/futex.c b/kernel/futex.c
--- a/kernel/futex.c	2017-03-23 15:00:10.000000000 +0100
+++ b/kernel/futex.c	2017-03-14 02:36:36.000000000 +0100
@@ -2491,6 +2491,11 @@
 		if (q.pi_state && (q.pi_state->owner != current)) {
 			spin_lock(q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
+			/*
+			 * Drop the reference to the pi state which
+			 * the requeue_pi() code acquired for us.
+			 */
+			free_pi_state(q.pi_state);
 			spin_unlock(q.lock_ptr);
 		}
 	} else {
@@ -2617,7 +2622,7 @@
 	}
 
 	ret = -EPERM;
-	if (!ptrace_may_access(p, PTRACE_MODE_READ))
+	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
 		goto err_unlock;
 
 	head = p->robust_list;
diff -ur a/kernel/futex_compat.c b/kernel/futex_compat.c
--- a/kernel/futex_compat.c	2017-03-23 14:59:56.000000000 +0100
+++ b/kernel/futex_compat.c	2017-03-14 02:36:25.000000000 +0100
@@ -154,7 +154,7 @@
 	}
 
 	ret = -EPERM;
-	if (!ptrace_may_access(p, PTRACE_MODE_READ))
+	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
 		goto err_unlock;
 
 	head = p->compat_robust_list;
diff -ur a/kernel/irq/manage.c b/kernel/irq/manage.c
--- a/kernel/irq/manage.c	2017-03-23 15:00:16.000000000 +0100
+++ b/kernel/irq/manage.c	2017-03-14 02:36:42.000000000 +0100
@@ -1228,6 +1228,7 @@
 	if (!desc)
 		return NULL;
 
+	chip_bus_lock(desc);
 	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	/*
@@ -1241,7 +1242,7 @@
 		if (!action) {
 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
 			raw_spin_unlock_irqrestore(&desc->lock, flags);
-
+			chip_bus_sync_unlock(desc);
 			return NULL;
 		}
 
@@ -1264,6 +1265,7 @@
 #endif
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	chip_bus_sync_unlock(desc);
 
 	unregister_handler_proc(irq, action);
 
@@ -1337,9 +1339,7 @@
 		desc->affinity_notify = NULL;
 #endif
 
-	chip_bus_lock(desc);
 	kfree(__free_irq(irq, dev_id));
-	chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(free_irq);
 
diff -ur a/kernel/irq/proc.c b/kernel/irq/proc.c
--- a/kernel/irq/proc.c	2017-03-23 15:00:14.000000000 +0100
+++ b/kernel/irq/proc.c	2017-03-14 02:36:40.000000000 +0100
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/mutex.h>
 
 #include "internals.h"
 
@@ -308,18 +309,29 @@
 
 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
+	static DEFINE_MUTEX(register_lock);
 	char name [MAX_NAMELEN];
 
-	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
 		return;
 
+	/*
+	 * irq directories are registered only when a handler is
+	 * added, not when the descriptor is created, so multiple
+	 * tasks might try to register at the same time.
+	 */
+	mutex_lock(&register_lock);
+
+	if (desc->dir)
+		goto out_unlock;
+
 	memset(name, 0, MAX_NAMELEN);
 	sprintf(name, "%d", irq);
 
 	/* create /proc/irq/1234 */
 	desc->dir = proc_mkdir(name, root_irq_dir);
 	if (!desc->dir)
-		return;
+		goto out_unlock;
 
 #ifdef CONFIG_SMP
 	/* create /proc/irq/<irq>/smp_affinity */
@@ -340,6 +352,9 @@
 
 	proc_create_data("spurious", 0444, desc->dir,
 			 &irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+	mutex_unlock(&register_lock);
 }
 
 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
diff -ur a/kernel/irq/resend.c b/kernel/irq/resend.c
--- a/kernel/irq/resend.c	2017-03-23 15:00:15.000000000 +0100
+++ b/kernel/irq/resend.c	2017-03-14 02:36:40.000000000 +0100
@@ -75,13 +75,21 @@
 		    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 			/*
-			 * If the interrupt has a parent irq and runs
-			 * in the thread context of the parent irq,
-			 * retrigger the parent.
+			 * If the interrupt is running in the thread
+			 * context of the parent irq we need to be
+			 * careful, because we cannot trigger it
+			 * directly.
 			 */
-			if (desc->parent_irq &&
-			    irq_settings_is_nested_thread(desc))
+			if (irq_settings_is_nested_thread(desc)) {
+				/*
+				 * If the parent_irq is valid, we
+				 * retrigger the parent, otherwise we
+				 * do nothing.
+				 */
+				if (!desc->parent_irq)
+					return;
 				irq = desc->parent_irq;
+			}
 			/* Set it pending and activate the softirq: */
 			set_bit(irq, irqs_resend);
 			tasklet_schedule(&resend_tasklet);
diff -ur a/kernel/kcmp.c b/kernel/kcmp.c
--- a/kernel/kcmp.c	2017-03-23 15:00:05.000000000 +0100
+++ b/kernel/kcmp.c	2017-03-14 02:36:32.000000000 +0100
@@ -122,8 +122,8 @@
 			&task2->signal->cred_guard_mutex);
 	if (ret)
 		goto err;
-	if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
-	    !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
+	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
 		ret = -EPERM;
 		goto err_unlock;
 	}
diff -ur a/kernel/Makefile b/kernel/Makefile
--- a/kernel/Makefile	2016-10-20 04:32:07.000000000 +0200
+++ b/kernel/Makefile	2017-02-14 17:25:11.000000000 +0100
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y     = fork.o exec_domain.o panic.o printk.o \
+obj-y     = fork.o exec_domain.o panic.o \
 	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
@@ -25,6 +25,7 @@
 
 obj-y += sched/
 obj-y += power/
+obj-y += printk/
 obj-y += cpu/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
diff -ur a/kernel/module.c b/kernel/module.c
--- a/kernel/module.c	2017-03-23 15:00:07.000000000 +0100
+++ b/kernel/module.c	2017-03-14 02:36:33.000000000 +0100
@@ -144,6 +144,9 @@
 	struct _ddebug *debug;
 	unsigned int num_debug;
 	bool sig_ok;
+#ifdef CONFIG_KALLSYMS
+	unsigned long mod_kallsyms_init_off;
+#endif
 	struct {
 		unsigned int sym, str, mod, vers, info, pcpu;
 	} index;
@@ -843,9 +846,11 @@
 	if (core_kernel_text(a))
 		return;
 
+	preempt_disable();
 	modaddr = __module_text_address(a);
 	BUG_ON(!modaddr);
 	module_put(modaddr);
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(symbol_put_addr);
 
@@ -2150,6 +2155,12 @@
 	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
 					 info->index.str) | INIT_OFFSET_MASK;
 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+
+	mod->init_size = ALIGN(mod->init_size,
+			       __alignof__(struct mod_kallsyms));
+	info->mod_kallsyms_init_off = mod->init_size;
+	mod->init_size += sizeof(struct mod_kallsyms);
+	mod->init_size = debug_align(mod->init_size);
 }
 
 static void add_kallsyms(struct module *mod, const struct load_info *info)
@@ -2160,27 +2171,30 @@
 	char *s;
 	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
 
-	mod->symtab = (void *)symsec->sh_addr;
-	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+	mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;
+
+	mod->kallsyms->symtab = (void *)symsec->sh_addr;
+	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
 	 
-	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
 
-	for (i = 0; i < mod->num_symtab; i++)
-		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+	for (i = 0; i < mod->kallsyms->num_symtab; i++)
+		mod->kallsyms->symtab[i].st_info
+			= elf_type(&mod->kallsyms->symtab[i], info);
 
-	mod->core_symtab = dst = mod->module_core + info->symoffs;
-	mod->core_strtab = s = mod->module_core + info->stroffs;
-	src = mod->symtab;
-	for (ndst = i = 0; i < mod->num_symtab; i++) {
+	mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
+	mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
+	src = mod->kallsyms->symtab;
+	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
 		if (i == 0 ||
 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
 			dst[ndst] = src[i];
-			dst[ndst++].st_name = s - mod->core_strtab;
-			s += strlcpy(s, &mod->strtab[src[i].st_name],
+			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
 				     KSYM_NAME_LEN) + 1;
 		}
 	}
-	mod->core_num_syms = ndst;
+	mod->core_kallsyms.num_symtab = ndst;
 }
 #else
 static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -2833,9 +2847,8 @@
 	module_put(mod);
 	trim_init_extable(mod);
 #ifdef CONFIG_KALLSYMS
-	mod->num_symtab = mod->core_num_syms;
-	mod->symtab = mod->core_symtab;
-	mod->strtab = mod->core_strtab;
+	 
+	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
 	unset_module_init_ro_nx(mod);
 	module_free(mod, mod->module_init);
@@ -3085,6 +3098,11 @@
 	       && (str[2] == '\0' || str[2] == '.');
 }
 
+static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+{
+	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
+}
+
 static const char *get_ksymbol(struct module *mod,
 			       unsigned long addr,
 			       unsigned long *size,
@@ -3092,36 +3110,37 @@
 {
 	unsigned int i, best = 0;
 	unsigned long nextval;
+	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
 	if (within_module_init(addr, mod))
 		nextval = (unsigned long)mod->module_init+mod->init_text_size;
 	else
 		nextval = (unsigned long)mod->module_core+mod->core_text_size;
 
-	for (i = 1; i < mod->num_symtab; i++) {
-		if (mod->symtab[i].st_shndx == SHN_UNDEF)
+	for (i = 1; i < kallsyms->num_symtab; i++) {
+		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+			continue;
+
+		if (*symname(kallsyms, i) == '\0'
+		    || is_arm_mapping_symbol(symname(kallsyms, i)))
 			continue;
 
-		if (mod->symtab[i].st_value <= addr
-		    && mod->symtab[i].st_value > mod->symtab[best].st_value
-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+		if (kallsyms->symtab[i].st_value <= addr
+		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
 			best = i;
-		if (mod->symtab[i].st_value > addr
-		    && mod->symtab[i].st_value < nextval
-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
-			nextval = mod->symtab[i].st_value;
+		if (kallsyms->symtab[i].st_value > addr
+		    && kallsyms->symtab[i].st_value < nextval)
+			nextval = kallsyms->symtab[i].st_value;
 	}
 
 	if (!best)
 		return NULL;
 
 	if (size)
-		*size = nextval - mod->symtab[best].st_value;
+		*size = nextval - kallsyms->symtab[best].st_value;
 	if (offset)
-		*offset = addr - mod->symtab[best].st_value;
-	return mod->strtab + mod->symtab[best].st_name;
+		*offset = addr - kallsyms->symtab[best].st_value;
+	return symname(kallsyms, best);
 }
 
 const char *module_address_lookup(unsigned long addr,
@@ -3215,19 +3234,21 @@
 
 	preempt_disable();
 	list_for_each_entry_rcu(mod, &modules, list) {
+		struct mod_kallsyms *kallsyms;
+
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-		if (symnum < mod->num_symtab) {
-			*value = mod->symtab[symnum].st_value;
-			*type = mod->symtab[symnum].st_info;
-			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
-				KSYM_NAME_LEN);
+		kallsyms = rcu_dereference_sched(mod->kallsyms);
+		if (symnum < kallsyms->num_symtab) {
+			*value = kallsyms->symtab[symnum].st_value;
+			*type = kallsyms->symtab[symnum].st_info;
+			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
 			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
 			*exported = is_exported(name, *value, mod);
 			preempt_enable();
 			return 0;
 		}
-		symnum -= mod->num_symtab;
+		symnum -= kallsyms->num_symtab;
 	}
 	preempt_enable();
 	return -ERANGE;
@@ -3236,11 +3257,12 @@
 static unsigned long mod_find_symname(struct module *mod, const char *name)
 {
 	unsigned int i;
+	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
-	for (i = 0; i < mod->num_symtab; i++)
-		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
-		    mod->symtab[i].st_info != 'U')
-			return mod->symtab[i].st_value;
+	for (i = 0; i < kallsyms->num_symtab; i++)
+		if (strcmp(name, symname(kallsyms, i)) == 0 &&
+		    kallsyms->symtab[i].st_info != 'U')
+			return kallsyms->symtab[i].st_value;
 	return 0;
 }
 
@@ -3277,11 +3299,14 @@
 	int ret;
 
 	list_for_each_entry(mod, &modules, list) {
+		 
+		struct mod_kallsyms *kallsyms = mod->kallsyms;
+
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-		for (i = 0; i < mod->num_symtab; i++) {
-			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
-				 mod, mod->symtab[i].st_value);
+		for (i = 0; i < kallsyms->num_symtab; i++) {
+			ret = fn(data, symname(kallsyms, i),
+				 mod, kallsyms->symtab[i].st_value);
 			if (ret != 0)
 				return ret;
 		}
diff -ur a/kernel/panic.c b/kernel/panic.c
--- a/kernel/panic.c	2017-03-23 15:00:00.000000000 +0100
+++ b/kernel/panic.c	2017-03-14 02:36:28.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/sysrq.h>
 #include <linux/init.h>
 #include <linux/nmi.h>
+#include <linux/console.h>
 
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
@@ -128,6 +129,17 @@
 
 	bust_spinlocks(0);
 
+	/*
+	 * We may have ended up stopping the CPU holding the lock (in
+	 * smp_send_stop()) while still having some valuable data in the console
+	 * buffer.  Try to acquire the lock then release it regardless of the
+	 * result.  The release will also print the buffers out.  Locks debug
+	 * should be disabled to avoid reporting bad unlock balance when
+	 * panic() is not being called from OOPS.
+	 */
+	debug_locks_off();
+	console_flush_on_panic();
+
 	if (!panic_blink)
 		panic_blink = no_blink;
 
Only in b/kernel: printk.
Only in a/kernel: printk.c.
diff -ur a/kernel/ptrace.c b/kernel/ptrace.c
--- a/kernel/ptrace.c	2017-03-23 14:59:59.000000000 +0100
+++ b/kernel/ptrace.c	2017-03-14 02:36:27.000000000 +0100
@@ -224,6 +224,14 @@
 static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
 	const struct cred *cred = current_cred(), *tcred;
+	int dumpable = 0;
+	kuid_t caller_uid;
+	kgid_t caller_gid;
+
+	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
+		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
+		return -EPERM;
+	}
 
 	/* May we inspect the given task?
 	 * This check is used both for attaching with ptrace
@@ -233,18 +241,33 @@
 	 * because setting up the necessary parent/child relationship
 	 * or halting the specified task is impossible.
 	 */
-	int dumpable = 0;
+
 	/* Don't let security modules deny introspection */
-	if (task == current)
+	if (same_thread_group(task, current))
 		return 0;
 	rcu_read_lock();
+	if (mode & PTRACE_MODE_FSCREDS) {
+		caller_uid = cred->fsuid;
+		caller_gid = cred->fsgid;
+	} else {
+		/*
+		 * Using the euid would make more sense here, but something
+		 * in userland might rely on the old behavior, and this
+		 * shouldn't be a security problem since
+		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
+		 * used a syscall that requests access to another process
+		 * (and not a filesystem syscall to procfs).
+		 */
+		caller_uid = cred->uid;
+		caller_gid = cred->gid;
+	}
 	tcred = __task_cred(task);
-	if (uid_eq(cred->uid, tcred->euid) &&
-	    uid_eq(cred->uid, tcred->suid) &&
-	    uid_eq(cred->uid, tcred->uid)  &&
-	    gid_eq(cred->gid, tcred->egid) &&
-	    gid_eq(cred->gid, tcred->sgid) &&
-	    gid_eq(cred->gid, tcred->gid))
+	if (uid_eq(caller_uid, tcred->euid) &&
+	    uid_eq(caller_uid, tcred->suid) &&
+	    uid_eq(caller_uid, tcred->uid)  &&
+	    gid_eq(caller_gid, tcred->egid) &&
+	    gid_eq(caller_gid, tcred->sgid) &&
+	    gid_eq(caller_gid, tcred->gid))
 		goto ok;
 	if (ptrace_has_cap(tcred->user_ns, mode))
 		goto ok;
@@ -311,7 +334,7 @@
 		goto out;
 
 	task_lock(task);
-	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
+	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
 	task_unlock(task);
 	if (retval)
 		goto unlock_creds;
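
With the REALCREDS/FSCREDS split above, every ptrace_may_access() caller now states which credentials the check should judge by: the syscall-style callers in this patch (futex, kcmp, perf) use the *_REALCREDS variants, while procfs-style accesses would pass the FSCREDS forms. A minimal sketch of the two flavours:

/* a syscall inspecting another task: judge by real credentials */
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
	return -EPERM;

/* a /proc read path would use the filesystem credentials instead */
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
	return -EACCES;
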
diff -ur a/kernel/resource.c b/kernel/resource.c
--- a/kernel/resource.c	2017-03-23 15:00:05.000000000 +0100
+++ b/kernel/resource.c	2017-03-14 02:36:32.000000000 +0100
@@ -959,9 +959,10 @@
 		if (!conflict)
 			break;
 		if (conflict != parent) {
-			parent = conflict;
-			if (!(conflict->flags & IORESOURCE_BUSY))
+			if (!(conflict->flags & IORESOURCE_BUSY)) {
+				parent = conflict;
 				continue;
+			}
 		}
 		if (conflict->flags & flags & IORESOURCE_MUXED) {
 			add_wait_queue(&muxed_resource_wait, &wait);
diff -ur a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c	2017-03-23 15:00:19.000000000 +0100
+++ b/kernel/sched/core.c	2017-03-14 02:36:44.000000000 +0100
@@ -1,6 +1,3 @@
-#ifndef MY_ABC_HERE
-#define MY_ABC_HERE
-#endif
  
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -1212,7 +1209,6 @@
 
 int wake_up_process(struct task_struct *p)
 {
-	WARN_ON(task_is_stopped_or_traced(p));
 	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -3759,6 +3755,7 @@
 
 	case CPU_UP_PREPARE:
 		rq->calc_load_update = calc_load_update;
+		account_reset_rq(rq);
 		break;
 
 	case CPU_ONLINE:
@@ -3806,10 +3803,8 @@
 				      unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-#ifdef MY_DEF_HERE
 	case CPU_ONLINE:
 		 
-#endif  
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
diff -ur a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h	2017-03-23 15:00:15.000000000 +0100
+++ b/kernel/sched/sched.h	2017-03-14 02:36:40.000000000 +0100
@@ -1374,3 +1374,16 @@
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+static inline void account_reset_rq(struct rq *rq)
+{
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	rq->prev_irq_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT
+	rq->prev_steal_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	rq->prev_steal_time_rq = 0;
+#endif
+}
diff -ur a/kernel/signal.c b/kernel/signal.c
--- a/kernel/signal.c	2017-03-23 15:00:12.000000000 +0100
+++ b/kernel/signal.c	2017-03-14 02:36:37.000000000 +0100
@@ -2767,7 +2767,8 @@
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitly for the right codes here.
 		 */
-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+		if (from->si_signo == SIGBUS &&
+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 		break;
@@ -3034,7 +3035,7 @@
 			int, sig,
 			struct compat_siginfo __user *, uinfo)
 {
-	siginfo_t info;
+	siginfo_t info = {};
 	int ret = copy_siginfo_from_user32(&info, uinfo);
 	if (unlikely(ret))
 		return ret;
@@ -3080,7 +3081,7 @@
 			int, sig,
 			struct compat_siginfo __user *, uinfo)
 {
-	siginfo_t info;
+	siginfo_t info = {};
 
 	if (copy_siginfo_from_user32(&info, uinfo))
 		return -EFAULT;
@@ -3549,7 +3550,7 @@
 
 #endif
 
-int sigsuspend(sigset_t *set)
+static int sigsuspend(sigset_t *set)
 {
 	current->saved_sigmask = current->blocked;
 	set_current_blocked(set);
diff -ur a/kernel/smp.c b/kernel/smp.c
--- a/kernel/smp.c	2017-03-23 15:00:08.000000000 +0100
+++ b/kernel/smp.c	2017-03-14 02:36:34.000000000 +0100
@@ -18,6 +18,7 @@
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 enum {
 	CSD_FLAG_LOCK		= 0x01,
+	CSD_FLAG_WAIT		= 0x02,
 };
 
 struct call_function_data {
@@ -121,7 +122,7 @@
 
 static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
+	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
@@ -136,13 +137,15 @@
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static
-void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 	unsigned long flags;
 	int ipi;
 
+	if (wait)
+		csd->flags |= CSD_FLAG_WAIT;
+
 	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&csd->list, &dst->list);
@@ -318,18 +321,18 @@
 /**
  * __smp_call_function_single(): Run a function on a specific CPU
  * @cpu: The CPU to run on.
- * @data: Pre-allocated and setup data structure
+ * @csd: Pre-allocated and setup data structure
  * @wait: If true, wait until function has completed on specified CPU.
  *
  * Like smp_call_function_single(), but allow caller to pass in a
  * pre-allocated data structure. Useful for embedding @data inside
  * other structures, for instance.
  */
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-				int wait)
+int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait)
 {
 	unsigned int this_cpu;
 	unsigned long flags;
+	int err = 0;
 
 	this_cpu = get_cpu();
 	/*
@@ -345,12 +348,16 @@
 		local_irq_save(flags);
 		csd->func(csd->info);
 		local_irq_restore(flags);
-	} else {
+	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
 		csd_lock(csd);
 		generic_exec_single(cpu, csd, wait);
+	} else {
+		err = -ENXIO;	/* CPU not online */
 	}
 	put_cpu();
+	return err;
 }
+EXPORT_SYMBOL_GPL(__smp_call_function_single);
 
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
@@ -585,8 +592,10 @@
  *
  * If @wait is true, then returns once @func has returned.
  *
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.  The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 			void *info, bool wait)
@@ -595,9 +604,10 @@
 
 	smp_call_function_many(mask, func, info, wait);
 	if (cpumask_test_cpu(cpu, mask)) {
-		local_irq_disable();
+		unsigned long flags;
+		local_irq_save(flags);
 		func(info);
-		local_irq_enable();
+		local_irq_restore(flags);
 	}
 	put_cpu();
 }
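
Because __smp_call_function_single() now reports failure instead of silently queueing work for an offline CPU, callers can check the return value; for example (csd is assumed to be a pre-initialised struct call_single_data with ->func and ->info set):

int err = __smp_call_function_single(cpu, &csd, 0);

if (err == -ENXIO)
	pr_warn("cpu %d is offline, cross call dropped\n", cpu);
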
diff -ur a/kernel/softirq.c b/kernel/softirq.c
--- a/kernel/softirq.c	2017-03-23 14:59:58.000000000 +0100
+++ b/kernel/softirq.c	2017-03-14 02:36:26.000000000 +0100
@@ -6,8 +6,6 @@
  *	Distribute under GPLv2.
  *
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
- *
- *	Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/export.h>
@@ -619,146 +617,17 @@
 }
 EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
 
-/*
- * Remote softirq bits
- */
-
-DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-EXPORT_PER_CPU_SYMBOL(softirq_work_list);
-
-static void __local_trigger(struct call_single_data *cp, int softirq)
-{
-	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
-
-	list_add_tail(&cp->list, head);
-
-	/* Trigger the softirq only if the list was previously empty.  */
-	if (head->next == &cp->list)
-		raise_softirq_irqoff(softirq);
-}
-
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static void remote_softirq_receive(void *data)
-{
-	struct call_single_data *cp = data;
-	unsigned long flags;
-	int softirq;
-
-	softirq = *(int *)cp->info;
-	local_irq_save(flags);
-	__local_trigger(cp, softirq);
-	local_irq_restore(flags);
-}
-
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	if (cpu_online(cpu)) {
-		cp->func = remote_softirq_receive;
-		cp->info = &softirq;
-		cp->flags = 0;
-
-		__smp_call_function_single(cpu, cp, 0);
-		return 0;
-	}
-	return 1;
-}
-#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	return 1;
-}
-#endif
-
-/**
- * __send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @this_cpu: the currently executing cpu
- * @softirq: the softirq for the work
- *
- * Attempt to schedule softirq work on a remote cpu.  If this cannot be
- * done, the work is instead queued up on the local cpu.
- *
- * Interrupts must be disabled.
- */
-void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
-{
-	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
-		__local_trigger(cp, softirq);
-}
-EXPORT_SYMBOL(__send_remote_softirq);
-
-/**
- * send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @softirq: the softirq for the work
- *
- * Like __send_remote_softirq except that disabling interrupts and
- * computing the current cpu is done for the caller.
- */
-void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	unsigned long flags;
-	int this_cpu;
-
-	local_irq_save(flags);
-	this_cpu = smp_processor_id();
-	__send_remote_softirq(cp, cpu, this_cpu, softirq);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(send_remote_softirq);
-
-static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
-					       unsigned long action, void *hcpu)
-{
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-		int i;
-
-		local_irq_disable();
-		for (i = 0; i < NR_SOFTIRQS; i++) {
-			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
-			struct list_head *local_head;
-
-			if (list_empty(head))
-				continue;
-
-			local_head = &__get_cpu_var(softirq_work_list[i]);
-			list_splice_init(head, local_head);
-			raise_softirq_irqoff(i);
-		}
-		local_irq_enable();
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
-	.notifier_call	= remote_softirq_cpu_notify,
-};
-
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		int i;
-
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
-		for (i = 0; i < NR_SOFTIRQS; i++)
-			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
-	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
-
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
diff -ur a/kernel/syno_bootargs.c b/kernel/syno_bootargs.c
--- a/kernel/syno_bootargs.c	2017-03-23 14:59:55.000000000 +0100
+++ b/kernel/syno_bootargs.c	2017-03-14 02:36:25.000000000 +0100
@@ -37,11 +37,16 @@
 #endif  
 
 #ifdef MY_DEF_HERE
-extern unsigned char g_syno_sata_remap[SATA_REMAP_MAX];
+extern int g_syno_sata_remap[SATA_REMAP_MAX];
 extern int g_use_sata_remap;
 #endif  
 
 #ifdef MY_DEF_HERE
+extern char gszPciAddrList[PCI_ADDR_NUM_MAX][PCI_ADDR_LEN_MAX];
+extern int gPciAddrNum;
+#endif  
+
+#ifdef MY_DEF_HERE
 extern char giDiskSeqReverse[8];
 #endif  
 
@@ -109,6 +114,10 @@
 extern unsigned gSynoUsbVbusGpp[CONFIG_SYNO_USB_VBUS_NUM_GPIO];
 #endif  
 
+#ifdef MY_DEF_HERE
+extern int g_syno_ds1815p_speed_limit;
+#endif  
+
 #ifdef MY_ABC_HERE
 static int __init early_disk_idx_map(char *p)
 {
@@ -390,6 +399,42 @@
 #endif  
 
 #ifdef MY_DEF_HERE
+static int __init early_pci_sata_cache(char *p)
+{
+	int index = 0;
+	char *ptr = p;
+
+	while(ptr && *ptr){
+		if (',' ==  *ptr) {
+			index = 0;
+			gPciAddrNum ++;
+			if (PCI_ADDR_NUM_MAX >= gPciAddrNum){
+				goto FMT_ERR;
+			}
+		} else {
+			if (PCI_ADDR_LEN_MAX <= index) {
+				goto FMT_ERR;
+			}
+			gszPciAddrList[gPciAddrNum][index] = *ptr;
+			index++;
+		}
+		ptr++;
+	}
+	gPciAddrNum ++;
+
+	printk(KERN_ERR "Syno Bootargs : pci_sata_cache initialized\n");
+	return 0;
+
+FMT_ERR:
+	gPciAddrNum = 0;
+	printk(KERN_ERR "SYNO: pci_sata_cache format error, ignore.\n" );
+	return 0;
+
+}
+__setup("pci_sata_cache=", early_pci_sata_cache);
+#endif  
+
+#ifdef MY_DEF_HERE
 static int __init early_disk_seq_reserve(char *p)
 {
 	snprintf(giDiskSeqReverse, sizeof(giDiskSeqReverse), "%s", p);
@@ -680,3 +725,13 @@
 }
 __setup("syno_usb_vbus_gpio=", early_usb_vbus_gpio);
 #endif  
+
+#ifdef MY_DEF_HERE
+static int __init early_ds1815p_speed_limit(char *p)
+{
+        g_syno_ds1815p_speed_limit = simple_strtol(p, NULL, 10);
+
+        return 1;
+}
+__setup("1815p_speed_limit=", early_ds1815p_speed_limit);
+#endif  
diff -ur a/kernel/sysctl.c b/kernel/sysctl.c
--- a/kernel/sysctl.c	2017-03-23 15:00:08.000000000 +0100
+++ b/kernel/sysctl.c	2017-03-14 02:36:34.000000000 +0100
@@ -192,13 +192,22 @@
 #endif  
 
 #ifdef MY_DEF_HERE
-unsigned char g_syno_sata_remap[SATA_REMAP_MAX] = {SATA_REMAP_NOT_INIT};
+int g_syno_sata_remap[SATA_REMAP_MAX] = {SATA_REMAP_NOT_INIT};
 EXPORT_SYMBOL(g_syno_sata_remap);
 int g_use_sata_remap = 0;
 EXPORT_SYMBOL(g_use_sata_remap);
 #endif  
 
 #ifdef MY_DEF_HERE
+char gszPciAddrList[PCI_ADDR_NUM_MAX][PCI_ADDR_LEN_MAX] = {{0}};
+int gPciAddrNum = 0;
+int gPciDeferStart = M2SATA_START_IDX;
+EXPORT_SYMBOL(gszPciAddrList);
+EXPORT_SYMBOL(gPciAddrNum);
+EXPORT_SYMBOL(gPciDeferStart);
+#endif  
+
+#ifdef MY_DEF_HERE
 char giDiskSeqReverse[8] = {0};
 EXPORT_SYMBOL(giDiskSeqReverse);
 #endif  
@@ -302,6 +311,21 @@
 EXPORT_SYMBOL(syno_test_list);
 #endif  
 
+#ifdef MY_ABC_HERE
+int (*syno_valid_lsi3008_led)(u8 cmd);
+EXPORT_SYMBOL(syno_valid_lsi3008_led);
+#endif  
+
+#ifdef MY_DEF_HERE
+int (*syno_disk_map_table_gen_mv14xx)(int *iDiskMapTable, int iPortMax);
+EXPORT_SYMBOL(syno_disk_map_table_gen_mv14xx);
+#endif  
+
+#ifdef MY_DEF_HERE
+int g_syno_ds1815p_speed_limit = 1;
+EXPORT_SYMBOL(g_syno_ds1815p_speed_limit);
+#endif  
+
 extern int sysctl_overcommit_memory;
 extern int sysctl_overcommit_ratio;
 extern int max_threads;
diff -ur a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
--- a/kernel/time/posix-clock.c	2017-03-23 15:00:10.000000000 +0100
+++ b/kernel/time/posix-clock.c	2017-03-14 02:36:37.000000000 +0100
@@ -69,10 +69,10 @@
 static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
 {
 	struct posix_clock *clk = get_posix_clock(fp);
-	int result = 0;
+	unsigned int result = 0;
 
 	if (!clk)
-		return -ENODEV;
+		return POLLERR;
 
 	if (clk->ops.poll)
 		result = clk->ops.poll(clk, fp, wait);
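
The poll fix matters because ->poll() returns an unsigned event mask, not an errno; -ENODEV would have been interpreted as a mask with almost every bit set. A sketch of the expected shape of such a handler (example_dev and its fields are invented):

static unsigned int example_poll(struct file *fp, poll_table *wait)
{
	struct example_dev *dev = fp->private_data;

	if (!dev)
		return POLLERR;		/* errors are reported through the mask */

	poll_wait(fp, &dev->wait_queue, wait);
	return dev->data_ready ? (POLLIN | POLLRDNORM) : 0;
}
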
diff -ur a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
--- a/kernel/trace/ring_buffer_benchmark.c	2017-03-23 15:00:21.000000000 +0100
+++ b/kernel/trace/ring_buffer_benchmark.c	2017-03-14 02:36:45.000000000 +0100
@@ -455,7 +455,7 @@
 
 	if (producer_fifo >= 0) {
 		struct sched_param param = {
-			.sched_priority = consumer_fifo
+			.sched_priority = producer_fifo
 		};
 		sched_setscheduler(producer, SCHED_FIFO, &param);
 	} else
diff -ur a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
--- a/kernel/trace/ring_buffer.c	2017-03-23 15:00:25.000000000 +0100
+++ b/kernel/trace/ring_buffer.c	2017-03-14 02:36:49.000000000 +0100
@@ -463,7 +463,7 @@
 	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
-	unsigned int			nr_pages;
+	unsigned long			nr_pages;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
 	struct buffer_page		*tail_page;	/* write to tail */
@@ -483,7 +483,7 @@
 	u64				write_stamp;
 	u64				read_stamp;
 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
-	int				nr_pages_to_update;
+	long				nr_pages_to_update;
 	struct list_head		new_pages; /* new pages to add */
 	struct work_struct		update_pages_work;
 	struct completion		update_done;
@@ -1118,10 +1118,10 @@
 	return 0;
 }
 
-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 {
-	int i;
 	struct buffer_page *bpage, *tmp;
+	long i;
 
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
@@ -1158,7 +1158,7 @@
 }
 
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
-			     unsigned nr_pages)
+			     unsigned long nr_pages)
 {
 	LIST_HEAD(pages);
 
@@ -1183,7 +1183,7 @@
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
@@ -1282,8 +1282,9 @@
 					struct lock_class_key *key)
 {
 	struct ring_buffer *buffer;
+	long nr_pages;
 	int bsize;
-	int cpu, nr_pages;
+	int cpu;
 
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1406,12 +1407,12 @@
 }
 
 static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 {
 	struct list_head *tail_page, *to_remove, *next_page;
 	struct buffer_page *to_remove_page, *tmp_iter_page;
 	struct buffer_page *last_page, *first_page;
-	unsigned int nr_removed;
+	unsigned long nr_removed;
 	unsigned long head_bit;
 	int page_entries;
 
@@ -1627,7 +1628,7 @@
 			int cpu_id)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	unsigned nr_pages;
+	unsigned long nr_pages;
 	int cpu, err = 0;
 
 	/*
@@ -1641,14 +1642,13 @@
 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
 		return size;
 
-	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
-	size *= BUF_PAGE_SIZE;
+	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
 	/* we need a minimum of two pages */
-	if (size < BUF_PAGE_SIZE * 2)
-		size = BUF_PAGE_SIZE * 2;
+	if (nr_pages < 2)
+		nr_pages = 2;
 
-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+	size = nr_pages * BUF_PAGE_SIZE;
 
 	/*
 	 * Don't succeed if resizing is disabled, as a reader might be
@@ -1946,12 +1946,6 @@
 		goto again;
 }
 
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
-	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
-	cpu_buffer->reader_page->read = 0;
-}
-
 static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -3589,7 +3583,7 @@
 
 	/* Finally update the reader page to the new head */
 	cpu_buffer->reader_page = reader;
-	rb_reset_reader_page(cpu_buffer);
+	cpu_buffer->reader_page->read = 0;
 
 	if (overwrite != cpu_buffer->last_overrun) {
 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3599,6 +3593,10 @@
 	goto again;
 
  out:
+	/* Update the read_stamp on the first event */
+	if (reader && reader->read == 0)
+		cpu_buffer->read_stamp = reader->page->time_stamp;
+
 	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
@@ -4607,8 +4605,9 @@
 	struct ring_buffer *buffer =
 		container_of(self, struct ring_buffer, cpu_notify);
 	long cpu = (long)hcpu;
-	int cpu_i, nr_pages_same;
-	unsigned int nr_pages;
+	long nr_pages_same;
+	int cpu_i;
+	unsigned long nr_pages;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
diff -ur a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
--- a/kernel/trace/trace_branch.c	2017-03-23 15:00:20.000000000 +0100
+++ b/kernel/trace/trace_branch.c	2017-03-14 02:36:44.000000000 +0100
@@ -37,9 +37,12 @@
 	struct trace_branch *entry;
 	struct ring_buffer *buffer;
 	unsigned long flags;
-	int cpu, pc;
+	int pc;
 	const char *p;
 
+	if (current->trace_recursion & TRACE_BRANCH_BIT)
+		return;
+
 	/*
 	 * I would love to save just the ftrace_likely_data pointer, but
 	 * this code can also be used by modules. Ugly things can happen
@@ -50,10 +53,10 @@
 	if (unlikely(!tr))
 		return;
 
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
-	if (atomic_inc_return(&data->disabled) != 1)
+	raw_local_irq_save(flags);
+	current->trace_recursion |= TRACE_BRANCH_BIT;
+	data = this_cpu_ptr(tr->trace_buffer.data);
+	if (atomic_read(&data->disabled))
 		goto out;
 
 	pc = preempt_count();
@@ -82,8 +85,8 @@
 		__buffer_unlock_commit(buffer, event);
 
  out:
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	current->trace_recursion &= ~TRACE_BRANCH_BIT;
+	raw_local_irq_restore(flags);
 }
 
 static inline
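For context (not part of the patch): the hunk above replaces the per-cpu "disabled" counter with a per-task recursion bit, so the branch tracer cannot re-enter itself when code it calls contains annotated branches. A minimal user-space sketch of the same guard pattern, assuming a hypothetical record_branch_hit() body and a _Thread_local flag standing in for current->trace_recursion:

	/* Thread-local flag playing the role of TRACE_BRANCH_BIT. */
	static _Thread_local int in_branch_trace;
	static _Thread_local unsigned long branch_hits;

	/* Hypothetical instrumentation body; anything it calls may itself be instrumented. */
	static void record_branch_hit(int taken)
	{
		branch_hits += taken ? 1 : 0;
	}

	void branch_trace(int taken)
	{
		if (in_branch_trace)
			return;			/* already inside the tracer: refuse to recurse */

		in_branch_trace = 1;
		record_branch_hit(taken);	/* may hit branch_trace() again; the flag stops it */
		in_branch_trace = 0;
	}

The kernel version additionally runs under raw_local_irq_save() so the flag and the ring-buffer write cannot be torn by an interrupt on the same CPU.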
diff -ur a/kernel/trace/trace.c b/kernel/trace/trace.c
--- a/kernel/trace/trace.c	2017-03-23 15:00:26.000000000 +0100
+++ b/kernel/trace/trace.c	2017-03-14 02:36:49.000000000 +0100
@@ -4348,7 +4348,10 @@
 
 	spd.nr_pages = i;
 
-	ret = splice_to_pipe(pipe, &spd);
+	if (i)
+		ret = splice_to_pipe(pipe, &spd);
+	else
+		ret = 0;
 out:
 	splice_shrink_spd(&spd);
 	return ret;
diff -ur a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
--- a/kernel/trace/trace_events.c	2017-03-23 15:00:22.000000000 +0100
+++ b/kernel/trace/trace_events.c	2017-03-14 02:36:46.000000000 +0100
@@ -602,7 +602,8 @@
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
-		if (call->class && call->class->reg)
+		if (call->class && call->class->reg &&
+		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 			return file;
 	}
 
diff -ur a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
--- a/kernel/trace/trace_events_filter.c	2017-03-23 15:00:21.000000000 +0100
+++ b/kernel/trace/trace_events_filter.c	2017-03-14 02:36:45.000000000 +0100
@@ -1015,6 +1015,9 @@
 
 static char infix_next(struct filter_parse_state *ps)
 {
+	if (!ps->infix.cnt)
+		return 0;
+
 	ps->infix.cnt--;
 
 	return ps->infix.string[ps->infix.tail++];
@@ -1030,6 +1033,9 @@
 
 static void infix_advance(struct filter_parse_state *ps)
 {
+	if (!ps->infix.cnt)
+		return;
+
 	ps->infix.cnt--;
 	ps->infix.tail++;
 }
@@ -1328,19 +1334,26 @@
 {
 	int n_normal_preds = 0, n_logical_preds = 0;
 	struct postfix_elt *elt;
+	int cnt = 0;
 
 	list_for_each_entry(elt, &ps->postfix, list) {
-		if (elt->op == OP_NONE)
+		if (elt->op == OP_NONE) {
+			cnt++;
 			continue;
+		}
 
+		cnt--;
 		if (elt->op == OP_AND || elt->op == OP_OR) {
 			n_logical_preds++;
 			continue;
 		}
 		n_normal_preds++;
+		/* all ops should have operands */
+		if (cnt < 0)
+			break;
 	}
 
-	if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
+	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
 		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
 		return -EINVAL;
 	}
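Not part of the patch: the extra cnt bookkeeping added above is the classic operand-count check for postfix expressions; every leaf predicate pushes one value, every binary operator consumes one net value, so a well-formed filter must never go negative and must end with exactly one value. A stand-alone sketch of that rule, with 'v' standing for a predicate and '&'/'|' for the logical operators (postfix_is_valid and the token names are illustrative only):

	#include <stdbool.h>

	static bool postfix_is_valid(const char *expr)
	{
		int cnt = 0;

		for (const char *p = expr; *p; p++) {
			if (*p == 'v') {
				cnt++;			/* operand: pushes one value */
				continue;
			}
			if (*p != '&' && *p != '|')
				return false;		/* unknown token */
			cnt--;				/* binary op: pops two, pushes one */
			if (cnt < 0)
				return false;		/* operator appeared before its operands */
		}
		return cnt == 1;			/* exactly one result must remain */
	}

Like the kernel check, this only tracks the net count, so it is a necessary rather than a sufficient condition for well-formedness, but it is enough to reject filters whose operators have no operands.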
diff -ur a/kernel/trace/trace.h b/kernel/trace/trace.h
--- a/kernel/trace/trace.h	2017-03-23 15:00:22.000000000 +0100
+++ b/kernel/trace/trace.h	2017-03-14 02:36:46.000000000 +0100
@@ -425,6 +425,7 @@
 
 	TRACE_CONTROL_BIT,
 
+	TRACE_BRANCH_BIT,
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
diff -ur a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
--- a/kernel/trace/trace_irqsoff.c	2017-03-23 15:00:18.000000000 +0100
+++ b/kernel/trace/trace_irqsoff.c	2017-03-14 02:36:43.000000000 +0100
@@ -118,8 +118,12 @@
 		return 0;
 
 	local_save_flags(*flags);
-	/* slight chance to get a false positive on tracing_cpu */
-	if (!irqs_disabled_flags(*flags))
+	/*
+	 * Slight chance to get a false positive on tracing_cpu,
+	 * although I'm starting to think there isn't a chance.
+	 * Leave this for now just to be paranoid.
+	 */
+	if (!irqs_disabled_flags(*flags) && !preempt_count())
 		return 0;
 
 	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
diff -ur a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
--- a/kernel/trace/trace_printk.c	2017-03-23 15:00:17.000000000 +0100
+++ b/kernel/trace/trace_printk.c	2017-03-14 02:36:43.000000000 +0100
@@ -271,6 +271,9 @@
 	const char *str = *fmt;
 	int i;
 
+	if (!*fmt)
+		return 0;
+
 	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
 
 	/*
diff -ur a/kernel/up.c b/kernel/up.c
--- a/kernel/up.c	2017-03-23 15:00:04.000000000 +0100
+++ b/kernel/up.c	2017-03-14 02:36:31.000000000 +0100
@@ -10,12 +10,76 @@
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 				int wait)
 {
+	unsigned long flags;
+
 	WARN_ON(cpu != 0);
 
-	local_irq_disable();
-	(func)(info);
-	local_irq_enable();
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
 
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
+
+int __smp_call_function_single(int cpu, struct call_single_data *csd,
+			       int wait)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	csd->func(csd->info);
+	local_irq_restore(flags);
+	return 0;
+}
+EXPORT_SYMBOL(__smp_call_function_single);
+
+int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
+	return 0;
+}
+EXPORT_SYMBOL(on_each_cpu);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+		      smp_call_func_t func, void *info, bool wait)
+{
+	unsigned long flags;
+
+	if (cpumask_test_cpu(0, mask)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		      smp_call_func_t func, void *info, bool wait,
+		      gfp_t gfp_flags)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	if (cond_func(0, info)) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	}
+	preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
diff -ur a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c	2017-03-23 15:00:08.000000000 +0100
+++ b/kernel/workqueue.c	2017-03-14 02:36:32.000000000 +0100
@@ -606,6 +606,35 @@
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	/*
+	 * The following mb guarantees that previous clear of a PENDING bit
+	 * will not be reordered with any speculative LOADS or STORES from
+	 * work->current_func, which is executed afterwards.  This possible
+	 * reordering can lead to a missed execution on attempt to queue
+	 * the same @work.  E.g. consider this case:
+	 *
+	 *   CPU#0                         CPU#1
+	 *   ----------------------------  --------------------------------
+	 *
+	 * 1  STORE event_indicated
+	 * 2  queue_work_on() {
+	 * 3    test_and_set_bit(PENDING)
+	 * 4 }                             set_..._and_clear_pending() {
+	 * 5                                 set_work_data() # clear bit
+	 * 6                                 smp_mb()
+	 * 7                               work->current_func() {
+	 * 8				      LOAD event_indicated
+	 *				   }
+	 *
+	 * Without an explicit full barrier speculative LOAD on line 8 can
+	 * be executed before CPU#0 does STORE on line 1.  If that happens,
+	 * CPU#0 observes the PENDING bit is still set and new execution of
+	 * a @work is not queued in a hope, that CPU#1 will eventually
+	 * finish the queued @work.  Meanwhile CPU#1 does not see
+	 * event_indicated is set, because speculative LOAD was executed
+	 * before actual STORE.
+	 */
+	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
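For illustration only (not part of the patch): the race described in the comment is a store-then-load reordering, which only a full barrier forbids. A user-space analogue under C11 atomics, with relaxed accesses standing in for the plain kernel stores and atomic_thread_fence() standing in for the added smp_mb(); the *_analogue names are hypothetical:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool pending;		/* the PENDING bit */
	static atomic_int  event_indicated;	/* the flag the work function looks at */

	/* CPU#0: set the event, then try to queue the work. */
	static bool queue_work_analogue(void)
	{
		atomic_store_explicit(&event_indicated, 1, memory_order_relaxed);
		/* test_and_set_bit(PENDING): if already set, trust the running work to see the event */
		return !atomic_exchange_explicit(&pending, true, memory_order_relaxed);
	}

	/* CPU#1: clear PENDING, then run the work function. */
	static void run_work_analogue(void (*current_func)(void))
	{
		atomic_store_explicit(&pending, false, memory_order_relaxed);	/* set_work_data() */
		atomic_thread_fence(memory_order_seq_cst);			/* the new smp_mb() */
		current_func();	/* its load of event_indicated can no longer float above the clear */
	}

Without the fence, the load inside current_func() may be satisfied before the clear of PENDING becomes visible, which is exactly the lost-wakeup scenario drawn in the table above.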
diff -ur a/lib/bitmap.c b/lib/bitmap.c
--- a/lib/bitmap.c	2017-03-23 15:00:26.000000000 +0100
+++ b/lib/bitmap.c	2017-03-14 02:36:51.000000000 +0100
@@ -602,12 +602,12 @@
 	unsigned a, b;
 	int c, old_c, totaldigits;
 	const char __user __force *ubuf = (const char __user __force *)buf;
-	int exp_digit, in_range;
+	int at_start, in_range;
 
 	totaldigits = c = 0;
 	bitmap_zero(maskp, nmaskbits);
 	do {
-		exp_digit = 1;
+		at_start = 1;
 		in_range = 0;
 		a = b = 0;
 
@@ -636,11 +636,10 @@
 				break;
 
 			if (c == '-') {
-				if (exp_digit || in_range)
+				if (at_start || in_range)
 					return -EINVAL;
 				b = 0;
 				in_range = 1;
-				exp_digit = 1;
 				continue;
 			}
 
@@ -650,16 +649,18 @@
 			b = b * 10 + (c - '0');
 			if (!in_range)
 				a = b;
-			exp_digit = 0;
+			at_start = 0;
 			totaldigits++;
 		}
 		if (!(a <= b))
 			return -EINVAL;
 		if (b >= nmaskbits)
 			return -ERANGE;
-		while (a <= b) {
-			set_bit(a, maskp);
-			a++;
+		if (!at_start) {
+			while (a <= b) {
+				set_bit(a, maskp);
+				a++;
+			}
 		}
 	} while (buflen && c == ',');
 	return 0;
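Outside the patch, the intent of the at_start fix is easier to see in a compact re-implementation: a term that contains no digits must not set any bits (previously an empty trailing term set bit 0), and a '-' is only legal after at least one digit. A simplified user-space sketch of the same grammar (parse_bitmap_list is a hypothetical name; error handling is reduced to a single return code and whitespace is not accepted):

	#include <ctype.h>
	#include <string.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	/* Parse "0,3-5,7" style lists into mask[]; returns 0 on success, -1 on bad input. */
	static int parse_bitmap_list(const char *s, unsigned long *mask, unsigned int nbits)
	{
		memset(mask, 0, ((nbits + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long));

		while (*s) {
			unsigned int a = 0, b = 0;
			int at_start = 1, in_range = 0;

			for (; *s && *s != ','; s++) {
				if (*s == '-') {
					if (at_start || in_range)
						return -1;	/* "-5" and "1--2" are invalid */
					in_range = 1;
					b = 0;
				} else if (isdigit((unsigned char)*s)) {
					b = b * 10 + (*s - '0');
					if (!in_range)
						a = b;
					at_start = 0;
				} else {
					return -1;
				}
			}
			if (!at_start) {	/* empty terms set nothing, as in the fixed kernel code */
				if (a > b || b >= nbits)
					return -1;
				for (; a <= b; a++)
					mask[a / BITS_PER_LONG] |= 1UL << (a % BITS_PER_LONG);
			}
			if (*s == ',')
				s++;
		}
		return 0;
	}

For example parse_bitmap_list("0,3-5", mask, 64) sets bits 0 and 3 through 5, while "5-3" and "-2" are rejected.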
diff -ur a/lib/crc-t10dif.c b/lib/crc-t10dif.c
--- a/lib/crc-t10dif.c	2017-03-23 15:00:32.000000000 +0100
+++ b/lib/crc-t10dif.c	2017-03-14 02:36:56.000000000 +0100
@@ -11,57 +11,54 @@
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/crc-t10dif.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <crypto/hash.h>
+#include <linux/static_key.h>
 
-/* Table generated using the following polynomium:
- * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
- * gt: 0x8bb7
- */
-static const __u16 t10_dif_crc_table[256] = {
-	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
-	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
-	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
-	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
-	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
-	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
-	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
-	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
-	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
-	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
-	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
-	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
-	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
-	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
-	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
-	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
-	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
-	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
-	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
-	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
-	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
-	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
-	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
-	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
-	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
-	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
-	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
-	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
-	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
-	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
-	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
-	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
-};
+static struct crypto_shash *crct10dif_tfm;
+static struct static_key crct10dif_fallback __read_mostly;
 
 __u16 crc_t10dif(const unsigned char *buffer, size_t len)
 {
-	__u16 crc = 0;
-	unsigned int i;
+	struct {
+		struct shash_desc shash;
+		char ctx[2];
+	} desc;
+	int err;
 
-	for (i = 0 ; i < len ; i++)
-		crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
+	if (static_key_false(&crct10dif_fallback))
+		return crc_t10dif_generic(0, buffer, len);
 
-	return crc;
+	desc.shash.tfm = crct10dif_tfm;
+	desc.shash.flags = 0;
+	*(__u16 *)desc.ctx = 0;
+
+	err = crypto_shash_update(&desc.shash, buffer, len);
+	BUG_ON(err);
+
+	return *(__u16 *)desc.ctx;
 }
 EXPORT_SYMBOL(crc_t10dif);
 
+static int __init crc_t10dif_mod_init(void)
+{
+	crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
+	if (IS_ERR(crct10dif_tfm)) {
+		static_key_slow_inc(&crct10dif_fallback);
+		crct10dif_tfm = NULL;
+	}
+	return 0;
+}
+
+static void __exit crc_t10dif_mod_fini(void)
+{
+	crypto_free_shash(crct10dif_tfm);
+}
+
+module_init(crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_fini);
+
 MODULE_DESCRIPTION("T10 DIF CRC calculation");
 MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crct10dif");
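For reference (not part of the patch): the deleted lookup table was simply a byte-at-a-time expansion of the CRC16-T10DIF polynomial 0x8BB7 (MSB first, initial value 0, no final XOR), which is what the crc_t10dif_generic() fallback and the crypto "crct10dif" shash compute. A minimal bit-serial user-space sketch of the same checksum (crc_t10dif_bitwise is an illustrative name):

	#include <stdint.h>
	#include <stddef.h>

	/* Bit-serial CRC16-T10DIF: poly 0x8BB7, MSB first, init 0, no final XOR. */
	static uint16_t crc_t10dif_bitwise(const uint8_t *buf, size_t len)
	{
		uint16_t crc = 0;

		for (size_t i = 0; i < len; i++) {
			crc ^= (uint16_t)buf[i] << 8;		/* fold next byte into the top */
			for (int bit = 0; bit < 8; bit++)
				crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
						     : (uint16_t)(crc << 1);
		}
		return crc;
	}

Feeding a single 0x01 byte yields 0x8BB7 and a single 0x02 byte yields 0x9CD9, matching the first entries of the removed table, so the table carried no information beyond the polynomial itself.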
diff -ur a/lib/devres.c b/lib/devres.c
--- a/lib/devres.c	2017-03-23 15:00:26.000000000 +0100
+++ b/lib/devres.c	2017-03-14 02:36:50.000000000 +0100
@@ -423,7 +423,7 @@
 	if (!iomap)
 		return;
 
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
 		if (!(mask & (1 << i)))
 			continue;
 
diff -ur a/lib/dma-debug.c b/lib/dma-debug.c
--- a/lib/dma-debug.c	2017-03-23 15:00:32.000000000 +0100
+++ b/lib/dma-debug.c	2017-03-14 02:36:56.000000000 +0100
@@ -962,7 +962,7 @@
 
 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, len, _text, _etext) ||
+	if (overlap(addr, len, _stext, _etext) ||
 	    overlap(addr, len, __start_rodata, __end_rodata))
 		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
diff -ur a/lib/Kconfig b/lib/Kconfig
--- a/lib/Kconfig	2016-10-20 04:32:10.000000000 +0200
+++ b/lib/Kconfig	2016-06-23 08:51:53.000000000 +0200
@@ -63,6 +63,8 @@
 
 config CRC_T10DIF
 	tristate "CRC calculation for the T10 Data Integrity Field"
+	select CRYPTO
+	select CRYPTO_CRCT10DIF
 	help
 	  This option is only needed if a module that's not in the
 	  kernel tree needs to calculate CRC checks for use with the
diff -ur a/lib/Kconfig.debug b/lib/Kconfig.debug
--- a/lib/Kconfig.debug	2016-10-20 04:32:10.000000000 +0200
+++ b/lib/Kconfig.debug	2017-02-14 17:25:11.000000000 +0100
@@ -682,7 +682,7 @@
 	  mutexes and rwsems.
 
 config STACKTRACE
-	bool
+	bool "SYNO_STACKTRACE"
 	depends on STACKTRACE_SUPPORT
 
 config DEBUG_STACK_USAGE
diff -ur a/lib/klist.c b/lib/klist.c
--- a/lib/klist.c	2017-03-23 15:00:28.000000000 +0100
+++ b/lib/klist.c	2017-03-14 02:36:53.000000000 +0100
@@ -282,9 +282,9 @@
 			  struct klist_node *n)
 {
 	i->i_klist = k;
-	i->i_cur = n;
-	if (n)
-		kref_get(&n->n_ref);
+	i->i_cur = NULL;
+	if (n && kref_get_unless_zero(&n->n_ref))
+		i->i_cur = n;
 }
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
 
diff -ur a/lib/radix-tree.c b/lib/radix-tree.c
--- a/lib/radix-tree.c	2017-03-23 15:00:32.000000000 +0100
+++ b/lib/radix-tree.c	2017-03-14 02:36:56.000000000 +0100
@@ -1013,9 +1013,13 @@
 		return 0;
 
 	radix_tree_for_each_slot(slot, root, &iter, first_index) {
-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+		results[ret] = rcu_dereference_raw(*slot);
 		if (!results[ret])
 			continue;
+		if (radix_tree_is_indirect_ptr(results[ret])) {
+			slot = radix_tree_iter_retry(&iter);
+			continue;
+		}
 		if (++ret == max_items)
 			break;
 	}
@@ -1092,9 +1096,13 @@
 		return 0;
 
 	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+		results[ret] = rcu_dereference_raw(*slot);
 		if (!results[ret])
 			continue;
+		if (radix_tree_is_indirect_ptr(results[ret])) {
+			slot = radix_tree_iter_retry(&iter);
+			continue;
+		}
 		if (++ret == max_items)
 			break;
 	}
diff -ur a/lib/strnlen_user.c b/lib/strnlen_user.c
--- a/lib/strnlen_user.c	2017-03-23 15:00:30.000000000 +0100
+++ b/lib/strnlen_user.c	2017-03-14 02:36:54.000000000 +0100
@@ -57,7 +57,8 @@
 			return res + find_zero(data) + 1 - align;
 		}
 		res += sizeof(unsigned long);
-		if (unlikely(max < sizeof(unsigned long)))
+		/* We already handled 'unsigned long' bytes. Did we do it all ? */
+		if (unlikely(max <= sizeof(unsigned long)))
 			break;
 		max -= sizeof(unsigned long);
 		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
diff -ur a/lib/synolib/Makefile b/lib/synolib/Makefile
--- a/lib/synolib/Makefile	2016-10-20 04:32:10.000000000 +0200
+++ b/lib/synolib/Makefile	2017-02-14 17:25:11.000000000 +0100
@@ -5,3 +5,4 @@
 obj-$(CONFIG_SYNO_DEBUG_FLAG) += syno_process_name_get.o
 obj-$(CONFIG_SYNO_PLUGIN_INTERFACE) += syno_plugin.o
 obj-$(CONFIG_SYNO_SATA_REMAP) += syno_remap_idx_get.o
+obj-$(CONFIG_SYNO_PCI_HOST_SATA_CACHE) += syno_shift_remap_table.o
Only in b/lib/synolib: syno_shift_remap_table.c.
diff -ur a/lib/ucs2_string.c b/lib/ucs2_string.c
--- a/lib/ucs2_string.c	2017-03-23 15:00:27.000000000 +0100
+++ b/lib/ucs2_string.c	2017-03-14 02:36:52.000000000 +0100
@@ -49,3 +49,65 @@
         }
 }
 EXPORT_SYMBOL(ucs2_strncmp);
+
+unsigned long
+ucs2_utf8size(const ucs2_char_t *src)
+{
+	unsigned long i;
+	unsigned long j = 0;
+
+	for (i = 0; i < ucs2_strlen(src); i++) {
+		u16 c = src[i];
+
+		if (c >= 0x800)
+			j += 3;
+		else if (c >= 0x80)
+			j += 2;
+		else
+			j += 1;
+	}
+
+	return j;
+}
+EXPORT_SYMBOL(ucs2_utf8size);
+
+/*
+ * copy at most maxlength bytes of whole utf8 characters to dest from the
+ * ucs2 string src.
+ *
+ * The return value is the number of characters copied, not including the
+ * final NUL character.
+ */
+unsigned long
+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
+{
+	unsigned int i;
+	unsigned long j = 0;
+	unsigned long limit = ucs2_strnlen(src, maxlength);
+
+	for (i = 0; maxlength && i < limit; i++) {
+		u16 c = src[i];
+
+		if (c >= 0x800) {
+			if (maxlength < 3)
+				break;
+			maxlength -= 3;
+			dest[j++] = 0xe0 | (c & 0xf000) >> 12;
+			dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
+			dest[j++] = 0x80 | (c & 0x003f);
+		} else if (c >= 0x80) {
+			if (maxlength < 2)
+				break;
+			maxlength -= 2;
+			dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
+			dest[j++] = 0x80 | (c & 0x03f);
+		} else {
+			maxlength -= 1;
+			dest[j++] = c & 0x7f;
+		}
+	}
+	if (maxlength)
+		dest[j] = '\0';
+	return j;
+}
+EXPORT_SYMBOL(ucs2_as_utf8);
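A worked illustration (not from the kernel sources) of the byte math in ucs2_as_utf8(): code units below 0x80 copy through, units below 0x800 become a 0xC0/0x80 pair, and the rest of the BMP becomes a 0xE0/0x80/0x80 triple. A hypothetical stand-alone encoder for one UCS-2 code unit (no surrogate handling, as in the kernel helper):

	#include <stdint.h>

	/* Encode one UCS-2 code unit; returns the number of bytes written (1..3). */
	static unsigned int ucs2_char_to_utf8(uint16_t c, uint8_t out[3])
	{
		if (c < 0x80) {
			out[0] = (uint8_t)c;				/* 0xxxxxxx */
			return 1;
		}
		if (c < 0x800) {
			out[0] = (uint8_t)(0xc0 | (c >> 6));		/* 110xxxxx */
			out[1] = (uint8_t)(0x80 | (c & 0x3f));		/* 10xxxxxx */
			return 2;
		}
		out[0] = (uint8_t)(0xe0 | (c >> 12));			/* 1110xxxx */
		out[1] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));		/* 10xxxxxx */
		out[2] = (uint8_t)(0x80 | (c & 0x3f));			/* 10xxxxxx */
		return 3;
	}

For example U+00E9 encodes to 0xC3 0xA9 and U+20AC to 0xE2 0x82 0xAC; the lengths 1, 2 and 3 are exactly what ucs2_utf8size() sums per character, and what ucs2_as_utf8() checks against the remaining maxlength before emitting each character whole.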
diff -ur a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS	2016-10-20 04:32:07.000000000 +0200
+++ b/MAINTAINERS	2016-07-29 05:48:09.000000000 +0200
@@ -1522,6 +1522,13 @@
 S:	Maintained
 F:	drivers/net/wireless/atmel*
 
+ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
+M:      Bradley Grove <linuxdrivers@attotech.com>
+L:      linux-scsi@vger.kernel.org
+W:      http://www.attotech.com
+S:      Supported
+F:      drivers/scsi/esas2r
+
 AUDIT SUBSYSTEM
 M:	Al Viro <viro@zeniv.linux.org.uk>
 M:	Eric Paris <eparis@redhat.com>
@@ -3046,8 +3053,8 @@
 F:	arch/ia64/kernel/efi.c
 F:	arch/x86/boot/compressed/eboot.[ch]
 F:	arch/x86/include/asm/efi.h
-F:	arch/x86/platform/efi/*
-F:	drivers/firmware/efi/*
+F:	arch/x86/platform/efi/
+F:	drivers/firmware/efi/
 F:	include/linux/efi*.h
 
 EFI VARIABLE FILESYSTEM
diff -ur a/Makefile b/Makefile
--- a/Makefile	2016-10-20 04:32:07.000000000 +0200
+++ b/Makefile	2016-07-29 05:48:09.000000000 +0200
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 77
+SUBLEVEL = 102
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
@@ -241,7 +241,7 @@
 
 HOSTCC       = gcc
 HOSTCXX      = g++
-HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
 HOSTCXXFLAGS = -O2
 
 # Decide whether to build built-in, modular, or both.
@@ -374,7 +374,9 @@
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
-		   -fno-delete-null-pointer-checks
+		   -fno-delete-null-pointer-checks \
+		   -std=gnu89
+
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
 KBUILD_AFLAGS   := -D__ASSEMBLY__
diff -ur a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c	2017-03-23 15:10:07.000000000 +0100
+++ b/mm/filemap.c	2017-03-14 02:48:02.000000000 +0100
@@ -2020,6 +2020,11 @@
 			break;
 		}
 
+		if (fatal_signal_pending(current)) {
+			status = -EINTR;
+			break;
+		}
+
 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 						&page, &fsdata);
 		if (unlikely(status))
@@ -2053,10 +2058,6 @@
 		written += copied;
 
 		balance_dirty_pages_ratelimited(mapping);
-		if (fatal_signal_pending(current)) {
-			status = -EINTR;
-			break;
-		}
 	} while (iov_iter_count(i));
 
 	return written ? written : status;
diff -ur a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c	2017-03-23 15:10:08.000000000 +0100
+++ b/mm/hugetlb.c	2017-03-14 02:48:03.000000000 +0100
@@ -2572,6 +2572,14 @@
 			continue;
 
 		/*
+		 * Shared VMAs have their own reserves and do not affect
+		 * MAP_PRIVATE accounting but it is possible that a shared
+		 * VMA is using the same page so check and skip such VMAs.
+		 */
+		if (iter_vma->vm_flags & VM_MAYSHARE)
+			continue;
+
+		/*
 		 * Unmap the page from other VMAs without their own reserves.
 		 * They get marked to be SIGKILLed if they fault in these
 		 * areas. This is because a future no-page fault on this VMA
diff -ur a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c	2017-03-23 15:10:09.000000000 +0100
+++ b/mm/memcontrol.c	2017-03-14 02:48:04.000000000 +0100
@@ -5784,16 +5784,17 @@
 swap_buffers:
 	/* Swap primary and spare array */
 	thresholds->spare = thresholds->primary;
-	/* If all events are unregistered, free the spare array */
-	if (!new) {
-		kfree(thresholds->spare);
-		thresholds->spare = NULL;
-	}
 
 	rcu_assign_pointer(thresholds->primary, new);
 
 	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
+
+	/* If all events are unregistered, free the spare array */
+	if (!new) {
+		kfree(thresholds->spare);
+		thresholds->spare = NULL;
+	}
 unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 }
diff -ur a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c	2017-03-23 15:10:06.000000000 +0100
+++ b/mm/memory.c	2017-03-14 02:48:01.000000000 +0100
@@ -1135,6 +1135,12 @@
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 struct page *follow_page_mask(struct vm_area_struct *vma,
 			      unsigned long address, unsigned int flags,
 			      unsigned int *page_mask)
@@ -1225,7 +1231,7 @@
 	}
 	if ((flags & FOLL_NUMA) && pte_numa(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
 		goto unlock;
 
 	page = vm_normal_page(vma, address, pte);
@@ -1429,7 +1435,7 @@
 
 				if ((ret & VM_FAULT_WRITE) &&
 				    !(vma->vm_flags & VM_WRITE))
-					foll_flags &= ~FOLL_WRITE;
+					foll_flags |= FOLL_COW;
 
 				cond_resched();
 			}
@@ -2368,6 +2374,9 @@
 
 	pte_unmap(page_table);
 
+	if (vma->vm_flags & VM_SHARED)
+		return VM_FAULT_SIGBUS;
+
 	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGSEGV;
 
@@ -2585,6 +2594,9 @@
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
 	pte_unmap(page_table);
+	 
+	if (!vma->vm_ops->fault)
+		return VM_FAULT_SIGBUS;
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
@@ -2760,11 +2772,9 @@
 	entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (vma->vm_ops) {
-				if (likely(vma->vm_ops->fault))
-					return do_linear_fault(mm, vma, address,
+			if (vma->vm_ops)
+				return do_linear_fault(mm, vma, address,
 						pte, pmd, flags, entry);
-			}
 			return do_anonymous_page(mm, vma, address,
 						 pte, pmd, flags);
 		}
diff -ur a/mm/memory-failure.c b/mm/memory-failure.c
--- a/mm/memory-failure.c	2017-03-23 15:10:07.000000000 +0100
+++ b/mm/memory-failure.c	2017-03-14 02:48:02.000000000 +0100
@@ -1117,10 +1117,10 @@
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	if (!PageHuge(p) && !PageTransTail(p)) {
-		if (!PageLRU(p))
-			shake_page(p, 0);
-		if (!PageLRU(p)) {
+	if (!PageHuge(p)) {
+		if (!PageLRU(hpage))
+			shake_page(hpage, 0);
+		if (!PageLRU(hpage)) {
 			/*
 			 * shake_page could have turned it free.
 			 */
@@ -1472,7 +1472,9 @@
 		 * Did it turn free?
 		 */
 		ret = __get_any_page(page, pfn, 0);
-		if (!PageLRU(page)) {
+		if (ret == 1 && !PageLRU(page)) {
+			/* Drop page reference which is from __get_any_page() */
+			put_page(page);
 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
 				pfn, page->flags);
 			return -EIO;
diff -ur a/mm/memory_hotplug.c b/mm/memory_hotplug.c
--- a/mm/memory_hotplug.c	2017-03-23 15:10:07.000000000 +0100
+++ b/mm/memory_hotplug.c	2017-03-14 02:48:02.000000000 +0100
@@ -1206,23 +1206,30 @@
  */
 static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long pfn;
+	unsigned long pfn, sec_end_pfn;
 	struct zone *zone = NULL;
 	struct page *page;
 	int i;
-	for (pfn = start_pfn;
+	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
 	     pfn < end_pfn;
-	     pfn += MAX_ORDER_NR_PAGES) {
-		i = 0;
-		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
-		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
-			i++;
-		if (i == MAX_ORDER_NR_PAGES)
+	     pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+		/* Make sure the memory section is present first */
+		if (!present_section_nr(pfn_to_section_nr(pfn)))
 			continue;
-		page = pfn_to_page(pfn + i);
-		if (zone && page_zone(page) != zone)
-			return 0;
-		zone = page_zone(page);
+		for (; pfn < sec_end_pfn && pfn < end_pfn;
+		     pfn += MAX_ORDER_NR_PAGES) {
+			i = 0;
+			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
+			while ((i < MAX_ORDER_NR_PAGES) &&
+				!pfn_valid_within(pfn + i))
+				i++;
+			if (i == MAX_ORDER_NR_PAGES)
+				continue;
+			page = pfn_to_page(pfn + i);
+			if (zone && page_zone(page) != zone)
+				return 0;
+			zone = page_zone(page);
+		}
 	}
 	return 1;
 }
@@ -1800,8 +1807,10 @@
 		 * wait_table may be allocated from boot memory,
 		 * here only free if it's allocated by vmalloc.
 		 */
-		if (is_vmalloc_addr(zone->wait_table))
+		if (is_vmalloc_addr(zone->wait_table)) {
 			vfree(zone->wait_table);
+			zone->wait_table = NULL;
+		}
 	}
 }
 EXPORT_SYMBOL(try_offline_node);
diff -ur a/mm/process_vm_access.c b/mm/process_vm_access.c
--- a/mm/process_vm_access.c	2017-03-23 15:10:06.000000000 +0100
+++ b/mm/process_vm_access.c	2017-03-14 02:48:02.000000000 +0100
@@ -298,7 +298,7 @@
 		goto free_proc_pages;
 	}
 
-	mm = mm_access(task, PTRACE_MODE_ATTACH);
+	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
 	if (!mm || IS_ERR(mm)) {
 		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
 		/*
diff -ur a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c	2017-03-23 15:10:08.000000000 +0100
+++ b/mm/vmscan.c	2017-03-14 02:48:03.000000000 +0100
@@ -730,20 +730,15 @@
 			 * could easily OOM just because too many pages are in
 			 * writeback and there is nothing else to reclaim.
 			 *
-			 * Check __GFP_IO, certainly because a loop driver
+			 * Require may_enter_fs to wait on writeback, because
+			 * fs may not have submitted IO yet. And a loop driver
 			 * thread might enter reclaim, and deadlock if it waits
 			 * on a page for which it is needed to do the write
 			 * (loop masks off __GFP_IO|__GFP_FS for this reason);
 			 * but more thought would probably show more reasons.
-			 *
-			 * Don't require __GFP_FS, since we're not going into
-			 * the FS, just waiting on its writeback completion.
-			 * Worryingly, ext4 gfs2 and xfs allocate pages with
-			 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
-			 * testing may_enter_fs here is liable to OOM on them.
 			 */
 			if (global_reclaim(sc) ||
-			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+			    !PageReclaim(page) || !may_enter_fs) {
 				/*
 				 * This is slightly racy - end_page_writeback()
 				 * might have just cleared PageReclaim, then
@@ -930,7 +925,7 @@
 		if (PageSwapCache(page))
 			try_to_free_swap(page);
 		unlock_page(page);
-		putback_lru_page(page);
+		list_add(&page->lru, &ret_pages);
 		continue;
 
 activate_locked:
diff -ur a/net/9p/client.c b/net/9p/client.c
--- a/net/9p/client.c	2017-03-23 15:04:37.000000000 +0100
+++ b/net/9p/client.c	2017-03-14 02:42:26.000000000 +0100
@@ -825,7 +825,8 @@
 	if (err < 0) {
 		if (err == -EIO)
 			c->status = Disconnected;
-		goto reterr;
+		if (err != -ERESTARTSYS)
+			goto reterr;
 	}
 	if (req->status == REQ_STATUS_ERROR) {
 		p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
diff -ur a/net/atm/clip.c b/net/atm/clip.c
--- a/net/atm/clip.c	2017-03-23 15:05:25.000000000 +0100
+++ b/net/atm/clip.c	2017-03-14 02:43:17.000000000 +0100
@@ -68,7 +68,7 @@
 
 	sk = sk_atm(atmarpd);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 	return 0;
 }
 
diff -ur a/net/atm/lec.c b/net/atm/lec.c
--- a/net/atm/lec.c	2017-03-23 15:05:28.000000000 +0100
+++ b/net/atm/lec.c	2017-03-14 02:43:19.000000000 +0100
@@ -152,7 +152,7 @@
 		atm_force_charge(priv->lecd, skb2->truesize);
 		sk = sk_atm(priv->lecd);
 		skb_queue_tail(&sk->sk_receive_queue, skb2);
-		sk->sk_data_ready(sk, skb2->len);
+		sk->sk_data_ready(sk);
 	}
 }
 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -447,7 +447,7 @@
 			atm_force_charge(priv->lecd, skb2->truesize);
 			sk = sk_atm(priv->lecd);
 			skb_queue_tail(&sk->sk_receive_queue, skb2);
-			sk->sk_data_ready(sk, skb2->len);
+			sk->sk_data_ready(sk);
 		}
 	}
 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -530,13 +530,13 @@
 	atm_force_charge(priv->lecd, skb->truesize);
 	sk = sk_atm(priv->lecd);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 
 	if (data != NULL) {
 		pr_debug("about to send %d bytes of data\n", data->len);
 		atm_force_charge(priv->lecd, data->truesize);
 		skb_queue_tail(&sk->sk_receive_queue, data);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	}
 
 	return 0;
@@ -616,7 +616,7 @@
 
 		pr_debug("%s: To daemon\n", dev->name);
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	} else {		/* Data frame, queue to protocol handlers */
 		struct lec_arp_table *entry;
 		unsigned char *src, *dst;
diff -ur a/net/atm/mpc.c b/net/atm/mpc.c
--- a/net/atm/mpc.c	2017-03-23 15:05:26.000000000 +0100
+++ b/net/atm/mpc.c	2017-03-14 02:43:18.000000000 +0100
@@ -705,7 +705,7 @@
 		dprintk("(%s) control packet arrived\n", dev->name);
 		/* Pass control packets to daemon */
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 		return;
 	}
 
@@ -991,7 +991,7 @@
 
 	sk = sk_atm(mpc->mpoad_vcc);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 
 	return 0;
 }
@@ -1273,7 +1273,7 @@
 
 	sk = sk_atm(vcc);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 	dprintk("exiting\n");
 }
 
diff -ur a/net/atm/raw.c b/net/atm/raw.c
--- a/net/atm/raw.c	2017-03-23 15:05:25.000000000 +0100
+++ b/net/atm/raw.c	2017-03-14 02:43:17.000000000 +0100
@@ -25,7 +25,7 @@
 		struct sock *sk = sk_atm(vcc);
 
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	}
 }
 
diff -ur a/net/atm/signaling.c b/net/atm/signaling.c
--- a/net/atm/signaling.c	2017-03-23 15:05:25.000000000 +0100
+++ b/net/atm/signaling.c	2017-03-14 02:43:17.000000000 +0100
@@ -51,7 +51,7 @@
 #endif
 	atm_force_charge(sigd, skb->truesize);
 	skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
-	sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
+	sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
 }
 
 static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
diff -ur a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
--- a/net/ax25/af_ax25.c	2017-03-23 15:04:36.000000000 +0100
+++ b/net/ax25/af_ax25.c	2017-03-14 02:42:25.000000000 +0100
@@ -804,6 +804,9 @@
 	struct sock *sk;
 	ax25_cb *ax25;
 
+	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+		return -EINVAL;
+
 	if (!net_eq(net, &init_net))
 		return -EAFNOSUPPORT;
 
diff -ur a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
--- a/net/ax25/ax25_in.c	2017-03-23 15:04:35.000000000 +0100
+++ b/net/ax25/ax25_in.c	2017-03-14 02:42:23.000000000 +0100
@@ -422,7 +422,7 @@
 
 	if (sk) {
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, skb->len);
+			sk->sk_data_ready(sk);
 		sock_put(sk);
 	} else {
 free:
diff -ur a/net/batman-adv/send.c b/net/batman-adv/send.c
--- a/net/batman-adv/send.c	2017-03-23 15:04:21.000000000 +0100
+++ b/net/batman-adv/send.c	2017-03-14 02:42:10.000000000 +0100
@@ -353,6 +353,9 @@
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
+			if (!forw_packet->own)
+				atomic_inc(&bat_priv->batman_queue_left);
+
 			batadv_forw_packet_free(forw_packet);
 		}
 	}
@@ -379,6 +382,9 @@
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
+			if (!forw_packet->own)
+				atomic_inc(&bat_priv->bcast_queue_left);
+
 			batadv_forw_packet_free(forw_packet);
 		}
 	}
diff -ur a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
--- a/net/bluetooth/hidp/core.c	2017-03-23 15:05:52.000000000 +0100
+++ b/net/bluetooth/hidp/core.c	2017-03-14 02:43:48.000000000 +0100
@@ -396,6 +396,20 @@
 {
 	struct hidp_session *session = (struct hidp_session *) arg;
 
+	/* The HIDP user-space API only contains calls to add and remove
+	 * devices. There is no way to forward events of any kind. Therefore,
+	 * we have to forcefully disconnect a device on idle-timeouts. This is
+	 * unfortunate and weird API design, but it is spec-compliant and
+	 * required for backwards-compatibility. Hence, on idle-timeout, we
+	 * signal driver-detach events, so poll() will be woken up with an
+	 * error-condition on both sockets.
+	 */
+
+	session->intr_sock->sk->sk_err = EUNATCH;
+	session->ctrl_sock->sk->sk_err = EUNATCH;
+	wake_up_interruptible(sk_sleep(session->intr_sock->sk));
+	wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
+
 	hidp_session_terminate(session);
 }
 
diff -ur a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
--- a/net/bluetooth/l2cap_sock.c	2017-03-23 15:05:50.000000000 +0100
+++ b/net/bluetooth/l2cap_sock.c	2017-03-14 02:43:46.000000000 +0100
@@ -1043,7 +1043,7 @@
 
 		if (parent) {
 			bt_accept_unlink(sk);
-			parent->sk_data_ready(parent, 0);
+			parent->sk_data_ready(parent);
 		} else {
 			sk->sk_state_change(sk);
 		}
@@ -1092,7 +1092,7 @@
 	sk->sk_state_change(sk);
 
 	if (parent)
-		parent->sk_data_ready(parent, 0);
+		parent->sk_data_ready(parent);
 
 	release_sock(sk);
 }
@@ -1103,7 +1103,7 @@
 	struct sock *parent = bt_sk(sk)->parent;
 
 	if (parent)
-		parent->sk_data_ready(parent, 0);
+		parent->sk_data_ready(parent);
 }
 
 static struct l2cap_ops l2cap_chan_ops = {
diff -ur a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
--- a/net/bluetooth/rfcomm/core.c	2017-03-23 15:05:54.000000000 +0100
+++ b/net/bluetooth/rfcomm/core.c	2017-03-14 02:43:49.000000000 +0100
@@ -185,9 +185,9 @@
 	rfcomm_schedule();
 }
 
-static void rfcomm_l2data_ready(struct sock *sk, int bytes)
+static void rfcomm_l2data_ready(struct sock *sk)
 {
-	BT_DBG("%p bytes %d", sk, bytes);
+	BT_DBG("%p", sk);
 	rfcomm_schedule();
 }
 
diff -ur a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
--- a/net/bluetooth/rfcomm/sock.c	2017-03-23 15:05:52.000000000 +0100
+++ b/net/bluetooth/rfcomm/sock.c	2017-03-14 02:43:48.000000000 +0100
@@ -54,7 +54,7 @@
 
 	atomic_add(skb->len, &sk->sk_rmem_alloc);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		rfcomm_dlc_throttle(d);
@@ -84,7 +84,7 @@
 			sock_set_flag(sk, SOCK_ZAPPED);
 			bt_accept_unlink(sk);
 		}
-		parent->sk_data_ready(parent, 0);
+		parent->sk_data_ready(parent);
 	} else {
 		if (d->state == BT_CONNECTED)
 			rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
diff -ur a/net/bluetooth/sco.c b/net/bluetooth/sco.c
--- a/net/bluetooth/sco.c	2017-03-23 15:05:50.000000000 +0100
+++ b/net/bluetooth/sco.c	2017-03-14 02:43:45.000000000 +0100
@@ -456,6 +456,9 @@
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
+	if (addr_len < sizeof(struct sockaddr_sco))
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_OPEN) {
@@ -975,7 +978,7 @@
 			sk->sk_state = BT_CONNECTED;
 
 		/* Wake up parent */
-		parent->sk_data_ready(parent, 1);
+		parent->sk_data_ready(parent);
 
 		bh_unlock_sock(parent);
 
diff -ur a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
--- a/net/bridge/br_ioctl.c	2017-03-23 15:05:07.000000000 +0100
+++ b/net/bridge/br_ioctl.c	2017-03-14 02:42:57.000000000 +0100
@@ -21,18 +21,19 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* called with RTNL */
 static int get_bridge_ifindices(struct net *net, int *indices, int num)
 {
 	struct net_device *dev;
 	int i = 0;
 
-	for_each_netdev(net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		if (i >= num)
 			break;
 		if (dev->priv_flags & IFF_EBRIDGE)
 			indices[i++] = dev->ifindex;
 	}
+	rcu_read_unlock();
 
 	return i;
 }
@@ -247,9 +248,7 @@
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_bh(&br->lock);
 		br_stp_set_bridge_priority(br, args[1]);
-		spin_unlock_bh(&br->lock);
 		return 0;
 
 	case BRCTL_SET_PORT_PRIORITY:
diff -ur a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
--- a/net/bridge/br_mdb.c	2017-03-23 15:05:07.000000000 +0100
+++ b/net/bridge/br_mdb.c	2017-03-14 02:42:58.000000000 +0100
@@ -345,7 +345,6 @@
 		return -ENOMEM;
 	rcu_assign_pointer(*pp, p);
 
-	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
 	return 0;
 }
 
@@ -368,6 +367,7 @@
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
+	memset(&ip, 0, sizeof(ip));
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
@@ -417,6 +417,7 @@
 	if (timer_pending(&br->multicast_querier_timer))
 		return -EBUSY;
 
+	memset(&ip, 0, sizeof(ip));
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
diff -ur a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
--- a/net/bridge/br_multicast.c	2017-03-23 15:05:08.000000000 +0100
+++ b/net/bridge/br_multicast.c	2017-03-14 02:42:58.000000000 +0100
@@ -1007,7 +1007,7 @@
 
 		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
 						 vid);
-		if (!err)
+		if (err)
 			break;
 	}
 
@@ -1026,6 +1026,9 @@
 	struct net_bridge_port *p;
 	struct hlist_node *slot = NULL;
 
+	if (!hlist_unhashed(&port->rlist))
+		return;
+
 	hlist_for_each_entry(p, &br->router_list, rlist) {
 		if ((unsigned long) port >= (unsigned long) p)
 			break;
@@ -1053,12 +1056,8 @@
 	if (port->multicast_router != 1)
 		return;
 
-	if (!hlist_unhashed(&port->rlist))
-		goto timer;
-
 	br_multicast_add_router(br, port);
 
-timer:
 	mod_timer(&port->multicast_router_timer,
 		  now + br->multicast_querier_interval);
 }
diff -ur a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
--- a/net/bridge/br_stp_if.c	2017-03-23 15:05:07.000000000 +0100
+++ b/net/bridge/br_stp_if.c	2017-03-14 02:42:58.000000000 +0100
@@ -127,7 +127,10 @@
 	char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
 	char *envp[] = { NULL };
 
-	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+	if (net_eq(dev_net(br->dev), &init_net))
+		r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+	else
+		r = -ENOENT;
 
 	spin_lock_bh(&br->lock);
 
@@ -240,12 +243,13 @@
 	return true;
 }
 
-/* called under bridge lock */
+/* Acquires and releases bridge lock */
 void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
 {
 	struct net_bridge_port *p;
 	int wasroot;
 
+	spin_lock_bh(&br->lock);
 	wasroot = br_is_root_bridge(br);
 
 	list_for_each_entry(p, &br->port_list, list) {
@@ -263,6 +267,7 @@
 	br_port_state_selection(br);
 	if (br_is_root_bridge(br) && !wasroot)
 		br_become_root_bridge(br);
+	spin_unlock_bh(&br->lock);
 }
 
 /* called under bridge lock */
diff -ur a/net/caif/caif_socket.c b/net/caif/caif_socket.c
--- a/net/caif/caif_socket.c	2017-03-23 15:05:56.000000000 +0100
+++ b/net/caif/caif_socket.c	2017-03-14 02:43:53.000000000 +0100
@@ -124,7 +124,6 @@
 static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
-	int skb_len;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -153,14 +152,13 @@
 	 * may be freed by other threads of control pulling packets
 	 * from the queue.
 	 */
-	skb_len = skb->len;
 	spin_lock_irqsave(&list->lock, flags);
 	if (!sock_flag(sk, SOCK_DEAD))
 		__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
+		sk->sk_data_ready(sk);
 	else
 		kfree_skb(skb);
 	return 0;
@@ -331,6 +329,10 @@
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
 		lock_sock(sk);
+
+		if (sock_flag(sk, SOCK_DEAD))
+			break;
+
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}
 
@@ -374,6 +376,10 @@
 		struct sk_buff *skb;
 
 		lock_sock(sk);
+		if (sock_flag(sk, SOCK_DEAD)) {
+			err = -ECONNRESET;
+			goto unlock;
+		}
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		caif_check_flow_release(sk);
 
diff -ur a/net/ceph/messenger.c b/net/ceph/messenger.c
--- a/net/ceph/messenger.c	2017-03-23 15:05:27.000000000 +0100
+++ b/net/ceph/messenger.c	2017-03-14 02:43:18.000000000 +0100
@@ -383,7 +383,7 @@
  */
 
 /* data available on socket, or listen socket received a connect */
-static void ceph_sock_data_ready(struct sock *sk, int count_unused)
+static void ceph_sock_data_ready(struct sock *sk)
 {
 	struct ceph_connection *con = sk->sk_user_data;
 	if (atomic_read(&con->msgr->stopping)) {
@@ -2271,7 +2271,7 @@
 		con->in_base_pos = -front_len - middle_len - data_len -
 			sizeof(m->footer);
 		con->in_tag = CEPH_MSGR_TAG_READY;
-		return 0;
+		return 1;
 	} else if ((s64)seq - (s64)con->in_seq > 1) {
 		pr_err("read_partial_message bad seq %lld expected %lld\n",
 		       seq, con->in_seq + 1);
@@ -2304,7 +2304,7 @@
 				sizeof(m->footer);
 			con->in_tag = CEPH_MSGR_TAG_READY;
 			con->in_seq++;
-			return 0;
+			return 1;
 		}
 
 		BUG_ON(!con->in_msg);
diff -ur a/net/ceph/osd_client.c b/net/ceph/osd_client.c
--- a/net/ceph/osd_client.c	2017-03-23 15:05:27.000000000 +0100
+++ b/net/ceph/osd_client.c	2017-03-14 02:43:17.000000000 +0100
@@ -1702,20 +1702,29 @@
 		err = __map_request(osdc, req,
 				    force_resend || force_resend_writes);
 		dout("__map_request returned %d\n", err);
-		if (err == 0)
-			continue;  /* no change and no osd was specified */
 		if (err < 0)
 			continue;  /* hrm! */
-		if (req->r_osd == NULL) {
-			dout("tid %llu maps to no valid osd\n", req->r_tid);
-			needmap++;  /* request a newer map */
-			continue;
-		}
+		if (req->r_osd == NULL || err > 0) {
+			if (req->r_osd == NULL) {
+				dout("lingering %p tid %llu maps to no osd\n",
+				     req, req->r_tid);
+				/*
+				 * A homeless lingering request makes
+				 * no sense, as it's job is to keep
+				 * a particular OSD connection open.
+				 * Request a newer map and kick the
+				 * request, knowing that it won't be
+				 * resent until we actually get a map
+				 * that can tell us where to send it.
+				 */
+				needmap++;
+			}
 
-		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
-		     req->r_osd ? req->r_osd->o_osd : -1);
-		__register_request(osdc, req);
-		__unregister_linger_request(osdc, req);
+			dout("kicking lingering %p tid %llu osd%d\n", req,
+			     req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
+			__register_request(osdc, req);
+			__unregister_linger_request(osdc, req);
+		}
 	}
 	reset_changed_osds(osdc);
 	mutex_unlock(&osdc->request_mutex);
diff -ur a/net/ceph/osdmap.c b/net/ceph/osdmap.c
--- a/net/ceph/osdmap.c	2017-03-23 15:05:24.000000000 +0100
+++ b/net/ceph/osdmap.c	2017-03-14 02:43:16.000000000 +0100
@@ -89,7 +89,7 @@
 {
 	int j;
 	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
-	ceph_decode_32_safe(p, end, b->num_nodes, bad);
+	ceph_decode_8_safe(p, end, b->num_nodes, bad);
 	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
 	if (b->node_weights == NULL)
 		return -ENOMEM;
diff -ur a/net/core/datagram.c b/net/core/datagram.c
--- a/net/core/datagram.c	2017-03-23 15:04:30.000000000 +0100
+++ b/net/core/datagram.c	2017-03-14 02:42:19.000000000 +0100
@@ -83,10 +83,40 @@
 	goto out;
 }
 
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+
+	if (skb->peeked)
+		return skb;
+
+	if (!skb_shared(skb))
+		goto done;
+
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return ERR_PTR(-ENOMEM);
+
+	skb->prev->next = nskb;
+	skb->next->prev = nskb;
+	nskb->prev = skb->prev;
+	nskb->next = skb->next;
+
+	consume_skb(skb);
+	skb = nskb;
+
+done:
+	skb->peeked = 1;
+
+	return skb;
+}
+
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 				    int *peeked, int *off, int *err)
 {
+	struct sk_buff_head *queue = &sk->sk_receive_queue;
 	struct sk_buff *skb, *last;
+	unsigned long cpu_flags;
 	long timeo;
 	 
 	int error = sock_error(sk);
@@ -98,8 +128,6 @@
 
 	do {
 		 
-		unsigned long cpu_flags;
-		struct sk_buff_head *queue = &sk->sk_receive_queue;
 		int _off = *off;
 
 		last = (struct sk_buff *)queue;
@@ -113,7 +141,12 @@
 					_off -= skb->len;
 					continue;
 				}
-				skb->peeked = 1;
+
+				skb = skb_set_peeked(skb);
+				error = PTR_ERR(skb);
+				if (IS_ERR(skb))
+					goto unlock_err;
+
 				atomic_inc(&skb->users);
 			} else
 				__skb_unlink(skb, queue);
@@ -132,6 +165,8 @@
 
 	return NULL;
 
+unlock_err:
+	spin_unlock_irqrestore(&queue->lock, cpu_flags);
 no_packet:
 	*err = error;
 	return NULL;
@@ -612,7 +647,8 @@
 	if (likely(!sum)) {
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 			netdev_rx_csum_fault(skb->dev);
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		if (!skb_shared(skb))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 	return sum;
 }
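For context (not part of the patch): skb_set_peeked() clones a shared skb and splices the clone into the receive queue in place of the original, so the peeked flag is only ever set on a private copy. The list surgery is the ordinary doubly-linked "replace node" operation, sketched here on a hypothetical minimal node type:

	struct node {
		struct node *prev, *next;
	};

	/* Put @copy where @shared currently sits; @shared is left unlinked. */
	static void list_replace_node(struct node *shared, struct node *copy)
	{
		copy->prev = shared->prev;
		copy->next = shared->next;
		shared->prev->next = copy;	/* predecessor now points at the copy */
		shared->next->prev = copy;	/* successor now points back at the copy */
	}

In the kernel code the dropped reference on the original is handled by consume_skb(), and the whole sequence runs under the receive-queue lock, so no concurrent reader can observe a half-spliced queue.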
diff -ur a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c	2017-03-23 15:04:36.000000000 +0100
+++ b/net/core/dev.c	2017-03-14 02:42:24.000000000 +0100
@@ -2716,8 +2716,6 @@
 
 	pt_prev = NULL;
 
-	rcu_read_lock();
-
 another_round:
 	skb->skb_iif = skb->dev->ifindex;
 
@@ -2727,7 +2725,7 @@
 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = vlan_untag(skb);
 		if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2752,7 +2750,7 @@
 #ifdef CONFIG_NET_CLS_ACT
 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 	if (!skb)
-		goto unlock;
+		goto out;
 ncls:
 #endif
 
@@ -2767,7 +2765,7 @@
 		if (vlan_do_receive(&skb))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -2779,7 +2777,7 @@
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
 			ret = NET_RX_SUCCESS;
-			goto unlock;
+			goto out;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -2825,8 +2823,6 @@
 		ret = NET_RX_DROP;
 	}
 
-unlock:
-	rcu_read_unlock();
 out:
 	return ret;
 }
@@ -2849,29 +2845,30 @@
 
 int netif_receive_skb(struct sk_buff *skb)
 {
+	int ret;
+
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
 			return ret;
 		}
-		rcu_read_unlock();
 	}
 #endif
-	return __netif_receive_skb(skb);
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -3261,8 +3258,10 @@
 		unsigned int qlen;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
diff -ur a/net/core/dst.c b/net/core/dst.c
--- a/net/core/dst.c	2017-03-23 15:04:30.000000000 +0100
+++ b/net/core/dst.c	2017-03-14 02:42:19.000000000 +0100
@@ -279,10 +279,11 @@
 {
 	if (dst) {
 		int newrefcnt;
+		unsigned short nocache = dst->flags & DST_NOCACHE;
 
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
 		WARN_ON(newrefcnt < 0);
-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+		if (!newrefcnt && unlikely(nocache))
 			call_rcu(&dst->rcu_head, dst_destroy_rcu);
 	}
 }
diff -ur a/net/core/ethtool.c b/net/core/ethtool.c
--- a/net/core/ethtool.c	2017-03-23 15:04:33.000000000 +0100
+++ b/net/core/ethtool.c	2017-03-14 02:42:22.000000000 +0100
@@ -1066,7 +1066,7 @@
 
 	gstrings.len = ret;
 
-	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+	data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
 	if (!data)
 		return -ENOMEM;
 
diff -ur a/net/core/fib_rules.c b/net/core/fib_rules.c
--- a/net/core/fib_rules.c	2017-03-23 15:04:32.000000000 +0100
+++ b/net/core/fib_rules.c	2017-03-14 02:42:20.000000000 +0100
@@ -596,15 +596,17 @@
 {
 	int idx = 0;
 	struct fib_rule *rule;
+	int err = 0;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
 		if (idx < cb->args[1])
 			goto skip;
 
-		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
-				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
-				     NLM_F_MULTI, ops) < 0)
+		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
+				       NLM_F_MULTI, ops);
+		if (err < 0)
 			break;
 skip:
 		idx++;
@@ -613,7 +615,7 @@
 	cb->args[1] = idx;
 	rules_ops_put(ops);
 
-	return skb->len;
+	return err;
 }
 
 static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -629,7 +631,9 @@
 		if (ops == NULL)
 			return -EAFNOSUPPORT;
 
-		return dump_rules(skb, cb, ops);
+		dump_rules(skb, cb, ops);
+
+		return skb->len;
 	}
 
 	rcu_read_lock();
diff -ur a/net/core/pktgen.c b/net/core/pktgen.c
--- a/net/core/pktgen.c	2017-03-23 15:04:36.000000000 +0100
+++ b/net/core/pktgen.c	2017-03-14 02:42:24.000000000 +0100
@@ -3369,8 +3369,10 @@
 	pktgen_rem_thread(t);
 
 	/* Wait for kthread_stop */
-	while (!kthread_should_stop()) {
+	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
 		schedule();
 	}
 	__set_current_state(TASK_RUNNING);
diff -ur a/net/core/rtnetlink.c b/net/core/rtnetlink.c
--- a/net/core/rtnetlink.c	2017-03-23 15:04:33.000000000 +0100
+++ b/net/core/rtnetlink.c	2017-03-14 02:42:21.000000000 +0100
@@ -899,14 +899,16 @@
 		goto nla_put_failure;
 
 	if (1) {
-		struct rtnl_link_ifmap map = {
-			.mem_start   = dev->mem_start,
-			.mem_end     = dev->mem_end,
-			.base_addr   = dev->base_addr,
-			.irq         = dev->irq,
-			.dma         = dev->dma,
-			.port        = dev->if_port,
-		};
+		struct rtnl_link_ifmap map;
+
+		memset(&map, 0, sizeof(map));
+		map.mem_start   = dev->mem_start;
+		map.mem_end     = dev->mem_end;
+		map.base_addr   = dev->base_addr;
+		map.irq         = dev->irq;
+		map.dma         = dev->dma;
+		map.port        = dev->if_port;
+
 		if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
 			goto nla_put_failure;
 	}
diff -ur a/net/core/scm.c b/net/core/scm.c
--- a/net/core/scm.c	2017-03-23 15:04:32.000000000 +0100
+++ b/net/core/scm.c	2017-03-14 02:42:20.000000000 +0100
@@ -86,6 +86,7 @@
 		*fplp = fpl;
 		fpl->count = 0;
 		fpl->max = SCM_MAX_FD;
+		fpl->user = NULL;
 	}
 	fpp = &fpl->fp[fpl->count];
 
@@ -106,6 +107,10 @@
 		*fpp++ = file;
 		fpl->count++;
 	}
+
+	if (!fpl->user)
+		fpl->user = get_uid(current_user());
+
 	return num;
 }
 
@@ -118,6 +123,7 @@
 		scm->fp = NULL;
 		for (i=fpl->count-1; i>=0; i--)
 			fput(fpl->fp[i]);
+		free_uid(fpl->user);
 		kfree(fpl);
 	}
 }
@@ -305,6 +311,8 @@
 			err = put_user(cmlen, &cm->cmsg_len);
 		if (!err) {
 			cmlen = CMSG_SPACE(i*sizeof(int));
+			if (msg->msg_controllen < cmlen)
+				cmlen = msg->msg_controllen;
 			msg->msg_control += cmlen;
 			msg->msg_controllen -= cmlen;
 		}
@@ -334,6 +342,7 @@
 		for (i = 0; i < fpl->count; i++)
 			get_file(fpl->fp[i]);
 		new_fpl->max = new_fpl->count;
+		new_fpl->user = get_uid(fpl->user);
 	}
 	return new_fpl;
 }
diff -ur a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c	2017-03-23 15:04:34.000000000 +0100
+++ b/net/core/skbuff.c	2017-03-14 02:42:22.000000000 +0100
@@ -2711,11 +2711,12 @@
  */
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
 {
+	unsigned char *data = skb->data;
+
 	BUG_ON(len > skb->len);
-	skb->len -= len;
-	BUG_ON(skb->len < skb->data_len);
-	skb_postpull_rcsum(skb, skb->data, len);
-	return skb->data += len;
+	__skb_pull(skb, len);
+	skb_postpull_rcsum(skb, data, len);
+	return skb->data;
 }
 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
 
@@ -3271,8 +3272,6 @@
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int len = skb->len;
-
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned int)sk->sk_rcvbuf)
 		return -ENOMEM;
@@ -3287,7 +3286,7 @@
 
 	skb_queue_tail(&sk->sk_error_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, len);
+		sk->sk_data_ready(sk);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
diff -ur a/net/core/sock.c b/net/core/sock.c
--- a/net/core/sock.c	2017-03-23 15:04:34.000000000 +0100
+++ b/net/core/sock.c	2017-03-14 02:42:22.000000000 +0100
@@ -269,8 +269,6 @@
 	}
 }
 
-#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
-
 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 {
 	if (sk->sk_flags & flags) {
@@ -315,7 +313,7 @@
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
+		sk->sk_data_ready(sk);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
@@ -1846,7 +1844,7 @@
 	rcu_read_unlock();
 }
 
-static void sock_def_readable(struct sock *sk, int len)
+static void sock_def_readable(struct sock *sk)
 {
 	struct socket_wq *wq;
 
diff -ur a/net/dccp/input.c b/net/dccp/input.c
--- a/net/dccp/input.c	2017-03-23 15:05:27.000000000 +0100
+++ b/net/dccp/input.c	2017-03-14 02:43:19.000000000 +0100
@@ -28,7 +28,7 @@
 	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	skb_set_owner_r(skb, sk);
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 }
 
 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
diff -ur a/net/dccp/minisocks.c b/net/dccp/minisocks.c
--- a/net/dccp/minisocks.c	2017-03-23 15:05:27.000000000 +0100
+++ b/net/dccp/minisocks.c	2017-03-14 02:43:19.000000000 +0100
@@ -240,7 +240,7 @@
 
 		/* Wakeup parent, send SIGIO */
 		if (state == DCCP_RESPOND && child->sk_state != state)
-			parent->sk_data_ready(parent, 0);
+			parent->sk_data_ready(parent);
 	} else {
 		/* Alas, it is possible again, because we do lookup
 		 * in main socket hash table and lock on listening
diff -ur a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
--- a/net/decnet/af_decnet.c	2017-03-23 15:04:40.000000000 +0100
+++ b/net/decnet/af_decnet.c	2017-03-14 02:42:28.000000000 +0100
@@ -670,6 +670,9 @@
 {
 	struct sock *sk;
 
+	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+		return -EINVAL;
+
 	if (!net_eq(net, &init_net))
 		return -EAFNOSUPPORT;
 
diff -ur a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
--- a/net/decnet/dn_nsp_in.c	2017-03-23 15:04:38.000000000 +0100
+++ b/net/decnet/dn_nsp_in.c	2017-03-14 02:42:26.000000000 +0100
@@ -583,7 +583,6 @@
 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
 {
 	int err;
-	int skb_len;
 
 	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
@@ -598,12 +597,11 @@
 	if (err)
 		goto out;
 
-	skb_len = skb->len;
 	skb_set_owner_r(skb, sk);
 	skb_queue_tail(queue, skb);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
+		sk->sk_data_ready(sk);
 out:
 	return err;
 }
diff -ur a/net/decnet/dn_route.c b/net/decnet/dn_route.c
--- a/net/decnet/dn_route.c	2017-03-23 15:04:39.000000000 +0100
+++ b/net/decnet/dn_route.c	2017-03-14 02:42:28.000000000 +0100
@@ -1026,10 +1026,13 @@
 	if (!fld.daddr) {
 		fld.daddr = fld.saddr;
 
-		err = -EADDRNOTAVAIL;
 		if (dev_out)
 			dev_put(dev_out);
+		err = -EINVAL;
 		dev_out = init_net.loopback_dev;
+		if (!dev_out->dn_ptr)
+			goto out;
+		err = -EADDRNOTAVAIL;
 		dev_hold(dev_out);
 		if (!fld.daddr) {
 			fld.daddr =
@@ -1102,6 +1105,8 @@
 		if (dev_out == NULL)
 			goto out;
 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+		if (!dn_db)
+			goto e_inval;
 		/* Possible improvement - check all devices for local addr */
 		if (dn_dev_islocal(dev_out, fld.daddr)) {
 			dev_put(dev_out);
@@ -1143,6 +1148,8 @@
 			dev_put(dev_out);
 		dev_out = init_net.loopback_dev;
 		dev_hold(dev_out);
+		if (!dev_out->dn_ptr)
+			goto e_inval;
 		fld.flowidn_oif = dev_out->ifindex;
 		if (res.fi)
 			dn_fib_info_put(res.fi);
diff -ur a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
--- a/net/ipv4/af_inet.c	2017-03-23 15:05:15.000000000 +0100
+++ b/net/ipv4/af_inet.c	2017-03-14 02:43:07.000000000 +0100
@@ -287,6 +287,9 @@
 		if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 			build_ehash_secret();
 
+	if (protocol < 0 || protocol >= IPPROTO_MAX)
+		return -EINVAL;
+
 	sock->state = SS_UNCONNECTED;
 
 	/* Look for the requested type/protocol pair. */
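
The af_inet.c hunk above, like the DECnet, IrDA and IPv6 hunks elsewhere in this patch, adds a range check on the protocol argument before socket creation proceeds. A minimal userspace sketch of the same validation; the bound and the function are illustrative, not the kernel's:

#include <errno.h>
#include <stdio.h>

#define PROTOCOL_MAX 255	/* illustrative bound; the hunks check IPPROTO_MAX or SK_PROTOCOL_MAX */

static int create_sock(int protocol)
{
	/* Reject out-of-range values before they index any protocol table. */
	if (protocol < 0 || protocol > PROTOCOL_MAX)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("protocol 6    -> %d\n", create_sock(6));
	printf("protocol 4096 -> %d\n", create_sock(4096));
	return 0;
}
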
diff -ur a/net/ipv4/datagram.c b/net/ipv4/datagram.c
--- a/net/ipv4/datagram.c	2017-03-23 15:05:13.000000000 +0100
+++ b/net/ipv4/datagram.c	2017-03-14 02:43:05.000000000 +0100
@@ -20,7 +20,7 @@
 #include <net/route.h>
 #include <net/tcp_states.h>
 
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -38,8 +38,6 @@
 
 	sk_dst_reset(sk);
 
-	lock_sock(sk);
-
 	oif = sk->sk_bound_dev_if;
 	saddr = inet->inet_saddr;
 	if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -80,9 +78,19 @@
 	sk_dst_set(sk, &rt->dst);
 	err = 0;
 out:
-	release_sock(sk);
 	return err;
 }
+EXPORT_SYMBOL(__ip4_datagram_connect);
+
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+	int res;
+
+	lock_sock(sk);
+	res = __ip4_datagram_connect(sk, uaddr, addr_len);
+	release_sock(sk);
+	return res;
+}
 EXPORT_SYMBOL(ip4_datagram_connect);
 
 /* Because UDP xmit path can manipulate sk_dst_cache without holding
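
The datagram.c change splits the connect path into a lockless __ip4_datagram_connect() helper and a thin ip4_datagram_connect() wrapper that takes the socket lock, so callers that already hold the lock (the IPv6 datagram code further below) can reuse the helper. A generic sketch of the locked-wrapper / unlocked-helper pattern, with a pthread mutex standing in for lock_sock():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int obj_state;

/* Caller must already hold obj_lock. */
static int __do_connect(int target)
{
	obj_state = target;
	return 0;
}

/* Public entry point: takes the lock, then delegates to the helper. */
static int do_connect(int target)
{
	int res;

	pthread_mutex_lock(&obj_lock);
	res = __do_connect(target);
	pthread_mutex_unlock(&obj_lock);
	return res;
}

int main(void)
{
	printf("connect -> %d, state %d\n", do_connect(42), obj_state);
	return 0;
}
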
diff -ur a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
--- a/net/ipv4/ip_fragment.c	2017-03-23 15:05:16.000000000 +0100
+++ b/net/ipv4/ip_fragment.c	2017-03-14 02:43:08.000000000 +0100
@@ -354,7 +354,7 @@
 	ihl = ip_hdrlen(skb);
 
 	/* Determine the position of this fragment. */
-	end = offset + skb->len - ihl;
+	end = offset + skb->len - skb_network_offset(skb) - ihl;
 	err = -EINVAL;
 
 	/* Is this the final fragment? */
@@ -384,7 +384,7 @@
 		goto err;
 
 	err = -ENOMEM;
-	if (pskb_pull(skb, ihl) == NULL)
+	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
 		goto err;
 
 	err = pskb_trim_rcsum(skb, end - offset);
@@ -624,6 +624,9 @@
 	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
 	iph->tot_len = htons(len);
 	iph->tos |= ecn;
+
+	ip_send_check(iph);
+
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	qp->q.fragments_tail = NULL;
diff -ur a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
--- a/net/ipv4/ipmr.c	2017-03-23 15:05:17.000000000 +0100
+++ b/net/ipv4/ipmr.c	2017-03-14 02:43:09.000000000 +0100
@@ -136,7 +136,7 @@
 			      struct mfc_cache *c, struct rtmsg *rtm);
 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
 				 int cmd);
-static void mroute_clean_tables(struct mr_table *mrt);
+static void mroute_clean_tables(struct mr_table *mrt, bool all);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -348,7 +348,7 @@
 static void ipmr_free_table(struct mr_table *mrt)
 {
 	del_timer_sync(&mrt->ipmr_expire_timer);
-	mroute_clean_tables(mrt);
+	mroute_clean_tables(mrt, true);
 	kfree(mrt);
 }
 
@@ -1198,7 +1198,7 @@
  *	Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct mr_table *mrt)
+static void mroute_clean_tables(struct mr_table *mrt, bool all)
 {
 	int i;
 	LIST_HEAD(list);
@@ -1207,8 +1207,9 @@
 	/* Shut down all active vif entries */
 
 	for (i = 0; i < mrt->maxvif; i++) {
-		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
-			vif_delete(mrt, i, 0, &list);
+		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
+			continue;
+		vif_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1216,7 +1217,7 @@
 
 	for (i = 0; i < MFC_LINES; i++) {
 		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
-			if (c->mfc_flags & MFC_STATIC)
+			if (!all && (c->mfc_flags & MFC_STATIC))
 				continue;
 			list_del_rcu(&c->list);
 			mroute_netlink_event(mrt, c, RTM_DELROUTE);
@@ -1251,7 +1252,7 @@
 						    NETCONFA_IFINDEX_ALL,
 						    net->ipv4.devconf_all);
 			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
-			mroute_clean_tables(mrt);
+			mroute_clean_tables(mrt, false);
 		}
 	}
 	rtnl_unlock();
@@ -1669,8 +1670,8 @@
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
 
-	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
+	IP_INC_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
@@ -1732,7 +1733,7 @@
 		 * to blackhole.
 		 */
 
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 		ip_rt_put(rt);
 		goto out_free;
 	}
diff -ur a/net/ipv4/ping.c b/net/ipv4/ping.c
--- a/net/ipv4/ping.c	2017-03-23 15:05:16.000000000 +0100
+++ b/net/ipv4/ping.c	2017-03-14 02:43:08.000000000 +0100
@@ -138,6 +138,7 @@
 	if (sk_hashed(sk)) {
 		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
+		sk_nulls_node_init(&sk->sk_nulls_node);
 		sock_put(sk);
 		isk->inet_num = 0;
 		isk->inet_sport = 0;
diff -ur a/net/ipv4/route.c b/net/ipv4/route.c
--- a/net/ipv4/route.c	2017-03-23 15:05:18.000000000 +0100
+++ b/net/ipv4/route.c	2017-03-14 02:43:10.000000000 +0100
@@ -869,6 +869,10 @@
 	bool send;
 	int code;
 
+	/* IP on this device is disabled. */
+	if (!in_dev)
+		goto out;
+
 	net = dev_net(rt->dst.dev);
 	if (!IN_DEV_FORWARD(in_dev)) {
 		switch (rt->dst.error) {
@@ -1869,6 +1873,18 @@
 		 */
 		if (fi && res->prefixlen < 4)
 			fi = NULL;
+	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+		   (orig_oif != dev_out->ifindex)) {
+		/* For local routes that require a particular output interface
+		 * we do not want to cache the result.  Caching the result
+		 * causes incorrect behaviour when there are multiple source
+		 * addresses on the interface, the end result being that if the
+		 * intended recipient is waiting on that interface for the
+		 * packet he won't receive it because it will be delivered on
+		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
+		 * be set to the loopback interface as well.
+		 */
+		fi = NULL;
 	}
 
 	fnhe = NULL;
diff -ur a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
--- a/net/ipv4/tcp_input.c	2017-03-23 15:05:23.000000000 +0100
+++ b/net/ipv4/tcp_input.c	2017-03-14 02:43:14.000000000 +0100
@@ -1,3 +1,6 @@
+#ifndef MY_ABC_HERE
+#define MY_ABC_HERE
+#endif
 /*
  * INET		An implementation of the TCP/IP protocol suite for the LINUX
  *		operating system.  INET is implemented using the  BSD Socket
@@ -87,7 +90,11 @@
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 /* rfc5961 challenge ack rate limiting */
+#ifdef MY_ABC_HERE
+int sysctl_tcp_challenge_ack_limit = 1000;
+#else
 int sysctl_tcp_challenge_ack_limit = 100;
+#endif /* MY_ABC_HERE */
 
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
@@ -3287,13 +3294,31 @@
 	/* unprotected vars, we dont care of overwrites */
 	static u32 challenge_timestamp;
 	static unsigned int challenge_count;
+#ifdef MY_ABC_HERE
+	u32 count;
+#endif /* MY_ABC_HERE */
 	u32 now = jiffies / HZ;
 
 	if (now != challenge_timestamp) {
+#ifdef MY_ABC_HERE
+		u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
+#endif /* MY_ABC_HERE */
 		challenge_timestamp = now;
+#ifdef MY_ABC_HERE
+		WRITE_ONCE(challenge_count, half +
+			   prandom_u32_max(sysctl_tcp_challenge_ack_limit));
+#else
 		challenge_count = 0;
+#endif /* MY_ABC_HERE */
 	}
+#ifdef MY_ABC_HERE
+	count = READ_ONCE(challenge_count);
+	if (count > 0) {
+		WRITE_ONCE(challenge_count, count - 1);
+#else
 	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+#endif /* MY_ABC_HERE */
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
@@ -4406,7 +4431,7 @@
 		if (eaten > 0)
 			kfree_skb_partial(skb, fragstolen);
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 		return;
 	}
 
@@ -4915,7 +4940,7 @@
 				BUG();
 			tp->urg_data = TCP_URG_VALID | tmp;
 			if (!sock_flag(sk, SOCK_DEAD))
-				sk->sk_data_ready(sk, 0);
+				sk->sk_data_ready(sk);
 		}
 	}
 }
@@ -5001,11 +5026,11 @@
 		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
 		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
 			tp->ucopy.wakeup = 1;
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 		}
 	} else if (chunk > 0) {
 		tp->ucopy.wakeup = 1;
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 	}
 out:
 	return copied_early;
@@ -5276,7 +5301,7 @@
 #endif
 			if (eaten)
 				kfree_skb_partial(skb, fragstolen);
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 			return 0;
 		}
 	}
@@ -5575,6 +5600,7 @@
 		}
 
 		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+		tp->copied_seq = tp->rcv_nxt;
 		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
 
 		/* RFC1323: The window in SYN & SYN/ACK segments is
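
The challenge-ACK hunk in tcp_input.c replaces a plain per-second counter with a randomized budget: at the start of each second the allowance is reset to half the limit plus a random amount, and each challenge ACK consumes one unit, which makes the remaining count much harder to probe from off path. A userspace sketch of the same budget logic; the names and the rand()-based randomness are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ACK_LIMIT 1000

static time_t window_start;
static unsigned int budget;

/* Returns 1 if a challenge ACK may be sent now, 0 if it is rate limited. */
static int challenge_ack_allowed(void)
{
	time_t now = time(NULL);

	if (now != window_start) {
		unsigned int half = (ACK_LIMIT + 1) / 2;

		window_start = now;
		budget = half + (unsigned int)(rand() % ACK_LIMIT);
	}
	if (budget > 0) {
		budget--;
		return 1;
	}
	return 0;
}

int main(void)
{
	int sent = 0;

	srand((unsigned int)time(NULL));
	for (int i = 0; i < 5000; i++)
		sent += challenge_ack_allowed();
	printf("sent %d of 5000 requested challenge ACKs this second\n", sent);
	return 0;
}
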
diff -ur a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
--- a/net/ipv4/tcp_ipv4.c	2017-03-23 15:05:23.000000000 +0100
+++ b/net/ipv4/tcp_ipv4.c	2017-03-14 02:43:14.000000000 +0100
@@ -1012,7 +1012,8 @@
 	}
 
 	md5sig = rcu_dereference_protected(tp->md5sig_info,
-					   sock_owned_by_user(sk));
+					   sock_owned_by_user(sk) ||
+					   lockdep_is_held(&sk->sk_lock.slock));
 	if (!md5sig) {
 		md5sig = kmalloc(sizeof(*md5sig), gfp);
 		if (!md5sig)
@@ -1455,7 +1456,7 @@
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		tp->syn_data_acked = 1;
 	}
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 	bh_unlock_sock(child);
 	sock_put(child);
 	WARN_ON(req->sk == NULL);
diff -ur a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
--- a/net/ipv4/tcp_minisocks.c	2017-03-23 15:05:16.000000000 +0100
+++ b/net/ipv4/tcp_minisocks.c	2017-03-14 02:43:08.000000000 +0100
@@ -753,7 +753,7 @@
 					    skb->len);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
-			parent->sk_data_ready(parent, 0);
+			parent->sk_data_ready(parent);
 	} else {
 		/* Alas, it is possible again, because we do lookup
 		 * in main socket hash table and lock on listening
diff -ur a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
--- a/net/ipv4/tcp_yeah.c	2017-03-23 15:05:16.000000000 +0100
+++ b/net/ipv4/tcp_yeah.c	2017-03-14 02:43:09.000000000 +0100
@@ -221,7 +221,7 @@
 	yeah->fast_count = 0;
 	yeah->reno_count = max(yeah->reno_count>>1, 2U);
 
-	return tp->snd_cwnd - reduction;
+	return max_t(int, tp->snd_cwnd - reduction, 2);
 }
 
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
diff -ur a/net/ipv4/udp.c b/net/ipv4/udp.c
--- a/net/ipv4/udp.c	2017-03-23 15:05:19.000000000 +0100
+++ b/net/ipv4/udp.c	2017-03-14 02:43:11.000000000 +0100
@@ -1206,6 +1206,7 @@
 	int peeked, off = 0;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool checksum_valid = false;
 	bool slow;
 
 	if (flags & MSG_ERRQUEUE)
@@ -1231,11 +1232,12 @@
 	 */
 
 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
-		if (udp_lib_checksum_complete(skb))
+		checksum_valid = !udp_lib_checksum_complete(skb);
+		if (!checksum_valid)
 			goto csum_copy_err;
 	}
 
-	if (skb_csum_unnecessary(skb))
+	if (checksum_valid || skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 					      msg->msg_iov, copied);
 	else {
diff -ur a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
--- a/net/ipv4/xfrm4_policy.c	2017-03-23 15:05:13.000000000 +0100
+++ b/net/ipv4/xfrm4_policy.c	2017-03-14 02:43:06.000000000 +0100
@@ -235,7 +235,7 @@
 	.destroy =		xfrm4_dst_destroy,
 	.ifdown =		xfrm4_dst_ifdown,
 	.local_out =		__ip_local_out,
-	.gc_thresh =		1024,
+	.gc_thresh =		32768,
 };
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
diff -ur a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
--- a/net/ipv6/addrlabel.c	2017-03-23 15:05:03.000000000 +0100
+++ b/net/ipv6/addrlabel.c	2017-03-14 02:42:53.000000000 +0100
@@ -558,7 +558,7 @@
 
 	rcu_read_lock();
 	p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
-	if (p && ip6addrlbl_hold(p))
+	if (p && !ip6addrlbl_hold(p))
 		p = NULL;
 	lseq = ip6addrlbl_table.seq;
 	rcu_read_unlock();
diff -ur a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
--- a/net/ipv6/af_inet6.c	2017-03-23 15:05:04.000000000 +0100
+++ b/net/ipv6/af_inet6.c	2017-03-14 02:42:54.000000000 +0100
@@ -94,6 +94,9 @@
 	    !inet_ehash_secret)
 		build_ehash_secret();
 
+	if (protocol < 0 || protocol >= IPPROTO_MAX)
+		return -EINVAL;
+
 lookup_protocol:
 	err = -ESOCKTNOSUPPORT;
 	rcu_read_lock();
@@ -283,11 +286,9 @@
 #ifdef MY_ABC_HERE
 				if (__ipv6_addr_is_link_local(addr_type) && !sk->sk_bound_dev_if) {
 					for_each_netdev(net, dev) {
-						unsigned flags = dev_get_flags(dev);
 						struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, &addr->sin6_addr, dev, 1);
 
-						if (ifp != NULL && (flags & IFF_RUNNING) &&
-							!(flags & (IFF_LOOPBACK | IFF_SLAVE))) {
+						if (ifp != NULL) {
 							sk->sk_bound_dev_if = dev->ifindex;
 							in6_ifa_put(ifp);
 							break;
diff -ur a/net/ipv6/datagram.c b/net/ipv6/datagram.c
--- a/net/ipv6/datagram.c	2017-03-23 15:05:01.000000000 +0100
+++ b/net/ipv6/datagram.c	2017-03-14 02:42:51.000000000 +0100
@@ -31,7 +31,7 @@
 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock      	*inet = inet_sk(sk);
@@ -47,7 +47,7 @@
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
 			return -EAFNOSUPPORT;
-		err = ip4_datagram_connect(sk, uaddr, addr_len);
+		err = __ip4_datagram_connect(sk, uaddr, addr_len);
 		goto ipv4_connected;
 	}
 
@@ -88,9 +88,9 @@
 		sin.sin_addr.s_addr = daddr->s6_addr32[3];
 		sin.sin_port = usin->sin6_port;
 
-		err = ip4_datagram_connect(sk,
-					   (struct sockaddr *) &sin,
-					   sizeof(sin));
+		err = __ip4_datagram_connect(sk,
+					     (struct sockaddr *) &sin,
+					     sizeof(sin));
 
 ipv4_connected:
 		if (err)
@@ -199,6 +199,16 @@
 	fl6_sock_release(flowlabel);
 	return err;
 }
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+	int res;
+
+	lock_sock(sk);
+	res = __ip6_datagram_connect(sk, uaddr, addr_len);
+	release_sock(sk);
+	return res;
+}
 EXPORT_SYMBOL_GPL(ip6_datagram_connect);
 
 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
diff -ur a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
--- a/net/ipv6/exthdrs_core.c	2017-03-23 15:05:02.000000000 +0100
+++ b/net/ipv6/exthdrs_core.c	2017-03-14 02:42:52.000000000 +0100
@@ -257,7 +257,11 @@
 						*fragoff = _frag_off;
 					return hp->nexthdr;
 				}
-				return -ENOENT;
+				if (!found)
+					return -ENOENT;
+				if (fragoff)
+					*fragoff = _frag_off;
+				break;
 			}
 			hdrlen = 8;
 		} else if (nexthdr == NEXTHDR_AUTH) {
diff -ur a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
--- a/net/ipv6/exthdrs_offload.c	2017-03-23 15:05:02.000000000 +0100
+++ b/net/ipv6/exthdrs_offload.c	2017-03-14 02:42:52.000000000 +0100
@@ -36,6 +36,6 @@
 	return ret;
 
 out_rt:
-	inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+	inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
 	goto out;
 }
diff -ur a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
--- a/net/ipv6/ip6_fib.c	2017-03-23 15:05:02.000000000 +0100
+++ b/net/ipv6/ip6_fib.c	2017-03-14 02:42:52.000000000 +0100
@@ -1645,27 +1645,28 @@
 
 static DEFINE_SPINLOCK(fib6_gc_lock);
 
-void fib6_run_gc(unsigned long expires, struct net *net)
+void fib6_run_gc(unsigned long expires, struct net *net, bool force)
 {
-	if (expires != ~0UL) {
+	unsigned long now;
+
+	if (force) {
 		spin_lock_bh(&fib6_gc_lock);
-		gc_args.timeout = expires ? (int)expires :
-			net->ipv6.sysctl.ip6_rt_gc_interval;
-	} else {
-		if (!spin_trylock_bh(&fib6_gc_lock)) {
-			mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
-			return;
-		}
-		gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
+	} else if (!spin_trylock_bh(&fib6_gc_lock)) {
+		mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
+		return;
 	}
+	gc_args.timeout = expires ? (int)expires :
+			  net->ipv6.sysctl.ip6_rt_gc_interval;
 
 	gc_args.more = icmp6_dst_gc();
 
 	fib6_clean_all(net, fib6_age, 0, NULL);
+	now = jiffies;
+	net->ipv6.ip6_rt_last_gc = now;
 
 	if (gc_args.more)
 		mod_timer(&net->ipv6.ip6_fib_timer,
-			  round_jiffies(jiffies
+			  round_jiffies(now
 					+ net->ipv6.sysctl.ip6_rt_gc_interval));
 	else
 		del_timer(&net->ipv6.ip6_fib_timer);
@@ -1674,7 +1675,7 @@
 
 static void fib6_gc_timer_cb(unsigned long arg)
 {
-	fib6_run_gc(0, (struct net *)arg);
+	fib6_run_gc(0, (struct net *)arg, true);
 }
 
 static int __net_init fib6_net_init(struct net *net)
diff -ur a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
--- a/net/ipv6/ip6_gre.c	2017-03-23 15:05:04.000000000 +0100
+++ b/net/ipv6/ip6_gre.c	2017-03-14 02:42:54.000000000 +0100
@@ -358,6 +358,7 @@
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
 	ip6gre_tunnel_unlink(ign, netdev_priv(dev));
+	ip6_tnl_dst_reset(netdev_priv(dev));
 	dev_put(dev);
 }
 
@@ -1537,13 +1538,11 @@
 			return -EEXIST;
 	} else {
 		t = nt;
-
-		ip6gre_tunnel_unlink(ign, t);
-		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
-		ip6gre_tunnel_link(ign, t);
-		netdev_state_change(dev);
 	}
 
+	ip6gre_tunnel_unlink(ign, t);
+	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+	ip6gre_tunnel_link(ign, t);
 	return 0;
 }
 
diff -ur a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
--- a/net/ipv6/ip6_input.c	2017-03-23 15:05:00.000000000 +0100
+++ b/net/ipv6/ip6_input.c	2017-03-14 02:42:50.000000000 +0100
@@ -321,10 +321,10 @@
 				if (offset < 0)
 					goto out;
 
-				if (!ipv6_is_mld(skb, nexthdr, offset))
-					goto out;
+				if (ipv6_is_mld(skb, nexthdr, offset))
+					deliver = true;
 
-				deliver = true;
+				goto out;
 			}
 			/* unknown RA - process it normally */
 		}
diff -ur a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
--- a/net/ipv6/ip6mr.c	2017-03-23 15:05:04.000000000 +0100
+++ b/net/ipv6/ip6mr.c	2017-03-14 02:42:53.000000000 +0100
@@ -120,7 +120,7 @@
 			      int cmd);
 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
 			       struct netlink_callback *cb);
-static void mroute_clean_tables(struct mr6_table *mrt);
+static void mroute_clean_tables(struct mr6_table *mrt, bool all);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -336,8 +336,8 @@
 
 static void ip6mr_free_table(struct mr6_table *mrt)
 {
-	del_timer(&mrt->ipmr_expire_timer);
-	mroute_clean_tables(mrt);
+	del_timer_sync(&mrt->ipmr_expire_timer);
+	mroute_clean_tables(mrt, true);
 	kfree(mrt);
 }
 
@@ -551,7 +551,7 @@
 
 	if (it->cache == &mrt->mfc6_unres_queue)
 		spin_unlock_bh(&mfc_unres_lock);
-	else if (it->cache == mrt->mfc6_cache_array)
+	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
 		read_unlock(&mrt_lock);
 }
 
@@ -1535,7 +1535,7 @@
  *	Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct mr6_table *mrt)
+static void mroute_clean_tables(struct mr6_table *mrt, bool all)
 {
 	int i;
 	LIST_HEAD(list);
@@ -1545,8 +1545,9 @@
 	 *	Shut down all active vif entries
 	 */
 	for (i = 0; i < mrt->maxvif; i++) {
-		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
-			mif6_delete(mrt, i, &list);
+		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+			continue;
+		mif6_delete(mrt, i, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1555,7 +1556,7 @@
 	 */
 	for (i = 0; i < MFC6_LINES; i++) {
 		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
-			if (c->mfc_flags & MFC_STATIC)
+			if (!all && (c->mfc_flags & MFC_STATIC))
 				continue;
 			write_lock_bh(&mrt_lock);
 			list_del(&c->list);
@@ -1618,7 +1619,7 @@
 						     net->ipv6.devconf_all);
 			write_unlock_bh(&mrt_lock);
 
-			mroute_clean_tables(mrt);
+			mroute_clean_tables(mrt, false);
 			err = 0;
 			break;
 		}
diff -ur a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
--- a/net/ipv6/ip6_tunnel.c	2017-03-23 15:05:04.000000000 +0100
+++ b/net/ipv6/ip6_tunnel.c	2017-03-14 02:42:54.000000000 +0100
@@ -266,12 +266,12 @@
 
 	t = netdev_priv(dev);
 
+	dev->rtnl_link_ops = &ip6_link_ops;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out;
 
 	strcpy(t->parms.name, dev->name);
-	dev->rtnl_link_ops = &ip6_link_ops;
 
 	dev_hold(dev);
 	ip6_tnl_link(ip6n, t);
diff -ur a/net/ipv6/mcast.c b/net/ipv6/mcast.c
--- a/net/ipv6/mcast.c	2017-03-23 15:05:06.000000000 +0100
+++ b/net/ipv6/mcast.c	2017-03-14 02:42:56.000000000 +0100
@@ -1439,7 +1439,6 @@
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
 	} else {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 	}
@@ -1803,7 +1802,6 @@
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
 	} else
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 
diff -ur a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
--- a/net/ipv6/ndisc.c	2017-03-23 15:05:05.000000000 +0100
+++ b/net/ipv6/ndisc.c	2017-03-14 02:42:55.000000000 +0100
@@ -1588,7 +1588,7 @@
 	switch (event) {
 	case NETDEV_CHANGEADDR:
 		neigh_changeaddr(&nd_tbl, dev);
-		fib6_run_gc(~0UL, net);
+		fib6_run_gc(0, net, false);
 		idev = in6_dev_get(dev);
 		if (!idev)
 			break;
@@ -1598,7 +1598,7 @@
 		break;
 	case NETDEV_DOWN:
 		neigh_ifdown(&nd_tbl, dev);
-		fib6_run_gc(~0UL, net);
+		fib6_run_gc(0, net, false);
 		break;
 	case NETDEV_NOTIFY_PEERS:
 		ndisc_send_unsol_na(dev);
diff -ur a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c	2017-03-23 15:05:06.000000000 +0100
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c	2017-03-14 02:42:56.000000000 +0100
@@ -171,7 +171,7 @@
 /* Creation primitives. */
 static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 					 u32 user, struct in6_addr *src,
-					 struct in6_addr *dst, u8 ecn)
+					 struct in6_addr *dst, int iif, u8 ecn)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -181,6 +181,7 @@
 	arg.user = user;
 	arg.src = src;
 	arg.dst = dst;
+	arg.iif = iif;
 	arg.ecn = ecn;
 
 	read_lock_bh(&nf_frags.lock);
@@ -588,7 +589,7 @@
 	local_bh_enable();
 
 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
-		     ip6_frag_ecn(hdr));
+		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq == NULL) {
 		pr_debug("Can't find and can't create new queue\n");
 		goto ret_orig;
diff -ur a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
--- a/net/ipv6/reassembly.c	2017-03-23 15:05:04.000000000 +0100
+++ b/net/ipv6/reassembly.c	2017-03-14 02:42:54.000000000 +0100
@@ -111,7 +111,10 @@
 	return	fq->id == arg->id &&
 		fq->user == arg->user &&
 		ipv6_addr_equal(&fq->saddr, arg->src) &&
-		ipv6_addr_equal(&fq->daddr, arg->dst);
+		ipv6_addr_equal(&fq->daddr, arg->dst) &&
+		(arg->iif == fq->iif ||
+		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
+					       IPV6_ADDR_LINKLOCAL)));
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
@@ -180,7 +183,7 @@
 
 static __inline__ struct frag_queue *
 fq_find(struct net *net, __be32 id, const struct in6_addr *src,
-	const struct in6_addr *dst, u8 ecn)
+	const struct in6_addr *dst, int iif, u8 ecn)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -190,6 +193,7 @@
 	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
 	arg.src = src;
 	arg.dst = dst;
+	arg.iif = iif;
 	arg.ecn = ecn;
 
 	read_lock(&ip6_frags.lock);
@@ -558,7 +562,7 @@
 				 IPSTATS_MIB_REASMFAILS, evicted);
 
 	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
-		     ip6_frag_ecn(hdr));
+		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq != NULL) {
 		int ret;
 
diff -ur a/net/ipv6/route.c b/net/ipv6/route.c
--- a/net/ipv6/route.c	2017-03-23 15:05:05.000000000 +0100
+++ b/net/ipv6/route.c	2017-03-14 02:42:54.000000000 +0100
@@ -1334,7 +1334,6 @@
 
 static int ip6_dst_gc(struct dst_ops *ops)
 {
-	unsigned long now = jiffies;
 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
 	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
@@ -1344,13 +1343,12 @@
 	int entries;
 
 	entries = dst_entries_get_fast(ops);
-	if (time_after(rt_last_gc + rt_min_interval, now) &&
+	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
 	    entries <= rt_max_size)
 		goto out;
 
 	net->ipv6.ip6_rt_gc_expire++;
-	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
-	net->ipv6.ip6_rt_last_gc = now;
+	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
 	entries = dst_entries_get_slow(ops);
 	if (entries < ops->gc_thresh)
 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@ -2849,7 +2847,7 @@
 	net = (struct net *)ctl->extra1;
 	delay = net->ipv6.sysctl.flush_delay;
 	proc_dointvec(ctl, write, buffer, lenp, ppos);
-	fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
 	return 0;
 }
 
diff -ur a/net/ipv6/udp.c b/net/ipv6/udp.c
--- a/net/ipv6/udp.c	2017-03-23 15:05:04.000000000 +0100
+++ b/net/ipv6/udp.c	2017-03-14 02:42:54.000000000 +0100
@@ -367,6 +367,7 @@
 	int peeked, off = 0;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool checksum_valid = false;
 	int is_udp4;
 	bool slow;
 
@@ -398,11 +399,12 @@
 	 */
 
 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
-		if (udp_lib_checksum_complete(skb))
+		checksum_valid = !udp_lib_checksum_complete(skb);
+		if (!checksum_valid)
 			goto csum_copy_err;
 	}
 
-	if (skb_csum_unnecessary(skb))
+	if (checksum_valid || skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 					      msg->msg_iov, copied);
 	else {
@@ -836,11 +838,9 @@
 		int ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
-		/* a return value > 0 means to resubmit the input, but
-		 * it wants the return to be -protocol, or 0
-		 */
+		/* a return value > 0 means to resubmit the input */
 		if (ret > 0)
-			return -ret;
+			return ret;
 
 		return 0;
 	}
@@ -1211,9 +1211,6 @@
 	if (tclass < 0)
 		tclass = np->tclass;
 
-	if (dontfrag < 0)
-		dontfrag = np->dontfrag;
-
 	if (msg->msg_flags&MSG_CONFIRM)
 		goto do_confirm;
 back_from_confirm:
@@ -1232,6 +1229,8 @@
 	up->pending = AF_INET6;
 
 do_append_data:
+	if (dontfrag < 0)
+		dontfrag = np->dontfrag;
 	up->len += ulen;
 	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
 	err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
diff -ur a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
--- a/net/ipv6/xfrm6_policy.c	2017-03-23 15:05:04.000000000 +0100
+++ b/net/ipv6/xfrm6_policy.c	2017-03-14 02:42:54.000000000 +0100
@@ -284,7 +284,7 @@
 	.destroy =		xfrm6_dst_destroy,
 	.ifdown =		xfrm6_dst_ifdown,
 	.local_out =		__ip6_local_out,
-	.gc_thresh =		1024,
+	.gc_thresh =		32768,
 };
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
diff -ur a/net/irda/af_irda.c b/net/irda/af_irda.c
--- a/net/irda/af_irda.c	2017-03-23 15:04:28.000000000 +0100
+++ b/net/irda/af_irda.c	2017-03-14 02:42:16.000000000 +0100
@@ -1105,6 +1105,9 @@
 
 	IRDA_DEBUG(2, "%s()\n", __func__);
 
+	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+		return -EINVAL;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
diff -ur a/net/irda/irlmp.c b/net/irda/irlmp.c
--- a/net/irda/irlmp.c	2017-03-23 15:04:27.000000000 +0100
+++ b/net/irda/irlmp.c	2017-03-14 02:42:15.000000000 +0100
@@ -1876,7 +1876,7 @@
 	for (element = hashbin_get_first(iter->hashbin);
 	     element != NULL;
 	     element = hashbin_get_next(iter->hashbin)) {
-		if (!off || *off-- == 0) {
+		if (!off || (*off)-- == 0) {
 			/* NB: hashbin left locked */
 			return element;
 		}
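
The irlmp.c fix is a one-character precedence bug: *off-- parses as *(off--), which dereferences and then moves the pointer, whereas the intended (*off)-- decrements the value the pointer refers to. A standalone demonstration of the difference:

#include <stdio.h>

int main(void)
{
	long vals[2] = { 3, 7 };
	long *off = &vals[1];
	long got;

	got = *off--;	/* *(off--): reads vals[1], then moves the pointer back */
	printf("*off--   read %ld, pointer now at vals[%ld]\n", got, (long)(off - vals));

	off = &vals[1];
	got = (*off)--;	/* pointer stays put, the value it points to is decremented */
	printf("(*off)-- read %ld, vals[1] is now %ld\n", got, vals[1]);
	return 0;
}
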
diff -ur a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
--- a/net/iucv/af_iucv.c	2017-03-23 15:05:49.000000000 +0100
+++ b/net/iucv/af_iucv.c	2017-03-14 02:43:45.000000000 +0100
@@ -1755,7 +1755,7 @@
 
 	/* Wake up accept */
 	nsk->sk_state = IUCV_CONNECTED;
-	sk->sk_data_ready(sk, 1);
+	sk->sk_data_ready(sk);
 	err = 0;
 fail:
 	bh_unlock_sock(sk);
@@ -1966,7 +1966,7 @@
 	if (!err) {
 		iucv_accept_enqueue(sk, nsk);
 		nsk->sk_state = IUCV_CONNECTED;
-		sk->sk_data_ready(sk, 1);
+		sk->sk_data_ready(sk);
 	} else
 		iucv_sock_kill(nsk);
 	bh_unlock_sock(sk);
diff -ur a/net/key/af_key.c b/net/key/af_key.c
--- a/net/key/af_key.c	2017-03-23 15:05:18.000000000 +0100
+++ b/net/key/af_key.c	2017-03-14 02:43:09.000000000 +0100
@@ -205,7 +205,7 @@
 		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
 			skb_set_owner_r(*skb2, sk);
 			skb_queue_tail(&sk->sk_receive_queue, *skb2);
-			sk->sk_data_ready(sk, (*skb2)->len);
+			sk->sk_data_ready(sk);
 			*skb2 = NULL;
 			err = 0;
 		}
diff -ur a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
--- a/net/l2tp/l2tp_core.c	2017-03-23 15:04:40.000000000 +0100
+++ b/net/l2tp/l2tp_core.c	2017-03-14 02:42:28.000000000 +0100
@@ -1380,7 +1380,7 @@
 	tunnel = container_of(work, struct l2tp_tunnel, del_work);
 	sk = l2tp_tunnel_sock_lookup(tunnel);
 	if (!sk)
-		return;
+		goto out;
 
 	sock = sk->sk_socket;
 
@@ -1401,6 +1401,8 @@
 	}
 
 	l2tp_tunnel_sock_put(sk);
+out:
+	l2tp_tunnel_dec_refcount(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
@@ -1730,8 +1732,13 @@
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+	l2tp_tunnel_inc_refcount(tunnel);
 	l2tp_tunnel_closeall(tunnel);
-	return (false == queue_work(l2tp_wq, &tunnel->del_work));
+	if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+		l2tp_tunnel_dec_refcount(tunnel);
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
 
diff -ur a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
--- a/net/l2tp/l2tp_ip6.c	2017-03-23 15:04:38.000000000 +0100
+++ b/net/l2tp/l2tp_ip6.c	2017-03-14 02:42:27.000000000 +0100
@@ -135,12 +135,11 @@
 	struct l2tp_tunnel *tunnel = NULL;
 	int length;
 
-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
 	if (!pskb_may_pull(skb, 4))
 		goto discard;
 
+	/* Point to L2TP header */
+	optr = ptr = skb->data;
 	session_id = ntohl(*((__be32 *) ptr));
 	ptr += 4;
 
@@ -168,6 +167,9 @@
 		if (!pskb_may_pull(skb, length))
 			goto discard;
 
+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
 		pr_debug("%s: ip recv\n", tunnel->name);
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
diff -ur a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
--- a/net/l2tp/l2tp_ip.c	2017-03-23 15:04:38.000000000 +0100
+++ b/net/l2tp/l2tp_ip.c	2017-03-14 02:42:26.000000000 +0100
@@ -123,12 +123,11 @@
 	struct l2tp_tunnel *tunnel = NULL;
 	int length;
 
-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
 	if (!pskb_may_pull(skb, 4))
 		goto discard;
 
+	/* Point to L2TP header */
+	optr = ptr = skb->data;
 	session_id = ntohl(*((__be32 *) ptr));
 	ptr += 4;
 
@@ -156,6 +155,9 @@
 		if (!pskb_may_pull(skb, length))
 			goto discard;
 
+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
 		pr_debug("%s: ip recv\n", tunnel->name);
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
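
Both L2TP hunks (l2tp_ip.c and l2tp_ip6.c) move the optr = ptr = skb->data assignment to after pskb_may_pull() and re-read it after the second pull, because pulling can reallocate the skb's linear data and leave previously saved pointers dangling. The general rule, shown here with plain realloc() as a stand-in, is to re-derive any pointer into a buffer after an operation that may move it; the buffer contents and sizes are made up for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char *p;

	if (!buf)
		return 1;
	strcpy(buf, "hdr:payload");

	p = buf;			/* pointer into the buffer */
	buf = realloc(buf, 1 << 20);	/* may move the block: p can now dangle */
	if (!buf)
		return 1;

	p = buf;			/* correct: re-derive the pointer after the move */
	printf("%s\n", p);
	free(buf);
	return 0;
}
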
diff -ur a/net/llc/af_llc.c b/net/llc/af_llc.c
--- a/net/llc/af_llc.c	2017-03-23 15:05:46.000000000 +0100
+++ b/net/llc/af_llc.c	2017-03-14 02:43:41.000000000 +0100
@@ -625,6 +625,7 @@
 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
 		struct llc_pktinfo info;
 
+		memset(&info, 0, sizeof(info));
 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
 		llc_pdu_decode_da(skb, info.lpi_mac);
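
The af_llc.c change zeroes the llc_pktinfo structure before filling it, so padding bytes and any members the code does not set are not copied to userspace as stale kernel stack contents. The same pattern in a standalone sketch; the struct layout here is invented for illustration:

#include <stdio.h>
#include <string.h>

struct pktinfo {
	int ifindex;
	unsigned char sap;
	unsigned char mac[6];
	/* sizeof() is rounded up for alignment; those pad bytes must not leak */
};

static void fill_pktinfo(struct pktinfo *info)
{
	/* Zero everything first, then set the fields that are actually known. */
	memset(info, 0, sizeof(*info));
	info->ifindex = 2;
	info->sap = 0x42;
	memcpy(info->mac, "\x02\x00\x00\x00\x00\x01", 6);
}

int main(void)
{
	struct pktinfo info;

	fill_pktinfo(&info);
	printf("ifindex %d sap 0x%02x\n", info.ifindex, info.sap);
	return 0;
}
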
diff -ur a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
--- a/net/mac80211/agg-rx.c	2017-03-23 15:05:38.000000000 +0100
+++ b/net/mac80211/agg-rx.c	2017-03-14 02:43:31.000000000 +0100
@@ -290,7 +290,7 @@
 	}
 
 	/* prepare A-MPDU MLME for Rx aggregation */
-	tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+	tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
 	if (!tid_agg_rx)
 		goto end;
 
diff -ur a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
--- a/net/mac80211/debugfs_netdev.c	2017-03-23 15:05:41.000000000 +0100
+++ b/net/mac80211/debugfs_netdev.c	2017-03-14 02:43:35.000000000 +0100
@@ -695,6 +695,7 @@
 
 	debugfs_remove_recursive(sdata->vif.debugfs_dir);
 	sdata->vif.debugfs_dir = NULL;
+	sdata->debugfs.subdir_stations = NULL;
 }
 
 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff -ur a/net/mac80211/main.c b/net/mac80211/main.c
--- a/net/mac80211/main.c	2017-03-23 15:05:41.000000000 +0100
+++ b/net/mac80211/main.c	2017-03-14 02:43:34.000000000 +0100
@@ -257,6 +257,7 @@
 {
 	struct ieee80211_local *local =
 		container_of(work, struct ieee80211_local, restart_work);
+	struct ieee80211_sub_if_data *sdata;
 
 	/* wait for scan work complete */
 	flush_workqueue(local->workqueue);
@@ -269,6 +270,8 @@
 	mutex_unlock(&local->mtx);
 
 	rtnl_lock();
+	list_for_each_entry(sdata, &local->interfaces, list)
+		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
 	ieee80211_scan_cancel(local);
 	ieee80211_reconfig(local);
 	rtnl_unlock();
diff -ur a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
--- a/net/mac80211/mesh_pathtbl.c	2017-03-23 15:05:42.000000000 +0100
+++ b/net/mac80211/mesh_pathtbl.c	2017-03-14 02:43:36.000000000 +0100
@@ -741,10 +741,8 @@
 static void mesh_path_node_reclaim(struct rcu_head *rp)
 {
 	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
 
 	del_timer_sync(&node->mpath->timer);
-	atomic_dec(&sdata->u.mesh.mpaths);
 	kfree(node->mpath);
 	kfree(node);
 }
@@ -752,8 +750,9 @@
 /* needs to be called with the corresponding hashwlock taken */
 static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
 {
-	struct mesh_path *mpath;
-	mpath = node->mpath;
+	struct mesh_path *mpath = node->mpath;
+	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
 	spin_lock(&mpath->state_lock);
 	mpath->flags |= MESH_PATH_RESOLVING;
 	if (mpath->is_gate)
@@ -761,6 +760,7 @@
 	hlist_del_rcu(&node->list);
 	call_rcu(&node->rcu, mesh_path_node_reclaim);
 	spin_unlock(&mpath->state_lock);
+	atomic_dec(&sdata->u.mesh.mpaths);
 	atomic_dec(&tbl->entries);
 }
 
diff -ur a/net/mac80211/mlme.c b/net/mac80211/mlme.c
--- a/net/mac80211/mlme.c	2017-03-23 15:05:45.000000000 +0100
+++ b/net/mac80211/mlme.c	2017-03-14 02:43:37.000000000 +0100
@@ -3070,7 +3070,7 @@
 
 	if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
 	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
-		int sig = ifmgd->ave_beacon_signal;
+		int sig = ifmgd->ave_beacon_signal / 16;
 		int last_sig = ifmgd->last_ave_beacon_signal;
 
 		/*
diff -ur a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
--- a/net/mac80211/rc80211_minstrel_ht.c	2017-03-23 15:05:39.000000000 +0100
+++ b/net/mac80211/rc80211_minstrel_ht.c	2017-03-14 02:43:32.000000000 +0100
@@ -452,7 +452,7 @@
 	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
 		return;
 
-	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
+	ieee80211_start_tx_ba_session(pubsta, tid, 0);
 }
 
 static void
diff -ur a/net/mac80211/tx.c b/net/mac80211/tx.c
--- a/net/mac80211/tx.c	2017-03-23 15:05:44.000000000 +0100
+++ b/net/mac80211/tx.c	2017-03-14 02:43:38.000000000 +0100
@@ -281,9 +281,6 @@
 	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
 		return TX_CONTINUE;
 
-	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-		return TX_CONTINUE;
-
 	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
 		return TX_CONTINUE;
 
diff -ur a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c	2017-03-23 15:04:56.000000000 +0100
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c	2017-03-14 02:42:46.000000000 +0100
@@ -88,7 +88,7 @@
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
 
-	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
+	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
 		return -EINVAL;
 
 	/* N.B: pe_data is only set on success,
diff -ur a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
--- a/net/netfilter/ipvs/ip_vs_sync.c	2017-03-23 15:05:01.000000000 +0100
+++ b/net/netfilter/ipvs/ip_vs_sync.c	2017-03-14 02:42:51.000000000 +0100
@@ -598,7 +598,7 @@
 			pkts = atomic_add_return(1, &cp->in_pkts);
 		else
 			pkts = sysctl_sync_threshold(ipvs);
-		ip_vs_sync_conn(net, cp->control, pkts);
+		ip_vs_sync_conn(net, cp, pkts);
 	}
 }
 
diff -ur a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
--- a/net/netfilter/ipvs/ip_vs_xmit.c	2017-03-23 15:04:58.000000000 +0100
+++ b/net/netfilter/ipvs/ip_vs_xmit.c	2017-03-14 02:42:47.000000000 +0100
@@ -129,7 +129,6 @@
 
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
-	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
 	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
 			   FLOWI_FLAG_KNOWN_NH : 0;
 
diff -ur a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
--- a/net/netfilter/nf_conntrack_core.c	2017-03-23 15:04:53.000000000 +0100
+++ b/net/netfilter/nf_conntrack_core.c	2017-03-14 02:42:42.000000000 +0100
@@ -311,6 +311,21 @@
 	nf_ct_put(ct);
 }
 
+static inline bool
+nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
+			const struct nf_conntrack_tuple *tuple,
+			u16 zone)
+{
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+	/* A conntrack can be recreated with the equal tuple,
+	 * so we need to check that the conntrack is confirmed
+	 */
+	return nf_ct_tuple_equal(tuple, &h->tuple) &&
+		nf_ct_zone(ct) == zone &&
+		nf_ct_is_confirmed(ct);
+}
+
 /*
  * Warning :
  * - Caller must take a reference on returned object
@@ -332,8 +347,7 @@
 	local_bh_disable();
 begin:
 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
-		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
-		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
+		if (nf_ct_key_equal(h, tuple, zone)) {
 			NF_CT_STAT_INC(net, found);
 			local_bh_enable();
 			return h;
@@ -380,8 +394,7 @@
 			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
 		else {
-			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
-				     nf_ct_zone(ct) != zone)) {
+			if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
 				nf_ct_put(ct);
 				goto begin;
 			}
diff -ur a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
--- a/net/netfilter/nf_conntrack_expect.c	2017-03-23 15:04:49.000000000 +0100
+++ b/net/netfilter/nf_conntrack_expect.c	2017-03-14 02:42:39.000000000 +0100
@@ -202,7 +202,8 @@
 			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
 	}
 
-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
diff -ur a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
--- a/net/netfilter/nfnetlink_cthelper.c	2017-03-23 15:04:55.000000000 +0100
+++ b/net/netfilter/nfnetlink_cthelper.c	2017-03-14 02:42:45.000000000 +0100
@@ -74,6 +74,9 @@
 	if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
 		return -EINVAL;
 
+	/* Not all fields are initialized so first zero the tuple */
+	memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
 	tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
 	tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
 
@@ -83,7 +86,7 @@
 static int
 nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
 {
-	const struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conn_help *help = nfct_help(ct);
 
 	if (attr == NULL)
 		return -EINVAL;
@@ -91,7 +94,7 @@
 	if (help->helper->data_len == 0)
 		return -EINVAL;
 
-	memcpy(&help->data, nla_data(attr), help->helper->data_len);
+	memcpy(help->data, nla_data(attr), help->helper->data_len);
 	return 0;
 }
 
diff -ur a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
--- a/net/netlink/af_netlink.c	2017-03-23 15:04:50.000000000 +0100
+++ b/net/netlink/af_netlink.c	2017-03-14 02:42:39.000000000 +0100
@@ -214,25 +214,51 @@
 	return NULL;
 }
 
+static void
+__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
+		   unsigned int order)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+	struct sk_buff_head *queue;
+	struct netlink_ring *ring;
+
+	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+
+	spin_lock_bh(&queue->lock);
+
+	ring->frame_max		= req->nm_frame_nr - 1;
+	ring->head		= 0;
+	ring->frame_size	= req->nm_frame_size;
+	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
+
+	swap(ring->pg_vec_len, req->nm_block_nr);
+	swap(ring->pg_vec_order, order);
+	swap(ring->pg_vec, pg_vec);
+
+	__skb_queue_purge(queue);
+	spin_unlock_bh(&queue->lock);
+
+	WARN_ON(atomic_read(&nlk->mapped));
+
+	if (pg_vec)
+		free_pg_vec(pg_vec, order, req->nm_block_nr);
+}
+
 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
-			    bool closing, bool tx_ring)
+			    bool tx_ring)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct netlink_ring *ring;
-	struct sk_buff_head *queue;
 	void **pg_vec = NULL;
 	unsigned int order = 0;
-	int err;
 
 	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
 
-	if (!closing) {
-		if (atomic_read(&nlk->mapped))
-			return -EBUSY;
-		if (atomic_read(&ring->pending))
-			return -EBUSY;
-	}
+	if (atomic_read(&nlk->mapped))
+		return -EBUSY;
+	if (atomic_read(&ring->pending))
+		return -EBUSY;
 
 	if (req->nm_block_nr) {
 		if (ring->pg_vec != NULL)
@@ -264,31 +290,19 @@
 			return -EINVAL;
 	}
 
-	err = -EBUSY;
 	mutex_lock(&nlk->pg_vec_lock);
-	if (closing || atomic_read(&nlk->mapped) == 0) {
-		err = 0;
-		spin_lock_bh(&queue->lock);
-
-		ring->frame_max		= req->nm_frame_nr - 1;
-		ring->head		= 0;
-		ring->frame_size	= req->nm_frame_size;
-		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
-
-		swap(ring->pg_vec_len, req->nm_block_nr);
-		swap(ring->pg_vec_order, order);
-		swap(ring->pg_vec, pg_vec);
-
-		__skb_queue_purge(queue);
-		spin_unlock_bh(&queue->lock);
-
-		WARN_ON(atomic_read(&nlk->mapped));
+	if (atomic_read(&nlk->mapped) == 0) {
+		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
+		mutex_unlock(&nlk->pg_vec_lock);
+		return 0;
 	}
+
 	mutex_unlock(&nlk->pg_vec_lock);
 
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->nm_block_nr);
-	return err;
+
+	return -EBUSY;
 }
 
 static void netlink_mm_open(struct vm_area_struct *vma)
@@ -762,10 +776,10 @@
 
 		memset(&req, 0, sizeof(req));
 		if (nlk->rx_ring.pg_vec)
-			netlink_set_ring(sk, &req, true, false);
+			__netlink_set_ring(sk, &req, false, NULL, 0);
 		memset(&req, 0, sizeof(req));
 		if (nlk->tx_ring.pg_vec)
-			netlink_set_ring(sk, &req, true, true);
+			__netlink_set_ring(sk, &req, true, NULL, 0);
 	}
 #endif /* CONFIG_NETLINK_MMAP */
 
@@ -1530,7 +1544,7 @@
 	else
 #endif /* CONFIG_NETLINK_MMAP */
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, len);
+	sk->sk_data_ready(sk);
 	return len;
 }
 
@@ -2017,7 +2031,7 @@
 			return -EINVAL;
 		if (copy_from_user(&req, optval, sizeof(req)))
 			return -EFAULT;
-		err = netlink_set_ring(sk, &req, false,
+		err = netlink_set_ring(sk, &req,
 				       optname == NETLINK_TX_RING);
 		break;
 	}
@@ -2263,7 +2277,7 @@
 	return err ? : copied;
 }
 
-static void netlink_data_ready(struct sock *sk, int len)
+static void netlink_data_ready(struct sock *sk)
 {
 	BUG();
 }
diff -ur a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
--- a/net/netrom/af_netrom.c	2017-03-23 15:04:25.000000000 +0100
+++ b/net/netrom/af_netrom.c	2017-03-14 02:42:13.000000000 +0100
@@ -1011,7 +1011,7 @@
 	skb_queue_head(&sk->sk_receive_queue, skb);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 
 	bh_unlock_sock(sk);
 
diff -ur a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
--- a/net/nfc/llcp_core.c	2017-03-23 15:04:43.000000000 +0100
+++ b/net/nfc/llcp_core.c	2017-03-14 02:42:32.000000000 +0100
@@ -966,7 +966,7 @@
 	new_sk->sk_state = LLCP_CONNECTED;
 
 	/* Wake the listening processes */
-	parent->sk_data_ready(parent, 0);
+	parent->sk_data_ready(parent);
 
 	/* Send CC */
 	nfc_llcp_send_cc(new_sock);
diff -ur a/net/packet/af_packet.c b/net/packet/af_packet.c
--- a/net/packet/af_packet.c	2017-03-23 15:05:18.000000000 +0100
+++ b/net/packet/af_packet.c	2017-03-14 02:43:09.000000000 +0100
@@ -1149,16 +1149,6 @@
 	sk_refcnt_debug_dec(sk);
 }
 
-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
-{
-	int x = atomic_read(&f->rr_cur) + 1;
-
-	if (x >= num)
-		x = 0;
-
-	return x;
-}
-
 static unsigned int fanout_demux_hash(struct packet_fanout *f,
 				      struct sk_buff *skb,
 				      unsigned int num)
@@ -1170,13 +1160,9 @@
 				    struct sk_buff *skb,
 				    unsigned int num)
 {
-	int cur, old;
+	unsigned int val = atomic_inc_return(&f->rr_cur);
 
-	cur = atomic_read(&f->rr_cur);
-	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
-				     fanout_rr_next(f, num))) != cur)
-		cur = old;
-	return cur;
+	return val % num;
 }
 
 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
@@ -1216,7 +1202,7 @@
 			     struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct packet_fanout *f = pt->af_packet_priv;
-	unsigned int num = f->num_members;
+	unsigned int num = ACCESS_ONCE(f->num_members);
 	struct packet_sock *po;
 	unsigned int idx;
 
@@ -1711,7 +1697,7 @@
 	skb->dropcount = atomic_read(&sk->sk_drops);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 	return 0;
 
 drop_n_acct:
@@ -1918,7 +1904,7 @@
 	else
 		prb_clear_blk_fill_status(&po->rx_ring);
 
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 
 drop_n_restore:
 	if (skb_head != skb->data && skb_shared(skb)) {
@@ -1933,7 +1919,7 @@
 	po->stats.stats1.tp_drops++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 	kfree_skb(copy_skb);
 	goto drop_n_restore;
 }
@@ -3008,6 +2994,7 @@
 	i->ifindex = mreq->mr_ifindex;
 	i->alen = mreq->mr_alen;
 	memcpy(i->addr, mreq->mr_address, i->alen);
+	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
 	i->count = 1;
 	i->next = po->mclist;
 	po->mclist = i;
@@ -3145,19 +3132,25 @@
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		switch (val) {
 		case TPACKET_V1:
 		case TPACKET_V2:
 		case TPACKET_V3:
-			po->tp_version = val;
-			return 0;
+			break;
 		default:
 			return -EINVAL;
 		}
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_version = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_RESERVE:
 	{
@@ -3609,6 +3602,7 @@
 	/* Added to avoid minimal code churn */
 	struct tpacket_req *req = &req_u->req;
 
+	lock_sock(sk);
 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
 		WARN(1, "Tx-ring is not supported.\n");
@@ -3690,8 +3684,6 @@
 			goto out;
 	}
 
-	lock_sock(sk);
-
 	/* Detach socket from network */
 	spin_lock(&po->bind_lock);
 	was_running = po->running;
@@ -3739,11 +3731,11 @@
 		if (!tx_ring)
 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
 	}
-	release_sock(sk);
 
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
+	release_sock(sk);
 	return err;
 }
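
The fanout_demux_rr() hunk earlier in this file replaces a cmpxchg retry loop with a single atomic increment reduced modulo the member count, and the receive path now snapshots f->num_members with ACCESS_ONCE() so the divisor cannot change underneath it. A userspace sketch of that round-robin selection using C11 atomics; the fixed member count is an assumption for the example:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint rr_cur;

/* Pick the next member in round-robin order; num is a snapshot of the member count. */
static unsigned int rr_next(unsigned int num)
{
	unsigned int val = atomic_fetch_add(&rr_cur, 1) + 1;

	return val % num;
}

int main(void)
{
	unsigned int num = 3;	/* snapshotted once, as ACCESS_ONCE() does in the hunk */

	for (int i = 0; i < 7; i++)
		printf("packet %d -> member %u\n", i, rr_next(num));
	return 0;
}

Unlike the cmpxchg loop, the increment never retries, so contended CPUs each complete the selection in a single atomic operation.
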
 
diff -ur a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
--- a/net/phonet/af_phonet.c	2017-03-23 15:04:44.000000000 +0100
+++ b/net/phonet/af_phonet.c	2017-03-14 02:42:33.000000000 +0100
@@ -376,6 +376,10 @@
 	struct sockaddr_pn sa;
 	u16 len;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return NET_RX_DROP;
+
 	/* check we have at least a full Phonet header */
 	if (!pskb_pull(skb, sizeof(struct phonethdr)))
 		goto out;
diff -ur a/net/phonet/pep.c b/net/phonet/pep.c
--- a/net/phonet/pep.c	2017-03-23 15:04:45.000000000 +0100
+++ b/net/phonet/pep.c	2017-03-14 02:42:34.000000000 +0100
@@ -462,10 +462,9 @@
 queue:
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
-	err = skb->len;
 	skb_queue_tail(queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, err);
+		sk->sk_data_ready(sk);
 	return NET_RX_SUCCESS;
 }
 
@@ -587,10 +586,9 @@
 		pn->rx_credits--;
 		skb->dev = NULL;
 		skb_set_owner_r(skb, sk);
-		err = skb->len;
 		skb_queue_tail(&sk->sk_receive_queue, skb);
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, err);
+			sk->sk_data_ready(sk);
 		return NET_RX_SUCCESS;
 
 	case PNS_PEP_CONNECT_RESP:
@@ -698,7 +696,7 @@
 		skb_queue_head(&sk->sk_receive_queue, skb);
 		sk_acceptq_added(sk);
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 		return NET_RX_SUCCESS;
 
 	case PNS_PEP_DISCONNECT_REQ:
diff -ur a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
--- a/net/phonet/pep-gprs.c	2017-03-23 15:04:44.000000000 +0100
+++ b/net/phonet/pep-gprs.c	2017-03-14 02:42:33.000000000 +0100
@@ -37,7 +37,7 @@
 struct gprs_dev {
 	struct sock		*sk;
 	void			(*old_state_change)(struct sock *);
-	void			(*old_data_ready)(struct sock *, int);
+	void			(*old_data_ready)(struct sock *);
 	void			(*old_write_space)(struct sock *);
 
 	struct net_device	*dev;
@@ -146,7 +146,7 @@
 	return err;
 }
 
-static void gprs_data_ready(struct sock *sk, int len)
+static void gprs_data_ready(struct sock *sk)
 {
 	struct gprs_dev *gp = sk->sk_user_data;
 	struct sk_buff *skb;
diff -ur a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
--- a/net/rds/ib_rdma.c	2017-03-23 15:05:11.000000000 +0100
+++ b/net/rds/ib_rdma.c	2017-03-14 02:43:02.000000000 +0100
@@ -758,8 +758,10 @@
 	}
 
 	ibmr = rds_ib_alloc_fmr(rds_ibdev);
-	if (IS_ERR(ibmr))
+	if (IS_ERR(ibmr)) {
+		rds_ib_dev_put(rds_ibdev);
 		return ibmr;
+	}
 
 	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
 	if (ret == 0)
diff -ur a/net/rds/info.c b/net/rds/info.c
--- a/net/rds/info.c	2017-03-23 15:05:10.000000000 +0100
+++ b/net/rds/info.c	2017-03-14 02:43:02.000000000 +0100
@@ -176,7 +176,7 @@
 
 	/* check for all kinds of wrapping and the like */
 	start = (unsigned long)optval;
-	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
 		ret = -EINVAL;
 		goto out;
 	}
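
The rds/info.c check previously added PAGE_SIZE - 1 to len and tested for wrap-around afterwards; since PAGE_SIZE is an unsigned long, the addition is performed in a wider type on 64-bit builds and that test can never fire. The replacement bounds len before any arithmetic is done. A small demonstration of checking against the limit up front; the page size constant is illustrative:

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Returns 1 if len can be rounded up to whole pages without overflowing an int. */
static int len_ok(int len)
{
	return len >= 0 && len <= INT_MAX - PAGE_SIZE + 1;
}

int main(void)
{
	int samples[] = { 0, 4096, INT_MAX - PAGE_SIZE + 1, INT_MAX - 100, INT_MAX };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("len %d -> %s\n", samples[i], len_ok(samples[i]) ? "ok" : "rejected");
	return 0;
}
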
diff -ur a/net/rds/send.c b/net/rds/send.c
--- a/net/rds/send.c	2017-03-23 15:05:12.000000000 +0100
+++ b/net/rds/send.c	2017-03-14 02:43:04.000000000 +0100
@@ -955,11 +955,13 @@
 		release_sock(sk);
 	}
 
-	/* racing with another thread binding seems ok here */
+	lock_sock(sk);
 	if (daddr == 0 || rs->rs_bound_addr == 0) {
+		release_sock(sk);
 		ret = -ENOTCONN; /* XXX not a great errno */
 		goto out;
 	}
+	release_sock(sk);
 
 	/* size of rm including all sgs */
 	ret = rds_rm_size(msg, payload_len);
diff -ur a/net/rds/tcp.h b/net/rds/tcp.h
--- a/net/rds/tcp.h	2017-03-23 15:05:11.000000000 +0100
+++ b/net/rds/tcp.h	2017-03-14 02:43:03.000000000 +0100
@@ -61,12 +61,12 @@
 /* tcp_listen.c */
 int rds_tcp_listen_init(void);
 void rds_tcp_listen_stop(void);
-void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
+void rds_tcp_listen_data_ready(struct sock *sk);
 
 /* tcp_recv.c */
 int rds_tcp_recv_init(void);
 void rds_tcp_recv_exit(void);
-void rds_tcp_data_ready(struct sock *sk, int bytes);
+void rds_tcp_data_ready(struct sock *sk);
 int rds_tcp_recv(struct rds_connection *conn);
 void rds_tcp_inc_free(struct rds_incoming *inc);
 int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
diff -ur a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
--- a/net/rds/tcp_listen.c	2017-03-23 15:05:09.000000000 +0100
+++ b/net/rds/tcp_listen.c	2017-03-14 02:43:01.000000000 +0100
@@ -108,9 +108,9 @@
 		cond_resched();
 }
 
-void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
+void rds_tcp_listen_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 
 	rdsdebug("listen data ready sk %p\n", sk);
 
@@ -132,7 +132,7 @@
 
 out:
 	read_unlock(&sk->sk_callback_lock);
-	ready(sk, bytes);
+	ready(sk);
 }
 
 int rds_tcp_listen_init(void)
diff -ur a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
--- a/net/rds/tcp_recv.c	2017-03-23 15:05:10.000000000 +0100
+++ b/net/rds/tcp_recv.c	2017-03-14 02:43:01.000000000 +0100
@@ -234,8 +234,15 @@
 			}
 
 			to_copy = min(tc->t_tinc_data_rem, left);
-			pskb_pull(clone, offset);
-			pskb_trim(clone, to_copy);
+			if (!pskb_pull(clone, offset) ||
+			    pskb_trim(clone, to_copy)) {
+				pr_warn("rds_tcp_data_recv: pull/trim failed "
+					"left %zu data_rem %zu skb_len %d\n",
+					left, tc->t_tinc_data_rem, skb->len);
+				kfree_skb(clone);
+				desc->error = -ENOMEM;
+				goto out;
+			}
 			skb_queue_tail(&tinc->ti_skb_list, clone);
 
 			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
@@ -314,13 +321,13 @@
 	return ret;
 }
 
-void rds_tcp_data_ready(struct sock *sk, int bytes)
+void rds_tcp_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
+	rdsdebug("data ready sk %p\n", sk);
 
 	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
@@ -337,7 +344,7 @@
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
 	read_unlock(&sk->sk_callback_lock);
-	ready(sk, bytes);
+	ready(sk);
 }
 
 int rds_tcp_recv_init(void)
diff -ur a/net/rfkill/core.c b/net/rfkill/core.c
--- a/net/rfkill/core.c	2017-03-23 15:04:25.000000000 +0100
+++ b/net/rfkill/core.c	2017-03-14 02:42:14.000000000 +0100
@@ -51,7 +51,6 @@
 struct rfkill {
 	spinlock_t		lock;
 
-	const char		*name;
 	enum rfkill_type	type;
 
 	unsigned long		state;
@@ -75,6 +74,7 @@
 	struct delayed_work	poll_work;
 	struct work_struct	uevent_work;
 	struct work_struct	sync_work;
+	char			name[];
 };
 #define to_rfkill(d)	container_of(d, struct rfkill, dev)
 
@@ -866,14 +866,14 @@
 	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
 		return NULL;
 
-	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
+	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
 	if (!rfkill)
 		return NULL;
 
 	spin_lock_init(&rfkill->lock);
 	INIT_LIST_HEAD(&rfkill->node);
 	rfkill->type = type;
-	rfkill->name = name;
+	strcpy(rfkill->name, name);
 	rfkill->ops = ops;
 	rfkill->data = ops_data;
 
@@ -1083,17 +1083,6 @@
 	return res;
 }
 
-static bool rfkill_readable(struct rfkill_data *data)
-{
-	bool r;
-
-	mutex_lock(&data->mtx);
-	r = !list_empty(&data->events);
-	mutex_unlock(&data->mtx);
-
-	return r;
-}
-
 static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
 			       size_t count, loff_t *pos)
 {
@@ -1110,8 +1099,11 @@
 			goto out;
 		}
 		mutex_unlock(&data->mtx);
+		/* since we re-check and it just compares pointers,
+		 * using !list_empty() without locking isn't a problem
+		 */
 		ret = wait_event_interruptible(data->read_wait,
-					       rfkill_readable(data));
+					       !list_empty(&data->events));
 		mutex_lock(&data->mtx);
 
 		if (ret)
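
The rfkill change stops keeping the caller's name pointer and instead allocates the structure with enough trailing space for its own copy, so the name cannot dangle if the caller's string goes away. A sketch of that flexible-array-member idiom; the struct and function names are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rfkill_like {
	int type;
	char name[];	/* flexible array member, storage allocated together with the struct */
};

static struct rfkill_like *rfkill_alloc(const char *name, int type)
{
	struct rfkill_like *rk = calloc(1, sizeof(*rk) + strlen(name) + 1);

	if (!rk)
		return NULL;
	rk->type = type;
	strcpy(rk->name, name);	/* own copy: the caller may free or reuse its buffer */
	return rk;
}

int main(void)
{
	char tmp[32];

	snprintf(tmp, sizeof(tmp), "wlan%d", 0);
	struct rfkill_like *rk = rfkill_alloc(tmp, 1);
	if (!rk)
		return 1;
	memset(tmp, 0, sizeof(tmp));	/* the object keeps its name regardless */
	printf("name: %s\n", rk->name);
	free(rk);
	return 0;
}
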
diff -ur a/net/rose/af_rose.c b/net/rose/af_rose.c
--- a/net/rose/af_rose.c	2017-03-23 15:05:45.000000000 +0100
+++ b/net/rose/af_rose.c	2017-03-14 02:43:40.000000000 +0100
@@ -1041,7 +1041,7 @@
 	rose_start_heartbeat(make);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 
 	return 1;
 }
diff -ur a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
--- a/net/rxrpc/ar-input.c	2017-03-23 15:05:49.000000000 +0100
+++ b/net/rxrpc/ar-input.c	2017-03-14 02:43:45.000000000 +0100
@@ -115,7 +115,7 @@
 			spin_unlock_bh(&sk->sk_receive_queue.lock);
 
 			if (!sock_flag(sk, SOCK_DEAD))
-				sk->sk_data_ready(sk, skb_len);
+				sk->sk_data_ready(sk);
 		}
 		skb = NULL;
 	} else {
@@ -668,7 +668,7 @@
  * handle data received on the local endpoint
  * - may be called in interrupt context
  */
-void rxrpc_data_ready(struct sock *sk, int count)
+void rxrpc_data_ready(struct sock *sk)
 {
 	struct rxrpc_connection *conn;
 	struct rxrpc_transport *trans;
@@ -678,7 +678,7 @@
 	struct sk_buff *skb;
 	int ret;
 
-	_enter("%p, %d", sk, count);
+	_enter("%p", sk);
 
 	ASSERT(!irqs_disabled());
 
diff -ur a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
--- a/net/rxrpc/ar-internal.h	2017-03-23 15:05:50.000000000 +0100
+++ b/net/rxrpc/ar-internal.h	2017-03-14 02:43:45.000000000 +0100
@@ -501,10 +501,9 @@
 extern unsigned long rxrpc_ack_timeout;
 extern const char *rxrpc_pkts[];
 
-extern void rxrpc_data_ready(struct sock *, int);
-extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool,
-			       bool);
-extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
+void rxrpc_data_ready(struct sock *);
+int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
+void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
 
 /*
  * ar-local.c
diff -ur a/net/sched/sch_api.c b/net/sched/sch_api.c
--- a/net/sched/sch_api.c	2017-03-23 15:04:44.000000000 +0100
+++ b/net/sched/sch_api.c	2017-03-14 02:42:32.000000000 +0100
@@ -748,10 +748,8 @@
 		if (dev->flags & IFF_UP)
 			dev_deactivate(dev);
 
-		if (new && new->ops->attach) {
-			new->ops->attach(new);
-			num_q = 0;
-		}
+		if (new && new->ops->attach)
+			goto skip;
 
 		for (i = 0; i < num_q; i++) {
 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@@ -767,12 +765,16 @@
 				qdisc_destroy(old);
 		}
 
+skip:
 		if (!ingress) {
 			notify_and_destroy(net, skb, n, classid,
 					   dev->qdisc, new);
 			if (new && !new->ops->attach)
 				atomic_inc(&new->refcnt);
 			dev->qdisc = new ? : &noop_qdisc;
+
+			if (new && new->ops->attach)
+				new->ops->attach(new);
 		} else {
 			notify_and_destroy(net, skb, n, classid, old, new);
 		}
diff -ur a/net/sctp/auth.c b/net/sctp/auth.c
--- a/net/sctp/auth.c	2017-03-23 15:05:29.000000000 +0100
+++ b/net/sctp/auth.c	2017-03-14 02:43:21.000000000 +0100
@@ -803,8 +803,8 @@
 	if (!has_sha1)
 		return -EINVAL;
 
-	memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
-		hmacs->shmac_num_idents * sizeof(__u16));
+	for (i = 0; i < hmacs->shmac_num_idents; i++)
+		ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
 	ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
 				hmacs->shmac_num_idents * sizeof(__u16));
 	return 0;
diff -ur a/net/sctp/ipv6.c b/net/sctp/ipv6.c
--- a/net/sctp/ipv6.c	2017-03-23 15:05:30.000000000 +0100
+++ b/net/sctp/ipv6.c	2017-03-14 02:43:22.000000000 +0100
@@ -520,6 +520,8 @@
 		}
 		return 0;
 	}
+	if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+		return 0;
 	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
 		return 0;
 	/* If this is a linklocal address, compare the scope_id. */
@@ -639,6 +641,7 @@
 	struct sock *newsk;
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct sctp6_sock *newsctp6sk;
+	struct ipv6_txoptions *opt;
 
 	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
 	if (!newsk)
@@ -658,6 +661,13 @@
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
+	rcu_read_lock();
+	opt = rcu_dereference(np->opt);
+	if (opt)
+		opt = ipv6_dup_options(newsk, opt);
+	RCU_INIT_POINTER(newnp->opt, opt);
+	rcu_read_unlock();
+
 	/* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
 	 * and getpeername().
 	 */
diff -ur a/net/sctp/output.c b/net/sctp/output.c
--- a/net/sctp/output.c	2017-03-23 15:05:29.000000000 +0100
+++ b/net/sctp/output.c	2017-03-14 02:43:22.000000000 +0100
@@ -617,7 +617,9 @@
 	return err;
 no_route:
 	kfree_skb(nskb);
-	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+
+	if (asoc)
+		IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
 	/* FIXME: Returning the 'err' will effect all the associations
 	 * associated with a socket, although only one of the paths of the
diff -ur a/net/sctp/protocol.c b/net/sctp/protocol.c
--- a/net/sctp/protocol.c	2017-03-23 15:05:30.000000000 +0100
+++ b/net/sctp/protocol.c	2017-03-14 02:43:22.000000000 +0100
@@ -1169,7 +1169,7 @@
 	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
 }
 
-static int __net_init sctp_net_init(struct net *net)
+static int __net_init sctp_defaults_init(struct net *net)
 {
 	int status;
 
@@ -1262,12 +1262,6 @@
 
 	sctp_dbg_objcnt_init(net);
 
-	/* Initialize the control inode/socket for handling OOTB packets.  */
-	if ((status = sctp_ctl_sock_init(net))) {
-		pr_err("Failed to initialize the SCTP control sock\n");
-		goto err_ctl_sock_init;
-	}
-
 	/* Initialize the local address list. */
 	INIT_LIST_HEAD(&net->sctp.local_addr_list);
 	spin_lock_init(&net->sctp.local_addr_lock);
@@ -1283,9 +1277,6 @@
 
 	return 0;
 
-err_ctl_sock_init:
-	sctp_dbg_objcnt_exit(net);
-	sctp_proc_exit(net);
 err_init_proc:
 	cleanup_sctp_mibs(net);
 err_init_mibs:
@@ -1294,15 +1285,12 @@
 	return status;
 }
 
-static void __net_exit sctp_net_exit(struct net *net)
+static void __net_exit sctp_defaults_exit(struct net *net)
 {
 	/* Free the local address list */
 	sctp_free_addr_wq(net);
 	sctp_free_local_addr_list(net);
 
-	/* Free the control endpoint.  */
-	inet_ctl_sock_destroy(net->sctp.ctl_sock);
-
 	sctp_dbg_objcnt_exit(net);
 
 	sctp_proc_exit(net);
@@ -1310,9 +1298,32 @@
 	sctp_sysctl_net_unregister(net);
 }
 
-static struct pernet_operations sctp_net_ops = {
-	.init = sctp_net_init,
-	.exit = sctp_net_exit,
+static struct pernet_operations sctp_defaults_ops = {
+	.init = sctp_defaults_init,
+	.exit = sctp_defaults_exit,
+};
+
+static int __net_init sctp_ctrlsock_init(struct net *net)
+{
+	int status;
+
+	/* Initialize the control inode/socket for handling OOTB packets.  */
+	status = sctp_ctl_sock_init(net);
+	if (status)
+		pr_err("Failed to initialize the SCTP control sock\n");
+
+	return status;
+}
+
+static void __net_init sctp_ctrlsock_exit(struct net *net)
+{
+	/* Free the control endpoint.  */
+	inet_ctl_sock_destroy(net->sctp.ctl_sock);
+}
+
+static struct pernet_operations sctp_ctrlsock_ops = {
+	.init = sctp_ctrlsock_init,
+	.exit = sctp_ctrlsock_exit,
 };
 
 /* Initialize the universe into something sensible.  */
@@ -1447,8 +1458,11 @@
 	sctp_v4_pf_init();
 	sctp_v6_pf_init();
 
-	status = sctp_v4_protosw_init();
+	status = register_pernet_subsys(&sctp_defaults_ops);
+	if (status)
+		goto err_register_defaults;
 
+	status = sctp_v4_protosw_init();
 	if (status)
 		goto err_protosw_init;
 
@@ -1456,9 +1470,9 @@
 	if (status)
 		goto err_v6_protosw_init;
 
-	status = register_pernet_subsys(&sctp_net_ops);
+	status = register_pernet_subsys(&sctp_ctrlsock_ops);
 	if (status)
-		goto err_register_pernet_subsys;
+		goto err_register_ctrlsock;
 
 	status = sctp_v4_add_protocol();
 	if (status)
@@ -1475,12 +1489,14 @@
 err_v6_add_protocol:
 	sctp_v4_del_protocol();
 err_add_protocol:
-	unregister_pernet_subsys(&sctp_net_ops);
-err_register_pernet_subsys:
+	unregister_pernet_subsys(&sctp_ctrlsock_ops);
+err_register_ctrlsock:
 	sctp_v6_protosw_exit();
 err_v6_protosw_init:
 	sctp_v4_protosw_exit();
 err_protosw_init:
+	unregister_pernet_subsys(&sctp_defaults_ops);
+err_register_defaults:
 	sctp_v4_pf_exit();
 	sctp_v6_pf_exit();
 	sctp_sysctl_unregister();
@@ -1513,12 +1529,14 @@
 	sctp_v6_del_protocol();
 	sctp_v4_del_protocol();
 
-	unregister_pernet_subsys(&sctp_net_ops);
+	unregister_pernet_subsys(&sctp_ctrlsock_ops);
 
 	/* Free protosw registrations */
 	sctp_v6_protosw_exit();
 	sctp_v4_protosw_exit();
 
+	unregister_pernet_subsys(&sctp_defaults_ops);
+
 	/* Unregister with socket layer. */
 	sctp_v6_pf_exit();
 	sctp_v4_pf_exit();
diff -ur a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
--- a/net/sctp/sm_statefuns.c	2017-03-23 15:05:39.000000000 +0100
+++ b/net/sctp/sm_statefuns.c	2017-03-14 02:43:30.000000000 +0100
@@ -4831,7 +4831,8 @@
 
 	retval = SCTP_DISPOSITION_CONSUME;
 
-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+	if (abort)
+		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 
 	/* Even if we can't send the ABORT due to low memory delete the
 	 * TCB.  This is a departure from our typical NOMEM handling.
@@ -4968,7 +4969,8 @@
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
 	retval = SCTP_DISPOSITION_CONSUME;
 
-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+	if (abort)
+		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
diff -ur a/net/sctp/socket.c b/net/sctp/socket.c
--- a/net/sctp/socket.c	2017-03-23 15:05:40.000000000 +0100
+++ b/net/sctp/socket.c	2017-03-14 02:43:30.000000000 +0100
@@ -1532,8 +1532,7 @@
 			struct sctp_chunk *chunk;
 
 			chunk = sctp_make_abort_user(asoc, NULL, 0);
-			if (chunk)
-				sctp_primitive_ABORT(net, asoc, chunk);
+			sctp_primitive_ABORT(net, asoc, chunk);
 		} else
 			sctp_primitive_SHUTDOWN(net, asoc, NULL);
 	}
@@ -1547,8 +1546,10 @@
 
 	/* Supposedly, no process has access to the socket, but
 	 * the net layers still may.
+	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+	 * held and that should be grabbed before socket lock.
 	 */
-	sctp_local_bh_disable();
+	spin_lock_bh(&net->sctp.addr_wq_lock);
 	sctp_bh_lock_sock(sk);
 
 	/* Hold the sock, since sk_common_release() will put sock_put()
@@ -1558,7 +1559,7 @@
 	sk_common_release(sk);
 
 	sctp_bh_unlock_sock(sk);
-	sctp_local_bh_enable();
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
 
 	sock_put(sk);
 
@@ -3506,6 +3507,7 @@
 	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
 		return 0;
 
+	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 	if (val == 0 && sp->do_auto_asconf) {
 		list_del(&sp->auto_asconf_list);
 		sp->do_auto_asconf = 0;
@@ -3514,6 +3516,7 @@
 		    &sock_net(sk)->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
 	}
+	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
 	return 0;
 }
 
@@ -4003,18 +4006,28 @@
 	local_bh_disable();
 	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
+
+	/* Nothing can fail after this block, otherwise
+	 * sctp_destroy_sock() will be called without addr_wq_lock held
+	 */
 	if (net->sctp.default_auto_asconf) {
+		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
 		list_add_tail(&sp->auto_asconf_list,
 		    &net->sctp.auto_asconf_splist);
 		sp->do_auto_asconf = 1;
-	} else
+		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+	} else {
 		sp->do_auto_asconf = 0;
+	}
+
 	local_bh_enable();
 
 	return 0;
 }
 
-/* Cleanup any SCTP per socket resources.  */
+/* Cleanup any SCTP per socket resources. Must be called with
+ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
+ */
 SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
 {
 	struct sctp_sock *sp;
@@ -6723,7 +6736,7 @@
 	goto out;
 }
 
-void sctp_data_ready(struct sock *sk, int len)
+void sctp_data_ready(struct sock *sk)
 {
 	struct socket_wq *wq;
 
@@ -6945,6 +6958,22 @@
 	newinet->mc_ttl = 1;
 	newinet->mc_index = 0;
 	newinet->mc_list = NULL;
+
+	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+		net_enable_timestamp();
+}
+
+static inline void sctp_copy_descendant(struct sock *sk_to,
+					const struct sock *sk_from)
+{
+	int ancestor_size = sizeof(struct inet_sock) +
+			    sizeof(struct sctp_sock) -
+			    offsetof(struct sctp_sock, auto_asconf_list);
+
+	if (sk_from->sk_family == PF_INET6)
+		ancestor_size += sizeof(struct ipv6_pinfo);
+
+	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
 }
 
 /* Populate the fields of the newsk from the oldsk and migrate the assoc
@@ -6961,7 +6990,6 @@
 	struct sk_buff *skb, *tmp;
 	struct sctp_ulpevent *event;
 	struct sctp_bind_hashbucket *head;
-	struct list_head tmplist;
 
 	/* Migrate socket buffer sizes and all the socket level options to the
 	 * new socket.
@@ -6969,12 +6997,7 @@
 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
 	/* Brute force copy old sctp opt. */
-	if (oldsp->do_auto_asconf) {
-		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
-		inet_sk_copy_descendant(newsk, oldsk);
-		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
-	} else
-		inet_sk_copy_descendant(newsk, oldsk);
+	sctp_copy_descendant(newsk, oldsk);
 
 	/* Restore the ep value that was overwritten with the above structure
 	 * copy.
@@ -7117,6 +7140,13 @@
 
 #if IS_ENABLED(CONFIG_IPV6)
 
+#include <net/transp_v6.h>
+static void sctp_v6_destroy_sock(struct sock *sk)
+{
+	sctp_destroy_sock(sk);
+	inet6_destroy_sock(sk);
+}
+
 struct proto sctpv6_prot = {
 	.name		= "SCTPv6",
 	.owner		= THIS_MODULE,
@@ -7126,7 +7156,7 @@
 	.accept		= sctp_accept,
 	.ioctl		= sctp_ioctl,
 	.init		= sctp_init_sock,
-	.destroy	= sctp_destroy_sock,
+	.destroy	= sctp_v6_destroy_sock,
 	.shutdown	= sctp_shutdown,
 	.setsockopt	= sctp_setsockopt,
 	.getsockopt	= sctp_getsockopt,
diff -ur a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
--- a/net/sctp/ulpqueue.c	2017-03-23 15:05:30.000000000 +0100
+++ b/net/sctp/ulpqueue.c	2017-03-14 02:43:22.000000000 +0100
@@ -265,7 +265,7 @@
 		sctp_ulpq_clear_pd(ulpq);
 
 	if (queue == &sk->sk_receive_queue)
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 	return 1;
 
 out_free:
@@ -1134,5 +1134,5 @@
 
 	/* If there is data waiting, send it up the socket now. */
 	if (sctp_ulpq_clear_pd(ulpq) || ev)
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 }
diff -ur a/net/socket.c b/net/socket.c
--- a/net/socket.c	2017-03-23 15:04:23.000000000 +0100
+++ b/net/socket.c	2017-03-14 02:42:11.000000000 +0100
@@ -1636,14 +1636,12 @@
 	int err, ctl_len, total_len;
 
 	err = -EFAULT;
-	if (MSG_CMSG_COMPAT & flags) {
-		if (get_compat_msghdr(msg_sys, msg_compat))
-			return -EFAULT;
-	} else {
+	if (MSG_CMSG_COMPAT & flags)
+		err = get_compat_msghdr(msg_sys, msg_compat);
+	else
 		err = copy_msghdr_from_user(msg_sys, msg);
-		if (err)
-			return err;
-	}
+	if (err)
+		return err;
 
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;
@@ -1824,14 +1822,12 @@
 	struct sockaddr __user *uaddr;
 	int __user *uaddr_len;
 
-	if (MSG_CMSG_COMPAT & flags) {
-		if (get_compat_msghdr(msg_sys, msg_compat))
-			return -EFAULT;
-	} else {
+	if (MSG_CMSG_COMPAT & flags)
+		err = get_compat_msghdr(msg_sys, msg_compat);
+	else
 		err = copy_msghdr_from_user(msg_sys, msg);
-		if (err)
-			return err;
-	}
+	if (err)
+		return err;
 
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;
@@ -1992,23 +1988,22 @@
 			break;
 	}
 
-out_put:
-	fput_light(sock->file, fput_needed);
-
 	if (err == 0)
-		return datagrams;
+		goto out_put;
 
-	if (datagrams != 0) {
-		 
-		if (err != -EAGAIN) {
-			 
-			sock->sk->sk_err = -err;
-		}
+	if (datagrams == 0) {
+		datagrams = err;
+		goto out_put;
+	}
 
-		return datagrams;
+	if (err != -EAGAIN) {
+		 
+		sock->sk->sk_err = -err;
 	}
+out_put:
+	fput_light(sock->file, fput_needed);
 
-	return err;
+	return datagrams;
 }
 
 SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
diff -ur a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c	2017-03-23 15:05:35.000000000 +0100
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c	2017-03-14 02:43:28.000000000 +0100
@@ -792,20 +792,26 @@
 {
 	u32 value_follows;
 	int err;
+	struct page *scratch;
+
+	scratch = alloc_page(GFP_KERNEL);
+	if (!scratch)
+		return -ENOMEM;
+	xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
 
 	/* res->status */
 	err = gssx_dec_status(xdr, &res->status);
 	if (err)
-		return err;
+		goto out_free;
 
 	/* res->context_handle */
 	err = gssx_dec_bool(xdr, &value_follows);
 	if (err)
-		return err;
+		goto out_free;
 	if (value_follows) {
 		err = gssx_dec_ctx(xdr, res->context_handle);
 		if (err)
-			return err;
+			goto out_free;
 	} else {
 		res->context_handle = NULL;
 	}
@@ -813,11 +819,11 @@
 	/* res->output_token */
 	err = gssx_dec_bool(xdr, &value_follows);
 	if (err)
-		return err;
+		goto out_free;
 	if (value_follows) {
 		err = gssx_dec_buffer(xdr, res->output_token);
 		if (err)
-			return err;
+			goto out_free;
 	} else {
 		res->output_token = NULL;
 	}
@@ -825,14 +831,17 @@
 	/* res->delegated_cred_handle */
 	err = gssx_dec_bool(xdr, &value_follows);
 	if (err)
-		return err;
+		goto out_free;
 	if (value_follows) {
 		/* we do not support upcall servers sending this data. */
-		return -EINVAL;
+		err = -EINVAL;
+		goto out_free;
 	}
 
 	/* res->options */
 	err = gssx_dec_option_array(xdr, &res->options);
 
+out_free:
+	__free_page(scratch);
 	return err;
 }
diff -ur a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
--- a/net/sunrpc/backchannel_rqst.c	2017-03-23 15:05:33.000000000 +0100
+++ b/net/sunrpc/backchannel_rqst.c	2017-03-14 02:43:26.000000000 +0100
@@ -60,7 +60,7 @@
 
 	dprintk("RPC:        free allocations for req= %p\n", req);
 	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
-	xbufp = &req->rq_private_buf;
+	xbufp = &req->rq_rcv_buf;
 	free_page((unsigned long)xbufp->head[0].iov_base);
 	xbufp = &req->rq_snd_buf;
 	free_page((unsigned long)xbufp->head[0].iov_base);
diff -ur a/net/sunrpc/cache.c b/net/sunrpc/cache.c
--- a/net/sunrpc/cache.c	2017-03-23 15:05:35.000000000 +0100
+++ b/net/sunrpc/cache.c	2017-03-14 02:43:27.000000000 +0100
@@ -1215,7 +1215,7 @@
 	if (bp[0] == '\\' && bp[1] == 'x') {
 		/* HEX STRING */
 		bp += 2;
-		while (len < bufsize) {
+		while (len < bufsize - 1) {
 			int h, l;
 
 			h = hex_to_bin(bp[0]);
diff -ur a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
--- a/net/sunrpc/svcsock.c	2017-03-23 15:05:35.000000000 +0100
+++ b/net/sunrpc/svcsock.c	2017-03-14 02:43:28.000000000 +0100
@@ -59,7 +59,7 @@
 
 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
 					 int flags);
-static void		svc_udp_data_ready(struct sock *, int);
+static void		svc_udp_data_ready(struct sock *);
 static int		svc_udp_recvfrom(struct svc_rqst *);
 static int		svc_udp_sendto(struct svc_rqst *);
 static void		svc_sock_detach(struct svc_xprt *);
@@ -399,14 +399,14 @@
 /*
  * INET callback when data has been received on the socket.
  */
-static void svc_udp_data_ready(struct sock *sk, int count)
+static void svc_udp_data_ready(struct sock *sk)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
 	wait_queue_head_t *wq = sk_sleep(sk);
 
 	if (svsk) {
-		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
-			svsk, sk, count,
+		dprintk("svc: socket %p(inet %p), busy=%d\n",
+			svsk, sk,
 			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
@@ -728,7 +728,7 @@
  * A data_ready event on a listening socket means there's a connection
  * pending. Do not use state_change as a substitute for it.
  */
-static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
+static void svc_tcp_listen_data_ready(struct sock *sk)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
 	wait_queue_head_t *wq;
@@ -780,7 +780,7 @@
 		wake_up_interruptible_all(wq);
 }
 
-static void svc_tcp_data_ready(struct sock *sk, int count)
+static void svc_tcp_data_ready(struct sock *sk)
 {
 	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
 	wait_queue_head_t *wq = sk_sleep(sk);
diff -ur a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
--- a/net/sunrpc/xprtsock.c	2017-03-23 15:05:36.000000000 +0100
+++ b/net/sunrpc/xprtsock.c	2017-03-14 02:43:28.000000000 +0100
@@ -252,7 +252,7 @@
 	/*
 	 * Saved socket callback addresses
 	 */
-	void			(*old_data_ready)(struct sock *, int);
+	void			(*old_data_ready)(struct sock *);
 	void			(*old_state_change)(struct sock *);
 	void			(*old_write_space)(struct sock *);
 };
@@ -918,7 +918,7 @@
  *
  * Currently this assumes we can read the whole reply in a single gulp.
  */
-static void xs_local_data_ready(struct sock *sk, int len)
+static void xs_local_data_ready(struct sock *sk)
 {
 	struct rpc_task *task;
 	struct rpc_xprt *xprt;
@@ -981,7 +981,7 @@
  * @len: how much data to read
  *
  */
-static void xs_udp_data_ready(struct sock *sk, int len)
+static void xs_udp_data_ready(struct sock *sk)
 {
 	struct rpc_task *task;
 	struct rpc_xprt *xprt;
@@ -1416,7 +1416,7 @@
  * @bytes: how much data to read
  *
  */
-static void xs_tcp_data_ready(struct sock *sk, int bytes)
+static void xs_tcp_data_ready(struct sock *sk)
 {
 	struct rpc_xprt *xprt;
 	read_descriptor_t rd_desc;
diff -ur a/net/tipc/socket.c b/net/tipc/socket.c
--- a/net/tipc/socket.c	2017-03-23 15:04:47.000000000 +0100
+++ b/net/tipc/socket.c	2017-03-14 02:42:36.000000000 +0100
@@ -63,7 +63,7 @@
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
 static void wakeupdispatch(struct tipc_port *tport);
-static void tipc_data_ready(struct sock *sk, int len);
+static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 
 static const struct proto_ops packet_ops;
@@ -1127,7 +1127,7 @@
  * @sk: socket
  * @len: the length of messages
  */
-static void tipc_data_ready(struct sock *sk, int len)
+static void tipc_data_ready(struct sock *sk)
 {
 	struct socket_wq *wq;
 
@@ -1287,7 +1287,7 @@
 	__skb_queue_tail(&sk->sk_receive_queue, buf);
 	skb_set_owner_r(buf, sk);
 
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 	return TIPC_OK;
 }
 
@@ -1528,6 +1528,7 @@
 	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
 	if (res)
 		goto exit;
+	security_sk_clone(sock->sk, new_sock->sk);
 
 	new_sk = new_sock->sk;
 	new_tsock = tipc_sk(new_sk);
diff -ur a/net/unix/af_unix.c b/net/unix/af_unix.c
--- a/net/unix/af_unix.c	2017-03-23 15:05:15.000000000 +0100
+++ b/net/unix/af_unix.c	2017-03-14 02:43:07.000000000 +0100
@@ -312,6 +312,118 @@
 	return s;
 }
 
+/* Support code for asymmetrically connected dgram sockets
+ *
+ * If a datagram socket is connected to a socket not itself connected
+ * to the first socket (eg, /dev/log), clients may only enqueue more
+ * messages if the present receive queue of the server socket is not
+ * "too large". This means there's a second writeability condition
+ * poll and sendmsg need to test. The dgram recv code will do a wake
+ * up on the peer_wait wait queue of a socket upon reception of a
+ * datagram which needs to be propagated to sleeping would-be writers
+ * since these might not have sent anything so far. This can't be
+ * accomplished via poll_wait because the lifetime of the server
+ * socket might be less than that of its clients if these break their
+ * association with it or if the server socket is closed while clients
+ * are still connected to it and there's no way to inform "a polling
+ * implementation" that it should let go of a certain wait queue
+ *
+ * In order to propagate a wake up, a wait_queue_t of the client
+ * socket is enqueued on the peer_wait queue of the server socket
+ * whose wake function does a wake_up on the ordinary client socket
+ * wait queue. This connection is established whenever a write (or
+ * poll for write) hit the flow control condition and broken when the
+ * association to the server socket is dissolved or after a wake up
+ * was relayed.
+ */
+
+static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+				      void *key)
+{
+	struct unix_sock *u;
+	wait_queue_head_t *u_sleep;
+
+	u = container_of(q, struct unix_sock, peer_wake);
+
+	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
+			    q);
+	u->peer_wake.private = NULL;
+
+	/* relaying can only happen while the wq still exists */
+	u_sleep = sk_sleep(&u->sk);
+	if (u_sleep)
+		wake_up_interruptible_poll(u_sleep, key);
+
+	return 0;
+}
+
+static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
+{
+	struct unix_sock *u, *u_other;
+	int rc;
+
+	u = unix_sk(sk);
+	u_other = unix_sk(other);
+	rc = 0;
+	spin_lock(&u_other->peer_wait.lock);
+
+	if (!u->peer_wake.private) {
+		u->peer_wake.private = other;
+		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
+
+		rc = 1;
+	}
+
+	spin_unlock(&u_other->peer_wait.lock);
+	return rc;
+}
+
+static void unix_dgram_peer_wake_disconnect(struct sock *sk,
+					    struct sock *other)
+{
+	struct unix_sock *u, *u_other;
+
+	u = unix_sk(sk);
+	u_other = unix_sk(other);
+	spin_lock(&u_other->peer_wait.lock);
+
+	if (u->peer_wake.private == other) {
+		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
+		u->peer_wake.private = NULL;
+	}
+
+	spin_unlock(&u_other->peer_wait.lock);
+}
+
+static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
+						   struct sock *other)
+{
+	unix_dgram_peer_wake_disconnect(sk, other);
+	wake_up_interruptible_poll(sk_sleep(sk),
+				   POLLOUT |
+				   POLLWRNORM |
+				   POLLWRBAND);
+}
+
+/* preconditions:
+ *	- unix_peer(sk) == other
+ *	- association is stable
+ */
+static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+{
+	int connected;
+
+	connected = unix_dgram_peer_wake_connect(sk, other);
+
+	if (unix_recvq_full(other))
+		return 1;
+
+	if (connected)
+		unix_dgram_peer_wake_disconnect(sk, other);
+
+	return 0;
+}
+
 static inline int unix_writable(struct sock *sk)
 {
 	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
@@ -416,6 +528,8 @@
 			skpair->sk_state_change(skpair);
 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
 		}
+
+		unix_dgram_peer_wake_disconnect(sk, skpair);
 		sock_put(skpair); /* It may now die */
 		unix_peer(sk) = NULL;
 	}
@@ -648,6 +762,7 @@
 	INIT_LIST_HEAD(&u->link);
 	mutex_init(&u->readlock); /* single task reading lock */
 	init_waitqueue_head(&u->peer_wait);
+	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 	unix_insert_socket(unix_sockets_unbound(sk), sk);
 out:
 	if (sk == NULL)
@@ -1015,6 +1130,8 @@
 	if (unix_peer(sk)) {
 		struct sock *old_peer = unix_peer(sk);
 		unix_peer(sk) = other;
+		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
 		unix_state_double_unlock(sk, other);
 
 		if (other != old_peer)
@@ -1212,7 +1329,7 @@
 	__skb_queue_tail(&other->sk_receive_queue, skb);
 	spin_unlock(&other->sk_receive_queue.lock);
 	unix_state_unlock(other);
-	other->sk_data_ready(other, 0);
+	other->sk_data_ready(other);
 	sock_put(other);
 	return 0;
 
@@ -1346,7 +1463,7 @@
 	UNIXCB(skb).fp = NULL;
 
 	for (i = scm->fp->count-1; i >= 0; i--)
-		unix_notinflight(scm->fp->fp[i]);
+		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
@@ -1363,6 +1480,21 @@
 	sock_wfree(skb);
 }
 
+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+	struct user_struct *user = current_user();
+
+	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+	return false;
+}
+
 #define MAX_RECURSION_LEVEL 4
 
 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
@@ -1371,6 +1503,9 @@
 	unsigned char max_level = 0;
 	int unix_sock_count = 0;
 
+	if (too_many_unix_fds(current))
+		return -ETOOMANYREFS;
+
 	for (i = scm->fp->count - 1; i >= 0; i--) {
 		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
 
@@ -1392,10 +1527,8 @@
 	if (!UNIXCB(skb).fp)
 		return -ENOMEM;
 
-	if (unix_sock_count) {
-		for (i = scm->fp->count - 1; i >= 0; i--)
-			unix_inflight(scm->fp->fp[i]);
-	}
+	for (i = scm->fp->count - 1; i >= 0; i--)
+		unix_inflight(scm->fp->user, scm->fp->fp[i]);
 	return max_level;
 }
 
@@ -1453,6 +1586,7 @@
 	struct scm_cookie tmp_scm;
 	int max_level;
 	int data_len = 0;
+	int sk_locked;
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
@@ -1529,12 +1663,14 @@
 		goto out_free;
 	}
 
+	sk_locked = 0;
 	unix_state_lock(other);
+restart_locked:
 	err = -EPERM;
 	if (!unix_may_send(sk, other))
 		goto out_unlock;
 
-	if (sock_flag(other, SOCK_DEAD)) {
+	if (unlikely(sock_flag(other, SOCK_DEAD))) {
 		/*
 		 *	Check with 1003.1g - what should
 		 *	datagram error
@@ -1542,10 +1678,14 @@
 		unix_state_unlock(other);
 		sock_put(other);
 
+		if (!sk_locked)
+			unix_state_lock(sk);
+
 		err = 0;
-		unix_state_lock(sk);
 		if (unix_peer(sk) == other) {
 			unix_peer(sk) = NULL;
+			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
 			unix_state_unlock(sk);
 
 			unix_dgram_disconnected(sk, other);
@@ -1571,21 +1711,43 @@
 			goto out_unlock;
 	}
 
-	if (unix_peer(other) != sk && unix_recvq_full(other)) {
-		if (!timeo) {
-			err = -EAGAIN;
-			goto out_unlock;
+	/* other == sk && unix_peer(other) != sk if
+	 * - unix_peer(sk) == NULL, destination address bound to sk
+	 * - unix_peer(sk) == sk by time of get but disconnected before lock
+	 */
+	if (other != sk &&
+	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+		if (timeo) {
+			timeo = unix_wait_for_peer(other, timeo);
+
+			err = sock_intr_errno(timeo);
+			if (signal_pending(current))
+				goto out_free;
+
+			goto restart;
 		}
 
-		timeo = unix_wait_for_peer(other, timeo);
+		if (!sk_locked) {
+			unix_state_unlock(other);
+			unix_state_double_lock(sk, other);
+		}
 
-		err = sock_intr_errno(timeo);
-		if (signal_pending(current))
-			goto out_free;
+		if (unix_peer(sk) != other ||
+		    unix_dgram_peer_wake_me(sk, other)) {
+			err = -EAGAIN;
+			sk_locked = 1;
+			goto out_unlock;
+		}
 
-		goto restart;
+		if (!sk_locked) {
+			sk_locked = 1;
+			goto restart_locked;
+		}
 	}
 
+	if (unlikely(sk_locked))
+		unix_state_unlock(sk);
+
 	if (sock_flag(other, SOCK_RCVTSTAMP))
 		__net_timestamp(skb);
 	maybe_add_creds(skb, sock, other);
@@ -1593,12 +1755,14 @@
 	if (max_level > unix_sk(other)->recursion_level)
 		unix_sk(other)->recursion_level = max_level;
 	unix_state_unlock(other);
-	other->sk_data_ready(other, len);
+	other->sk_data_ready(other);
 	sock_put(other);
 	scm_destroy(siocb->scm);
 	return len;
 
 out_unlock:
+	if (sk_locked)
+		unix_state_unlock(sk);
 	unix_state_unlock(other);
 out_free:
 	kfree_skb(skb);
@@ -1706,7 +1870,7 @@
 		if (max_level > unix_sk(other)->recursion_level)
 			unix_sk(other)->recursion_level = max_level;
 		unix_state_unlock(other);
-		other->sk_data_ready(other, size);
+		other->sk_data_ready(other);
 		sent += size;
 	}
 
@@ -1893,6 +2057,10 @@
 		unix_state_unlock(sk);
 		timeo = schedule_timeout(timeo);
 		unix_state_lock(sk);
+
+		if (sock_flag(sk, SOCK_DEAD))
+			break;
+
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}
 
@@ -1938,20 +2106,17 @@
 		memset(&tmp_scm, 0, sizeof(tmp_scm));
 	}
 
-	err = mutex_lock_interruptible(&u->readlock);
-	if (unlikely(err)) {
-		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
-		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
-		 */
-		err = noblock ? -EAGAIN : -ERESTARTSYS;
-		goto out;
-	}
+	mutex_lock(&u->readlock);
 
 	do {
 		int chunk;
 		struct sk_buff *skb, *last;
 
 		unix_state_lock(sk);
+		if (sock_flag(sk, SOCK_DEAD)) {
+			err = -ECONNRESET;
+			goto unlock;
+		}
 		last = skb = skb_peek(&sk->sk_receive_queue);
 again:
 		if (skb == NULL) {
@@ -1977,12 +2142,12 @@
 
 			timeo = unix_stream_data_wait(sk, timeo, last);
 
-			if (signal_pending(current)
-			    ||  mutex_lock_interruptible(&u->readlock)) {
+			if (signal_pending(current)) {
 				err = sock_intr_errno(timeo);
 				goto out;
 			}
 
+			mutex_lock(&u->readlock);
 			continue;
  unlock:
 			unix_state_unlock(sk);
@@ -2050,8 +2215,20 @@
 			if (UNIXCB(skb).fp)
 				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
 
-			sk_peek_offset_fwd(sk, chunk);
+			if (skip) {
+				sk_peek_offset_fwd(sk, chunk);
+				skip -= chunk;
+			}
+
+			if (UNIXCB(skb).fp)
+				break;
 
+			last = skb;
+			unix_state_lock(sk);
+			skb = skb_peek_next(skb, &sk->sk_receive_queue);
+			if (skb)
+				goto again;
+			unix_state_unlock(sk);
 			break;
 		}
 	} while (size);
@@ -2235,14 +2412,16 @@
 		return mask;
 
 	writable = unix_writable(sk);
-	other = unix_peer_get(sk);
-	if (other) {
-		if (unix_peer(other) != sk) {
-			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
-			if (unix_recvq_full(other))
-				writable = 0;
-		}
-		sock_put(other);
+	if (writable) {
+		unix_state_lock(sk);
+
+		other = unix_peer(sk);
+		if (other && unix_peer(other) != sk &&
+		    unix_recvq_full(other) &&
+		    unix_dgram_peer_wake_me(sk, other))
+			writable = 0;
+
+		unix_state_unlock(sk);
 	}
 
 	if (writable)
diff -ur a/net/unix/garbage.c b/net/unix/garbage.c
--- a/net/unix/garbage.c	2017-03-23 15:05:13.000000000 +0100
+++ b/net/unix/garbage.c	2017-03-14 02:43:05.000000000 +0100
@@ -121,12 +121,15 @@
  *	descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
+
+	spin_lock(&unix_gc_lock);
+
 	if (s) {
 		struct unix_sock *u = unix_sk(s);
-		spin_lock(&unix_gc_lock);
+
 		if (atomic_long_inc_return(&u->inflight) == 1) {
 			BUG_ON(!list_empty(&u->link));
 			list_add_tail(&u->link, &gc_inflight_list);
@@ -134,22 +137,27 @@
 			BUG_ON(list_empty(&u->link));
 		}
 		unix_tot_inflight++;
-		spin_unlock(&unix_gc_lock);
 	}
+	user->unix_inflight++;
+	spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
 	struct sock *s = unix_get_socket(fp);
+
+	spin_lock(&unix_gc_lock);
+
 	if (s) {
 		struct unix_sock *u = unix_sk(s);
-		spin_lock(&unix_gc_lock);
+
 		BUG_ON(list_empty(&u->link));
 		if (atomic_long_dec_and_test(&u->inflight))
 			list_del_init(&u->link);
 		unix_tot_inflight--;
-		spin_unlock(&unix_gc_lock);
 	}
+	user->unix_inflight--;
+	spin_unlock(&unix_gc_lock);
 }
 
 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
diff -ur a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
--- a/net/vmw_vsock/af_vsock.c	2017-03-23 15:04:31.000000000 +0100
+++ b/net/vmw_vsock/af_vsock.c	2017-03-14 02:42:19.000000000 +0100
@@ -1803,27 +1803,8 @@
 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
 		err = 0;
 
-	if (copied > 0) {
-		/* We only do these additional bookkeeping/notification steps
-		 * if we actually copied something out of the queue pair
-		 * instead of just peeking ahead.
-		 */
-
-		if (!(flags & MSG_PEEK)) {
-			/* If the other side has shutdown for sending and there
-			 * is nothing more to read, then modify the socket
-			 * state.
-			 */
-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
-				if (vsock_stream_has_data(vsk) <= 0) {
-					sk->sk_state = SS_UNCONNECTED;
-					sock_set_flag(sk, SOCK_DONE);
-					sk->sk_state_change(sk);
-				}
-			}
-		}
+	if (copied > 0)
 		err = copied;
-	}
 
 out_wait:
 	finish_wait(sk_sleep(sk), &wait);
diff -ur a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
--- a/net/vmw_vsock/vmci_transport_notify.c	2017-03-23 15:04:30.000000000 +0100
+++ b/net/vmw_vsock/vmci_transport_notify.c	2017-03-14 02:42:18.000000000 +0100
@@ -315,7 +315,7 @@
 	struct vsock_sock *vsk = vsock_sk(sk);
 	PKT_FIELD(vsk, sent_waiting_read) = false;
 #endif
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 }
 
 static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
diff -ur a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
--- a/net/vmw_vsock/vmci_transport_notify_qstate.c	2017-03-23 15:04:29.000000000 +0100
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c	2017-03-14 02:42:18.000000000 +0100
@@ -92,7 +92,7 @@
 			    bool bottom_half,
 			    struct sockaddr_vm *dst, struct sockaddr_vm *src)
 {
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 }
 
 static void vsock_block_update_write_window(struct sock *sk)
@@ -290,7 +290,7 @@
 		/* See the comment in
 		 * vmci_transport_notify_pkt_send_post_enqueue().
 		 */
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 	}
 
 	return err;
diff -ur a/net/wireless/nl80211.c b/net/wireless/nl80211.c
--- a/net/wireless/nl80211.c	2017-03-23 15:06:06.000000000 +0100
+++ b/net/wireless/nl80211.c	2017-03-14 02:44:01.000000000 +0100
@@ -10679,7 +10679,7 @@
 	struct wireless_dev *wdev;
 	struct cfg80211_beacon_registration *reg, *tmp;
 
-	if (state != NETLINK_URELEASE)
+	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
 		return NOTIFY_DONE;
 
 	rcu_read_lock();
diff -ur a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
--- a/net/wireless/wext-compat.c	2017-03-23 15:05:54.000000000 +0100
+++ b/net/wireless/wext-compat.c	2017-03-14 02:43:50.000000000 +0100
@@ -1342,6 +1342,8 @@
 	memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
 	wdev_unlock(wdev);
 
+	memset(&sinfo, 0, sizeof(sinfo));
+
 	if (rdev_get_station(rdev, dev, bssid, &sinfo))
 		return NULL;
 
diff -ur a/net/wireless/wext-core.c b/net/wireless/wext-core.c
--- a/net/wireless/wext-core.c	2017-03-23 15:05:54.000000000 +0100
+++ b/net/wireless/wext-core.c	2017-03-14 02:43:50.000000000 +0100
@@ -340,6 +340,39 @@
 
 /* IW event code */
 
+static void wireless_nlevent_flush(void)
+{
+	struct sk_buff *skb;
+	struct net *net;
+
+	ASSERT_RTNL();
+
+	for_each_net(net) {
+		while ((skb = skb_dequeue(&net->wext_nlevents)))
+			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+				    GFP_KERNEL);
+	}
+}
+
+static int wext_netdev_notifier_call(struct notifier_block *nb,
+				     unsigned long state, void *ptr)
+{
+	/*
+	 * When a netdev changes state in any way, flush all pending messages
+	 * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
+	 * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
+	 * or similar - all of which could otherwise happen due to delays from
+	 * schedule_work().
+	 */
+	wireless_nlevent_flush();
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wext_netdev_notifier = {
+	.notifier_call = wext_netdev_notifier_call,
+};
+
 static int __net_init wext_pernet_init(struct net *net)
 {
 	skb_queue_head_init(&net->wext_nlevents);
@@ -358,7 +391,12 @@
 
 static int __init wireless_nlevent_init(void)
 {
-	return register_pernet_subsys(&wext_pernet_ops);
+	int err = register_pernet_subsys(&wext_pernet_ops);
+
+	if (err)
+		return err;
+
+	return register_netdevice_notifier(&wext_netdev_notifier);
 }
 
 subsys_initcall(wireless_nlevent_init);
@@ -366,17 +404,8 @@
 /* Process events generated by the wireless layer or the driver. */
 static void wireless_nlevent_process(struct work_struct *work)
 {
-	struct sk_buff *skb;
-	struct net *net;
-
 	rtnl_lock();
-
-	for_each_net(net) {
-		while ((skb = skb_dequeue(&net->wext_nlevents)))
-			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
-				    GFP_KERNEL);
-	}
-
+	wireless_nlevent_flush();
 	rtnl_unlock();
 }
 
diff -ur a/net/x25/af_x25.c b/net/x25/af_x25.c
--- a/net/x25/af_x25.c	2017-03-23 15:05:13.000000000 +0100
+++ b/net/x25/af_x25.c	2017-03-14 02:43:05.000000000 +0100
@@ -1060,7 +1060,7 @@
 	x25_start_heartbeat(make);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	rc = 1;
 	sock_put(sk);
 out:
diff -ur a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
--- a/net/x25/x25_facilities.c	2017-03-23 15:05:12.000000000 +0100
+++ b/net/x25/x25_facilities.c	2017-03-14 02:43:04.000000000 +0100
@@ -271,6 +271,7 @@
 
 	memset(&theirs, 0, sizeof(theirs));
 	memcpy(new, ours, sizeof(*new));
+	memset(dte, 0, sizeof(*dte));
 
 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
 	if (len < 0)
diff -ur a/net/x25/x25_in.c b/net/x25/x25_in.c
--- a/net/x25/x25_in.c	2017-03-23 15:05:12.000000000 +0100
+++ b/net/x25/x25_in.c	2017-03-14 02:43:04.000000000 +0100
@@ -77,7 +77,7 @@
 	skb_set_owner_r(skbn, sk);
 	skb_queue_tail(&sk->sk_receive_queue, skbn);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skbn->len);
+		sk->sk_data_ready(sk);
 
 	return 0;
 }
diff -ur a/scripts/bloat-o-meter b/scripts/bloat-o-meter
--- a/scripts/bloat-o-meter	2016-10-20 04:32:07.000000000 +0200
+++ b/scripts/bloat-o-meter	2016-07-29 05:48:09.000000000 +0200
@@ -55,8 +55,8 @@
 delta.sort()
 delta.reverse()
 
-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
-      (add, remove, grow, shrink, up, -down, up-down)
-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
+print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+      (add, remove, grow, shrink, up, -down, up-down))
+print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
 for d, n in delta:
-    if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
+    if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
diff -ur a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
--- a/scripts/coccinelle/iterators/use_after_iter.cocci	2016-10-20 04:32:07.000000000 +0200
+++ b/scripts/coccinelle/iterators/use_after_iter.cocci	2016-07-29 05:48:09.000000000 +0200
@@ -123,7 +123,7 @@
 |
 sizeof(<+...c...+>)
 |
-&c->member
+ &c->member
 |
 c = E
 |
diff -ur a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
--- a/scripts/kconfig/streamline_config.pl	2016-10-20 04:32:07.000000000 +0200
+++ b/scripts/kconfig/streamline_config.pl	2016-07-29 05:48:09.000000000 +0200
@@ -137,7 +137,7 @@
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};
 
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;
 
 my %depends;
diff -ur a/scripts/recordmcount.c b/scripts/recordmcount.c
--- a/scripts/recordmcount.c	2017-03-23 14:16:14.000000000 +0100
+++ b/scripts/recordmcount.c	2017-03-14 01:54:24.000000000 +0100
@@ -189,6 +189,20 @@
 		addr = umalloc(sb.st_size);
 		uread(fd_map, addr, sb.st_size);
 	}
+	if (sb.st_nlink != 1) {
+		/* file is hard-linked, break the hard link */
+		close(fd_map);
+		if (unlink(fname) < 0) {
+			perror(fname);
+			fail_file();
+		}
+		fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode);
+		if (fd_map < 0) {
+			perror(fname);
+			fail_file();
+		}
+		uwrite(fd_map, addr, sb.st_size);
+	}
 	return addr;
 }
 
diff -ur a/scripts/recordmcount.h b/scripts/recordmcount.h
--- a/scripts/recordmcount.h	2017-03-23 14:16:15.000000000 +0100
+++ b/scripts/recordmcount.h	2017-03-14 01:54:25.000000000 +0100
@@ -377,7 +377,7 @@
 
 		if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
 			if (make_nop)
-				ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
+				ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
 			if (warn_on_notrace_sect && !once) {
 				printf("Section %s has mcount callers being ignored\n",
 				       txtname);
diff -ur a/scripts/recordmcount.pl b/scripts/recordmcount.pl
--- a/scripts/recordmcount.pl	2016-10-20 04:32:07.000000000 +0200
+++ b/scripts/recordmcount.pl	2016-07-29 05:48:09.000000000 +0200
@@ -265,7 +265,8 @@
 
 } elsif ($arch eq "powerpc") {
     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
+    # See comment in the sparc64 section for why we use '\w'.
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
 
     if ($bits == 64) {
diff -ur a/security/commoncap.c b/security/commoncap.c
--- a/security/commoncap.c	2017-03-23 14:38:20.000000000 +0100
+++ b/security/commoncap.c	2017-03-14 02:12:54.000000000 +0100
@@ -142,12 +142,17 @@
 {
 	int ret = 0;
 	const struct cred *cred, *child_cred;
+	const kernel_cap_t *caller_caps;
 
 	rcu_read_lock();
 	cred = current_cred();
 	child_cred = __task_cred(child);
+	if (mode & PTRACE_MODE_FSCREDS)
+		caller_caps = &cred->cap_effective;
+	else
+		caller_caps = &cred->cap_permitted;
 	if (cred->user_ns == child_cred->user_ns &&
-	    cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+	    cap_issubset(child_cred->cap_permitted, *caller_caps))
 		goto out;
 	if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
 		goto out;
diff -ur a/security/keys/gc.c b/security/keys/gc.c
--- a/security/keys/gc.c	2017-03-23 14:38:22.000000000 +0100
+++ b/security/keys/gc.c	2017-03-14 02:12:58.000000000 +0100
@@ -186,6 +186,12 @@
 		kdebug("- %u", key->serial);
 		key_check(key);
 
+		/* Throw away the key data if the key is instantiated */
+		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+		    !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
+		    key->type->destroy)
+			key->type->destroy(key);
+
 		security_key_free(key);
 
 		/* deal with the user's key tracking and quota */
@@ -200,10 +206,6 @@
 		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
 			atomic_dec(&key->user->nikeys);
 
-		/* now throw away the key memory */
-		if (key->type->destroy)
-			key->type->destroy(key);
-
 		key_user_put(key->user);
 
 		kfree(key->description);
diff -ur a/security/keys/keyctl.c b/security/keys/keyctl.c
--- a/security/keys/keyctl.c	2017-03-23 14:38:24.000000000 +0100
+++ b/security/keys/keyctl.c	2017-03-14 02:13:01.000000000 +0100
@@ -744,16 +744,16 @@
 
 	/* the key is probably readable - now try to read it */
 can_read_key:
-	ret = key_validate(key);
-	if (ret == 0) {
-		ret = -EOPNOTSUPP;
-		if (key->type->read) {
-			/* read the data with the semaphore held (since we
-			 * might sleep) */
-			down_read(&key->sem);
+	ret = -EOPNOTSUPP;
+	if (key->type->read) {
+		/* Read the data with the semaphore held (since we might sleep)
+		 * to protect against the key being updated or revoked.
+		 */
+		down_read(&key->sem);
+		ret = key_validate(key);
+		if (ret == 0)
 			ret = key->type->read(key, buffer, buflen);
-			up_read(&key->sem);
-		}
+		up_read(&key->sem);
 	}
 
 error2:
diff -ur a/sound/arm/Kconfig b/sound/arm/Kconfig
--- a/sound/arm/Kconfig	2016-10-20 04:32:11.000000000 +0200
+++ b/sound/arm/Kconfig	2016-07-29 05:48:09.000000000 +0200
@@ -9,6 +9,14 @@
 	  Drivers that are implemented on ASoC can be found in
 	  "ALSA for SoC audio support" section.
 
+config SND_PXA2XX_LIB
+	tristate
+	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+	select SND_DMAENGINE_PCM
+
+config SND_PXA2XX_LIB_AC97
+	bool
+
 if SND_ARM
 
 config SND_ARMAACI
@@ -21,13 +29,6 @@
 	tristate
 	select SND_PCM
 
-config SND_PXA2XX_LIB
-	tristate
-	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
-
-config SND_PXA2XX_LIB_AC97
-	bool
-
 config SND_PXA2XX_AC97
 	tristate "AC97 driver for the Intel PXA2xx chip"
 	depends on ARCH_PXA
diff -ur a/sound/core/compress_offload.c b/sound/core/compress_offload.c
--- a/sound/core/compress_offload.c	2017-03-23 15:05:59.000000000 +0100
+++ b/sound/core/compress_offload.c	2017-03-14 02:43:58.000000000 +0100
@@ -44,6 +44,13 @@
 #include <sound/compress_offload.h>
 #include <sound/compress_driver.h>
 
+/* struct snd_compr_codec_caps overflows the ioctl bit size for some
+ * architectures, so we need to disable the relevant ioctls.
+ */
+#if _IOC_SIZEBITS < 14
+#define COMPR_CODEC_CAPS_OVERFLOW
+#endif
+
 /* TODO:
  * - add substream support for multiple devices in case of
  *	SND_DYNAMIC_MINORS is not used
@@ -426,6 +433,7 @@
 	return retval;
 }
 
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
 static int
 snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
 {
@@ -449,6 +457,7 @@
 	kfree(caps);
 	return retval;
 }
+#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
 
 /* revisit this with snd_pcm_preallocate_xxx */
 static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
@@ -789,9 +798,11 @@
 	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
 		retval = snd_compr_get_caps(stream, arg);
 		break;
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
 	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
 		retval = snd_compr_get_codec_caps(stream, arg);
 		break;
+#endif
 	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
 		retval = snd_compr_set_params(stream, arg);
 		break;
diff -ur a/sound/core/control.c b/sound/core/control.c
--- a/sound/core/control.c	2017-03-23 15:05:59.000000000 +0100
+++ b/sound/core/control.c	2017-03-14 02:43:57.000000000 +0100
@@ -1325,6 +1325,8 @@
 		return -EFAULT;
 	if (tlv.length < sizeof(unsigned int) * 2)
 		return -EINVAL;
+	if (!tlv.numid)
+		return -EINVAL;
 	down_read(&card->controls_rwsem);
 	kctl = snd_ctl_find_numid(card, tlv.numid);
 	if (kctl == NULL) {
diff -ur a/sound/core/control_compat.c b/sound/core/control_compat.c
--- a/sound/core/control_compat.c	2017-03-23 15:06:00.000000000 +0100
+++ b/sound/core/control_compat.c	2017-03-14 02:43:58.000000000 +0100
@@ -170,6 +170,20 @@
         unsigned char reserved[128];
 };
 
+#ifdef CONFIG_X86_X32
+/* x32 has a different alignment for 64bit values from ia32 */
+struct snd_ctl_elem_value_x32 {
+	struct snd_ctl_elem_id id;
+	unsigned int indirect;	/* bit-field causes misalignment */
+	union {
+		s32 integer[128];
+		unsigned char data[512];
+		s64 integer64[64];
+	} value;
+	unsigned char reserved[128];
+};
+#endif /* CONFIG_X86_X32 */
+
 /* get the value type and count of the control */
 static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
 			int *countp)
@@ -218,9 +232,11 @@
 
 static int copy_ctl_value_from_user(struct snd_card *card,
 				    struct snd_ctl_elem_value *data,
-				    struct snd_ctl_elem_value32 __user *data32,
+				    void __user *userdata,
+				    void __user *valuep,
 				    int *typep, int *countp)
 {
+	struct snd_ctl_elem_value32 __user *data32 = userdata;
 	int i, type, size;
 	int uninitialized_var(count);
 	unsigned int indirect;
@@ -238,8 +254,9 @@
 	if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
 	    type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
 		for (i = 0; i < count; i++) {
+			s32 __user *intp = valuep;
 			int val;
-			if (get_user(val, &data32->value.integer[i]))
+			if (get_user(val, &intp[i]))
 				return -EFAULT;
 			data->value.integer.value[i] = val;
 		}
@@ -249,8 +266,7 @@
 			printk(KERN_ERR "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
 			return -EINVAL;
 		}
-		if (copy_from_user(data->value.bytes.data,
-				   data32->value.data, size))
+		if (copy_from_user(data->value.bytes.data, valuep, size))
 			return -EFAULT;
 	}
 
@@ -260,7 +276,8 @@
 }
 
 /* restore the value to 32bit */
-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+static int copy_ctl_value_to_user(void __user *userdata,
+				  void __user *valuep,
 				  struct snd_ctl_elem_value *data,
 				  int type, int count)
 {
@@ -269,22 +286,22 @@
 	if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
 	    type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
 		for (i = 0; i < count; i++) {
+			s32 __user *intp = valuep;
 			int val;
 			val = data->value.integer.value[i];
-			if (put_user(val, &data32->value.integer[i]))
+			if (put_user(val, &intp[i]))
 				return -EFAULT;
 		}
 	} else {
 		size = get_elem_size(type, count);
-		if (copy_to_user(data32->value.data,
-				 data->value.bytes.data, size))
+		if (copy_to_user(valuep, data->value.bytes.data, size))
 			return -EFAULT;
 	}
 	return 0;
 }
 
-static int snd_ctl_elem_read_user_compat(struct snd_card *card, 
-					 struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_read_user(struct snd_card *card,
+			      void __user *userdata, void __user *valuep)
 {
 	struct snd_ctl_elem_value *data;
 	int err, type, count;
@@ -293,7 +310,9 @@
 	if (data == NULL)
 		return -ENOMEM;
 
-	if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+	err = copy_ctl_value_from_user(card, data, userdata, valuep,
+				       &type, &count);
+	if (err < 0)
 		goto error;
 
 	snd_power_lock(card);
@@ -302,14 +321,15 @@
 		err = snd_ctl_elem_read(card, data);
 	snd_power_unlock(card);
 	if (err >= 0)
-		err = copy_ctl_value_to_user(data32, data, type, count);
+		err = copy_ctl_value_to_user(userdata, valuep, data,
+					     type, count);
  error:
 	kfree(data);
 	return err;
 }
 
-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
-					  struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_write_user(struct snd_ctl_file *file,
+			       void __user *userdata, void __user *valuep)
 {
 	struct snd_ctl_elem_value *data;
 	struct snd_card *card = file->card;
@@ -319,7 +339,9 @@
 	if (data == NULL)
 		return -ENOMEM;
 
-	if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+	err = copy_ctl_value_from_user(card, data, userdata, valuep,
+				       &type, &count);
+	if (err < 0)
 		goto error;
 
 	snd_power_lock(card);
@@ -328,12 +350,39 @@
 		err = snd_ctl_elem_write(card, file, data);
 	snd_power_unlock(card);
 	if (err >= 0)
-		err = copy_ctl_value_to_user(data32, data, type, count);
+		err = copy_ctl_value_to_user(userdata, valuep, data,
+					     type, count);
  error:
 	kfree(data);
 	return err;
 }
 
+static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+					 struct snd_ctl_elem_value32 __user *data32)
+{
+	return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+					  struct snd_ctl_elem_value32 __user *data32)
+{
+	return ctl_elem_write_user(file, data32, &data32->value);
+}
+
+#ifdef CONFIG_X86_X32
+static int snd_ctl_elem_read_user_x32(struct snd_card *card,
+				      struct snd_ctl_elem_value_x32 __user *data32)
+{
+	return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
+				       struct snd_ctl_elem_value_x32 __user *data32)
+{
+	return ctl_elem_write_user(file, data32, &data32->value);
+}
+#endif /* CONFIG_X86_X32 */
+
 /* add or replace a user control */
 static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
 				   struct snd_ctl_elem_info32 __user *data32,
@@ -392,6 +441,10 @@
 	SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
 	SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
 	SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
+#ifdef CONFIG_X86_X32
+	SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
+	SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -430,6 +483,12 @@
 		return snd_ctl_elem_add_compat(ctl, argp, 0);
 	case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
 		return snd_ctl_elem_add_compat(ctl, argp, 1);
+#ifdef CONFIG_X86_X32
+	case SNDRV_CTL_IOCTL_ELEM_READ_X32:
+		return snd_ctl_elem_read_user_x32(ctl->card, argp);
+	case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
+		return snd_ctl_elem_write_user_x32(ctl, argp);
+#endif /* CONFIG_X86_X32 */
 	}
 
 	down_read(&snd_ioctl_rwsem);
diff -ur a/sound/core/hrtimer.c b/sound/core/hrtimer.c
--- a/sound/core/hrtimer.c	2017-03-23 15:05:59.000000000 +0100
+++ b/sound/core/hrtimer.c	2017-03-14 02:43:57.000000000 +0100
@@ -90,7 +90,7 @@
 	struct snd_hrtimer *stime = t->private_data;
 
 	atomic_set(&stime->running, 0);
-	hrtimer_cancel(&stime->hrt);
+	hrtimer_try_to_cancel(&stime->hrt);
 	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
 		      HRTIMER_MODE_REL);
 	atomic_set(&stime->running, 1);
@@ -101,6 +101,7 @@
 {
 	struct snd_hrtimer *stime = t->private_data;
 	atomic_set(&stime->running, 0);
+	hrtimer_try_to_cancel(&stime->hrt);
 	return 0;
 }
 
diff -ur a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
--- a/sound/core/oss/pcm_oss.c	2017-03-23 15:06:06.000000000 +0100
+++ b/sound/core/oss/pcm_oss.c	2017-03-14 02:44:04.000000000 +0100
@@ -742,7 +742,8 @@
 	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
 }
 
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+				     bool trylock)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_pcm_hw_params *params, *sparams;
@@ -756,7 +757,10 @@
 	struct snd_mask sformat_mask;
 	struct snd_mask mask;
 
-	if (mutex_lock_interruptible(&runtime->oss.params_lock))
+	if (trylock) {
+		if (!(mutex_trylock(&runtime->oss.params_lock)))
+			return -EAGAIN;
+	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
 		return -EINTR;
 	sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
 	params = kmalloc(sizeof(*params), GFP_KERNEL);
@@ -999,7 +1003,7 @@
 		if (asubstream == NULL)
 			asubstream = substream;
 		if (substream->runtime->oss.params) {
-			err = snd_pcm_oss_change_params(substream);
+			err = snd_pcm_oss_change_params(substream, false);
 			if (err < 0)
 				return err;
 		}
@@ -1038,7 +1042,7 @@
 		return 0;
 	runtime = substream->runtime;
 	if (runtime->oss.params) {
-		err = snd_pcm_oss_change_params(substream);
+		err = snd_pcm_oss_change_params(substream, false);
 		if (err < 0)
 			return err;
 	}
@@ -2065,7 +2069,7 @@
 	runtime = substream->runtime;
 
 	if (runtime->oss.params &&
-	    (err = snd_pcm_oss_change_params(substream)) < 0)
+	    (err = snd_pcm_oss_change_params(substream, false)) < 0)
 		return err;
 
 	info.fragsize = runtime->oss.period_bytes;
@@ -2698,7 +2702,8 @@
 		return -EIO;
 	
 	if (runtime->oss.params) {
-		if ((err = snd_pcm_oss_change_params(substream)) < 0)
+		err = snd_pcm_oss_change_params(substream, true);
+		if (err < 0)
 			return err;
 	}
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
diff -ur a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
--- a/sound/core/pcm_compat.c	2017-03-23 15:05:58.000000000 +0100
+++ b/sound/core/pcm_compat.c	2017-03-14 02:43:56.000000000 +0100
@@ -235,10 +235,15 @@
 	if (! (runtime = substream->runtime))
 		return -ENOTTY;
 
-	/* only fifo_size is different, so just copy all */
-	data = memdup_user(data32, sizeof(*data32));
-	if (IS_ERR(data))
-		return PTR_ERR(data);
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* only fifo_size (RO from userspace) is different, so just copy all */
+	if (copy_from_user(data, data32, sizeof(*data32))) {
+		err = -EFAULT;
+		goto error;
+	}
 
 	if (refine)
 		err = snd_pcm_hw_refine(substream, data);
diff -ur a/sound/core/rawmidi.c b/sound/core/rawmidi.c
--- a/sound/core/rawmidi.c	2017-03-23 15:06:01.000000000 +0100
+++ b/sound/core/rawmidi.c	2017-03-14 02:43:59.000000000 +0100
@@ -934,31 +934,36 @@
 	unsigned long flags;
 	long result = 0, count1;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long appl_ptr;
 
+	spin_lock_irqsave(&runtime->lock, flags);
 	while (count > 0 && runtime->avail) {
 		count1 = runtime->buffer_size - runtime->appl_ptr;
 		if (count1 > count)
 			count1 = count;
-		spin_lock_irqsave(&runtime->lock, flags);
 		if (count1 > (int)runtime->avail)
 			count1 = runtime->avail;
+
+		/* update runtime->appl_ptr before unlocking for userbuf */
+		appl_ptr = runtime->appl_ptr;
+		runtime->appl_ptr += count1;
+		runtime->appl_ptr %= runtime->buffer_size;
+		runtime->avail -= count1;
+
 		if (kernelbuf)
-			memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
+			memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
 		if (userbuf) {
 			spin_unlock_irqrestore(&runtime->lock, flags);
 			if (copy_to_user(userbuf + result,
-					 runtime->buffer + runtime->appl_ptr, count1)) {
+					 runtime->buffer + appl_ptr, count1)) {
 				return result > 0 ? result : -EFAULT;
 			}
 			spin_lock_irqsave(&runtime->lock, flags);
 		}
-		runtime->appl_ptr += count1;
-		runtime->appl_ptr %= runtime->buffer_size;
-		runtime->avail -= count1;
-		spin_unlock_irqrestore(&runtime->lock, flags);
 		result += count1;
 		count -= count1;
 	}
+	spin_unlock_irqrestore(&runtime->lock, flags);
 	return result;
 }
 
@@ -1161,8 +1166,9 @@
 	unsigned long flags;
 	long count1, result;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+	unsigned long appl_ptr;
 
-	if (snd_BUG_ON(!kernelbuf && !userbuf))
+	if (!kernelbuf && !userbuf)
 		return -EINVAL;
 	if (snd_BUG_ON(!runtime->buffer))
 		return -EINVAL;
@@ -1181,12 +1187,19 @@
 			count1 = count;
 		if (count1 > (long)runtime->avail)
 			count1 = runtime->avail;
+
+		/* update runtime->appl_ptr before unlocking for userbuf */
+		appl_ptr = runtime->appl_ptr;
+		runtime->appl_ptr += count1;
+		runtime->appl_ptr %= runtime->buffer_size;
+		runtime->avail -= count1;
+
 		if (kernelbuf)
-			memcpy(runtime->buffer + runtime->appl_ptr,
+			memcpy(runtime->buffer + appl_ptr,
 			       kernelbuf + result, count1);
 		else if (userbuf) {
 			spin_unlock_irqrestore(&runtime->lock, flags);
-			if (copy_from_user(runtime->buffer + runtime->appl_ptr,
+			if (copy_from_user(runtime->buffer + appl_ptr,
 					   userbuf + result, count1)) {
 				spin_lock_irqsave(&runtime->lock, flags);
 				result = result > 0 ? result : -EFAULT;
@@ -1194,9 +1207,6 @@
 			}
 			spin_lock_irqsave(&runtime->lock, flags);
 		}
-		runtime->appl_ptr += count1;
-		runtime->appl_ptr %= runtime->buffer_size;
-		runtime->avail -= count1;
 		result += count1;
 		count -= count1;
 	}
diff -ur a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
--- a/sound/core/rawmidi_compat.c	2017-03-23 15:05:57.000000000 +0100
+++ b/sound/core/rawmidi_compat.c	2017-03-14 02:43:55.000000000 +0100
@@ -94,9 +94,58 @@
 	return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_rawmidi_status_x32 {
+	s32 stream;
+	u32 rsvd; /* alignment */
+	struct timespec tstamp;
+	u32 avail;
+	u32 xruns;
+	unsigned char reserved[16];
+} __attribute__((packed));
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+					struct snd_rawmidi_status_x32 __user *src)
+{
+	int err;
+	struct snd_rawmidi_status status;
+
+	if (rfile->output == NULL)
+		return -EINVAL;
+	if (get_user(status.stream, &src->stream))
+		return -EFAULT;
+
+	switch (status.stream) {
+	case SNDRV_RAWMIDI_STREAM_OUTPUT:
+		err = snd_rawmidi_output_status(rfile->output, &status);
+		break;
+	case SNDRV_RAWMIDI_STREAM_INPUT:
+		err = snd_rawmidi_input_status(rfile->input, &status);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (err < 0)
+		return err;
+
+	if (put_timespec(&status.tstamp, &src->tstamp) ||
+	    put_user(status.avail, &src->avail) ||
+	    put_user(status.xruns, &src->xruns))
+		return -EFAULT;
+
+	return 0;
+}
+#endif /* CONFIG_X86_X32 */
+
 enum {
 	SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
 	SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
+#ifdef CONFIG_X86_X32
+	SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -115,6 +164,10 @@
 		return snd_rawmidi_ioctl_params_compat(rfile, argp);
 	case SNDRV_RAWMIDI_IOCTL_STATUS32:
 		return snd_rawmidi_ioctl_status_compat(rfile, argp);
+#ifdef CONFIG_X86_X32
+	case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
+		return snd_rawmidi_ioctl_status_x32(rfile, argp);
+#endif /* CONFIG_X86_X32 */
 	}
 	return -ENOIOCTLCMD;
 }
diff -ur a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
--- a/sound/core/seq/oss/seq_oss.c	2017-03-23 15:06:02.000000000 +0100
+++ b/sound/core/seq/oss/seq_oss.c	2017-03-14 02:44:00.000000000 +0100
@@ -148,8 +148,6 @@
 	if ((dp = file->private_data) == NULL)
 		return 0;
 
-	snd_seq_oss_drain_write(dp);
-
 	mutex_lock(&register_mutex);
 	snd_seq_oss_release(dp);
 	mutex_unlock(&register_mutex);
diff -ur a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
--- a/sound/core/seq/oss/seq_oss_device.h	2017-03-23 15:06:02.000000000 +0100
+++ b/sound/core/seq/oss/seq_oss_device.h	2017-03-14 02:44:00.000000000 +0100
@@ -127,7 +127,6 @@
 unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
 
 void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
 
 /* */
 void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
diff -ur a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
--- a/sound/core/seq/oss/seq_oss_init.c	2017-03-23 15:06:02.000000000 +0100
+++ b/sound/core/seq/oss/seq_oss_init.c	2017-03-14 02:44:00.000000000 +0100
@@ -448,22 +448,6 @@
 }
 
 /*
- * Wait until the queue is empty (if we don't have nonblock)
- */
-void
-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
-{
-	if (! dp->timer->running)
-		return;
-	if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
-	    dp->writeq) {
-		debug_printk(("syncing..\n"));
-		while (snd_seq_oss_writeq_sync(dp->writeq))
-			;
-	}
-}
-
-/*
  * reset sequencer devices
  */
 void
diff -ur a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
--- a/sound/core/seq/oss/seq_oss_synth.c	2017-03-23 15:06:02.000000000 +0100
+++ b/sound/core/seq/oss/seq_oss_synth.c	2017-03-14 02:44:00.000000000 +0100
@@ -303,7 +303,7 @@
 	struct seq_oss_synth *rec;
 	struct seq_oss_synthinfo *info;
 
-	if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+	if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
 		return;
 	for (i = 0; i < dp->max_synthdev; i++) {
 		info = &dp->synths[i];
diff -ur a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
--- a/sound/core/seq/seq_clientmgr.c	2017-03-23 15:06:03.000000000 +0100
+++ b/sound/core/seq/seq_clientmgr.c	2017-03-14 02:44:01.000000000 +0100
@@ -667,6 +667,9 @@
 	else
 		down_read(&grp->list_mutex);
 	list_for_each_entry(subs, &grp->list_head, src_list) {
+		/* both ports ready? */
+		if (atomic_read(&subs->ref_count) != 2)
+			continue;
 		event->dest = subs->info.dest;
 		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
 			/* convert time according to flag with subscription */
@@ -1913,7 +1916,7 @@
 		 * No restrictions so for a user client we can clear
 		 * the whole fifo
 		 */
-		if (client->type == USER_CLIENT)
+		if (client->type == USER_CLIENT && client->data.user.fifo)
 			snd_seq_fifo_clear(client->data.user.fifo);
 	}
 
diff -ur a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
--- a/sound/core/seq/seq_compat.c	2017-03-23 15:06:00.000000000 +0100
+++ b/sound/core/seq/seq_compat.c	2017-03-14 02:43:58.000000000 +0100
@@ -49,11 +49,12 @@
 	struct snd_seq_port_info *data;
 	mm_segment_t fs;
 
-	data = memdup_user(data32, sizeof(*data32));
-	if (IS_ERR(data))
-		return PTR_ERR(data);
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
 
-	if (get_user(data->flags, &data32->flags) ||
+	if (copy_from_user(data, data32, sizeof(*data32)) ||
+	    get_user(data->flags, &data32->flags) ||
 	    get_user(data->time_queue, &data32->time_queue))
 		goto error;
 	data->kernel = NULL;
diff -ur a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
--- a/sound/core/seq/seq_ports.c	2017-03-23 15:06:01.000000000 +0100
+++ b/sound/core/seq/seq_ports.c	2017-03-14 02:43:59.000000000 +0100
@@ -171,10 +171,6 @@
 }
 
 /* */
-enum group_type {
-	SRC_LIST, DEST_LIST
-};
-
 static int subscribe_port(struct snd_seq_client *client,
 			  struct snd_seq_client_port *port,
 			  struct snd_seq_port_subs_info *grp,
@@ -200,6 +196,20 @@
 	return NULL;
 }
 
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+					struct snd_seq_client_port *port,
+					struct snd_seq_subscribers *subs,
+					bool is_src, bool ack);
+
+static inline struct snd_seq_subscribers *
+get_subscriber(struct list_head *p, bool is_src)
+{
+	if (is_src)
+		return list_entry(p, struct snd_seq_subscribers, src_list);
+	else
+		return list_entry(p, struct snd_seq_subscribers, dest_list);
+}
+
 /*
  * remove all subscribers on the list
  * this is called from port_delete, for each src and dest list.
@@ -207,7 +217,7 @@
 static void clear_subscriber_list(struct snd_seq_client *client,
 				  struct snd_seq_client_port *port,
 				  struct snd_seq_port_subs_info *grp,
-				  int grptype)
+				  int is_src)
 {
 	struct list_head *p, *n;
 
@@ -216,15 +226,13 @@
 		struct snd_seq_client *c;
 		struct snd_seq_client_port *aport;
 
-		if (grptype == SRC_LIST) {
-			subs = list_entry(p, struct snd_seq_subscribers, src_list);
+		subs = get_subscriber(p, is_src);
+		if (is_src)
 			aport = get_client_port(&subs->info.dest, &c);
-		} else {
-			subs = list_entry(p, struct snd_seq_subscribers, dest_list);
+		else
 			aport = get_client_port(&subs->info.sender, &c);
-		}
-		list_del(p);
-		unsubscribe_port(client, port, grp, &subs->info, 0);
+		delete_and_unsubscribe_port(client, port, subs, is_src, false);
+
 		if (!aport) {
 			/* looks like the connected port is being deleted.
 			 * we decrease the counter, and when both ports are deleted
@@ -232,21 +240,14 @@
 			 */
 			if (atomic_dec_and_test(&subs->ref_count))
 				kfree(subs);
-		} else {
-			/* ok we got the connected port */
-			struct snd_seq_port_subs_info *agrp;
-			agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
-			down_write(&agrp->list_mutex);
-			if (grptype == SRC_LIST)
-				list_del(&subs->dest_list);
-			else
-				list_del(&subs->src_list);
-			up_write(&agrp->list_mutex);
-			unsubscribe_port(c, aport, agrp, &subs->info, 1);
-			kfree(subs);
-			snd_seq_port_unlock(aport);
-			snd_seq_client_unlock(c);
+			continue;
 		}
+
+		/* ok we got the connected port */
+		delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
+		kfree(subs);
+		snd_seq_port_unlock(aport);
+		snd_seq_client_unlock(c);
 	}
 }
 
@@ -259,8 +260,8 @@
 	snd_use_lock_sync(&port->use_lock); 
 
 	/* clear subscribers info */
-	clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
-	clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
+	clear_subscriber_list(client, port, &port->c_src, true);
+	clear_subscriber_list(client, port, &port->c_dest, false);
 
 	if (port->private_free)
 		port->private_free(port->private_data);
@@ -474,80 +475,120 @@
 	return 0;
 }
 
-/* connect two ports */
-int snd_seq_port_connect(struct snd_seq_client *connector,
-			 struct snd_seq_client *src_client,
-			 struct snd_seq_client_port *src_port,
-			 struct snd_seq_client *dest_client,
-			 struct snd_seq_client_port *dest_port,
-			 struct snd_seq_port_subscribe *info)
-{
-	struct snd_seq_port_subs_info *src = &src_port->c_src;
-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
-	struct snd_seq_subscribers *subs, *s;
-	int err, src_called = 0;
-	unsigned long flags;
-	int exclusive;
-
-	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
-	if (! subs)
-		return -ENOMEM;
-
-	subs->info = *info;
-	atomic_set(&subs->ref_count, 2);
-
-	down_write(&src->list_mutex);
-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+static int check_and_subscribe_port(struct snd_seq_client *client,
+				    struct snd_seq_client_port *port,
+				    struct snd_seq_subscribers *subs,
+				    bool is_src, bool exclusive, bool ack)
+{
+	struct snd_seq_port_subs_info *grp;
+	struct list_head *p;
+	struct snd_seq_subscribers *s;
+	int err;
 
-	exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
+	grp = is_src ? &port->c_src : &port->c_dest;
 	err = -EBUSY;
+	down_write(&grp->list_mutex);
 	if (exclusive) {
-		if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
+		if (!list_empty(&grp->list_head))
 			goto __error;
 	} else {
-		if (src->exclusive || dest->exclusive)
+		if (grp->exclusive)
 			goto __error;
 		/* check whether already exists */
-		list_for_each_entry(s, &src->list_head, src_list) {
-			if (match_subs_info(info, &s->info))
-				goto __error;
-		}
-		list_for_each_entry(s, &dest->list_head, dest_list) {
-			if (match_subs_info(info, &s->info))
+		list_for_each(p, &grp->list_head) {
+			s = get_subscriber(p, is_src);
+			if (match_subs_info(&subs->info, &s->info))
 				goto __error;
 		}
 	}
 
-	if ((err = subscribe_port(src_client, src_port, src, info,
-				  connector->number != src_client->number)) < 0)
-		goto __error;
-	src_called = 1;
-
-	if ((err = subscribe_port(dest_client, dest_port, dest, info,
-				  connector->number != dest_client->number)) < 0)
+	err = subscribe_port(client, port, grp, &subs->info, ack);
+	if (err < 0) {
+		grp->exclusive = 0;
 		goto __error;
+	}
 
 	/* add to list */
-	write_lock_irqsave(&src->list_lock, flags);
-	// write_lock(&dest->list_lock); // no other lock yet
-	list_add_tail(&subs->src_list, &src->list_head);
-	list_add_tail(&subs->dest_list, &dest->list_head);
-	// write_unlock(&dest->list_lock); // no other lock yet
-	write_unlock_irqrestore(&src->list_lock, flags);
+	write_lock_irq(&grp->list_lock);
+	if (is_src)
+		list_add_tail(&subs->src_list, &grp->list_head);
+	else
+		list_add_tail(&subs->dest_list, &grp->list_head);
+	grp->exclusive = exclusive;
+	atomic_inc(&subs->ref_count);
+	write_unlock_irq(&grp->list_lock);
+	err = 0;
 
-	src->exclusive = dest->exclusive = exclusive;
+ __error:
+	up_write(&grp->list_mutex);
+	return err;
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+					struct snd_seq_client_port *port,
+					struct snd_seq_subscribers *subs,
+					bool is_src, bool ack)
+{
+	struct snd_seq_port_subs_info *grp;
+	struct list_head *list;
+	bool empty;
+
+	grp = is_src ? &port->c_src : &port->c_dest;
+	list = is_src ? &subs->src_list : &subs->dest_list;
+	down_write(&grp->list_mutex);
+	write_lock_irq(&grp->list_lock);
+	empty = list_empty(list);
+	if (!empty)
+		list_del_init(list);
+	grp->exclusive = 0;
+	write_unlock_irq(&grp->list_lock);
+	up_write(&grp->list_mutex);
+
+	if (!empty)
+		unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+/* connect two ports */
+int snd_seq_port_connect(struct snd_seq_client *connector,
+			 struct snd_seq_client *src_client,
+			 struct snd_seq_client_port *src_port,
+			 struct snd_seq_client *dest_client,
+			 struct snd_seq_client_port *dest_port,
+			 struct snd_seq_port_subscribe *info)
+{
+	struct snd_seq_subscribers *subs;
+	bool exclusive;
+	int err;
+
+	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+	if (!subs)
+		return -ENOMEM;
+
+	subs->info = *info;
+	atomic_set(&subs->ref_count, 0);
+	INIT_LIST_HEAD(&subs->src_list);
+	INIT_LIST_HEAD(&subs->dest_list);
+
+	exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
+
+	err = check_and_subscribe_port(src_client, src_port, subs, true,
+				       exclusive,
+				       connector->number != src_client->number);
+	if (err < 0)
+		goto error;
+	err = check_and_subscribe_port(dest_client, dest_port, subs, false,
+				       exclusive,
+				       connector->number != dest_client->number);
+	if (err < 0)
+		goto error_dest;
 
-	up_write(&dest->list_mutex);
-	up_write(&src->list_mutex);
 	return 0;
 
- __error:
-	if (src_called)
-		unsubscribe_port(src_client, src_port, src, info,
-				 connector->number != src_client->number);
+ error_dest:
+	delete_and_unsubscribe_port(src_client, src_port, subs, true,
+				    connector->number != src_client->number);
+ error:
 	kfree(subs);
-	up_write(&dest->list_mutex);
-	up_write(&src->list_mutex);
 	return err;
 }
 
@@ -560,37 +601,28 @@
 			    struct snd_seq_port_subscribe *info)
 {
 	struct snd_seq_port_subs_info *src = &src_port->c_src;
-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
 	struct snd_seq_subscribers *subs;
 	int err = -ENOENT;
-	unsigned long flags;
 
 	down_write(&src->list_mutex);
-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
 	/* look for the connection */
 	list_for_each_entry(subs, &src->list_head, src_list) {
 		if (match_subs_info(info, &subs->info)) {
-			write_lock_irqsave(&src->list_lock, flags);
-			// write_lock(&dest->list_lock);  // no lock yet
-			list_del(&subs->src_list);
-			list_del(&subs->dest_list);
-			// write_unlock(&dest->list_lock);
-			write_unlock_irqrestore(&src->list_lock, flags);
-			src->exclusive = dest->exclusive = 0;
-			unsubscribe_port(src_client, src_port, src, info,
-					 connector->number != src_client->number);
-			unsubscribe_port(dest_client, dest_port, dest, info,
-					 connector->number != dest_client->number);
-			kfree(subs);
+			atomic_dec(&subs->ref_count); /* mark as not ready */
 			err = 0;
 			break;
 		}
 	}
-
-	up_write(&dest->list_mutex);
 	up_write(&src->list_mutex);
-	return err;
+	if (err < 0)
+		return err;
+
+	delete_and_unsubscribe_port(src_client, src_port, subs, true,
+				    connector->number != src_client->number);
+	delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
+				    connector->number != dest_client->number);
+	kfree(subs);
+	return 0;
 }
 
 /* get matched subscriber */
diff -ur a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
--- a/sound/core/seq/seq_queue.c	2017-03-23 15:06:01.000000000 +0100
+++ b/sound/core/seq/seq_queue.c	2017-03-14 02:43:59.000000000 +0100
@@ -144,8 +144,10 @@
 static void queue_delete(struct snd_seq_queue *q)
 {
 	/* stop and release the timer */
+	mutex_lock(&q->timer_mutex);
 	snd_seq_timer_stop(q->timer);
 	snd_seq_timer_close(q);
+	mutex_unlock(&q->timer_mutex);
 	/* wait until access free */
 	snd_use_lock_sync(&q->use_lock);
 	/* release resources... */
diff -ur a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
--- a/sound/core/seq/seq_timer.c	2017-03-23 15:06:00.000000000 +0100
+++ b/sound/core/seq/seq_timer.c	2017-03-14 02:43:58.000000000 +0100
@@ -92,6 +92,9 @@
 
 void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&tmr->lock, flags);
 	/* setup defaults */
 	tmr->ppq = 96;		/* 96 PPQ */
 	tmr->tempo = 500000;	/* 120 BPM */
@@ -107,21 +110,25 @@
 	tmr->preferred_resolution = seq_default_timer_resolution;
 
 	tmr->skew = tmr->skew_base = SKEW_BASE;
+	spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
+static void seq_timer_reset(struct snd_seq_timer *tmr)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tmr->lock, flags);
-
 	/* reset time & songposition */
 	tmr->cur_time.tv_sec = 0;
 	tmr->cur_time.tv_nsec = 0;
 
 	tmr->tick.cur_tick = 0;
 	tmr->tick.fraction = 0;
+}
+
+void snd_seq_timer_reset(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
 
+	spin_lock_irqsave(&tmr->lock, flags);
+	seq_timer_reset(tmr);
 	spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
@@ -139,8 +146,11 @@
 	tmr = q->timer;
 	if (tmr == NULL)
 		return;
-	if (!tmr->running)
+	spin_lock_irqsave(&tmr->lock, flags);
+	if (!tmr->running) {
+		spin_unlock_irqrestore(&tmr->lock, flags);
 		return;
+	}
 
 	resolution *= ticks;
 	if (tmr->skew != tmr->skew_base) {
@@ -149,8 +159,6 @@
 			(((resolution & 0xffff) * tmr->skew) >> 16);
 	}
 
-	spin_lock_irqsave(&tmr->lock, flags);
-
 	/* update timer */
 	snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
 
@@ -297,26 +305,30 @@
 	t->callback = snd_seq_timer_interrupt;
 	t->callback_data = q;
 	t->flags |= SNDRV_TIMER_IFLG_AUTO;
+	spin_lock_irq(&tmr->lock);
 	tmr->timeri = t;
+	spin_unlock_irq(&tmr->lock);
 	return 0;
 }
 
 int snd_seq_timer_close(struct snd_seq_queue *q)
 {
 	struct snd_seq_timer *tmr;
+	struct snd_timer_instance *t;
 	
 	tmr = q->timer;
 	if (snd_BUG_ON(!tmr))
 		return -EINVAL;
-	if (tmr->timeri) {
-		snd_timer_stop(tmr->timeri);
-		snd_timer_close(tmr->timeri);
-		tmr->timeri = NULL;
-	}
+	spin_lock_irq(&tmr->lock);
+	t = tmr->timeri;
+	tmr->timeri = NULL;
+	spin_unlock_irq(&tmr->lock);
+	if (t)
+		snd_timer_close(t);
 	return 0;
 }
 
-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+static int seq_timer_stop(struct snd_seq_timer *tmr)
 {
 	if (! tmr->timeri)
 		return -EINVAL;
@@ -327,6 +339,17 @@
 	return 0;
 }
 
+int snd_seq_timer_stop(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	err = seq_timer_stop(tmr);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+	return err;
+}
+
 static int initialize_timer(struct snd_seq_timer *tmr)
 {
 	struct snd_timer *t;
@@ -359,13 +382,13 @@
 	return 0;
 }
 
-int snd_seq_timer_start(struct snd_seq_timer * tmr)
+static int seq_timer_start(struct snd_seq_timer *tmr)
 {
 	if (! tmr->timeri)
 		return -EINVAL;
 	if (tmr->running)
-		snd_seq_timer_stop(tmr);
-	snd_seq_timer_reset(tmr);
+		seq_timer_stop(tmr);
+	seq_timer_reset(tmr);
 	if (initialize_timer(tmr) < 0)
 		return -EINVAL;
 	snd_timer_start(tmr->timeri, tmr->ticks);
@@ -374,14 +397,25 @@
 	return 0;
 }
 
-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+int snd_seq_timer_start(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	err = seq_timer_start(tmr);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+	return err;
+}
+
+static int seq_timer_continue(struct snd_seq_timer *tmr)
 {
 	if (! tmr->timeri)
 		return -EINVAL;
 	if (tmr->running)
 		return -EBUSY;
 	if (! tmr->initialized) {
-		snd_seq_timer_reset(tmr);
+		seq_timer_reset(tmr);
 		if (initialize_timer(tmr) < 0)
 			return -EINVAL;
 	}
@@ -391,11 +425,24 @@
 	return 0;
 }
 
+int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	err = seq_timer_continue(tmr);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+	return err;
+}
+
 /* return current 'real' time. use timeofday() to get better granularity. */
 snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
 {
 	snd_seq_real_time_t cur_time;
+	unsigned long flags;
 
+	spin_lock_irqsave(&tmr->lock, flags);
 	cur_time = tmr->cur_time;
 	if (tmr->running) { 
 		struct timeval tm;
@@ -411,7 +458,7 @@
 		}
 		snd_seq_sanity_real_time(&cur_time);
 	}
-                
+	spin_unlock_irqrestore(&tmr->lock, flags);
 	return cur_time;	
 }
 
diff -ur a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
--- a/sound/core/seq/seq_virmidi.c	2017-03-23 15:06:00.000000000 +0100
+++ b/sound/core/seq/seq_virmidi.c	2017-03-14 02:43:59.000000000 +0100
@@ -254,9 +254,13 @@
  */
 static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
 {
+	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
 	struct snd_virmidi *vmidi = substream->runtime->private_data;
-	snd_midi_event_free(vmidi->parser);
+
+	write_lock_irq(&rdev->filelist_lock);
 	list_del(&vmidi->list);
+	write_unlock_irq(&rdev->filelist_lock);
+	snd_midi_event_free(vmidi->parser);
 	substream->runtime->private_data = NULL;
 	kfree(vmidi);
 	return 0;
diff -ur a/sound/core/timer.c b/sound/core/timer.c
--- a/sound/core/timer.c	2017-03-23 15:05:59.000000000 +0100
+++ b/sound/core/timer.c	2017-03-14 02:43:57.000000000 +0100
@@ -73,7 +73,7 @@
 	struct timespec tstamp;		/* trigger tstamp */
 	wait_queue_head_t qchange_sleep;
 	struct fasync_struct *fasync;
-	struct mutex tread_sem;
+	struct mutex ioctl_lock;
 };
 
 /* list of timers */
@@ -215,11 +215,13 @@
 		    slave->slave_id == master->slave_id) {
 			list_move_tail(&slave->open_list, &master->slave_list_head);
 			spin_lock_irq(&slave_active_lock);
+			spin_lock(&master->timer->lock);
 			slave->master = master;
 			slave->timer = master->timer;
 			if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
 				list_add_tail(&slave->active_list,
 					      &master->slave_active_head);
+			spin_unlock(&master->timer->lock);
 			spin_unlock_irq(&slave_active_lock);
 		}
 	}
@@ -298,8 +300,7 @@
 	return 0;
 }
 
-static int _snd_timer_stop(struct snd_timer_instance *timeri,
-			   int keep_flag, int event);
+static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
 
 /*
  * close a timer instance
@@ -341,19 +342,22 @@
 		spin_unlock_irq(&timer->lock);
 		mutex_lock(&register_mutex);
 		list_del(&timeri->open_list);
-		if (timer && list_empty(&timer->open_list_head) &&
+		if (list_empty(&timer->open_list_head) &&
 		    timer->hw.close)
 			timer->hw.close(timer);
 		/* remove slave links */
+		spin_lock_irq(&slave_active_lock);
+		spin_lock(&timer->lock);
 		list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
 					 open_list) {
-			spin_lock_irq(&slave_active_lock);
-			_snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
 			list_move_tail(&slave->open_list, &snd_timer_slave_list);
 			slave->master = NULL;
 			slave->timer = NULL;
-			spin_unlock_irq(&slave_active_lock);
+			list_del_init(&slave->ack_list);
+			list_del_init(&slave->active_list);
 		}
+		spin_unlock(&timer->lock);
+		spin_unlock_irq(&slave_active_lock);
 		mutex_unlock(&register_mutex);
 	}
  out:
@@ -410,7 +414,7 @@
 	spin_lock_irqsave(&timer->lock, flags);
 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
 		if (ts->ccallback)
-			ts->ccallback(ti, event + 100, &tstamp, resolution);
+			ts->ccallback(ts, event + 100, &tstamp, resolution);
 	spin_unlock_irqrestore(&timer->lock, flags);
 }
 
@@ -439,10 +443,17 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&slave_active_lock, flags);
+	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+		spin_unlock_irqrestore(&slave_active_lock, flags);
+		return -EBUSY;
+	}
 	timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
-	if (timeri->master)
+	if (timeri->master && timeri->timer) {
+		spin_lock(&timeri->timer->lock);
 		list_add_tail(&timeri->active_list,
 			      &timeri->master->slave_active_head);
+		spin_unlock(&timeri->timer->lock);
+	}
 	spin_unlock_irqrestore(&slave_active_lock, flags);
 	return 1; /* delayed start */
 }
@@ -460,23 +471,30 @@
 		return -EINVAL;
 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
 		result = snd_timer_start_slave(timeri);
-		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+		if (result >= 0)
+			snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
 		return result;
 	}
 	timer = timeri->timer;
 	if (timer == NULL)
 		return -EINVAL;
 	spin_lock_irqsave(&timer->lock, flags);
+	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+			     SNDRV_TIMER_IFLG_START)) {
+		result = -EBUSY;
+		goto unlock;
+	}
 	timeri->ticks = timeri->cticks = ticks;
 	timeri->pticks = 0;
 	result = snd_timer_start1(timer, timeri, ticks);
+ unlock:
 	spin_unlock_irqrestore(&timer->lock, flags);
-	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+	if (result >= 0)
+		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
 	return result;
 }
 
-static int _snd_timer_stop(struct snd_timer_instance * timeri,
-			   int keep_flag, int event)
+static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
 {
 	struct snd_timer *timer;
 	unsigned long flags;
@@ -485,17 +503,30 @@
 		return -ENXIO;
 
 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
-		if (!keep_flag) {
-			spin_lock_irqsave(&slave_active_lock, flags);
-			timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+		spin_lock_irqsave(&slave_active_lock, flags);
+		if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
 			spin_unlock_irqrestore(&slave_active_lock, flags);
+			return -EBUSY;
 		}
+		if (timeri->timer)
+			spin_lock(&timeri->timer->lock);
+		timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+		list_del_init(&timeri->ack_list);
+		list_del_init(&timeri->active_list);
+		if (timeri->timer)
+			spin_unlock(&timeri->timer->lock);
+		spin_unlock_irqrestore(&slave_active_lock, flags);
 		goto __end;
 	}
 	timer = timeri->timer;
 	if (!timer)
 		return -EINVAL;
 	spin_lock_irqsave(&timer->lock, flags);
+	if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+			       SNDRV_TIMER_IFLG_START))) {
+		spin_unlock_irqrestore(&timer->lock, flags);
+		return -EBUSY;
+	}
 	list_del_init(&timeri->ack_list);
 	list_del_init(&timeri->active_list);
 	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
@@ -510,9 +541,7 @@
 			}
 		}
 	}
-	if (!keep_flag)
-		timeri->flags &=
-			~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+	timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
 	spin_unlock_irqrestore(&timer->lock, flags);
       __end:
 	if (event != SNDRV_TIMER_EVENT_RESOLUTION)
@@ -531,7 +560,7 @@
 	unsigned long flags;
 	int err;
 
-	err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
+	err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
 	if (err < 0)
 		return err;
 	timer = timeri->timer;
@@ -561,10 +590,15 @@
 	if (! timer)
 		return -EINVAL;
 	spin_lock_irqsave(&timer->lock, flags);
+	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+		result = -EBUSY;
+		goto unlock;
+	}
 	if (!timeri->cticks)
 		timeri->cticks = 1;
 	timeri->pticks = 0;
 	result = snd_timer_start1(timer, timeri, timer->sticks);
+ unlock:
 	spin_unlock_irqrestore(&timer->lock, flags);
 	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
 	return result;
@@ -575,7 +609,7 @@
  */
 int snd_timer_pause(struct snd_timer_instance * timeri)
 {
-	return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
+	return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
 }
 
 /*
@@ -692,8 +726,8 @@
 			ti->cticks = ti->ticks;
 		} else {
 			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
-			if (--timer->running)
-				list_del(&ti->active_list);
+			--timer->running;
+			list_del_init(&ti->active_list);
 		}
 		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
 		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
@@ -978,8 +1012,8 @@
 		njiff += timer->sticks - priv->correction;
 		priv->correction = 0;
 	}
-	priv->last_expires = priv->tlist.expires = njiff;
-	add_timer(&priv->tlist);
+	priv->last_expires = njiff;
+	mod_timer(&priv->tlist, njiff);
 	return 0;
 }
 
@@ -1256,7 +1290,7 @@
 		return -ENOMEM;
 	spin_lock_init(&tu->qlock);
 	init_waitqueue_head(&tu->qchange_sleep);
-	mutex_init(&tu->tread_sem);
+	mutex_init(&tu->ioctl_lock);
 	tu->ticks = 1;
 	tu->queue_size = 128;
 	tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
@@ -1276,8 +1310,10 @@
 	if (file->private_data) {
 		tu = file->private_data;
 		file->private_data = NULL;
+		mutex_lock(&tu->ioctl_lock);
 		if (tu->timeri)
 			snd_timer_close(tu->timeri);
+		mutex_unlock(&tu->ioctl_lock);
 		kfree(tu->queue);
 		kfree(tu->tqueue);
 		kfree(tu);
@@ -1515,7 +1551,6 @@
 	int err = 0;
 
 	tu = file->private_data;
-	mutex_lock(&tu->tread_sem);
 	if (tu->timeri) {
 		snd_timer_close(tu->timeri);
 		tu->timeri = NULL;
@@ -1559,7 +1594,6 @@
 	}
 
       __err:
-      	mutex_unlock(&tu->tread_sem);
 	return err;
 }
 
@@ -1772,7 +1806,7 @@
 	SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
 };
 
-static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
 				 unsigned long arg)
 {
 	struct snd_timer_user *tu;
@@ -1789,17 +1823,11 @@
 	{
 		int xarg;
 
-		mutex_lock(&tu->tread_sem);
-		if (tu->timeri)	{	/* too late */
-			mutex_unlock(&tu->tread_sem);
+		if (tu->timeri)	/* too late */
 			return -EBUSY;
-		}
-		if (get_user(xarg, p)) {
-			mutex_unlock(&tu->tread_sem);
+		if (get_user(xarg, p))
 			return -EFAULT;
-		}
 		tu->tread = xarg ? 1 : 0;
-		mutex_unlock(&tu->tread_sem);
 		return 0;
 	}
 	case SNDRV_TIMER_IOCTL_GINFO:
@@ -1832,6 +1860,18 @@
 	return -ENOTTY;
 }
 
+static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	struct snd_timer_user *tu = file->private_data;
+	long ret;
+
+	mutex_lock(&tu->ioctl_lock);
+	ret = __snd_timer_user_ioctl(file, cmd, arg);
+	mutex_unlock(&tu->ioctl_lock);
+	return ret;
+}
+
 static int snd_timer_user_fasync(int fd, struct file * file, int on)
 {
 	struct snd_timer_user *tu;
diff -ur a/sound/core/timer_compat.c b/sound/core/timer_compat.c
--- a/sound/core/timer_compat.c	2017-03-23 15:05:57.000000000 +0100
+++ b/sound/core/timer_compat.c	2017-03-14 02:43:55.000000000 +0100
@@ -70,13 +70,14 @@
 					struct snd_timer_status32 __user *_status)
 {
 	struct snd_timer_user *tu;
-	struct snd_timer_status status;
+	struct snd_timer_status32 status;
 	
 	tu = file->private_data;
 	if (snd_BUG_ON(!tu->timeri))
 		return -ENXIO;
 	memset(&status, 0, sizeof(status));
-	status.tstamp = tu->tstamp;
+	status.tstamp.tv_sec = tu->tstamp.tv_sec;
+	status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
 	status.resolution = snd_timer_resolution(tu->timeri);
 	status.lost = tu->timeri->lost;
 	status.overrun = tu->overrun;
@@ -88,12 +89,21 @@
 	return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 */
+#define snd_timer_user_status_x32(file, s) \
+	snd_timer_user_status(file, s)
+#endif /* CONFIG_X86_X32 */
+
 /*
  */
 
 enum {
 	SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
 	SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
+#ifdef CONFIG_X86_X32
+	SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -122,6 +132,10 @@
 		return snd_timer_user_info_compat(file, argp);
 	case SNDRV_TIMER_IOCTL_STATUS32:
 		return snd_timer_user_status_compat(file, argp);
+#ifdef CONFIG_X86_X32
+	case SNDRV_TIMER_IOCTL_STATUS_X32:
+		return snd_timer_user_status_x32(file, argp);
+#endif /* CONFIG_X86_X32 */
 	}
 	return -ENOIOCTLCMD;
 }
diff -ur a/sound/drivers/dummy.c b/sound/drivers/dummy.c
--- a/sound/drivers/dummy.c	2017-03-23 15:06:47.000000000 +0100
+++ b/sound/drivers/dummy.c	2017-03-14 02:44:41.000000000 +0100
@@ -109,6 +109,9 @@
 	snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
 };
 
+#define get_dummy_ops(substream) \
+	(*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
+
 struct dummy_model {
 	const char *name;
 	int (*playback_constraints)(struct snd_pcm_runtime *runtime);
@@ -137,7 +140,6 @@
 	int iobox;
 	struct snd_kcontrol *cd_volume_ctl;
 	struct snd_kcontrol *cd_switch_ctl;
-	const struct dummy_timer_ops *timer_ops;
 };
 
 /*
@@ -231,6 +233,8 @@
  */
 
 struct dummy_systimer_pcm {
+	/* ops must be the first item */
+	const struct dummy_timer_ops *timer_ops;
 	spinlock_t lock;
 	struct timer_list timer;
 	unsigned long base_time;
@@ -368,6 +372,8 @@
  */
 
 struct dummy_hrtimer_pcm {
+	/* ops must be the first item */
+	const struct dummy_timer_ops *timer_ops;
 	ktime_t base_time;
 	ktime_t period_time;
 	atomic_t running;
@@ -494,31 +500,25 @@
 
 static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
-		return dummy->timer_ops->start(substream);
+		return get_dummy_ops(substream)->start(substream);
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
-		return dummy->timer_ops->stop(substream);
+		return get_dummy_ops(substream)->stop(substream);
 	}
 	return -EINVAL;
 }
 
 static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-	return dummy->timer_ops->prepare(substream);
+	return get_dummy_ops(substream)->prepare(substream);
 }
 
 static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-	return dummy->timer_ops->pointer(substream);
+	return get_dummy_ops(substream)->pointer(substream);
 }
 
 static struct snd_pcm_hardware dummy_pcm_hardware = {
@@ -564,17 +564,19 @@
 	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
 	struct dummy_model *model = dummy->model;
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	const struct dummy_timer_ops *ops;
 	int err;
 
-	dummy->timer_ops = &dummy_systimer_ops;
+	ops = &dummy_systimer_ops;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	if (hrtimer)
-		dummy->timer_ops = &dummy_hrtimer_ops;
+		ops = &dummy_hrtimer_ops;
 #endif
 
-	err = dummy->timer_ops->create(substream);
+	err = ops->create(substream);
 	if (err < 0)
 		return err;
+	get_dummy_ops(substream) = ops;
 
 	runtime->hw = dummy->pcm_hw;
 	if (substream->pcm->device & 1) {
@@ -596,7 +598,7 @@
 			err = model->capture_constraints(substream->runtime);
 	}
 	if (err < 0) {
-		dummy->timer_ops->free(substream);
+		get_dummy_ops(substream)->free(substream);
 		return err;
 	}
 	return 0;
@@ -604,8 +606,7 @@
 
 static int dummy_pcm_close(struct snd_pcm_substream *substream)
 {
-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-	dummy->timer_ops->free(substream);
+	get_dummy_ops(substream)->free(substream);
 	return 0;
 }
 
diff -ur a/sound/oss/sequencer.c b/sound/oss/sequencer.c
--- a/sound/oss/sequencer.c	2017-03-23 15:07:35.000000000 +0100
+++ b/sound/oss/sequencer.c	2017-03-14 02:45:35.000000000 +0100
@@ -682,13 +682,8 @@
 			break;
 
 		case TMR_ECHO:
-			if (seq_mode == SEQ_2)
-				seq_copy_to_input(event_rec, 8);
-			else
-			{
-				parm = (parm << 8 | SEQ_ECHO);
-				seq_copy_to_input((unsigned char *) &parm, 4);
-			}
+			parm = (parm << 8 | SEQ_ECHO);
+			seq_copy_to_input((unsigned char *) &parm, 4);
 			break;
 
 		default:;
@@ -1331,7 +1326,6 @@
 	int mode = translate_mode(file);
 	struct synth_info inf;
 	struct seq_event_rec event_rec;
-	unsigned long flags;
 	int __user *p = arg;
 
 	orig_dev = dev = dev >> 4;
@@ -1486,9 +1480,7 @@
 		case SNDCTL_SEQ_OUTOFBAND:
 			if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
 				return -EFAULT;
-			spin_lock_irqsave(&lock,flags);
 			play_event(event_rec.arr);
-			spin_unlock_irqrestore(&lock,flags);
 			return 0;
 
 		case SNDCTL_MIDI_INFO:
diff -ur a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
--- a/sound/pci/emu10k1/emu10k1.c	2017-03-23 15:07:21.000000000 +0100
+++ b/sound/pci/emu10k1/emu10k1.c	2017-03-14 02:45:19.000000000 +0100
@@ -181,8 +181,10 @@
 	}
 #endif
  
-	strcpy(card->driver, emu->card_capabilities->driver);
-	strcpy(card->shortname, emu->card_capabilities->name);
+	strlcpy(card->driver, emu->card_capabilities->driver,
+		sizeof(card->driver));
+	strlcpy(card->shortname, emu->card_capabilities->name,
+		sizeof(card->shortname));
 	snprintf(card->longname, sizeof(card->longname),
 		 "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
 		 card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
diff -ur a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
--- a/sound/pci/emu10k1/emu10k1_callback.c	2017-03-23 15:07:21.000000000 +0100
+++ b/sound/pci/emu10k1/emu10k1_callback.c	2017-03-14 02:45:20.000000000 +0100
@@ -409,7 +409,7 @@
 	snd_emu10k1_ptr_write(hw, Z2, ch, 0);
 
 	/* invalidate maps */
-	temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
+	temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
 	snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
 	snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
 #if 0
@@ -430,7 +430,7 @@
 		snd_emu10k1_ptr_write(hw, CDF, ch, sample);
 
 		/* invalidate maps */
-		temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
+		temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
 		snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
 		snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
 		
diff -ur a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
--- a/sound/pci/emu10k1/emu10k1_main.c	2017-03-23 15:07:23.000000000 +0100
+++ b/sound/pci/emu10k1/emu10k1_main.c	2017-03-14 02:45:22.000000000 +0100
@@ -278,7 +278,7 @@
 	snd_emu10k1_ptr_write(emu, TCB, 0, 0);	/* taken from original driver */
 	snd_emu10k1_ptr_write(emu, TCBS, 0, 4);	/* taken from original driver */
 
-	silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
+	silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
 	for (ch = 0; ch < NUM_G; ch++) {
 		snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
 		snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
@@ -344,6 +344,11 @@
 		outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
 	}
 
+	if (emu->address_mode == 0) {
+		/* use 16M in 4G */
+		outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
+	}
+
 	return 0;
 }
 
@@ -1405,7 +1410,7 @@
 	 *
 	 */
 	{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
-	 .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
+	 .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
 	 .id = "Audigy2",
 	 .emu10k2_chip = 1,
 	 .ca0108_chip = 1,
@@ -1555,7 +1560,7 @@
 	 .adc_1361t = 1,  /* 24 bit capture instead of 16bit */
 	 .ac97_chip = 1} ,
 	{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
-	 .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
+	 .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
 	 .id = "Audigy2",
 	 .emu10k2_chip = 1,
 	 .ca0102_chip = 1,
@@ -1859,8 +1864,10 @@
 
 	is_audigy = emu->audigy = c->emu10k2_chip;
 
+	/* set addressing mode */
+	emu->address_mode = is_audigy ? 0 : 1;
 	/* set the DMA transfer mask */
-	emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
+	emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
 	if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
 	    pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
 		snd_printk(KERN_ERR "architecture does not support PCI busmaster DMA with mask 0x%lx\n", emu->dma_mask);
@@ -1883,7 +1890,7 @@
 
 	emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
 	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
-				32 * 1024, &emu->ptb_pages) < 0) {
+				(emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
 		err = -ENOMEM;
 		goto error;
 	}
@@ -1982,8 +1989,8 @@
 
 	/* Clear silent pages and set up pointers */
 	memset(emu->silent_page.area, 0, PAGE_SIZE);
-	silent_page = emu->silent_page.addr << 1;
-	for (idx = 0; idx < MAXPAGES; idx++)
+	silent_page = emu->silent_page.addr << emu->address_mode;
+	for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
 		((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
 
 	/* set up voice indices */
diff -ur a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
--- a/sound/pci/emu10k1/emupcm.c	2017-03-23 15:07:22.000000000 +0100
+++ b/sound/pci/emu10k1/emupcm.c	2017-03-14 02:45:20.000000000 +0100
@@ -379,7 +379,7 @@
 	snd_emu10k1_ptr_write(emu, Z1, voice, 0);
 	snd_emu10k1_ptr_write(emu, Z2, voice, 0);
 	/* invalidate maps */
-	silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
+	silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
 	snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
 	snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
 	/* modulation envelope */
diff -ur a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
--- a/sound/pci/emu10k1/memory.c	2017-03-23 15:07:20.000000000 +0100
+++ b/sound/pci/emu10k1/memory.c	2017-03-14 02:45:19.000000000 +0100
@@ -34,10 +34,11 @@
  * aligned pages in others
  */
 #define __set_ptb_entry(emu,page,addr) \
-	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
+	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
 
 #define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
-#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
+#define MAX_ALIGN_PAGES0		(MAXPAGES0 / UNIT_PAGES)
+#define MAX_ALIGN_PAGES1		(MAXPAGES1 / UNIT_PAGES)
 /* get aligned page from offset address */
 #define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
 /* get offset address from aligned page */
@@ -122,7 +123,7 @@
 		}
 		page = blk->mapped_page + blk->pages;
 	}
-	size = MAX_ALIGN_PAGES - page;
+	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
 	if (size >= max_size) {
 		*nextp = pos;
 		return page;
@@ -179,7 +180,7 @@
 		q = get_emu10k1_memblk(p, mapped_link);
 		end_page = q->mapped_page;
 	} else
-		end_page = MAX_ALIGN_PAGES;
+		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
 
 	/* remove links */
 	list_del(&blk->mapped_link);
@@ -302,7 +303,7 @@
 	if (snd_BUG_ON(!emu))
 		return NULL;
 	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
-		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
+		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
 		return NULL;
 	hdr = emu->memhdr;
 	if (snd_BUG_ON(!hdr))
diff -ur a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
--- a/sound/pci/hda/hda_codec.c	2017-03-23 15:07:22.000000000 +0100
+++ b/sound/pci/hda/hda_codec.c	2017-03-14 02:45:19.000000000 +0100
@@ -2077,6 +2077,16 @@
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_amp_init_stereo);
 
+/* meta hook to call each driver's vmaster hook */
+static void vmaster_hook(void *private_data, int enabled)
+{
+	struct hda_vmaster_mute_hook *hook = private_data;
+
+	if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER)
+		enabled = hook->mute_mode;
+	hook->hook(hook->codec, enabled);
+}
+
 /**
  * snd_hda_codec_resume_amp - Resume all AMP commands from the cache
  * @codec: HD-audio codec
@@ -2770,9 +2780,9 @@
 
 	if (!hook->hook || !hook->sw_kctl)
 		return 0;
-	snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec);
 	hook->codec = codec;
 	hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER;
+	snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook);
 	if (!expose_enum_ctl)
 		return 0;
 	kctl = snd_ctl_new1(&vmaster_mute_mode, hook);
@@ -2795,14 +2805,7 @@
 	 */
 	if (hook->codec->bus->shutdown)
 		return;
-	switch (hook->mute_mode) {
-	case HDA_VMUTE_FOLLOW_MASTER:
-		snd_ctl_sync_vmaster_hook(hook->sw_kctl);
-		break;
-	default:
-		hook->hook(hook->codec, hook->mute_mode);
-		break;
-	}
+	snd_ctl_sync_vmaster_hook(hook->sw_kctl);
 }
 EXPORT_SYMBOL_HDA(snd_hda_sync_vmaster_hook);
 
diff -ur a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
--- a/sound/pci/hda/hda_intel.c	2017-03-23 15:07:21.000000000 +0100
+++ b/sound/pci/hda/hda_intel.c	2017-03-14 02:45:19.000000000 +0100
@@ -1100,6 +1100,36 @@
 		return azx_rirb_get_response(bus, addr);
 }
 
+#ifdef CONFIG_PM_SLEEP
+/* put codec down to D3 at hibernation for Intel SKL+;
+ * otherwise BIOS may still access the codec and screw up the driver
+ */
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+
+static int azx_freeze_noirq(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+
+	if (IS_SKL_PLUS(pci))
+		pci_set_power_state(pci, PCI_D3hot);
+
+	return 0;
+}
+
+static int azx_thaw_noirq(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+
+	if (IS_SKL_PLUS(pci))
+		pci_set_power_state(pci, PCI_D0);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_PM
 static void azx_power_notify(struct hda_bus *bus, bool power_up);
 #endif
@@ -2963,6 +2993,10 @@
 #ifdef CONFIG_PM
 static const struct dev_pm_ops azx_pm = {
 	SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
+#ifdef CONFIG_PM_SLEEP
+	.freeze_noirq = azx_freeze_noirq,
+	.thaw_noirq = azx_thaw_noirq,
+#endif
 	SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
 };
 
@@ -3852,6 +3886,11 @@
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
 	{ PCI_DEVICE(0x8086, 0x8d21),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+	/* Lewisburg */
+	{ PCI_DEVICE(0x8086, 0xa1f0),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+	{ PCI_DEVICE(0x8086, 0xa270),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
 	/* Lynx Point-LP */
 	{ PCI_DEVICE(0x8086, 0x9c20),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
diff -ur a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
--- a/sound/pci/hda/patch_cirrus.c	2017-03-23 15:07:15.000000000 +0100
+++ b/sound/pci/hda/patch_cirrus.c	2017-03-14 02:45:13.000000000 +0100
@@ -786,9 +786,7 @@
 
 	spec->spdif_present = spdif_present;
 	/* SPDIF TX on/off */
-	if (spdif_present)
-		snd_hda_set_pin_ctl(codec, spdif_pin,
-				    spdif_present ? PIN_OUT : 0);
+	snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
 
 	cs_automute(codec);
 }
diff -ur a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
--- a/sound/pci/hda/patch_conexant.c	2017-03-23 15:07:19.000000000 +0100
+++ b/sound/pci/hda/patch_conexant.c	2017-03-14 02:45:16.000000000 +0100
@@ -3470,6 +3470,14 @@
 	  .patch = patch_conexant_auto },
 	{ .id = 0x14f150b9, .name = "CX20665",
 	  .patch = patch_conexant_auto },
+	{ .id = 0x14f150f1, .name = "CX20721",
+	  .patch = patch_conexant_auto },
+	{ .id = 0x14f150f2, .name = "CX20722",
+	  .patch = patch_conexant_auto },
+	{ .id = 0x14f150f3, .name = "CX20723",
+	  .patch = patch_conexant_auto },
+	{ .id = 0x14f150f4, .name = "CX20724",
+	  .patch = patch_conexant_auto },
 	{ .id = 0x14f1510f, .name = "CX20751/2",
 	  .patch = patch_conexant_auto },
 	{ .id = 0x14f15110, .name = "CX20751/2",
@@ -3504,6 +3512,10 @@
 MODULE_ALIAS("snd-hda-codec-id:14f150ac");
 MODULE_ALIAS("snd-hda-codec-id:14f150b8");
 MODULE_ALIAS("snd-hda-codec-id:14f150b9");
+MODULE_ALIAS("snd-hda-codec-id:14f150f1");
+MODULE_ALIAS("snd-hda-codec-id:14f150f2");
+MODULE_ALIAS("snd-hda-codec-id:14f150f3");
+MODULE_ALIAS("snd-hda-codec-id:14f150f4");
 MODULE_ALIAS("snd-hda-codec-id:14f1510f");
 MODULE_ALIAS("snd-hda-codec-id:14f15110");
 MODULE_ALIAS("snd-hda-codec-id:14f15111");
diff -ur a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
--- a/sound/pci/hda/patch_realtek.c	2017-03-23 15:07:22.000000000 +0100
+++ b/sound/pci/hda/patch_realtek.c	2017-03-14 02:45:20.000000000 +0100
@@ -1132,7 +1132,7 @@
 		/* override all pins as BIOS on old Amilo is broken */
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
-			{ 0x14, 0x0121411f }, /* HP */
+			{ 0x14, 0x0121401f }, /* HP */
 			{ 0x15, 0x99030120 }, /* speaker */
 			{ 0x16, 0x99030130 }, /* bass speaker */
 			{ 0x17, 0x411111f0 }, /* N/A */
@@ -1152,7 +1152,7 @@
 		/* almost compatible with FUJITSU, but no bass and SPDIF */
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
-			{ 0x14, 0x0121411f }, /* HP */
+			{ 0x14, 0x0121401f }, /* HP */
 			{ 0x15, 0x99030120 }, /* speaker */
 			{ 0x16, 0x411111f0 }, /* N/A */
 			{ 0x17, 0x411111f0 }, /* N/A */
@@ -1360,7 +1360,7 @@
 	SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
 	SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
 	SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
-	SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
+	SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
 	SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
 	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
 	SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),
@@ -1759,6 +1759,7 @@
 	ALC889_FIXUP_MBA11_VREF,
 	ALC889_FIXUP_MBA21_VREF,
 	ALC889_FIXUP_MP11_VREF,
+	ALC889_FIXUP_MP41_VREF,
 	ALC882_FIXUP_INV_DMIC,
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
@@ -1844,7 +1845,7 @@
 				  const struct hda_fixup *fix, int action)
 {
 	struct alc_spec *spec = codec->spec;
-	static hda_nid_t nids[2] = { 0x14, 0x15 };
+	static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 };
 	int i;
 
 	if (action != HDA_FIXUP_ACT_INIT)
@@ -2118,6 +2119,12 @@
 		.chained = true,
 		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
 	},
+	[ALC889_FIXUP_MP41_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc889_fixup_mbp_vref,
+		.chained = true,
+		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
+	},
 	[ALC882_FIXUP_INV_DMIC] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
@@ -2138,6 +2145,7 @@
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
 	SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+	SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
 	SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
 	SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
 	SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
@@ -2170,6 +2178,7 @@
 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
 
 	/* All Apple entries are in codec SSIDs */
 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2189,11 +2198,11 @@
 	SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
-	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
+	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
-	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
+	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -3351,6 +3360,7 @@
 	ALC269_FIXUP_LIFEBOOK,
 	ALC269_FIXUP_LIFEBOOK_EXTMIC,
 	ALC269_FIXUP_LIFEBOOK_HP_PIN,
+	ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
 	ALC269_FIXUP_AMIC,
 	ALC269_FIXUP_DMIC,
 	ALC269VB_FIXUP_AMIC,
@@ -3369,6 +3379,7 @@
 	ALC290_FIXUP_MONO_SPEAKERS,
 	ALC269_FIXUP_HEADSET_MODE,
 	ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
+	ALC269_FIXUP_ASPIRE_HEADSET_MIC,
 	ALC269_FIXUP_ASUS_X101_FUNC,
 	ALC269_FIXUP_ASUS_X101_VERB,
 	ALC269_FIXUP_ASUS_X101,
@@ -3472,6 +3483,10 @@
 			{ }
 		},
 	},
+	[ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+	},
 	[ALC269_FIXUP_AMIC] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -3584,6 +3599,15 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_headset_mode_no_hp_mic,
 	},
+	[ALC269_FIXUP_ASPIRE_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x01a1913c }, /* headset mic w/o jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE,
+	},
 	[ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -3718,11 +3742,15 @@
 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+	SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
 	SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+	SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -4412,6 +4440,7 @@
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+	SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
diff -ur a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
--- a/sound/pci/hda/patch_sigmatel.c	2017-03-23 15:07:21.000000000 +0100
+++ b/sound/pci/hda/patch_sigmatel.c	2017-03-14 02:45:18.000000000 +0100
@@ -702,6 +702,7 @@
 static bool hp_blike_system(u32 subsystem_id)
 {
 	switch (subsystem_id) {
+	case 0x103c1473: /* HP ProBook 6550b */
 	case 0x103c1520:
 	case 0x103c1521:
 	case 0x103c1523:
diff -ur a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
--- a/sound/pci/intel8x0.c	2017-03-23 15:07:04.000000000 +0100
+++ b/sound/pci/intel8x0.c	2017-03-14 02:44:59.000000000 +0100
@@ -2880,6 +2880,7 @@
 
 static struct snd_pci_quirk intel8x0_clock_list[] = {
 	SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
+	SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
 	SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
 	SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
 	SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
diff -ur a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
--- a/sound/pci/rme9652/hdsp.c	2017-03-23 15:07:11.000000000 +0100
+++ b/sound/pci/rme9652/hdsp.c	2017-03-14 02:45:06.000000000 +0100
@@ -2918,7 +2918,7 @@
 {
 	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
 
-	ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
+	ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
 	return 0;
 }
 
@@ -2930,7 +2930,7 @@
 
 	if (!snd_hdsp_use_is_exclusive(hdsp))
 		return -EBUSY;
-	val = ucontrol->value.enumerated.item[0];
+	val = ucontrol->value.integer.value[0];
 	spin_lock_irq(&hdsp->lock);
 	if (val != hdsp_dds_offset(hdsp))
 		change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
diff -ur a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
--- a/sound/pci/rme9652/hdspm.c	2017-03-23 15:07:13.000000000 +0100
+++ b/sound/pci/rme9652/hdspm.c	2017-03-14 02:45:07.000000000 +0100
@@ -1407,6 +1407,9 @@
 {
 	u64 n;
 
+	if (snd_BUG_ON(rate <= 0))
+		return;
+
 	if (rate >= 112000)
 		rate /= 4;
 	else if (rate >= 56000)
@@ -2026,6 +2029,8 @@
 		} else {
 			/* slave mode, return external sample rate */
 			rate = hdspm_external_sample_rate(hdspm);
+			if (!rate)
+				rate = hdspm->system_sample_rate;
 		}
 	}
 
@@ -2069,8 +2074,11 @@
 					    ucontrol)
 {
 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+	int rate = ucontrol->value.integer.value[0];
 
-	hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
+	if (rate < 27000 || rate > 207000)
+		return -EINVAL;
+	hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
 	return 0;
 }
 
@@ -4135,7 +4143,7 @@
 {
 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
-	ucontrol->value.enumerated.item[0] = hdspm->tco->term;
+	ucontrol->value.integer.value[0] = hdspm->tco->term;
 
 	return 0;
 }
@@ -4145,8 +4153,8 @@
 {
 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
-	if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
-		hdspm->tco->term = ucontrol->value.enumerated.item[0];
+	if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
+		hdspm->tco->term = ucontrol->value.integer.value[0];
 
 		hdspm_tco_write(hdspm);
 
diff -ur a/sound/pci/rme96.c b/sound/pci/rme96.c
--- a/sound/pci/rme96.c	2017-03-23 15:07:03.000000000 +0100
+++ b/sound/pci/rme96.c	2017-03-14 02:44:57.000000000 +0100
@@ -702,10 +702,11 @@
 	{
 		/* change to/from double-speed: reset the DAC (if available) */
 		snd_rme96_reset_dac(rme96);
+		return 1; /* need to restore volume */
 	} else {
 		writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
+		return 0;
 	}
-	return 0;
 }
 
 static int
@@ -943,6 +944,7 @@
 	struct rme96 *rme96 = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int err, rate, dummy;
+	bool apply_dac_volume = false;
 
 	runtime->dma_area = (void __force *)(rme96->iobase +
 					     RME96_IO_PLAY_BUFFER);
@@ -956,24 +958,26 @@
 	{
                 /* slave clock */
                 if ((int)params_rate(params) != rate) {
-			spin_unlock_irq(&rme96->lock);
-			return -EIO;                    
-                }
-	} else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) {
-		spin_unlock_irq(&rme96->lock);
-		return err;
-	}
-	if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) {
-		spin_unlock_irq(&rme96->lock);
-		return err;
+			err = -EIO;
+			goto error;
+		}
+	} else {
+		err = snd_rme96_playback_setrate(rme96, params_rate(params));
+		if (err < 0)
+			goto error;
+		apply_dac_volume = err > 0; /* need to restore volume later? */
 	}
+
+	err = snd_rme96_playback_setformat(rme96, params_format(params));
+	if (err < 0)
+		goto error;
 	snd_rme96_setframelog(rme96, params_channels(params), 1);
 	if (rme96->capture_periodsize != 0) {
 		if (params_period_size(params) << rme96->playback_frlog !=
 		    rme96->capture_periodsize)
 		{
-			spin_unlock_irq(&rme96->lock);
-			return -EBUSY;
+			err = -EBUSY;
+			goto error;
 		}
 	}
 	rme96->playback_periodsize =
@@ -984,9 +988,16 @@
 		rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
 		writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
 	}
+
+	err = 0;
+ error:
 	spin_unlock_irq(&rme96->lock);
-		
-	return 0;
+	if (apply_dac_volume) {
+		usleep_range(3000, 10000);
+		snd_rme96_apply_dac_volume(rme96);
+	}
+
+	return err;
 }
 
 static int
diff -ur a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
--- a/sound/soc/codecs/arizona.c	2017-03-23 15:06:18.000000000 +0100
+++ b/sound/soc/codecs/arizona.c	2017-03-14 02:44:16.000000000 +0100
@@ -1056,7 +1056,7 @@
 	int chan_limit = arizona->pdata.max_channels_clocked[dai->id - 1];
 	int bclk, lrclk, wl, frame, bclk_target;
 
-	if (params_rate(params) % 8000)
+	if (params_rate(params) % 4000)
 		rates = &arizona_44k1_bclk_rates[0];
 	else
 		rates = &arizona_48k_bclk_rates[0];
diff -ur a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
--- a/sound/soc/codecs/mc13783.c	2017-03-23 15:06:19.000000000 +0100
+++ b/sound/soc/codecs/mc13783.c	2017-03-14 02:44:16.000000000 +0100
@@ -602,14 +602,14 @@
 				AUDIO_SSI_SEL, 0);
 	else
 		mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
-				0, AUDIO_SSI_SEL);
+				AUDIO_SSI_SEL, AUDIO_SSI_SEL);
 
 	if (priv->dac_ssi_port == MC13783_SSI1_PORT)
 		mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
 				AUDIO_SSI_SEL, 0);
 	else
 		mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
-				0, AUDIO_SSI_SEL);
+				AUDIO_SSI_SEL, AUDIO_SSI_SEL);
 
 	mc13xxx_unlock(priv->mc13xxx);
 
diff -ur a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
--- a/sound/soc/codecs/wm8737.c	2017-03-23 15:06:20.000000000 +0100
+++ b/sound/soc/codecs/wm8737.c	2017-03-14 02:44:17.000000000 +0100
@@ -491,7 +491,8 @@
 
 			/* Fast VMID ramp at 2*2.5k */
 			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
-					    WM8737_VMIDSEL_MASK, 0x4);
+					    WM8737_VMIDSEL_MASK,
+					    2 << WM8737_VMIDSEL_SHIFT);
 
 			/* Bring VMID up */
 			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
@@ -505,7 +506,8 @@
 
 		/* VMID at 2*300k */
 		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
-				    WM8737_VMIDSEL_MASK, 2);
+				    WM8737_VMIDSEL_MASK,
+				    1 << WM8737_VMIDSEL_SHIFT);
 
 		break;
 
diff -ur a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
--- a/sound/soc/codecs/wm8903.h	2017-03-23 15:06:35.000000000 +0100
+++ b/sound/soc/codecs/wm8903.h	2017-03-14 02:44:28.000000000 +0100
@@ -171,7 +171,7 @@
 #define WM8903_VMID_BUF_ENA_WIDTH                    1  /* VMID_BUF_ENA */
 
 #define WM8903_VMID_RES_50K                          2
-#define WM8903_VMID_RES_250K                         3
+#define WM8903_VMID_RES_250K                         4
 #define WM8903_VMID_RES_5K                           6
 
 /*
diff -ur a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
--- a/sound/soc/codecs/wm8955.c	2017-03-23 15:06:30.000000000 +0100
+++ b/sound/soc/codecs/wm8955.c	2017-03-14 02:44:23.000000000 +0100
@@ -298,7 +298,7 @@
 		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
 				    WM8955_K_17_9_MASK,
 				    (pll.k >> 9) & WM8955_K_17_9_MASK);
-		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
+		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3,
 				    WM8955_K_8_0_MASK,
 				    pll.k & WM8955_K_8_0_MASK);
 		if (pll.k)
diff -ur a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
--- a/sound/soc/codecs/wm8958-dsp2.c	2017-03-23 15:06:21.000000000 +0100
+++ b/sound/soc/codecs/wm8958-dsp2.c	2017-03-14 02:44:18.000000000 +0100
@@ -459,7 +459,7 @@
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
@@ -549,7 +549,7 @@
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
@@ -582,7 +582,7 @@
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
@@ -748,7 +748,7 @@
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994 *control = wm8994->wm8994;
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 	int reg;
 
 	/* Don't allow on the fly reconfiguration */
diff -ur a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
--- a/sound/soc/codecs/wm8960.c	2017-03-23 15:06:23.000000000 +0100
+++ b/sound/soc/codecs/wm8960.c	2017-03-14 02:44:19.000000000 +0100
@@ -242,7 +242,7 @@
 SOC_ENUM("ADC Polarity", wm8960_enum[0]),
 SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),
 
-SOC_ENUM("DAC Polarity", wm8960_enum[2]),
+SOC_ENUM("DAC Polarity", wm8960_enum[1]),
 SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
 		    wm8960_get_deemph, wm8960_put_deemph),
 
@@ -392,7 +392,7 @@
 	{ "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
 	{ "Right Input Mixer", NULL, "RINPUT1", },  /* Really Boost Switch */
 	{ "Right Input Mixer", NULL, "RINPUT2" },
-	{ "Right Input Mixer", NULL, "LINPUT3" },
+	{ "Right Input Mixer", NULL, "RINPUT3" },
 
 	{ "Left ADC", NULL, "Left Input Mixer" },
 	{ "Right ADC", NULL, "Right Input Mixer" },
diff -ur a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
--- a/sound/soc/codecs/wm8962.c	2017-03-23 15:06:27.000000000 +0100
+++ b/sound/soc/codecs/wm8962.c	2017-03-14 02:44:21.000000000 +0100
@@ -363,8 +363,8 @@
 	{ 16924, 0x0059 },   /* R16924 - HDBASS_PG_1 */
 	{ 16925, 0x999A },   /* R16925 - HDBASS_PG_0 */
 
-	{ 17048, 0x0083 },   /* R17408 - HPF_C_1 */
-	{ 17049, 0x98AD },   /* R17409 - HPF_C_0 */
+	{ 17408, 0x0083 },   /* R17408 - HPF_C_1 */
+	{ 17409, 0x98AD },   /* R17409 - HPF_C_0 */
 
 	{ 17920, 0x007F },   /* R17920 - ADCL_RETUNE_C1_1 */
 	{ 17921, 0xFFFF },   /* R17921 - ADCL_RETUNE_C1_0 */
diff -ur a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
--- a/sound/soc/codecs/wm8994.c	2017-03-23 15:06:35.000000000 +0100
+++ b/sound/soc/codecs/wm8994.c	2017-03-14 02:44:26.000000000 +0100
@@ -361,7 +361,7 @@
 	struct wm8994 *control = wm8994->wm8994;
 	struct wm8994_pdata *pdata = &control->pdata;
 	int drc = wm8994_get_drc(kcontrol->id.name);
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 
 	if (drc < 0)
 		return drc;
@@ -468,7 +468,7 @@
 	struct wm8994 *control = wm8994->wm8994;
 	struct wm8994_pdata *pdata = &control->pdata;
 	int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
-	int value = ucontrol->value.integer.value[0];
+	int value = ucontrol->value.enumerated.item[0];
 
 	if (block < 0)
 		return block;
@@ -2678,7 +2678,7 @@
 };
 
 static int fs_ratios[] = {
-	64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
+	64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
 };
 
 static int bclk_divs[] = {
diff -ur a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
--- a/sound/soc/dwc/designware_i2s.c	2017-03-23 15:06:44.000000000 +0100
+++ b/sound/soc/dwc/designware_i2s.c	2017-03-14 02:44:39.000000000 +0100
@@ -100,10 +100,10 @@
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		for (i = 0; i < 4; i++)
-			i2s_write_reg(dev->i2s_base, TOR(i), 0);
+			i2s_read_reg(dev->i2s_base, TOR(i));
 	} else {
 		for (i = 0; i < 4; i++)
-			i2s_write_reg(dev->i2s_base, ROR(i), 0);
+			i2s_read_reg(dev->i2s_base, ROR(i));
 	}
 }
 
diff -ur a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
--- a/sound/soc/pxa/Kconfig	2016-10-20 04:32:11.000000000 +0200
+++ b/sound/soc/pxa/Kconfig	2016-07-29 05:48:09.000000000 +0200
@@ -1,7 +1,6 @@
 config SND_PXA2XX_SOC
 	tristate "SoC Audio for the Intel PXA2xx chip"
 	depends on ARCH_PXA
-	select SND_ARM
 	select SND_PXA2XX_LIB
 	help
 	  Say Y or M if you want to add support for codecs attached to
@@ -24,7 +23,6 @@
 config SND_PXA2XX_SOC_AC97
 	tristate
 	select AC97_BUS
-	select SND_ARM
 	select SND_PXA2XX_LIB_AC97
 	select SND_SOC_AC97_BUS
 
diff -ur a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
--- a/sound/soc/samsung/s3c-i2s-v2.c	2017-03-23 15:06:15.000000000 +0100
+++ b/sound/soc/samsung/s3c-i2s-v2.c	2017-03-14 02:44:12.000000000 +0100
@@ -731,7 +731,7 @@
 #endif
 
 int s3c_i2sv2_register_component(struct device *dev, int id,
-			   struct snd_soc_component_driver *cmp_drv,
+			   const struct snd_soc_component_driver *cmp_drv,
 			   struct snd_soc_dai_driver *dai_drv)
 {
 	struct snd_soc_dai_ops *ops = drv->ops;
diff -ur a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
--- a/sound/soc/samsung/s3c-i2s-v2.h	2017-03-23 15:06:15.000000000 +0100
+++ b/sound/soc/samsung/s3c-i2s-v2.h	2017-03-14 02:44:13.000000000 +0100
@@ -101,7 +101,7 @@
  * soc core.
  */
 extern int s3c_i2sv2_register_component(struct device *dev, int id,
-					struct snd_soc_component_driver *cmp_drv,
+					const struct snd_soc_component_driver *cmp_drv,
 					struct snd_soc_dai_driver *dai_drv);
 
 #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
diff -ur a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
--- a/sound/soc/soc-compress.c	2017-03-23 15:06:04.000000000 +0100
+++ b/sound/soc/soc-compress.c	2017-03-14 02:44:03.000000000 +0100
@@ -383,17 +383,34 @@
 	struct snd_compr *compr;
 	char new_name[64];
 	int ret = 0, direction = 0;
+	int playback = 0, capture = 0;
 
 	/* check client and interface hw capabilities */
 	snprintf(new_name, sizeof(new_name), "%s %s-%d",
 			rtd->dai_link->stream_name, codec_dai->name, num);
 
 	if (codec_dai->driver->playback.channels_min)
+		playback = 1;
+	if (codec_dai->driver->capture.channels_min)
+		capture = 1;
+
+	capture = capture && cpu_dai->driver->capture.channels_min;
+	playback = playback && cpu_dai->driver->playback.channels_min;
+
+	/*
+	 * Compress devices are unidirectional so only one of the directions
+	 * should be set, check for that (xor)
+	 */
+	if (playback + capture != 1) {
+		dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
+				playback, capture);
+		return -EINVAL;
+	}
+
+	if(playback)
 		direction = SND_COMPRESS_PLAYBACK;
-	else if (codec_dai->driver->capture.channels_min)
-		direction = SND_COMPRESS_CAPTURE;
 	else
-		return -EINVAL;
+		direction = SND_COMPRESS_CAPTURE;
 
 	compr = kzalloc(sizeof(*compr), GFP_KERNEL);
 	if (compr == NULL) {
diff -ur a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
--- a/sound/soc/soc-pcm.c	2017-03-23 15:06:07.000000000 +0100
+++ b/sound/soc/soc-pcm.c	2017-03-14 02:44:04.000000000 +0100
@@ -1248,7 +1248,8 @@
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
 			continue;
 
 		dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
diff -ur a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
--- a/sound/synth/emux/emux_oss.c	2017-03-23 15:06:57.000000000 +0100
+++ b/sound/synth/emux/emux_oss.c	2017-03-14 02:44:52.000000000 +0100
@@ -67,7 +67,8 @@
 	struct snd_seq_oss_reg *arg;
 	struct snd_seq_device *dev;
 
-	if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
+	/* using device#1 here for avoiding conflicts with OPL3 */
+	if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
 			       sizeof(struct snd_seq_oss_reg), &dev) < 0)
 		return;
 
@@ -114,12 +115,8 @@
 	if (snd_BUG_ON(!arg || !emu))
 		return -ENXIO;
 
-	mutex_lock(&emu->register_mutex);
-
-	if (!snd_emux_inc_count(emu)) {
-		mutex_unlock(&emu->register_mutex);
+	if (!snd_emux_inc_count(emu))
 		return -EFAULT;
-	}
 
 	memset(&callback, 0, sizeof(callback));
 	callback.owner = THIS_MODULE;
@@ -131,7 +128,6 @@
 	if (p == NULL) {
 		snd_printk(KERN_ERR "can't create port\n");
 		snd_emux_dec_count(emu);
-		mutex_unlock(&emu->register_mutex);
 		return -ENOMEM;
 	}
 
@@ -144,8 +140,6 @@
 	reset_port_mode(p, arg->seq_mode);
 
 	snd_emux_reset_port(p);
-
-	mutex_unlock(&emu->register_mutex);
 	return 0;
 }
 
@@ -189,13 +183,11 @@
 	if (snd_BUG_ON(!emu))
 		return -ENXIO;
 
-	mutex_lock(&emu->register_mutex);
 	snd_emux_sounds_off_all(p);
 	snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
 	snd_seq_event_port_detach(p->chset.client, p->chset.port);
 	snd_emux_dec_count(emu);
 
-	mutex_unlock(&emu->register_mutex);
 	return 0;
 }
 
diff -ur a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
--- a/sound/synth/emux/emux_seq.c	2017-03-23 15:06:56.000000000 +0100
+++ b/sound/synth/emux/emux_seq.c	2017-03-14 02:44:51.000000000 +0100
@@ -122,12 +122,10 @@
 	if (emu->voices)
 		snd_emux_terminate_all(emu);
 		
-	mutex_lock(&emu->register_mutex);
 	if (emu->client >= 0) {
 		snd_seq_delete_kernel_client(emu->client);
 		emu->client = -1;
 	}
-	mutex_unlock(&emu->register_mutex);
 }
 
 /*
@@ -261,8 +259,8 @@
 /*
  * increment usage count
  */
-int
-snd_emux_inc_count(struct snd_emux *emu)
+static int
+__snd_emux_inc_count(struct snd_emux *emu)
 {
 	emu->used++;
 	if (!try_module_get(emu->ops.owner))
@@ -276,11 +274,21 @@
 	return 1;
 }
 
+int snd_emux_inc_count(struct snd_emux *emu)
+{
+	int ret;
+
+	mutex_lock(&emu->register_mutex);
+	ret = __snd_emux_inc_count(emu);
+	mutex_unlock(&emu->register_mutex);
+	return ret;
+}
+
 /*
  * decrease usage count
  */
-void
-snd_emux_dec_count(struct snd_emux *emu)
+static void
+__snd_emux_dec_count(struct snd_emux *emu)
 {
 	module_put(emu->card->module);
 	emu->used--;
@@ -289,6 +297,13 @@
 	module_put(emu->ops.owner);
 }
 
+void snd_emux_dec_count(struct snd_emux *emu)
+{
+	mutex_lock(&emu->register_mutex);
+	__snd_emux_dec_count(emu);
+	mutex_unlock(&emu->register_mutex);
+}
+
 /*
  * Routine that is called upon a first use of a particular port
  */
@@ -307,7 +322,7 @@
 
 	mutex_lock(&emu->register_mutex);
 	snd_emux_init_port(p);
-	snd_emux_inc_count(emu);
+	__snd_emux_inc_count(emu);
 	mutex_unlock(&emu->register_mutex);
 	return 0;
 }
@@ -330,7 +345,7 @@
 
 	mutex_lock(&emu->register_mutex);
 	snd_emux_sounds_off_all(p);
-	snd_emux_dec_count(emu);
+	__snd_emux_dec_count(emu);
 	mutex_unlock(&emu->register_mutex);
 	return 0;
 }
diff -ur a/sound/usb/midi.c b/sound/usb/midi.c
--- a/sound/usb/midi.c	2017-03-23 15:06:52.000000000 +0100
+++ b/sound/usb/midi.c	2017-03-14 02:44:46.000000000 +0100
@@ -172,6 +172,8 @@
 		u8 running_status_length;
 	} ports[0x10];
 	u8 seen_f5;
+	bool in_sysex;
+	u8 last_cin;
 	u8 error_resubmit;
 	int current_port;
 };
@@ -463,6 +465,39 @@
 }
 
 /*
+ * QinHeng CH345 is buggy: every second packet inside a SysEx has not CIN 4
+ * but the previously seen CIN, but still with three data bytes.
+ */
+static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
+				     uint8_t *buffer, int buffer_length)
+{
+	unsigned int i, cin, length;
+
+	for (i = 0; i + 3 < buffer_length; i += 4) {
+		if (buffer[i] == 0 && i > 0)
+			break;
+		cin = buffer[i] & 0x0f;
+		if (ep->in_sysex &&
+		    cin == ep->last_cin &&
+		    (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
+			cin = 0x4;
+#if 0
+		if (buffer[i + 1] == 0x90) {
+			/*
+			 * Either a corrupted running status or a real note-on
+			 * message; impossible to detect reliably.
+			 */
+		}
+#endif
+		length = snd_usbmidi_cin_length[cin];
+		snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
+		ep->in_sysex = cin == 0x4;
+		if (!ep->in_sysex)
+			ep->last_cin = cin;
+	}
+}
+
+/*
  * CME protocol: like the standard protocol, but SysEx commands are sent as a
  * single USB packet preceded by a 0x0F byte.
  */
@@ -648,6 +683,12 @@
 	.output_packet = snd_usbmidi_output_standard_packet,
 };
 
+static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = {
+	.input = ch345_broken_sysex_input,
+	.output = snd_usbmidi_standard_output,
+	.output_packet = snd_usbmidi_output_standard_packet,
+};
+
 /*
  * AKAI MPD16 protocol:
  *
@@ -1323,6 +1364,7 @@
 		 * Various chips declare a packet size larger than 4 bytes, but
 		 * do not actually work with larger packets:
 		 */
+	case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */
 	case USB_ID(0x0a92, 0x1020): /* ESI M4U */
 	case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */
 	case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
@@ -2213,6 +2255,10 @@
 
 		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
 		break;
+	case QUIRK_MIDI_CH345:
+		umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
+		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
+		break;
 	default:
 		snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type);
 		err = -ENXIO;
@@ -2242,7 +2288,6 @@
 	else
 		err = snd_usbmidi_create_endpoints(umidi, endpoints);
 	if (err < 0) {
-		snd_usbmidi_free(umidi);
 		return err;
 	}
 
diff -ur a/sound/usb/mixer.c b/sound/usb/mixer.c
--- a/sound/usb/mixer.c	2017-03-23 15:06:52.000000000 +0100
+++ b/sound/usb/mixer.c	2017-03-14 02:44:47.000000000 +0100
@@ -762,6 +762,7 @@
 	case USB_ID(0x046d, 0x081d):  
 	case USB_ID(0x046d, 0x0825):  
 	case USB_ID(0x046d, 0x0826):  
+	case USB_ID(0x046d, 0x08ca):  
 	case USB_ID(0x046d, 0x0991):
 	 
 		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
diff -ur a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
--- a/sound/usb/mixer_maps.c	2017-03-23 15:06:50.000000000 +0100
+++ b/sound/usb/mixer_maps.c	2017-03-14 02:44:44.000000000 +0100
@@ -330,6 +330,20 @@
 	{ 0 }
 };
 
+/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
+static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
+static struct usbmix_name_map bose_companion5_map[] = {
+	{ 3, NULL, .dB = &bose_companion5_dB },
+	{ 0 }	/* terminator */
+};
+
+/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
+static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
+static struct usbmix_name_map dragonfly_1_2_map[] = {
+	{ 7, NULL, .dB = &dragonfly_1_2_dB },
+	{ 0 }	/* terminator */
+};
+
 /*
  * Control map entries
  */
@@ -418,6 +432,11 @@
 		.map = ebox44_map,
 	},
 	{
+		/* MAYA44 USB+ */
+		.id = USB_ID(0x2573, 0x0008),
+		.map = maya44_map,
+	},
+	{
 		/* KEF X300A */
 		.id = USB_ID(0x27ac, 0x1000),
 		.map = scms_usb3318_map,
@@ -427,5 +446,15 @@
 		.id = USB_ID(0x25c4, 0x0003),
 		.map = scms_usb3318_map,
 	},
+	{
+		/* Bose Companion 5 */
+		.id = USB_ID(0x05a7, 0x1020),
+		.map = bose_companion5_map,
+	},
+	{
+		/* Dragonfly DAC 1.2 */
+		.id = USB_ID(0x21b4, 0x0081),
+		.map = dragonfly_1_2_map,
+	},
 	{ 0 } /* terminator */
 };
diff -ur a/sound/usb/quirks.c b/sound/usb/quirks.c
--- a/sound/usb/quirks.c	2017-03-23 15:06:50.000000000 +0100
+++ b/sound/usb/quirks.c	2017-03-14 02:44:44.000000000 +0100
@@ -312,6 +312,7 @@
 		[QUIRK_MIDI_CME] = create_any_midi_quirk,
 		[QUIRK_MIDI_AKAI] = create_any_midi_quirk,
 		[QUIRK_MIDI_FTDI] = create_any_midi_quirk,
+		[QUIRK_MIDI_CH345] = create_any_midi_quirk,
 		[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
 		[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
 		[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
@@ -898,8 +899,12 @@
 	 * "Playback Design" products need a 50ms delay after setting the
 	 * USB interface.
 	 */
-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
+	switch (le16_to_cpu(dev->descriptor.idVendor)) {
+	case 0x23ba: /* Playback Design */
+	case 0x0644: /* TEAC Corp. */
 		mdelay(50);
+		break;
+	}
 }
 
 void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
@@ -914,6 +919,14 @@
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
 
+	/*
+	 * "TEAC Corp." products need a 20ms delay after each
+	 * class compliant request
+	 */
+	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
+	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+		mdelay(20);
+
 	/* Marantz/Denon devices with USB DAC functionality need a delay
 	 * after each class compliant request
 	 */
diff -ur a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
--- a/sound/usb/quirks-table.h	2017-03-23 15:06:55.000000000 +0100
+++ b/sound/usb/quirks-table.h	2017-03-14 02:44:49.000000000 +0100
@@ -2773,6 +2773,74 @@
 	}
 },
 
+/* Steinberg devices */
+{
+	/* Steinberg MI2 */
+	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_COMPOSITE,
+		.data = & (const struct snd_usb_audio_quirk[]) {
+			{
+				.ifnum = 0,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 1,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 2,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 3,
+				.type = QUIRK_MIDI_FIXED_ENDPOINT,
+				.data = &(const struct snd_usb_midi_endpoint_info) {
+					.out_cables = 0x0001,
+					.in_cables  = 0x0001
+				}
+			},
+			{
+				.ifnum = -1
+			}
+		}
+	}
+},
+{
+	/* Steinberg MI4 */
+	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_COMPOSITE,
+		.data = & (const struct snd_usb_audio_quirk[]) {
+			{
+				.ifnum = 0,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 1,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 2,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 3,
+				.type = QUIRK_MIDI_FIXED_ENDPOINT,
+				.data = &(const struct snd_usb_midi_endpoint_info) {
+					.out_cables = 0x0001,
+					.in_cables  = 0x0001
+				}
+			},
+			{
+				.ifnum = -1
+			}
+		}
+	}
+},
+
 /* TerraTec devices */
 {
 	USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
@@ -2973,6 +3041,17 @@
 	.idProduct = 0x1020,
 },
 
+/* QinHeng devices */
+{
+	USB_DEVICE(0x1a86, 0x752d),
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+		.vendor_name = "QinHeng",
+		.product_name = "CH345",
+		.ifnum = 1,
+		.type = QUIRK_MIDI_CH345
+	}
+},
+
 /* KeithMcMillen Stringport */
 {
 	USB_DEVICE(0x1f38, 0x0001),
diff -ur a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
--- a/sound/usb/usbaudio.h	2017-03-23 15:06:50.000000000 +0100
+++ b/sound/usb/usbaudio.h	2017-03-14 02:44:45.000000000 +0100
@@ -83,6 +83,7 @@
 	QUIRK_MIDI_AKAI,
 	QUIRK_MIDI_US122L,
 	QUIRK_MIDI_FTDI,
+	QUIRK_MIDI_CH345,
 	QUIRK_AUDIO_STANDARD_INTERFACE,
 	QUIRK_AUDIO_FIXED_ENDPOINT,
 	QUIRK_AUDIO_EDIROL_UAXX,
Only in b/SynoBuildConf: build
Only in b/SynoBuildConf: build-virtual-headers
Only in b/SynoBuildConf: install
Only in b/SynoBuildConf: install-dev
Only in b/SynoBuildConf: install-dev-virtual-headers
Only in b/SynoBuildConf: install-virinst-T1tool
Only in a/synoconfigs: alpine4k
Only in a/synoconfigs: armada38x
diff -ur a/synoconfigs/avoton b/synoconfigs/avoton
--- a/synoconfigs/avoton	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/avoton	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -33,7 +33,6 @@
 CONFIG_AUDIT_ARCH=y
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_HAVE_INTEL_TXT=y
 CONFIG_X86_64_SMP=y
 CONFIG_X86_HT=y
 CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11"
@@ -145,7 +144,6 @@
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_SWAP_ENABLED=y
 # CONFIG_MEMCG_KMEM is not set
-# CONFIG_CGROUP_HUGETLB is not set
 # CONFIG_CGROUP_PERF is not set
 CONFIG_CGROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
@@ -207,7 +205,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -285,8 +283,10 @@
 CONFIG_SYNO_SUPPORT_EUP=y
 # CONFIG_SYNO_BOOT_SATA_DOM is not set
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
+CONFIG_SYNO_CACHE_DEVICE_PREFIX="nvc"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
 # CONFIG_SYNO_ECC_NOTIFICATION is not set
 CONFIG_SYNO_DISPLAY_CPUINFO=y
@@ -337,6 +337,7 @@
 # CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY is not set
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -355,6 +356,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -363,6 +366,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -372,6 +376,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -382,6 +388,8 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+CONFIG_SYNO_PCI_HOST_SATA_CACHE=y
+CONFIG_SYNO_MAX_PCI_SLOT=1
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -447,6 +455,7 @@
 CONFIG_SYNO_SII3132_MEDIA_ERROR_RETRY=y
 # CONFIG_SYNO_EUNIT_LIMITATION is not set
 CONFIG_SYNO_AHCI_PMP_SII3x26_DEFER_CMD=y
+CONFIG_SYNO_DISABLE_SIL3x26=y
 CONFIG_SYNO_SATA_LINK_SPEED_RETRY=y
 CONFIG_SYNO_EARLY_NCQ_ANALYZE=y
 CONFIG_SYNO_DISK_NCQ_COMPATIBILITY=y
@@ -454,6 +463,9 @@
 # CONFIG_SYNO_MV9215_EXTPORT_NCQ_OFF is not set
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN=y
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+CONFIG_SYNO_DS1815P_SPEED_LIMIT=y
+CONFIG_SYNO_SIL_PORTING=y
 
 #
 # SAS
@@ -506,7 +518,8 @@
 # CONFIG_SYNO_ENHANCE_STORAGE_DISCONNECTION is not set
 CONFIG_SYNO_PHISON_USB_FACTORY=y
 # CONFIG_SYNO_USB_BUGGY_PORT_RESET_BIT_QUIRK is not set
-# CONFIG_SYNO_USB_VBUS_GPIO_CONTROL is not set
+CONFIG_SYNO_USB_VBUS_GPIO_CONTROL=y
+CONFIG_SYNO_USB_VBUS_NUM_GPIO=1
 CONFIG_SYNO_USB_DEVICE_QUIRKS=y
 CONFIG_SYNO_USB_HC_MORE_TRANSACTION_TRIES=y
 
@@ -556,6 +569,10 @@
 # CONFIG_SYNO_XR17V35X_SERIAL is not set
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
 # CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
@@ -582,6 +599,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -632,12 +650,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -665,7 +683,6 @@
 CONFIG_SYNO_BTRFS_SUPPORT_FULLY_CLONE_BETWEEN_CSUM_AND_NOCSUM_DIR=y
 CONFIG_SYNO_BTRFS_QGROUP_QUERY=y
 CONFIG_SYNO_BTRFS_RENAME_READONLY_SUBVOL=y
-CONFIG_SYNO_BTRFS_CORRECT_SPACEINFO_LOCK=y
 CONFIG_SYNO_BTRFS_FIX_ALLOC_CHUNK=y
 CONFIG_SYNO_BTRFS_CLEAR_SPACE_FULL=y
 CONFIG_SYNO_BTRFS_REMOVE_UNUSED_QGROUP=y
@@ -692,8 +709,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -709,6 +733,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -720,6 +746,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -733,7 +761,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -767,6 +794,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 CONFIG_SYNO_POWEROFF_INFO_PRINT=y
@@ -849,7 +877,6 @@
 #
 CONFIG_ZONE_DMA=y
 CONFIG_SMP=y
-# CONFIG_X86_X2APIC is not set
 CONFIG_X86_MPPARSE=y
 # CONFIG_X86_EXTENDED_PLATFORM is not set
 # CONFIG_X86_INTEL_LPSS is not set
@@ -1679,6 +1706,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3104,10 +3132,6 @@
 # CONFIG_DMADEVICES is not set
 # CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
-CONFIG_VFIO_IOMMU_TYPE1=m
-CONFIG_VFIO=m
-CONFIG_VFIO_PCI=m
-# CONFIG_VFIO_PCI_VGA is not set
 # CONFIG_VIRT_DRIVERS is not set
 CONFIG_VIRTIO=m
 
@@ -3202,14 +3226,7 @@
 CONFIG_CLKEVT_I8253=y
 CONFIG_CLKBLD_I8253=y
 # CONFIG_MAILBOX is not set
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_SUPPORT=y
-# CONFIG_AMD_IOMMU is not set
-CONFIG_DMAR_TABLE=y
-CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_DEFAULT_ON=y
-CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-CONFIG_IRQ_REMAP=y
+# CONFIG_IOMMU_SUPPORT is not set
 
 #
 # Remoteproc drivers
@@ -3337,8 +3354,8 @@
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_TMPFS_XATTR is not set
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
@@ -3518,6 +3535,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3620,7 +3638,6 @@
 CONFIG_SECURITY_NETWORK=y
 # CONFIG_SECURITY_NETWORK_XFRM is not set
 CONFIG_SECURITY_PATH=y
-# CONFIG_INTEL_TXT is not set
 # CONFIG_SECURITY_SELINUX is not set
 # CONFIG_SECURITY_SMACK is not set
 # CONFIG_SECURITY_TOMOYO is not set
@@ -3702,6 +3719,8 @@
 CONFIG_CRYPTO_CRC32C_INTEL=m
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
@@ -3787,7 +3806,6 @@
 CONFIG_KVM=m
 CONFIG_KVM_INTEL=m
 # CONFIG_KVM_AMD is not set
-CONFIG_KVM_DEVICE_ASSIGNMENT=y
 # CONFIG_BINARY_PRINTF is not set
 
 #
diff -ur a/synoconfigs/back-porting-records b/synoconfigs/back-porting-records
--- a/synoconfigs/back-porting-records	2016-10-20 04:32:07.000000000 +0200
+++ b/synoconfigs/back-porting-records	2017-02-14 17:25:11.000000000 +0100
@@ -1,3 +1,123 @@
+BUG ID: <DSM> #97387 - encode_text_output call trace
+Commit Hashes:
+	676d23690fb62b5d51ba5d659935e9f7d9da9f8e
+
+BUG ID: <DSM> #97126 - fix kernel panic hang at booting
+Commit Hashes:
+	4ed6a540fab8ea4388c1703b73ecfed68a2009d1
+
+BUG ID: <DSM> #93445 - grantley BUG when enable iommu passthrough
+Commit Hashes:
+	702e3706b077cebd9758acecbb2f2a584f95910c
+	d01a28e016909e5b1a1b3a5c94f59b8d347df86f
+	219d11c3edb4df284748db0802f11c49525ca8bd
+	b2fc5e663dc22324ffedddfebae4542ebfa61155
+	2b3bdd76e4347af9743294e7e2f9c6ce1964680d
+	5b1584325c3bb9ec39aa0338482987764d9b3cfd
+	527226390e80503a33c9a0e476f2e7514215978c
+	1b336181e9a79f1d96412dad9b710789e0a2a582
+	999ee7cce5475955fb65b0c5830ec2d5e836db5c
+	991d64a99cc4a816cb0be954b56c812d9c2ad398
+	448a8bdcef1a3cacb8a807a57a09f3d363b11c09
+	d72091cd56e19695509b0bce9bd65e56700b4d5a
+	23251d732439814f422edc7fcb247d3314688865
+	67d09e468336a3302d32e36c1942cd8b52dca61c
+	6480495fce2d737b92b98f028ec7673e7d38bf6e
+	1687c90db9aa60e048fee160c5eb147ed43a4a32
+	7b0123f8d34ca10dfafc2865948ff5b8627c42ea
+	a9d3e02d0ec3631f097d92d2fbc74688e9dd9d27
+	df5d581c7f380992dc5dc2bfae67b2c3853785ee
+	bafb328ad52e0a7ed38aafc24654e07fb72d9b3e
+	ee88a7b92cec0a6d6df496e0513ce9df40b20cf4
+	3f74a9f13043ca1c36a4a70018b5f378d4e2dbcd
+	f87676dc5a05ff26f254dbd949af756651ec5e4a
+	2d9595dcd6731126b934cd63bc79ca4abd78aaf1
+	add42dc36b604f7c1a94f8dc542dae8347d68537
+	3a88599cfab03c0f191510879cb60d32ba142e6a
+	4e78fee1dcfb28e065014f54bb2908f448a1de80
+	9891a1aca98d6782dbe4687ad17a6ff80dd6aeb5
+	488422f090a213c8fd0b961b120b8597463b5c10
+	b06313190c96ad7242bdbb4191dbc520daa34390
+	32962106e84e4d8884a0f437819424215187d5d9
+	b2d43ae0e180903fd43d00de49fb1f1efee8339c
+	afd04919b7e62c65149f5cdd5603aac03e8b9010
+	1e362e5980b511d17eba9b4f361a817be87a993a
+	c79297294993312d4601a989e7d18b3967044778
+	1bef4129b63a9e294bd245e4ce8dfe020affa80e
+	877a0344122b1bcd16b7a3cb0b805f7d14a542ab
+	36b53119d4b55d20d66cf668dee72a3908063617
+	149805e3948b8af46355e7b693658f2d37b74e05
+	a834b96e3bda82d90b8a12aed3dd552235100eea
+	f49a34b7ad2cc991843626604902b813252428d4
+	7453f3ca7b2f4a609772a577447658e155dcdf2a
+	520ca6caf4431f19415b930e2932587fc3b0dd61
+	c012e41df89d36fca69fd5af3dd70f0b1b71f924
+	12649b089d9527747368278d3057a2bdd531053f
+	9a8794a83562dd340c7152209285ec2016347508
+	0f65b3b06613e1dd9ab0fc153e6fd8e45cdfdd3d
+	b94567a8a5c94fa87e68d7753d3abf481e19aa57
+	bf92844099a6348fe237392be50f8b26bac1845c
+	a1c9c53c5d36bbf1a67c045db654211f115eef37
+	8e4781eb80daecd0c99086bb1d2018864dac67d5
+	08ddb325840267350b57aec3d2e6fe255c89e0e4
+	9150877739583218153947f36065687c65958df8
+	3dd6c5390842bbc050bebf02368bbcc0640df87d
+	7cdca2b1688f0907b56eb39831fe7c175f60d41b
+	0a16f81f919f684b4de6148c107029add2228123
+	fc5018733fdaa0747f845f7a11e4b9c6d6d6167e
+	3d2e36eb472de54ef2114bd849b10f2f36bcd7a8
+	da36047e31c485d0cfe05894221ce1aa24bc99d0
+	dfd28f55e4470096b8dcef2baa78eaf90857bf5c
+	d865b03141a717bf04152c53c0e64477b86464d4
+	8470834c0b8d0a77e81eaaaf90b76d8d15b81703
+	a44c2870f3e85b5812363e4b14deb1627be175bc
+	622c7267ffc82ced8d7b4224c931c21cdc2ace6a
+	4a7c29af87aad32812c936a6da5860faf96b70bb
+	e2294a9c0245212d474488d71405ded079fd005d
+	925589da08a4788cb8a22e330d742710743ed21f
+	47d03ad3b8a8314b1ce5c3f1b90dded506cbbf25
+	88e12a640dceec6e3957bb701980ecf2e4cd5492
+	24108252bc85a7a59da4416f867c0c89b9f0f4b0
+	3e21d2677e2e04a038eff21c334f59377b4b67c1
+	809dcb7847fe73b41aec6b6a118be334ff1721ff
+	6a8254aa5c027403e44492efeddfd4a5cf473760
+	7dd9e787e5edb14dc64c499978ce764efef575bf
+	43fceadfba6334916e717311cba98d7c40e4185e
+	7d8366cd983cefe5f6309fe511849679636ba7a6
+	9a3d49baa51de44ee66bab69182bcccbfa791ca4
+	1efbff5eb8a60410aecce617c104f6fe0cc1554f
+	8b5d2178fbe7d971eb27d90fd581d231248464ac
+	ae7e234d1b1992a88bf176576d664ab7b4fe952e
+	ca507015200dd8ecf067151d642ddc303744bc93
+	7b30212e310593dd07a3f229a5d18e11881bb92a
+	43db418d61c66d5373176b63e8d6fd10282c4d99
+	9b65f661b447bc504babb6738908d2cd7f3fd610
+	c5782a66ab88df9325d40f680d22cd1054561917
+	5b5f044ce944f0aef34ddda78abeb17218567ab3
+	524336a5d33aa4ec2ae3b25cd9c3d7c9b2193d9a
+	85cdb16f828fd8a66143a0c9aaa271626ccd4fdb
+	26404448d75381f66cbf9c0e24511933af7dacb9
+	b50921f27b572648ebe8dbd7237dfea91df85075
+	bb0cae6112f570c8be72e613c18a1ddfcc9e0cfb
+	1fe01afa8b5e83b24805b24a2f43a954eeca7bbd
+	f4b9ab5a8bc9aba37b069b3164834a8b0c835f5d
+	469f3b6bacfa023c0b77029dc390f15f9a07e034
+	1d97e8ed0a29257867d8b7e80c5ab047ac446d14
+	024241f4f2fb3a98bc40f6a06ad076abe124967a
+	02eabadb0e308c37eba6173b4c615fe44d61ba7f
+	f1cc74179541203c8c7d040c07cb244b59819857
+	7ebc0824430ff2ac4c9523b81edf0b52bb009af9
+	9c4f3ff60b60b336d4d0ce0ba52e92262392290a
+	99e7b8e1d31f83f07aa9cd0b5e28bdccda06c5f1
+	eb37855a6b3927d3b30019ed8a2445ea8fbf9041
+	70b6499d2630c9dab6e429dbe14452be0d058c7e
+	58e913d4a89ab0d0ceb8fb07c8f0cbe1cd7d9ebb
+	4f4fb281861b93005f5fe25d0c5e06580afd0e4b
+	0a7a7bf390024b49a692940b264f499e112f5518
+	2da6cfe2658172a21a374a503d5a2a12b6555c16
+
+First Ready Kernel version: 3.17
+
 BUG ID: <DSM> # - configfs backport for lio-4.x
 Commit Hashes:
 	1ae1602de028acaa42a0f6ff18d19756f8e825c6
diff -ur a/synoconfigs/braswell b/synoconfigs/braswell
--- a/synoconfigs/braswell	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/braswell	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -33,7 +33,6 @@
 CONFIG_AUDIT_ARCH=y
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_HAVE_INTEL_TXT=y
 CONFIG_X86_64_SMP=y
 CONFIG_X86_HT=y
 CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11"
@@ -145,7 +144,6 @@
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_SWAP_ENABLED=y
 # CONFIG_MEMCG_KMEM is not set
-# CONFIG_CGROUP_HUGETLB is not set
 # CONFIG_CGROUP_PERF is not set
 CONFIG_CGROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
@@ -207,7 +205,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -285,6 +283,7 @@
 CONFIG_SYNO_SUPPORT_EUP=y
 # CONFIG_SYNO_BOOT_SATA_DOM is not set
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -321,7 +320,7 @@
 CONFIG_SYNO_SOFTLOCKUP_COUNTER_MAX=10
 CONFIG_SYNO_SOFTLOCKUP_THRESH_EXTENSION=y
 CONFIG_SYNO_FIX_WRITEBACK_DIV_BY_ZERO=y
-CONFIG_SYNO_FIX_SMPBOOT_RACE=y
+# CONFIG_SYNO_FIX_SMPBOOT_RACE is not set
 
 #
 # Network
@@ -337,6 +336,7 @@
 # CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY is not set
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -355,6 +355,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -363,6 +365,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -372,6 +375,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -382,6 +387,7 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -454,6 +460,8 @@
 CONFIG_SYNO_CHERRYVIEW_GPIO_WRITE_RETRY=y
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN=y
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+# CONFIG_SYNO_SIL_PORTING is not set
 
 #
 # SAS
@@ -561,6 +569,10 @@
 # CONFIG_SYNO_XR17V35X_SERIAL is not set
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
 # CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
@@ -587,6 +599,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -637,12 +650,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -697,8 +710,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -714,6 +734,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -725,6 +747,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -738,7 +762,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -772,6 +795,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 CONFIG_SYNO_POWEROFF_INFO_PRINT=y
@@ -1685,6 +1709,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3162,10 +3187,6 @@
 # CONFIG_DMADEVICES is not set
 # CONFIG_AUXDISPLAY is not set
 # CONFIG_UIO is not set
-CONFIG_VFIO_IOMMU_TYPE1=m
-CONFIG_VFIO=m
-CONFIG_VFIO_PCI=m
-# CONFIG_VFIO_PCI_VGA is not set
 # CONFIG_VIRT_DRIVERS is not set
 CONFIG_VIRTIO=m
 
@@ -3261,14 +3282,7 @@
 CONFIG_CLKEVT_I8253=y
 CONFIG_CLKBLD_I8253=y
 # CONFIG_MAILBOX is not set
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_SUPPORT=y
-# CONFIG_AMD_IOMMU is not set
-CONFIG_DMAR_TABLE=y
-CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_DEFAULT_ON=y
-CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-# CONFIG_IRQ_REMAP is not set
+# CONFIG_IOMMU_SUPPORT is not set
 
 #
 # Remoteproc drivers
@@ -3396,8 +3410,8 @@
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_TMPFS_XATTR is not set
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
@@ -3577,6 +3591,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3679,7 +3694,6 @@
 CONFIG_SECURITY_NETWORK=y
 # CONFIG_SECURITY_NETWORK_XFRM is not set
 CONFIG_SECURITY_PATH=y
-# CONFIG_INTEL_TXT is not set
 # CONFIG_SECURITY_SELINUX is not set
 # CONFIG_SECURITY_SMACK is not set
 # CONFIG_SECURITY_TOMOYO is not set
@@ -3761,6 +3775,8 @@
 CONFIG_CRYPTO_CRC32C_INTEL=m
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
@@ -3846,7 +3862,6 @@
 CONFIG_KVM=m
 CONFIG_KVM_INTEL=m
 # CONFIG_KVM_AMD is not set
-CONFIG_KVM_DEVICE_ASSIGNMENT=y
 # CONFIG_BINARY_PRINTF is not set
 
 #
diff -ur a/synoconfigs/broadwell b/synoconfigs/broadwell
--- a/synoconfigs/broadwell	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/broadwell	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -145,7 +145,6 @@
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_SWAP_ENABLED=y
 # CONFIG_MEMCG_KMEM is not set
-# CONFIG_CGROUP_HUGETLB is not set
 # CONFIG_CGROUP_PERF is not set
 CONFIG_CGROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
@@ -207,7 +206,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -291,6 +290,7 @@
 CONFIG_SYNO_SATA_DOM_VENDOR_SECOND_SRC="SATADOM"
 CONFIG_SYNO_SATA_DOM_MODEL_SECOND_SRC="D150SH"
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -344,6 +344,7 @@
 CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY=y
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -362,6 +363,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -370,6 +373,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -379,6 +383,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -389,6 +395,7 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 CONFIG_SYNO_FIXED_DISK_NAME_MV14XX=y
 CONFIG_SYNO_MV1475_SGPIO_LED_CTRL=y
@@ -441,6 +448,8 @@
 CONFIG_SYNO_PMP_HOTPLUG_TASK=y
 CONFIG_SYNO_SATA_REDUCE_RETRY_TIMER=y
 CONFIG_SYNO_SATA_PM_CLEANUP_CLASS=y
+CONFIG_SYNO_AHCI_SOFTWARE_ACITIVITY=y
+# CONFIG_SYNO_SIL3132_ACTIVITY is not set
 CONFIG_SYNO_SATA_CHIP_REORDER=y
 CONFIG_SYNO_FORCE_EH_FULL_RECOVER_RETRY=y
 CONFIG_SYNO_SATA_AHCI_FBS_NONCQ=y
@@ -457,6 +466,8 @@
 # CONFIG_SYNO_MV9215_EXTPORT_NCQ_OFF is not set
 CONFIG_SYNO_GET_DISK_SPEED=y
 CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN=y
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+# CONFIG_SYNO_SIL_PORTING is not set
 
 #
 # SAS
@@ -480,6 +491,8 @@
 # CONFIG_SYNO_SAS_RESERVATION_CONFLICT_WORKAROUND is not set
 # CONFIG_SYNO_SAS_ENCOLURE_PWR_CTL is not set
 CONFIG_SYNO_SCSI_MAX_QUEUE_DEPTH_LOCK=y
+CONFIG_SYNO_SAS_HBA_IDX=y
+CONFIG_SYNO_SAS_MAX_HBA_SLOT=3
 CONFIG_SYNO_SAS_HOST_DISK_LED_CTRL=y
 # CONFIG_SYNO_SAS_TASK_ABORT_MESSAGE is not set
 CONFIG_SYNO_MPT3_SATA_DEFAULT_NCQ_OFF=y
@@ -522,7 +535,8 @@
 CONFIG_SYNO_ENHANCE_STORAGE_DISCONNECTION=y
 CONFIG_SYNO_PHISON_USB_FACTORY=y
 CONFIG_SYNO_USB_BUGGY_PORT_RESET_BIT_QUIRK=y
-# CONFIG_SYNO_USB_VBUS_GPIO_CONTROL is not set
+CONFIG_SYNO_USB_VBUS_GPIO_CONTROL=y
+CONFIG_SYNO_USB_VBUS_NUM_GPIO=1
 CONFIG_SYNO_USB_DEVICE_QUIRKS=y
 CONFIG_SYNO_USB_HC_MORE_TRANSACTION_TRIES=y
 
@@ -559,7 +573,11 @@
 #
 # LEDs
 #
-# CONFIG_SYNO_LEDS_TRIGGER is not set
+CONFIG_SYNO_LEDS_TRIGGER=y
+CONFIG_SYNO_LP3943_FEATURES=y
+CONFIG_SYNO_LP3943_PROBE=y
+# CONFIG_SYNO_LP3943_PROBE_FIXED_BUS is not set
+CONFIG_SYNO_LP3943_PROBE_ACPI=y
 
 #
 # ALSA
@@ -573,9 +591,15 @@
 # CONFIG_SYNO_XR17V35X_SERIAL is not set
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
-# CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
+CONFIG_SYNO_IOMMU_PASSTHROUGH=y
+CONFIG_SYNO_WORKAROUND_NOT_PORTING_ACPI_COMPANION=y
+# CONFIG_SYNO_SKIP_IOMMU is not set
 
 #
 # File Systems
@@ -599,6 +623,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -649,12 +674,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -682,7 +707,6 @@
 CONFIG_SYNO_BTRFS_SUPPORT_FULLY_CLONE_BETWEEN_CSUM_AND_NOCSUM_DIR=y
 CONFIG_SYNO_BTRFS_QGROUP_QUERY=y
 CONFIG_SYNO_BTRFS_RENAME_READONLY_SUBVOL=y
-CONFIG_SYNO_BTRFS_CORRECT_SPACEINFO_LOCK=y
 CONFIG_SYNO_BTRFS_FIX_ALLOC_CHUNK=y
 CONFIG_SYNO_BTRFS_CLEAR_SPACE_FULL=y
 CONFIG_SYNO_BTRFS_REMOVE_UNUSED_QGROUP=y
@@ -709,8 +733,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -726,6 +757,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -737,6 +770,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -750,7 +785,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -784,6 +818,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 # CONFIG_SYNO_POWEROFF_INFO_PRINT is not set
@@ -1700,6 +1735,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3021,7 +3057,7 @@
 # CONFIG_LEDS_LM3530 is not set
 # CONFIG_LEDS_LM3642 is not set
 # CONFIG_LEDS_PCA9532 is not set
-# CONFIG_LEDS_LP3943 is not set
+CONFIG_LEDS_LP3943=m
 # CONFIG_LEDS_LP3944 is not set
 # CONFIG_LEDS_LP5521 is not set
 # CONFIG_LEDS_LP5523 is not set
@@ -3391,8 +3427,8 @@
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_TMPFS_XATTR is not set
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
@@ -3572,6 +3608,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3756,6 +3793,8 @@
 CONFIG_CRYPTO_CRC32C_INTEL=m
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
diff -ur a/synoconfigs/bromolow b/synoconfigs/bromolow
--- a/synoconfigs/bromolow	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/bromolow	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -145,7 +145,6 @@
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_SWAP_ENABLED=y
 # CONFIG_MEMCG_KMEM is not set
-# CONFIG_CGROUP_HUGETLB is not set
 # CONFIG_CGROUP_PERF is not set
 CONFIG_CGROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
@@ -207,7 +206,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -291,6 +290,7 @@
 CONFIG_SYNO_SATA_DOM_VENDOR_SECOND_SRC="SATADOM"
 CONFIG_SYNO_SATA_DOM_MODEL_SECOND_SRC="D150SH"
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -345,6 +345,7 @@
 CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY=y
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+# CONFIG_SYNO_CVE_2016_5696 is not set
 
 #
 # Device Drivers
@@ -363,6 +364,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -371,6 +374,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -380,6 +384,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -390,6 +396,7 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -458,6 +465,8 @@
 # CONFIG_SYNO_MV9215_EXTPORT_NCQ_OFF is not set
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN=y
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+# CONFIG_SYNO_SIL_PORTING is not set
 
 #
 # SAS
@@ -573,9 +582,15 @@
 # CONFIG_SYNO_XR17V35X_SERIAL is not set
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
-# CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
+CONFIG_SYNO_IOMMU_PASSTHROUGH=y
+CONFIG_SYNO_WORKAROUND_NOT_PORTING_ACPI_COMPANION=y
+# CONFIG_SYNO_SKIP_IOMMU is not set
 
 #
 # File Systems
@@ -599,6 +614,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -649,12 +665,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -682,7 +698,6 @@
 CONFIG_SYNO_BTRFS_SUPPORT_FULLY_CLONE_BETWEEN_CSUM_AND_NOCSUM_DIR=y
 CONFIG_SYNO_BTRFS_QGROUP_QUERY=y
 CONFIG_SYNO_BTRFS_RENAME_READONLY_SUBVOL=y
-CONFIG_SYNO_BTRFS_CORRECT_SPACEINFO_LOCK=y
 CONFIG_SYNO_BTRFS_FIX_ALLOC_CHUNK=y
 CONFIG_SYNO_BTRFS_CLEAR_SPACE_FULL=y
 CONFIG_SYNO_BTRFS_REMOVE_UNUSED_QGROUP=y
@@ -709,8 +724,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -726,6 +748,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -737,6 +761,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -750,7 +776,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -783,6 +808,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 # CONFIG_SYNO_POWEROFF_INFO_PRINT is not set
@@ -1698,6 +1724,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3389,8 +3416,8 @@
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_TMPFS_XATTR is not set
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
@@ -3570,6 +3597,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3754,6 +3782,8 @@
 CONFIG_CRYPTO_CRC32C_INTEL=m
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
diff -ur a/synoconfigs/cedarview b/synoconfigs/cedarview
--- a/synoconfigs/cedarview	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/cedarview	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -144,7 +144,6 @@
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_SWAP_ENABLED=y
 # CONFIG_MEMCG_KMEM is not set
-# CONFIG_CGROUP_HUGETLB is not set
 # CONFIG_CGROUP_PERF is not set
 CONFIG_CGROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
@@ -206,7 +205,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -283,6 +282,7 @@
 CONFIG_SYNO_SUPPORT_EUP=y
 # CONFIG_SYNO_BOOT_SATA_DOM is not set
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -336,6 +336,7 @@
 # CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY is not set
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -354,6 +355,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -362,6 +365,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -371,6 +375,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -381,6 +387,7 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -450,6 +457,8 @@
 # CONFIG_SYNO_MV9215_EXTPORT_NCQ_OFF is not set
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN=y
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+CONFIG_SYNO_SIL_PORTING=y
 
 #
 # SAS
@@ -552,6 +561,10 @@
 # CONFIG_SYNO_XR17V35X_SERIAL is not set
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
 # CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
@@ -578,6 +591,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -628,12 +642,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -661,7 +675,6 @@
 CONFIG_SYNO_BTRFS_SUPPORT_FULLY_CLONE_BETWEEN_CSUM_AND_NOCSUM_DIR=y
 CONFIG_SYNO_BTRFS_QGROUP_QUERY=y
 CONFIG_SYNO_BTRFS_RENAME_READONLY_SUBVOL=y
-CONFIG_SYNO_BTRFS_CORRECT_SPACEINFO_LOCK=y
 CONFIG_SYNO_BTRFS_FIX_ALLOC_CHUNK=y
 CONFIG_SYNO_BTRFS_CLEAR_SPACE_FULL=y
 CONFIG_SYNO_BTRFS_REMOVE_UNUSED_QGROUP=y
@@ -688,8 +701,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -705,6 +725,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -716,6 +738,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -729,7 +753,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -763,6 +786,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 # CONFIG_SYNO_POWEROFF_INFO_PRINT is not set
@@ -1660,6 +1684,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3315,8 +3340,8 @@
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_TMPFS_XATTR is not set
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
@@ -3496,6 +3521,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3677,6 +3703,8 @@
 # CONFIG_CRYPTO_CRC32C_INTEL is not set
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
diff -ur a/synoconfigs/dockerx64 b/synoconfigs/dockerx64
--- a/synoconfigs/dockerx64	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/dockerx64	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -205,7 +205,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -282,6 +282,7 @@
 CONFIG_SYNO_SUPPORT_EUP=y
 # CONFIG_SYNO_BOOT_SATA_DOM is not set
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -333,6 +334,7 @@
 # CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY is not set
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -351,6 +353,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -359,6 +363,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -368,6 +373,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -378,6 +385,7 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -448,6 +456,8 @@
 # CONFIG_SYNO_MV9215_EXTPORT_NCQ_OFF is not set
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 # CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN is not set
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+# CONFIG_SYNO_SIL_PORTING is not set
 
 #
 # SAS
@@ -550,6 +560,10 @@
 # CONFIG_SYNO_XR17V35X_SERIAL is not set
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
 # CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
@@ -576,6 +590,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -626,12 +641,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -686,8 +701,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -703,6 +725,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -714,6 +738,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -727,7 +753,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -761,6 +786,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 CONFIG_SYNO_POWEROFF_INFO_PRINT=y
@@ -1664,6 +1690,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3482,6 +3509,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3665,6 +3693,8 @@
 CONFIG_CRYPTO_CRC32C_INTEL=m
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
diff -ur a/synoconfigs/grantley b/synoconfigs/grantley
--- a/synoconfigs/grantley	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/grantley	2017-02-23 13:42:36.000000000 +0100
@@ -1,7 +1,6 @@
-
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -149,7 +148,6 @@
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_SWAP_ENABLED=y
 # CONFIG_MEMCG_KMEM is not set
-# CONFIG_CGROUP_HUGETLB is not set
 # CONFIG_CGROUP_PERF is not set
 CONFIG_CGROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
@@ -211,7 +209,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -293,6 +291,7 @@
 CONFIG_SYNO_SATA_DOM_VENDOR_SECOND_SRC="SATADOM"
 CONFIG_SYNO_SATA_DOM_MODEL_SECOND_SRC="3SE"
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -348,6 +347,7 @@
 CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY=y
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -366,6 +366,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -374,6 +376,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -383,6 +386,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -393,6 +398,7 @@
 # CONFIG_SYNO_SATA_PORT_MAP is not set
 CONFIG_SYNO_DISK_INDEX_MAP=y
 # CONFIG_SYNO_SATA_REMAP is not set
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 # CONFIG_SYNO_SATA_DISK_SEQ_REVERSE is not set
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -452,6 +458,8 @@
 CONFIG_SYNO_DISK_NCQ_COMPATIBILITY=y
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 # CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN is not set
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+# CONFIG_SYNO_SIL_PORTING is not set
 
 #
 # SAS
@@ -570,9 +578,15 @@
 CONFIG_SYNO_XR17V35X_SERIAL=y
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
-# CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
+CONFIG_SYNO_IOMMU_PASSTHROUGH=y
+CONFIG_SYNO_WORKAROUND_NOT_PORTING_ACPI_COMPANION=y
+# CONFIG_SYNO_SKIP_IOMMU is not set
 
 #
 # File Systems
@@ -596,6 +610,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -646,12 +661,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -679,7 +694,6 @@
 CONFIG_SYNO_BTRFS_SUPPORT_FULLY_CLONE_BETWEEN_CSUM_AND_NOCSUM_DIR=y
 CONFIG_SYNO_BTRFS_QGROUP_QUERY=y
 CONFIG_SYNO_BTRFS_RENAME_READONLY_SUBVOL=y
-CONFIG_SYNO_BTRFS_CORRECT_SPACEINFO_LOCK=y
 CONFIG_SYNO_BTRFS_FIX_ALLOC_CHUNK=y
 CONFIG_SYNO_BTRFS_CLEAR_SPACE_FULL=y
 CONFIG_SYNO_BTRFS_REMOVE_UNUSED_QGROUP=y
@@ -706,8 +720,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -723,6 +744,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -734,6 +757,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -747,7 +772,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -780,6 +804,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 # CONFIG_SYNO_POWEROFF_INFO_PRINT is not set
@@ -1705,6 +1730,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3272,7 +3298,7 @@
 # CONFIG_AMD_IOMMU is not set
 CONFIG_DMAR_TABLE=y
 CONFIG_INTEL_IOMMU=y
-# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
+CONFIG_INTEL_IOMMU_DEFAULT_ON=y
 CONFIG_INTEL_IOMMU_FLOPPY_WA=y
 CONFIG_IRQ_REMAP=y
 
@@ -3402,8 +3428,8 @@
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_TMPFS_XATTR is not set
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
@@ -3583,6 +3609,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3767,6 +3794,8 @@
 CONFIG_CRYPTO_CRC32C_INTEL=m
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
diff -ur a/synoconfigs/Kconfig.basic b/synoconfigs/Kconfig.basic
--- a/synoconfigs/Kconfig.basic	2017-03-23 17:52:30.000000000 +0100
+++ b/synoconfigs/Kconfig.basic	2017-03-15 04:35:41.000000000 +0100
@@ -82,6 +82,11 @@
 	default y
 	depends on SYNO_FIXED_DISK_NAME
 
+config SYNO_ISCSI_LOOPBACK_DEVICE
+	bool "Loopback Iscsi device name"
+	default y
+	depends on SYNO_FIXED_DISK_NAME
+
 config SYNO_ISCSI_DEVICE_PREFIX
 	string "The prefix of iSCSI device"
 	default "isd"
@@ -92,6 +97,11 @@
 	default "sd"
 	depends on SYNO_FIXED_DISK_NAME
 
+config SYNO_CACHE_DEVICE_PREFIX
+	string "The prefix of sata cache device"
+	default "nvc"
+	depends on SYNO_PCI_HOST_SATA_CACHE
+
 config SYNO_X86_AUTO_POWER_ON
 	bool "Supporting Auto-Poweron on X86_64 Platforms"
 	default n
@@ -138,6 +148,8 @@
 	depends on KVM
 
 config SYNO_EXPORT_GET_CMDLINE
+	bool "export the helper function to get the cmdline of a task_struct" 
+	default y 
 
 config SYNO_GPIO
 	  bool "Employ refactored GPIO-functions"
diff -ur a/synoconfigs/Kconfig.devices b/synoconfigs/Kconfig.devices
--- a/synoconfigs/Kconfig.devices	2017-03-23 17:52:30.000000000 +0100
+++ b/synoconfigs/Kconfig.devices	2017-03-15 04:35:41.000000000 +0100
@@ -43,6 +43,15 @@
 	bool "Sort the disk in autoastart array"
 	default y
 
+config SYNO_MD_BAD_SECTOR_AUTO_REMAP
+	bool "Auto remap bad sector in SATA layer"
+	default y
+
+config SYNO_MD_RAID5_BS_REWRITE
+	bool "remap bad sector on raid5 read error"
+	default y
+	depends on SYNO_MD_BAD_SECTOR_AUTO_REMAP && SYNO_MD_EIO_NODEV_HANDLER
+
 config SYNO_MD_STRIPE_MEMORY_ESTIMATION
 	bool "Raid5/6 stripe cache memory usage"
 	default y
@@ -76,6 +85,11 @@
 	default y
 	depends on SYNO_MD_EIO_NODEV_HANDLER && SYNO_SCSI_DEVICE_INDEX
 
+config SYNO_MD_AUTO_REMAP_REPORT
+	bool "report LVM/ MD remapped sector through scemd and synobios"
+	default y
+	depends on SYNO_MD_BAD_SECTOR_AUTO_REMAP && SYNO_MD_SECTOR_STATUS_REPORT
+
 config SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV
 	bool "Fix an issue that md might access NULL hotpluged redv"
 	default y
@@ -112,6 +126,14 @@
 	bool "Add an one-time retry for EIO error when updating superblock"
 	default y
 
+config SYNO_MD_RAID5D_PROXY
+	bool "run proxy thread when raid5 finish reshaping"
+	default y
+
+config SYNO_MD_RAID5_SKIP_COPY
+	bool "Fix data corruption in image backup"
+	default y
+
 endmenu #MD
 
 menu "SATA"
@@ -145,6 +167,17 @@
 	depends on SYNO_FIXED_DISK_NAME
 	default y
 
+config SYNO_MAX_PCI_SLOT
+	int "Maximun number of PCIe slot"
+	depends on SYNO_PCI_HOST_SATA_CACHE
+	default "1"
+
+config SYNO_PCI_HOST_SATA_CACHE
+	bool "Force SATA SSD on specific PCI slot used as cache device "
+	select SYNO_FIXED_DISK_NAME
+	select SYNO_SATA_REMAP
+	default n
+
 config SYNO_SATA_DISK_SEQ_REVERSE
 	bool "Reverse Disk Port Sequence"
 	depends on SYNO_FIXED_DISK_NAME
@@ -437,6 +470,11 @@
 	depends on (SYNO_BRASWELL || SYNO_ALPINE || SYNO_AVOTON) && SYNO_SATA_PM_DEVICE_GPIO
 	default n
 
+config SYNO_DISABLE_SIL3x26
+	bool "Disable SiI3x26 with 88SE9170 for DS1517+"
+	depends on SYNO_AVOTON && SYNO_SATA_PM_DEVICE_GPIO
+	default n
+
 config SYNO_SATA_LINK_SPEED_RETRY
 	bool "Enable SATA Link Speed Retry"
 	default y
@@ -477,6 +515,19 @@
 	bool "Rescan sata speed after thaw port"
 	default n
 
+config SYNO_AHCI_IMPL_WORKAROUND
+	bool "Force enable PORTS_IMPL workaround"
+	default n
+
+config SYNO_DS1815P_SPEED_LIMIT
+	bool "Force limit sata link speed for DS1815+"
+	default y
+	depends on SYNO_AVOTON
+
+config SYNO_SIL_PORTING
+	bool "Silicon Image SATA controller workarounds"
+	default y
+
 endmenu #SATA
 
 menu "SAS"
@@ -571,7 +622,7 @@
 
 config SYNO_SAS_HBA_IDX
 	bool "Indexing Synology SAS HBA"
-	depends on SYNO_GRANTLEY
+	depends on SYNO_GRANTLEY || SYNO_BROADWELL
         default n
 
 config SYNO_SAS_MAX_HBA_SLOT
@@ -881,6 +932,16 @@
 	bool "enable iommu=pt by default"
 	default y
 
+config SYNO_WORKAROUND_NOT_PORTING_ACPI_COMPANION
+	bool "disable logic using ACPI_COMPANION"
+	depends on INTEL_IOMMU
+	default y
+
+config SYNO_SKIP_IOMMU
+	bool "skip some PCIe deivce to avoid DMAR error"
+	depends on INTEL_IOMMU
+	default y
+
 endmenu #IOMMU
 
 endmenu #Device Drivers
diff -ur a/synoconfigs/Kconfig.fs b/synoconfigs/Kconfig.fs
--- a/synoconfigs/Kconfig.fs	2017-03-23 17:52:30.000000000 +0100
+++ b/synoconfigs/Kconfig.fs	2017-03-15 04:35:41.000000000 +0100
@@ -65,6 +65,10 @@
 	default y
 	depends on FSNOTIFY && ANON_INODES
 
+config SYNO_FS_RELATIME_PERIOD
+	bool "Add mount option to set update period of relatime"
+	default y
+
 endmenu #Basic
 
 menu "CIFS"
@@ -243,11 +247,6 @@
 	default y
 	depends on EXT4_FS
 
-config SYNO_EXT4_META_BG_BACKUP_DESC_FIX
-	bool "Fix update correct backup descriptor in meta_bg"
-	default y
-	depends on EXT4_FS
-
 config SYNO_EXT4_SKIP_JOURNAL_SYMLINK
 	bool "Use writeback mode instead of jounal mode when doing ext4 symlink"
 	default y
@@ -273,6 +272,11 @@
 	default y
 	depends on EXT4_FS
 
+config SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT
+	bool "Add parallel group desc prefetching to enhance mount time."
+	default y
+	depends on EXT4_FS
+
 endmenu #EXT4
 
 menu "BTRFS"
@@ -392,11 +396,6 @@
 	default y
 	depends on BTRFS_FS
 
-config SYNO_BTRFS_CORRECT_SPACEINFO_LOCK
-	bool "Correct btrfs statfs block group list lock"
-	default y
-	depends on BTRFS_FS
-
 config SYNO_BTRFS_FIX_ALLOC_CHUNK
 	bool "Chunk allocation may fail if device tree has hole"
 	default y
@@ -527,16 +526,51 @@
 	default y
 	depends on BTRFS_FS
 
+config SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME
+	bool "add btrfs send support pause/resume"
+	default y
+	depends on BTRFS_FS
+
 config SYNO_BTRFS_MERGE_HOLES
 	bool "file hole can be merged with both previous and next hole items"
 	default y
 	depends on BTRFS_FS
 
+config SYNO_BTRFS_DEFRAG
+	bool "add support for doing defrag on nocow file"
+	default y
+	depends on BTRFS_FS
+
 config SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC
 	bool "fix punch hole no space when split leaf, may lead to BUG_ON"
 	default y
 	depends on BTRFS_FS
 
+config SYNO_BTRFS_FIX_SNAPSHOT_HANG
+	bool "fix snapshot hang"
+	default y
+	depends on BTRFS_FS
+
+config SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA
+	bool "btrfs send uses async page cache readhead to accelerate"
+	default y
+	depends on BTRFS_FS
+
+config SYNO_BTRFS_BACKREF
+	bool "provide backref walking mechanism framework"
+	default y
+	depends on BTRFS_FS
+
+config SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION
+	bool "add ioctl to calculate actual disk size of snapshots"
+	default y
+	depends on BTRFS_FS && SYNO_BTRFS_BACKREF
+
+config SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP
+	bool "Avoid soft lockup when cache_block_group with mount option nospace_cache"
+	default y
+	depends on BTRFS_FS
+
 endmenu #BTRFS
 
 menu "ECRYPT"
@@ -601,6 +635,16 @@
 	default y
 	depends on ECRYPT_FS
 
+config SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK
+	bool "Skip security check during kernel_write."
+	default y
+	depends on ECRYPT_FS
+
+config SYNO_ECRYPTFS_FAST_LOOKUP
+	bool "Fast lookup, read i_size from xattr"
+	default y
+	depends on ECRYPT_FS
+
 endmenu #ECRYPT
 menu "NFS"
 
@@ -635,6 +679,14 @@
 	bool "disable NFSv4 over UDP"
 	default y
 
+config SYNO_NFSD_HIDDEN_FILE
+	bool "Hide system directories"
+	default y
+
+config SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE
+	bool "Avoid parent mutex hung task when unlink big file"
+	default y
+
 endmenu #NFS
 
 menu "HFSPLUS"
@@ -684,11 +736,6 @@
 	default y
 	depends on HFSPLUS_FS
 
-config SYNO_HFSPLUS_GET_PAGE_IF_IN_USE
-	bool "Add page get/put mech for bnode page to prevent bad page"
-	default y
-	depends on HFSPLUS_FS
-
 endmenu #HFSPLUS
 
 menu "UDF"
diff -ur a/synoconfigs/Kconfig.misc b/synoconfigs/Kconfig.misc
--- a/synoconfigs/Kconfig.misc	2017-03-23 17:52:30.000000000 +0100
+++ b/synoconfigs/Kconfig.misc	2017-03-15 04:35:41.000000000 +0100
@@ -31,6 +31,11 @@
 	bool "Reduce the rate of IO error print messages"
 	default y
 
+config SYNO_IO_ERROR_LIMIT_MSG_SHIFT
+	int "Reduce the rate of IO error print messages - for the variable number"
+	depends on SYNO_IO_ERROR_LIMIT_MSG
+	default "12"
+
 config SYNO_EMULATE_U64_DIVISOR
 	bool "Emulate caculating of u64 remainder"
 	default y
diff -ur a/synoconfigs/Kconfig.network b/synoconfigs/Kconfig.network
--- a/synoconfigs/Kconfig.network	2017-03-23 17:52:30.000000000 +0100
+++ b/synoconfigs/Kconfig.network	2017-03-15 04:35:41.000000000 +0100
@@ -61,4 +61,8 @@
 	bool "Export in6addr_any value for modules"
 	default y
 
+config SYNO_CVE_2016_5696
+	bool "Fix CVE-2016-5696"
+	default y
+
 endmenu #Network
diff -ur a/synoconfigs/kvmx64 b/synoconfigs/kvmx64
--- a/synoconfigs/kvmx64	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/kvmx64	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -205,7 +205,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -286,6 +286,7 @@
 CONFIG_SYNO_SATA_DOM_VENDOR_SECOND_SRC="SATADOM"
 CONFIG_SYNO_SATA_DOM_MODEL_SECOND_SRC="D150SH"
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -341,6 +342,7 @@
 CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY=y
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -359,6 +361,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -367,6 +371,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -376,6 +381,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -386,6 +393,7 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -456,6 +464,8 @@
 # CONFIG_SYNO_MV9215_EXTPORT_NCQ_OFF is not set
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 # CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN is not set
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+# CONFIG_SYNO_SIL_PORTING is not set
 
 #
 # SAS
@@ -593,6 +603,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -643,12 +654,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -703,8 +714,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -720,6 +738,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -731,6 +751,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -744,7 +766,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -777,6 +798,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 # CONFIG_SYNO_POWEROFF_INFO_PRINT is not set
@@ -1697,6 +1719,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3554,6 +3577,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3737,6 +3761,8 @@
 CONFIG_CRYPTO_CRC32C_INTEL=m
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
Only in a/synoconfigs: monaco.
diff -ur a/synoconfigs/x86_64 b/synoconfigs/x86_64
--- a/synoconfigs/x86_64	2016-11-02 19:16:45.000000000 +0100
+++ b/synoconfigs/x86_64	2017-02-23 13:42:36.000000000 +0100
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.10.77 Kernel Configuration
+# Linux/x86_64 3.10.102 Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -205,7 +205,7 @@
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
@@ -282,6 +282,7 @@
 CONFIG_SYNO_SUPPORT_EUP=y
 # CONFIG_SYNO_BOOT_SATA_DOM is not set
 CONFIG_SYNO_ISCSI_DEVICE=y
+CONFIG_SYNO_ISCSI_LOOPBACK_DEVICE=y
 CONFIG_SYNO_ISCSI_DEVICE_PREFIX="isd"
 CONFIG_SYNO_SATA_DEVICE_PREFIX="sd"
 CONFIG_SYNO_X86_AUTO_POWER_ON=y
@@ -334,6 +335,7 @@
 # CONFIG_SYNO_SFP_UNSUPPORTED_NOTIFY is not set
 # CONFIG_SYNO_PHY_INIT is not set
 CONFIG_SYNO_IPV6_EXPORT_IN6ADDR_ANY=y
+CONFIG_SYNO_CVE_2016_5696=y
 
 #
 # Device Drivers
@@ -352,6 +354,8 @@
 CONFIG_SYNO_MD_ALL_DETECTED_DEVICES_LOCK=y
 CONFIG_SYNO_MD_AUTOSTART_SEQUENCE=y
 CONFIG_SYNO_MD_DISK_SORT=y
+CONFIG_SYNO_MD_BAD_SECTOR_AUTO_REMAP=y
+CONFIG_SYNO_MD_RAID5_BS_REWRITE=y
 CONFIG_SYNO_MD_STRIPE_MEMORY_ESTIMATION=y
 CONFIG_SYNO_MD_RAID6_RMW=y
 CONFIG_SYNO_MD_RAID5_ENABLE_SSD_TRIM=y
@@ -360,6 +364,7 @@
 CONFIG_SYNO_MD_FIX_ACCESS_RELEASED_DEVICE=y
 CONFIG_SYNO_MD_FAST_VOLUME_WAKEUP=y
 CONFIG_SYNO_MD_SECTOR_STATUS_REPORT=y
+CONFIG_SYNO_MD_AUTO_REMAP_REPORT=y
 CONFIG_SYNO_MD_FIX_LINEAR_ACCESS_NULL_RDEV=y
 CONFIG_SYNO_MD_FIX_RAID5_RESHAPE_HANG=y
 CONFIG_SYNO_MD_SYNC_MSG=y
@@ -369,6 +374,8 @@
 CONFIG_SYNO_MD_ROOT_SWAP_PARALLEL_RESYNC=y
 CONFIG_SYNO_MD_RESHAPE_AND_MOUNT_DEADLOCK_WORKAROUND=y
 CONFIG_SYNO_MD_SB_RETRY=y
+CONFIG_SYNO_MD_RAID5D_PROXY=y
+CONFIG_SYNO_MD_RAID5_SKIP_COPY=y
 
 #
 # SATA
@@ -379,6 +386,7 @@
 CONFIG_SYNO_SATA_PORT_MAP=y
 CONFIG_SYNO_DISK_INDEX_MAP=y
 CONFIG_SYNO_SATA_REMAP=y
+# CONFIG_SYNO_PCI_HOST_SATA_CACHE is not set
 CONFIG_SYNO_SATA_DISK_SEQ_REVERSE=y
 # CONFIG_SYNO_FIXED_DISK_NAME_MV14XX is not set
 # CONFIG_SYNO_MV1475_SGPIO_LED_CTRL is not set
@@ -447,6 +455,8 @@
 # CONFIG_SYNO_MV9215_EXTPORT_NCQ_OFF is not set
 # CONFIG_SYNO_GET_DISK_SPEED is not set
 CONFIG_SYNO_EUNIT_SPD_UNKNOWN_RESCAN=y
+CONFIG_SYNO_AHCI_IMPL_WORKAROUND=y
+CONFIG_SYNO_SIL_PORTING=y
 
 #
 # SAS
@@ -549,6 +559,10 @@
 # CONFIG_SYNO_XR17V35X_SERIAL is not set
 
 #
+# Virtio
+#
+
+#
 # IOMMU
 #
 # CONFIG_SYNO_IOMMU_PASSTHROUGH is not set
@@ -575,6 +589,7 @@
 CONFIG_SYNO_FS_EXPORT_SYMBOL_LOOKUP_HASH=y
 CONFIG_SYNO_FS_REMOVE_RCU_WALK_PATH=y
 CONFIG_SYNO_FS_NOTIFY=y
+CONFIG_SYNO_FS_RELATIME_PERIOD=y
 
 #
 # CIFS
@@ -625,12 +640,12 @@
 CONFIG_SYNO_EXT4_INODE_NUM_OVERFLOW_FIX=y
 CONFIG_SYNO_EXT4_CASELESS_STAT=y
 CONFIG_SYNO_EXT4_SKIP_ADD_RESERVED_BLOCKS=y
-CONFIG_SYNO_EXT4_META_BG_BACKUP_DESC_FIX=y
 CONFIG_SYNO_EXT4_SKIP_JOURNAL_SYMLINK=y
 CONFIG_SYNO_EXT4_OLDALLOC=y
 CONFIG_SYNO_EXT4_MBALLOC_RANDOM=y
 CONFIG_SYNO_EXT4_PROTECT_DISKSIZE_WRITE=y
 CONFIG_SYNO_EXT4_FORCE_UPDATE_DA_FILE_SIZE=y
+CONFIG_SYNO_EXT4_PARALLEL_GROUP_DESC_PREFETCH_WHEN_MOUNT=y
 
 #
 # BTRFS
@@ -658,7 +673,6 @@
 CONFIG_SYNO_BTRFS_SUPPORT_FULLY_CLONE_BETWEEN_CSUM_AND_NOCSUM_DIR=y
 CONFIG_SYNO_BTRFS_QGROUP_QUERY=y
 CONFIG_SYNO_BTRFS_RENAME_READONLY_SUBVOL=y
-CONFIG_SYNO_BTRFS_CORRECT_SPACEINFO_LOCK=y
 CONFIG_SYNO_BTRFS_FIX_ALLOC_CHUNK=y
 CONFIG_SYNO_BTRFS_CLEAR_SPACE_FULL=y
 CONFIG_SYNO_BTRFS_REMOVE_UNUSED_QGROUP=y
@@ -685,8 +699,15 @@
 CONFIG_SYNO_BTRFS_GLOBAL_RESERVE_MINIMAL_VALUE=y
 CONFIG_SYNO_BTRFS_CHECK_INTEGRITY=y
 CONFIG_SYNO_BTRFS_TREE_LOG_RECOVER_FIX=y
+CONFIG_SYNO_BTRFS_SEND_SUPPORT_PAUSE_RESUME=y
 CONFIG_SYNO_BTRFS_MERGE_HOLES=y
+CONFIG_SYNO_BTRFS_DEFRAG=y
 CONFIG_SYNO_BTRFS_FIX_PUNCH_HOLE_ENOSPC=y
+CONFIG_SYNO_BTRFS_FIX_SNAPSHOT_HANG=y
+CONFIG_SYNO_BTRFS_SEND_ASYNC_PAGECACHE_RA=y
+CONFIG_SYNO_BTRFS_BACKREF=y
+CONFIG_SYNO_BTRFS_SNAPSHOT_SIZE_CALCULATION=y
+CONFIG_SYNO_BTRFS_AVOID_CACHE_BLOCK_GROUP_SOFT_LOCKUP=y
 
 #
 # ECRYPT
@@ -702,6 +723,8 @@
 CONFIG_SYNO_ECRYPTFS_FILENAME_SYSCALL=y
 CONFIG_SYNO_ECRYPTFS_BLOCK_BTRFS_CLONE=y
 CONFIG_SYNO_FS_ECRYPTFS_LOWER_INIT=y
+CONFIG_SYNO_ECRYPTFS_SKIP_KERNEL_WRITE_CHECK=y
+CONFIG_SYNO_ECRYPTFS_FAST_LOOKUP=y
 
 #
 # NFS
@@ -713,6 +736,8 @@
 CONFIG_SYNO_NFSD_UDP_DEF_PACKET_SIZE=8192
 CONFIG_SYNO_NFSD_UNIX_PRI=y
 CONFIG_SYNO_NFS4_DISABLE_UDP=y
+CONFIG_SYNO_NFSD_HIDDEN_FILE=y
+CONFIG_SYNO_NFSD_AVOID_HUNG_TASK_WHEN_UNLINK_BIG_FILE=y
 
 #
 # HFSPLUS
@@ -726,7 +751,6 @@
 CONFIG_SYNO_HFSPLUS_CASELESS_CREATE_BY_NEW_NAME=y
 CONFIG_SYNO_HFSPLUS_EA=y
 CONFIG_SYNO_HFSPLUS_BREC_FIND_RET_CHECK=y
-CONFIG_SYNO_HFSPLUS_GET_PAGE_IF_IN_USE=y
 
 #
 # UDF
@@ -760,6 +784,7 @@
 CONFIG_SYNO_MAX_READAHEAD_SIZE=192
 CONFIG_SYNO_BLOCK_LIMIT_BAD_SECTOR_MSG=y
 CONFIG_SYNO_IO_ERROR_LIMIT_MSG=y
+CONFIG_SYNO_IO_ERROR_LIMIT_MSG_SHIFT=12
 CONFIG_SYNO_EMULATE_U64_DIVISOR=y
 CONFIG_SYNO_APPARMOR_PATCH=y
 # CONFIG_SYNO_POWEROFF_INFO_PRINT is not set
@@ -1654,6 +1679,7 @@
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
@@ -3419,6 +3445,7 @@
 # CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_ATOMIC_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_KOBJECT is not set
 CONFIG_DEBUG_BUGVERBOSE=y
@@ -3600,6 +3627,8 @@
 # CONFIG_CRYPTO_CRC32C_INTEL is not set
 # CONFIG_CRYPTO_CRC32 is not set
 # CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
 # CONFIG_CRYPTO_GHASH is not set
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=m
diff -ur a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
--- a/tools/lib/traceevent/event-parse.c	2017-03-23 14:38:22.000000000 +0100
+++ b/tools/lib/traceevent/event-parse.c	2017-03-14 02:12:55.000000000 +0100
@@ -4188,13 +4188,12 @@
 				    sizeof(long) != 8) {
 					char *p;
 
-					ls = 2;
 					/* make %l into %ll */
-					p = strchr(format, 'l');
-					if (p)
+					if (ls == 1 && (p = strchr(format, 'l')))
 						memmove(p+1, p, strlen(p)+1);
 					else if (strcmp(format, "%p") == 0)
 						strcpy(format, "0x%llx");
+					ls = 2;
 				}
 				switch (ls) {
 				case -2:
diff -ur a/tools/Makefile b/tools/Makefile
--- a/tools/Makefile	2016-10-20 04:32:09.000000000 +0200
+++ b/tools/Makefile	2016-07-29 05:48:09.000000000 +0200
@@ -22,6 +22,10 @@
 	@echo '  from the kernel command line to build and install one of'
 	@echo '  the tools above'
 	@echo ''
+	@echo '  $$ make tools/all'
+	@echo ''
+	@echo '  builds all tools.'
+	@echo ''
 	@echo '  $$ make tools/install'
 	@echo ''
 	@echo '  installs all tools.'
@@ -50,6 +54,10 @@
 turbostat x86_energy_perf_policy: FORCE
 	$(call descend,power/x86/$@)
 
+all: cgroup cpupower firewire lguest \
+		perf selftests turbostat usb \
+		virtio vm net x86_energy_perf_policy
+
 cpupower_install:
 	$(call descend,power/$(@:_install=),install)
 
diff -ur a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
--- a/tools/perf/Documentation/perf-stat.txt	2016-10-20 04:32:09.000000000 +0200
+++ b/tools/perf/Documentation/perf-stat.txt	2016-07-29 05:48:09.000000000 +0200
@@ -50,6 +50,14 @@
 --scale::
 	scale/normalize counter values
 
+-d::
+--detailed::
+	print more detailed statistics, can be specified up to 3 times
+
+	   -d:          detailed events, L1 and LLC data cache
+        -d -d:     more detailed events, dTLB and iTLB events
+     -d -d -d:     very detailed events, adding prefetch events
+
 -r::
 --repeat=<n>::
 	repeat command and print average + stddev (max: 100). 0 means forever.
diff -ur a/tools/perf/util/header.c b/tools/perf/util/header.c
--- a/tools/perf/util/header.c	2017-03-23 14:38:08.000000000 +0100
+++ b/tools/perf/util/header.c	2017-03-14 02:12:32.000000000 +0100
@@ -1726,7 +1726,7 @@
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_cpus_online = nr;
+	ph->env.nr_cpus_avail = nr;
 
 	ret = readn(fd, &nr, sizeof(nr));
 	if (ret != sizeof(nr))
@@ -1735,7 +1735,7 @@
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_cpus_avail = nr;
+	ph->env.nr_cpus_online = nr;
 	return 0;
 }
 
diff -ur a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
--- a/tools/testing/selftests/efivarfs/efivarfs.sh	2016-10-20 04:32:09.000000000 +0200
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh	2016-07-29 05:48:09.000000000 +0200
@@ -88,7 +88,11 @@
 		exit 1
 	fi
 
-	rm $file
+	rm $file 2>/dev/null
+	if [ $? -ne 0 ]; then
+		chattr -i $file
+		rm $file
+	fi
 
 	if [ -e $file ]; then
 		echo "$file couldn't be deleted" >&2
@@ -111,6 +115,7 @@
 		exit 1
 	fi
 
+	chattr -i $file
 	printf "$attrs" > $file
 
 	if [ -e $file ]; then
@@ -141,7 +146,11 @@
 			echo "$file could not be created" >&2
 			ret=1
 		else
-			rm $file
+			rm $file 2>/dev/null
+			if [ $? -ne 0 ]; then
+				chattr -i $file
+				rm $file
+			fi
 		fi
 	done
 
@@ -174,7 +183,11 @@
 
 		if [ -e $file ]; then
 			echo "Creating $file should have failed" >&2
-			rm $file
+			rm $file 2>/dev/null
+			if [ $? -ne 0 ]; then
+				chattr -i $file
+				rm $file
+			fi
 			ret=1
 		fi
 	done
diff -ur a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
--- a/tools/testing/selftests/efivarfs/open-unlink.c	2017-03-23 14:38:17.000000000 +0100
+++ b/tools/testing/selftests/efivarfs/open-unlink.c	2017-03-14 02:12:51.000000000 +0100
@@ -1,10 +1,68 @@
+#include <errno.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <sys/ioctl.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
+#include <linux/fs.h>
+
+static int set_immutable(const char *path, int immutable)
+{
+	unsigned int flags;
+	int fd;
+	int rc;
+	int error;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return fd;
+
+	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+	if (rc < 0) {
+		error = errno;
+		close(fd);
+		errno = error;
+		return rc;
+	}
+
+	if (immutable)
+		flags |= FS_IMMUTABLE_FL;
+	else
+		flags &= ~FS_IMMUTABLE_FL;
+
+	rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
+	error = errno;
+	close(fd);
+	errno = error;
+	return rc;
+}
+
+static int get_immutable(const char *path)
+{
+	unsigned int flags;
+	int fd;
+	int rc;
+	int error;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return fd;
+
+	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+	if (rc < 0) {
+		error = errno;
+		close(fd);
+		errno = error;
+		return rc;
+	}
+	close(fd);
+	if (flags & FS_IMMUTABLE_FL)
+		return 1;
+	return 0;
+}
 
 int main(int argc, char **argv)
 {
@@ -27,7 +85,7 @@
 	buf[4] = 0;
 
 	/* create a test variable */
-	fd = open(path, O_WRONLY | O_CREAT);
+	fd = open(path, O_WRONLY | O_CREAT, 0600);
 	if (fd < 0) {
 		perror("open(O_WRONLY)");
 		return EXIT_FAILURE;
@@ -41,6 +99,18 @@
 
 	close(fd);
 
+	rc = get_immutable(path);
+	if (rc < 0) {
+		perror("ioctl(FS_IOC_GETFLAGS)");
+		return EXIT_FAILURE;
+	} else if (rc) {
+		rc = set_immutable(path, 0);
+		if (rc < 0) {
+			perror("ioctl(FS_IOC_SETFLAGS)");
+			return EXIT_FAILURE;
+		}
+	}
+
 	fd = open(path, O_RDONLY);
 	if (fd < 0) {
 		perror("open");
diff -ur a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
--- a/virt/kvm/async_pf.c	2017-03-23 15:04:18.000000000 +0100
+++ b/virt/kvm/async_pf.c	2017-03-14 02:42:07.000000000 +0100
@@ -158,7 +158,7 @@
 	 * do alloc nowait since if we are going to sleep anyway we
 	 * may as well sleep faulting in page
 	 */
-	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
 	if (!work)
 		return 0;
 
diff -ur a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c	2017-03-23 15:04:23.000000000 +0100
+++ b/virt/kvm/kvm_main.c	2017-03-14 02:42:10.000000000 +0100
@@ -468,6 +468,16 @@
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
+	spin_lock_init(&kvm->mmu_lock);
+	atomic_inc(&current->mm->mm_count);
+	kvm->mm = current->mm;
+	kvm_eventfd_init(kvm);
+	mutex_init(&kvm->lock);
+	mutex_init(&kvm->irq_lock);
+	mutex_init(&kvm->slots_lock);
+	atomic_set(&kvm->users_count, 1);
+	INIT_LIST_HEAD(&kvm->devices);
+
 	r = kvm_arch_init_vm(kvm, type);
 	if (r)
 		goto out_err_nodisable;
@@ -497,16 +507,6 @@
 			goto out_err;
 	}
 
-	spin_lock_init(&kvm->mmu_lock);
-	kvm->mm = current->mm;
-	atomic_inc(&kvm->mm->mm_count);
-	kvm_eventfd_init(kvm);
-	mutex_init(&kvm->lock);
-	mutex_init(&kvm->irq_lock);
-	mutex_init(&kvm->slots_lock);
-	atomic_set(&kvm->users_count, 1);
-	INIT_LIST_HEAD(&kvm->devices);
-
 	r = kvm_init_mmu_notifier(kvm);
 	if (r)
 		goto out_err;
@@ -526,6 +526,7 @@
 		kfree(kvm->buses[i]);
 	kfree(kvm->memslots);
 	kvm_arch_free_vm(kvm);
+	mmdrop(current->mm);
 	return ERR_PTR(r);
 }
 
