Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf tooling fixes from Thomas Gleixner:
 "Core libraries:
   - Fix max perf_event_attr.precise_ip detection.
   - Fix parser error for uncore event alias.
   - Fixup ordering of kernel maps after obtaining the main kernel map
     address.

  Intel PT:
   - Fix TSC slip where a TSC packet can slip past MTC packets so that
     the timestamp appears to go backwards.
   - Fixes for exported-sql-viewer GUI conversion to python3.

  ARM coresight:
   - Fix the build by adding a missing case value for an enumeration value
     introduced in a newer library version, which is now the required one.

  tool headers:
   - Synchronize kernel headers with the kernel, getting new io_uring and
     pidfd_send_signal syscalls so that 'perf trace' can handle them"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf pmu: Fix parser error for uncore event alias
  perf scripts python: exported-sql-viewer.py: Fix python3 support
  perf scripts python: exported-sql-viewer.py: Fix never-ending loop
  perf machine: Update kernel map address and re-order properly
  tools headers uapi: Sync powerpc's asm/kvm.h copy with the kernel sources
  tools headers: Update x86's syscall_64.tbl and uapi/asm-generic/unistd
  tools headers uapi: Update drm/i915_drm.h
  tools arch x86: Sync asm/cpufeatures.h with the kernel sources
  tools headers uapi: Sync linux/fcntl.h to get the F_SEAL_FUTURE_WRITE addition
  tools headers uapi: Sync asm-generic/mman-common.h and linux/mman.h
  perf evsel: Fix max perf_event_attr.precise_ip detection
  perf intel-pt: Fix TSC slip
  perf cs-etm: Add missing case value
diff --git a/.mailmap b/.mailmap
index 37e1847..b2cde86 100644
--- a/.mailmap
+++ b/.mailmap
@@ -224,3 +224,5 @@
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/i2c-xscale.txt
rename to Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/i2c-mtk.txt
rename to Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt
rename to Documentation/devicetree/bindings/i2c/i2c-stu300.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
rename to Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
similarity index 100%
rename from Documentation/devicetree/bindings/i2c/i2c-vt8500.txt
rename to Documentation/devicetree/bindings/i2c/i2c-wmt.txt
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index 742cb47..bcfb131 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -16,6 +16,7 @@
   * "mediatek,mt8127-uart" for MT8127 compatible UARTS
   * "mediatek,mt8135-uart" for MT8135 compatible UARTS
   * "mediatek,mt8173-uart" for MT8173 compatible UARTS
+  * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
   * "mediatek,mt6577-uart" for MT6577 and all of the above
 
 - reg: The base address of the UART register bank.
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d1ee484..ee9984f 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@
   * Intel Cannon Lake (PCH)
   * Intel Cedar Fork (PCH)
   * Intel Ice Lake (PCH)
+  * Intel Comet Lake (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/MAINTAINERS b/MAINTAINERS
index 3e5a5d2..43b36db 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2356,7 +2356,7 @@
 F:	arch/arm64/boot/dts/socionext/uniphier*
 F:	drivers/bus/uniphier-system-bus.c
 F:	drivers/clk/uniphier/
-F:	drivers/dmaengine/uniphier-mdmac.c
+F:	drivers/dma/uniphier-mdmac.c
 F:	drivers/gpio/gpio-uniphier.c
 F:	drivers/i2c/busses/i2c-uniphier*
 F:	drivers/irqchip/irq-uniphier-aidet.c
@@ -6408,7 +6408,6 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 S:	Maintained
 F:	kernel/futex.c
-F:	kernel/futex_compat.c
 F:	include/asm-generic/futex.h
 F:	include/linux/futex.h
 F:	include/uapi/linux/futex.h
diff --git a/Makefile b/Makefile
index c0a3406..52f067e 100644
--- a/Makefile
+++ b/Makefile
@@ -31,26 +31,12 @@
 # descending is started. They are now explicitly listed as the
 # prepare rule.
 
-# Ugly workaround for Debian make-kpkg:
-# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
-# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
-# displays warning to discourage such abusage.
-ifneq ($(word 2, $(MAKEFILE_LIST)),)
-$(warning Do not include top Makefile of Linux Kernel)
-sub-make-done := 1
-MAKEFLAGS += -rR
-endif
-
-ifneq ($(sub-make-done),1)
+ifneq ($(sub_make_done),1)
 
 # Do not use make's built-in rules and variables
 # (this increases performance and avoids hard-to-debug behaviour)
 MAKEFLAGS += -rR
 
-# 'MAKEFLAGS += -rR' does not become immediately effective for old
-# GNU Make versions. Cancel implicit rules for this Makefile.
-$(lastword $(MAKEFILE_LIST)): ;
-
 # Avoid funny character set dependencies
 unexport LC_ALL
 LC_COLLATE=C
@@ -153,6 +139,7 @@
 # 'sub-make' below.
 MAKEFLAGS += --include-dir=$(CURDIR)
 
+need-sub-make := 1
 else
 
 # Do not print "Entering directory ..." at all for in-tree build.
@@ -160,6 +147,18 @@
 
 endif # ifneq ($(KBUILD_OUTPUT),)
 
+ifneq ($(filter 3.%,$(MAKE_VERSION)),)
+# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
+# We need to invoke sub-make to avoid implicit rules in the top Makefile.
+need-sub-make := 1
+# Cancel implicit rules for this Makefile.
+$(lastword $(MAKEFILE_LIST)): ;
+endif
+
+export sub_make_done := 1
+
+ifeq ($(need-sub-make),1)
+
 PHONY += $(MAKECMDGOALS) sub-make
 
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -167,12 +166,15 @@
 
 # Invoke a second make in the output directory, passing relevant variables
 sub-make:
-	$(Q)$(MAKE) sub-make-done=1 \
+	$(Q)$(MAKE) \
 	$(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
 	-f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
 
-else # sub-make-done
+endif # need-sub-make
+endif # sub_make_done
+
 # We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(need-sub-make),)
 
 # Do not print "Entering directory ...",
 # but we want to display it when entering to the output directory
@@ -497,7 +499,8 @@
 ifneq ($(KBUILD_SRC),)
 	$(Q)ln -fsn $(srctree) source
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
-	$(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
+	$(Q)test -e .gitignore || \
+	{ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
 endif
 
 ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@@ -677,7 +680,7 @@
 KBUILD_CFLAGS	+= $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= $(call cc-option,-Oz,-Os)
+KBUILD_CFLAGS	+= -Os
 else
 KBUILD_CFLAGS   += -O2
 endif
@@ -950,9 +953,11 @@
 endif
 export mod_sign_cmd
 
+HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 ifdef CONFIG_STACK_VALIDATION
   has_libelf := $(call try-run,\
-		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
+		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -1757,7 +1762,7 @@
 
 endif   # ifeq ($(config-targets),1)
 endif   # ifeq ($(mixed-targets),1)
-endif   # sub-make-done
+endif   # need-sub-make
 
 PHONY += FORCE
 FORCE:
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f8482fe..413d566 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
 
 	num_standard_resources = memblock.memory.cnt;
 	res_size = num_standard_resources * sizeof(*standard_resources);
-	standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
+	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
 	if (!standard_resources)
 		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 844d8e7..b7f6f6e 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -215,11 +215,20 @@
 	beq	.Lzero
 
 .Lcmp_rest_lt8bytes:
-	/* Here we have only less than 8 bytes to compare with. at least s1
-	 * Address is aligned with 8 bytes.
-	 * The next double words are load and shift right with appropriate
-	 * bits.
+	/*
+	 * Here we have less than 8 bytes to compare. At least s1 is aligned to
+	 * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
+	 * page boundary, otherwise we might read past the end of the buffer and
+	 * trigger a page fault. We use 4K as the conservative minimum page
+	 * size. If we detect that case we go to the byte-by-byte loop.
+	 *
+	 * Otherwise the next double word is loaded from s1 and s2, and shifted
+	 * right to compare the appropriate bits.
 	 */
+	clrldi	r6,r4,(64-12)	// r6 = r4 & 0xfff
+	cmpdi	r6,0xff8
+	bgt	.Lshort
+
 	subfic  r6,r5,8
 	slwi	r6,r6,3
 	LD	rA,0,r3
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index 6ed2212..921f121 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
 
 		ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
 	} else {
-		const __be32 *indexes;
-
-		indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
-		if (indexes == NULL)
-			goto err_of_node_put;
+		u32 nr_drc_indexes, thread_drc_index;
 
 		/*
-		 * The first element indexes[0] is the number of drc_indexes
-		 * returned in the list.  Hence thread_index+1 will get the
-		 * drc_index corresponding to core number thread_index.
+		 * The first element of ibm,drc-indexes array is the
+		 * number of drc_indexes returned in the list.  Hence
+		 * thread_index+1 will get the drc_index corresponding
+		 * to core number thread_index.
 		 */
-		ret = indexes[thread_index + 1];
+		rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+						0, &nr_drc_indexes);
+		if (rc)
+			goto err_of_node_put;
+
+		WARN_ON_ONCE(thread_index > nr_drc_indexes);
+		rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+						thread_index + 1,
+						&thread_drc_index);
+		if (rc)
+			goto err_of_node_put;
+
+		ret = thread_drc_index;
 	}
 
 	rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index d97d527..452dcfd 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
 		"UE",
 		"SLB",
 		"ERAT",
+		"Unknown",
 		"TLB",
 		"D-Cache",
 		"Unknown",
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c1f9b3c..5ad9241 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2217,14 +2217,8 @@
 	   If unsure, leave at the default value.
 
 config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
+	def_bool y
 	depends on SMP
-	---help---
-	  Say Y here to allow turning CPUs off and on. CPUs can be
-	  controlled through /sys/devices/system/cpu.
-	  ( Note: power management support will enable this option
-	    automatically on SMP systems. )
-	  Say N if you want to disable CPU hotplug.
 
 config BOOTPARAM_HOTPLUG_CPU0
 	bool "Set default setting of cpu0_hotpluggable"
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6e0f2d97..d95f948 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
 	} else {
-		blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+		blk_mq_put_driver_tag(flush_rq);
 		flush_rq->internal_tag = -1;
 	}
 
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 
 	if (q->elevator) {
 		WARN_ON(rq->tag < 0);
-		blk_mq_put_driver_tag_hctx(hctx, rq);
+		blk_mq_put_driver_tag(rq);
 	}
 
 	/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 70b210a..3ff3d7b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 }
 
 /*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctx, dispatch list or elevator
+ * have pending work in this hardware queue.
  */
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
@@ -1071,7 +1072,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
 	spin_lock(&hctx->dispatch_wait_lock);
-	list_del_init(&wait->entry);
+	if (!list_empty(&wait->entry)) {
+		struct sbitmap_queue *sbq;
+
+		list_del_init(&wait->entry);
+		sbq = &hctx->tags->bitmap_tags;
+		atomic_dec(&sbq->ws_active);
+	}
 	spin_unlock(&hctx->dispatch_wait_lock);
 
 	blk_mq_run_hw_queue(hctx, true);
@@ -1087,6 +1094,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 				 struct request *rq)
 {
+	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
 	struct wait_queue_head *wq;
 	wait_queue_entry_t *wait;
 	bool ret;
@@ -1109,7 +1117,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 	if (!list_empty_careful(&wait->entry))
 		return false;
 
-	wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+	wq = &bt_wait_ptr(sbq, hctx)->wait;
 
 	spin_lock_irq(&wq->lock);
 	spin_lock(&hctx->dispatch_wait_lock);
@@ -1119,6 +1127,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 		return false;
 	}
 
+	atomic_inc(&sbq->ws_active);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	__add_wait_queue(wq, wait);
 
@@ -1139,6 +1148,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 	 * someone else gets the wakeup.
 	 */
 	list_del_init(&wait->entry);
+	atomic_dec(&sbq->ws_active);
 	spin_unlock(&hctx->dispatch_wait_lock);
 	spin_unlock_irq(&wq->lock);
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 0ed8e5a..d704fc7 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -224,15 +224,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
-				       struct request *rq)
-{
-	if (rq->tag == -1 || rq->internal_tag == -1)
-		return;
-
-	__blk_mq_put_driver_tag(hctx, rq);
-}
-
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
 	if (rq->tag == -1 || rq->internal_tag == -1)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 6ecbbab..eec263c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
 
 	acpi_permanent_mmap = true;
 
-	/* Initialize debug output. Linux does not use ACPICA defaults */
-	acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
 #ifdef CONFIG_X86
 	/*
 	 * If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1b207fc..d4244e7 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	cpc_read(cpunum, nominal_reg, &nom);
 	perf_caps->nominal_perf = nom;
 
-	cpc_read(cpunum, guaranteed_reg, &guaranteed);
-	perf_caps->guaranteed_perf = guaranteed;
+	if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
+	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+		perf_caps->guaranteed_perf = 0;
+	} else {
+		cpc_read(cpunum, guaranteed_reg, &guaranteed);
+		perf_caps->guaranteed_perf = guaranteed;
+	}
 
 	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
 	perf_caps->lowest_nonlinear_perf = min_nonlinear;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882..4b9c7ca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
 	size_t object_size = 0;
 
 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-	if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
 	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
 				      offset, read_size);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467..195f120 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+	mm = alloc->vma_vm_mm;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);
-	if (vma) {
-		if (!mmget_not_zero(alloc->vma_vm_mm))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_read_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9..173e6f2 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-	char buf[16];
+	char *buf;
 	unsigned int ret;
-	struct rm_feature_desc *desc = (void *)(buf + 8);
+	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
 	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
-			0, sizeof(buf),
+			0, 16,
 			0, 0, 0,
 	};
 
+	buf = kzalloc(16, GFP_KERNEL);
+	if (!buf)
+		return ODD_MECH_TYPE_UNSUPPORTED;
+	desc = (void *)(buf + 8);
+
 	ata_tf_init(dev, &tf);
 	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.command = ATA_CMD_PACKET;
 	tf.protocol = ATAPI_PROT_PIO;
-	tf.lbam = sizeof(buf);
+	tf.lbam = 16;
 
 	ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-				buf, sizeof(buf), 0);
-	if (ret)
+				buf, 16, 0);
+	if (ret) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (be16_to_cpu(desc->feature_code) != 3)
+	if (be16_to_cpu(desc->feature_code) != 3) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_SLOT;
-	else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+	} else if (desc->mech_type == 1 && desc->load == 0 &&
+		   desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_DRAWER;
-	else
+	} else {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 }
 
 /* Test if ODD is zero power ready by sense code */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e7a5f1d..399cad7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
 	struct zram *zram = dev_to_zram(dev);
 	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
 	int index;
-	char mode_buf[8];
-	ssize_t sz;
 
-	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-	if (sz <= 0)
-		return -EINVAL;
-
-	/* ignore trailing new line */
-	if (mode_buf[sz - 1] == '\n')
-		mode_buf[sz - 1] = 0x00;
-
-	if (strcmp(mode_buf, "all"))
+	if (!sysfs_streq(buf, "all"))
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
 	struct bio bio;
 	struct bio_vec bio_vec;
 	struct page *page;
-	ssize_t ret, sz;
-	char mode_buf[8];
-	int mode = -1;
+	ssize_t ret;
+	int mode;
 	unsigned long blk_idx = 0;
 
-	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-	if (sz <= 0)
-		return -EINVAL;
-
-	/* ignore trailing newline */
-	if (mode_buf[sz - 1] == '\n')
-		mode_buf[sz - 1] = 0x00;
-
-	if (!strcmp(mode_buf, "idle"))
+	if (sysfs_streq(buf, "idle"))
 		mode = IDLE_WRITEBACK;
-	else if (!strcmp(mode_buf, "huge"))
+	else if (sysfs_streq(buf, "huge"))
 		mode = HUGE_WRITEBACK;
-
-	if (mode == -1)
+	else
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e22f0db..b599c73 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
 	if (ret)
 		return ret;
 
-	return cppc_perf.guaranteed_perf;
+	if (cppc_perf.guaranteed_perf)
+		return cppc_perf.guaranteed_perf;
+
+	return cppc_perf.nominal_perf;
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 3f49427..2b51e07 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 
 	clk_put(priv->clk);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-	kfree(priv);
 	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+	kfree(priv);
 
 	return 0;
 }
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 4e0eede..ac0301b 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 
 	dmadev->nr_channels = nr_channels;
 	dmadev->nr_requests = nr_requests;
-	ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+	device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
 				       dmadev->ahb_addr_masks,
 				       count);
-	if (ret)
-		return ret;
 	dmadev->nr_ahb_addr_masks = count;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 91b90c0..12acdac 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	if (err < 0)
 		goto out;
 
-	if (err & BIT(pos))
-		err = -EACCES;
+	if (value & BIT(pos)) {
+		err = -EPERM;
+		goto out;
+	}
 
 	err = 0;
 
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 854bce4..2175070 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 
 	gpio->offset_timer =
 		devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+	if (!gpio->offset_timer)
+		return -ENOMEM;
 
 	return aspeed_gpio_setup_irqs(gpio, pdev);
 }
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 0ecd236..a09d2f9 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
 	mutex_init(&exar_gpio->lock);
 
 	index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+	if (index < 0)
+		goto err_destroy;
 
 	sprintf(exar_gpio->name, "exar_gpio%d", index);
 	exar_gpio->gpio_chip.label = exar_gpio->name;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 154d959..b6a4efc 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
 	struct gpio_mockup_chip *chip;
 	struct seq_file *sfile;
 	struct gpio_chip *gc;
+	int val, cnt;
 	char buf[3];
-	int val, rv;
 
 	if (*ppos != 0)
 		return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
 	gc = &chip->gc;
 
 	val = gpio_mockup_get(gc, priv->offset);
-	snprintf(buf, sizeof(buf), "%d\n", val);
+	cnt = snprintf(buf, sizeof(buf), "%d\n", val);
 
-	rv = copy_to_user(usr_buf, buf, sizeof(buf));
-	if (rv)
-		return rv;
-
-	return sizeof(buf) - 1;
+	return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
 }
 
 static ssize_t gpio_mockup_debugfs_write(struct file *file,
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8b9c3ab..6a3ec57 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
 	 * to determine if the flags should have inverted semantics.
 	 */
 	if (IS_ENABLED(CONFIG_SPI_MASTER) &&
-	    of_property_read_bool(np, "cs-gpios")) {
+	    of_property_read_bool(np, "cs-gpios") &&
+	    !strcmp(propname, "cs-gpios")) {
 		struct device_node *child;
 		u32 cs;
 		int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
 				 * conflict and the "spi-cs-high" flag will
 				 * take precedence.
 				 */
-				if (of_property_read_bool(np, "spi-cs-high")) {
+				if (of_property_read_bool(child, "spi-cs-high")) {
 					if (*flags & OF_GPIO_ACTIVE_LOW) {
 						pr_warn("%s GPIO handle specifies active low - ignored\n",
-							of_node_full_name(np));
+							of_node_full_name(child));
 						*flags &= ~OF_GPIO_ACTIVE_LOW;
 					}
 				} else {
 					if (!(*flags & OF_GPIO_ACTIVE_LOW))
 						pr_info("%s enforce active low on chipselect handle\n",
-							of_node_full_name(np));
+							of_node_full_name(child));
 					*flags |= OF_GPIO_ACTIVE_LOW;
 				}
 				break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
 	of_node_get(chip->of_node);
 
-	return of_gpiochip_scan_gpios(chip);
+	status = of_gpiochip_scan_gpios(chip);
+	if (status) {
+		of_node_put(chip->of_node);
+		gpiochip_remove_pin_ranges(chip);
+	}
+
+	return status;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 144af07..0495bf1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 	}
 
 	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
-	return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+	return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_debounce);
 
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 	packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
 					  !transitory);
 	gpio = gpio_chip_hwgpio(desc);
-	rc = gpio_set_config(chip, gpio, packed);
+	rc = chip->set_config(chip, gpio, packed);
 	if (rc == -ENOTSUPP) {
 		dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
 				gpio);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index fb27783..81127f7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc(
 	struct amdgpu_dm_connector *aconnector =
 			to_amdgpu_dm_connector(new_con_state->base.connector);
 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
+	int vrefresh = drm_mode_vrefresh(mode);
 
 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
-		aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+					vrefresh >= aconnector->min_vfreq &&
+					vrefresh <= aconnector->max_vfreq;
 
 	if (new_crtc_state->vrr_supported) {
 		new_crtc_state->stream->ignore_msa_timing_param = true;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b0..05bbc2b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
 	synchronize_srcu(&drm_unplug_srcu);
 
 	drm_dev_unregister(dev);
-
-	mutex_lock(&drm_global_mutex);
-	if (dev->open_count == 0)
-		drm_dev_put(dev);
-	mutex_unlock(&drm_global_mutex);
+	drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349f..af2ab64 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 				best_depth = fmt->depth;
 		}
 	}
-	if (sizes.surface_depth != best_depth) {
+	if (sizes.surface_depth != best_depth && best_depth) {
 		DRM_INFO("requested bpp %d, scaled depth down to %d",
 			 sizes.surface_bpp, best_depth);
 		sizes.surface_depth = best_depth;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbc..7caa3c7 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	drm_close_helper(filp);
 
-	if (!--dev->open_count) {
+	if (!--dev->open_count)
 		drm_lastclose(dev);
-		if (drm_dev_is_unplugged(dev))
-			drm_put_dev(dev);
-	}
+
 	mutex_unlock(&drm_global_mutex);
 
 	drm_minor_release(minor);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3..3592d04 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 
 	if (index_mode)	{
-		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+		if (guest_gma >= I915_GTT_PAGE_SIZE) {
 			ret = -EFAULT;
 			goto err;
 		}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c7103dd..d7052ab 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
 	}
 
 	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
 	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
 	return mm;
 }
 
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 		if (ret)
 			return ret;
 
+		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 		list_move_tail(&mm->ppgtt_mm.lru_list,
 			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 	}
 
 	return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 	struct intel_vgpu_mm *mm;
 	struct list_head *pos, *n;
 
+	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
 	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
 			continue;
 
 		list_del_init(&mm->ppgtt_mm.lru_list);
+		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 		invalidate_ppgtt_mm(mm);
 		return 1;
 	}
+	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
 
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		}
 	}
 	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+	mutex_init(&gvt->gtt.ppgtt_mm_lock);
 	return 0;
 }
 
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
 		if (mm->type == INTEL_GVT_MM_PPGTT) {
+			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			list_del_init(&mm->ppgtt_mm.lru_list);
+			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
 			if (mm->ppgtt_mm.shadowed)
 				invalidate_ppgtt_mm(mm);
 		}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d8cb04c..edb610d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
 	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
+	struct mutex ppgtt_mm_lock;
 	struct list_head ppgtt_mm_lru_list_head;
 
 	struct page *scratch_page;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb..7902fb1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
 	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
 	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+	{RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f93..159192c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	int i = 0;
 
 	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-		return -1;
+		return -EINVAL;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
 		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->shadow)
 		return 0;
 
-	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-	if (ret < 0) {
-		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-		return ret;
-	}
-
 	/* pin shadow context by gvt even the shadow context will be pinned
 	 * when i915 alloc request. That is because gvt will update the guest
 	 * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct i915_request *rq;
 	int ring_id = workload->ring_id;
 	int ret;
 
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+	if (ret < 0) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		goto err_req;
+	}
+
 	ret = intel_gvt_workload_req_alloc(workload);
 	if (ret)
 		goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	ret = prepare_workload(workload);
 out:
+	if (ret) {
+		/* We might still need to add request with
+		 * clean ctx to retire it properly..
+		 */
+		rq = fetch_and_zero(&workload->req);
+		i915_request_put(rq);
+	}
+
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 				ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
 		goto out;
 	}
 
-	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+	if (!scheduler->current_vgpu->active ||
+	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
 		goto out;
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb..a67a63b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
 				 INTEL_DEVID(dev_priv) == 0x5915 || \
 				 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
-				 INTEL_DEVID(dev_priv) == 0x87C0)
+				 INTEL_DEVID(dev_priv) == 0x87C0 || \
+				 INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586..047855d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE	_MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK	0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT	16
-#define   GEN11_GT_VEBOX_DISABLE_MASK	(0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK	(0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)	_MMIO_TRANS2(tran, \
 						     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE			(1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)	((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK	(0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT	0
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce71..b9b0ea4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
 			struct i915_gem_context *ctx;
 
 			ctx = live_context(i915, file);
-			if (!ctx)
+			if (IS_ERR(ctx))
 				break;
 
 			/* We will need some GGTT space for the rq's context */
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3..8a4ebcb 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
 	ret = drm_dev_register(drm, 0);
 	if (ret)
-		goto free_drm;
+		goto uninstall_irq;
 
 	drm_fbdev_generic_setup(drm, 32);
 
 	return 0;
 
+uninstall_irq:
+	drm_irq_uninstall(drm);
 free_drm:
 	drm_dev_put(drm);
 
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-	struct drm_device *drm = dev_get_drvdata(dev);
-	struct meson_drm *priv = drm->dev_private;
+	struct meson_drm *priv = dev_get_drvdata(dev);
+	struct drm_device *drm = priv->drm;
 
 	if (priv->canvas) {
 		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
 	}
 
 	drm_dev_unregister(drm);
+	drm_irq_uninstall(drm);
 	drm_kms_helper_poll_fini(drm);
 	drm_mode_config_cleanup(drm);
 	drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e28814f..563953e 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 	DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
 	/* If sink max TMDS clock, we reject the mode */
-	if (mode->clock > connector->display_info.max_tmds_clock)
+	if (connector->display_info.max_tmds_clock &&
+	    mode->clock > connector->display_info.max_tmds_clock)
 		return MODE_BAD;
 
 	/* Check against non-VIC supported modes */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7d4c60..0d4ade9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
 	clk_disable(vop->hclk);
 }
 
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+	if (win->phy->scl && win->phy->scl->ext) {
+		VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+		VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+	}
+
+	VOP_WIN_SET(vop, win, enable, 0);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
 		struct vop_win *vop_win = &vop->win[i];
 		const struct vop_win_data *win = vop_win->data;
 
-		VOP_WIN_SET(vop, win, enable, 0);
+		vop_win_disable(vop, win);
 	}
 	spin_unlock(&vop->reg_lock);
 
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 
 	spin_lock(&vop->reg_lock);
 
-	VOP_WIN_SET(vop, win, enable, 0);
+	vop_win_disable(vop, win);
 
 	spin_unlock(&vop->reg_lock);
 }
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
 		int channel = i * 2 + 1;
 
 		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-		VOP_WIN_SET(vop, win, enable, 0);
+		vop_win_disable(vop, win);
 		VOP_WIN_SET(vop, win, gate, 1);
 	}
 
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index ba9b3cf..b3436c2 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
 					      struct drm_plane_state *old_state)
 {
-	struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
 	struct tegra_plane *p = to_tegra_plane(plane);
+	struct tegra_dc *dc;
 	u32 value;
 
 	/* rien ne va plus */
 	if (!old_state || !old_state->crtc)
 		return;
 
+	dc = to_tegra_dc(old_state->crtc);
+
 	/*
 	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
 	 * on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 39bfed9..982ce37 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
 	if (vic->booted)
 		return 0;
 
+#ifdef CONFIG_IOMMU_API
 	if (vic->config->supports_sid) {
 		struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
 		u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
 			vic_writel(vic, value, VIC_THI_STREAMID1);
 		}
 	}
+#endif
 
 	/* setup clockgating registers */
 	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 66885c2..c1bd5e3 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
 #include "udl_connector.h"
 #include "udl_drv.h"
 
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
-							   u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+			       size_t len)
 {
 	int ret, i;
 	u8 *read_buff;
+	struct udl_device *udl = data;
 
 	read_buff = kmalloc(2, GFP_KERNEL);
 	if (!read_buff)
-		return false;
+		return -1;
 
-	for (i = 0; i < EDID_LENGTH; i++) {
-		int bval = (i + block_idx * EDID_LENGTH) << 8;
+	for (i = 0; i < len; i++) {
+		int bval = (i + block * EDID_LENGTH) << 8;
 		ret = usb_control_msg(udl->udev,
 				      usb_rcvctrlpipe(udl->udev, 0),
 					  (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
 		if (ret < 1) {
 			DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
 			kfree(read_buff);
-			return false;
+			return -1;
 		}
-		buff[i] = read_buff[1];
+		buf[i] = read_buff[1];
 	}
 
 	kfree(read_buff);
-	return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
-			 int *result_buff_size)
-{
-	int i, extensions;
-	u8 *block_buff = NULL, *buff_ptr;
-
-	block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
-	if (block_buff == NULL)
-		return false;
-
-	if (udl_get_edid_block(udl, 0, block_buff) &&
-	    memchr_inv(block_buff, 0, EDID_LENGTH)) {
-		extensions = ((struct edid *)block_buff)->extensions;
-		if (extensions > 0) {
-			/* we have to read all extensions one by one */
-			*result_buff_size = EDID_LENGTH * (extensions + 1);
-			*result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
-			buff_ptr = *result_buff;
-			if (buff_ptr == NULL) {
-				kfree(block_buff);
-				return false;
-			}
-			memcpy(buff_ptr, block_buff, EDID_LENGTH);
-			kfree(block_buff);
-			buff_ptr += EDID_LENGTH;
-			for (i = 1; i < extensions; ++i) {
-				if (udl_get_edid_block(udl, i, buff_ptr)) {
-					buff_ptr += EDID_LENGTH;
-				} else {
-					kfree(*result_buff);
-					*result_buff = NULL;
-					return false;
-				}
-			}
-			return true;
-		}
-		/* we have only base edid block */
-		*result_buff = block_buff;
-		*result_buff_size = EDID_LENGTH;
-		return true;
-	}
-
-	kfree(block_buff);
-
-	return false;
+	return 0;
 }
 
 static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-	u8 *edid_buff = NULL;
-	int edid_buff_size = 0;
 	struct udl_device *udl = connector->dev->dev_private;
 	struct udl_drm_connector *udl_connector =
 					container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
 		udl_connector->edid = NULL;
 	}
 
-
-	if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+	udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+	if (!udl_connector->edid)
 		return connector_status_disconnected;
 
-	udl_connector->edid = (struct edid *)edid_buff;
-	
 	return connector_status_connected;
 }
 
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5930fac..11a8f99 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 	ret = drm_gem_handle_create(file, &obj->base, handle);
 	drm_gem_object_put_unlocked(&obj->base);
 	if (ret)
-		goto err;
+		return ERR_PTR(ret);
 
 	return &obj->base;
-
-err:
-	__vgem_gem_destroy(obj);
-	return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 138b0bb..69048e7 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 
 	ret = drm_gem_handle_create(file, &obj->gem, handle);
 	drm_gem_object_put_unlocked(&obj->gem);
-	if (ret) {
-		drm_gem_object_release(&obj->gem);
-		kfree(obj);
+	if (ret)
 		return ERR_PTR(ret);
-	}
 
 	return &obj->gem;
 }
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f2c6819..f8979ab 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@
 	    Cannon Lake (PCH)
 	    Cedar Fork (PCH)
 	    Ice Lake (PCH)
+	    Comet Lake (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145..679c6c4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
  * Cannon Lake-LP (PCH)		0x9da3	32	hard	yes	yes	yes
  * Cedar Fork (PCH)		0x18df	32	hard	yes	yes	yes
  * Ice Lake-LP (PCH)		0x34a3	32	hard	yes	yes	yes
+ * Comet Lake (PCH)		0x02a3	32	hard	yes	yes	yes
  *
  * Features supported by this driver:
  * Software PEC				no
@@ -240,6 +241,7 @@
 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223
 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS	0xa2a3
 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS		0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS		0x02a3
 
 struct i801_mux_config {
 	char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
 	{ 0, }
 };
 
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
 	case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
 	case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+	case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
 		priv->features |= FEATURE_I2C_BLOCK_READ;
 		priv->features |= FEATURE_IRQ;
 		priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21cb088..f7cdd2a 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3169,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+		int type, prot = 0;
 		size_t length;
-		int prot = 0;
 
 		if (devid < entry->devid_start || devid > entry->devid_end)
 			continue;
 
+		type   = IOMMU_RESV_DIRECT;
 		length = entry->address_end - entry->address_start;
 		if (entry->prot & IOMMU_PROT_IR)
 			prot |= IOMMU_READ;
 		if (entry->prot & IOMMU_PROT_IW)
 			prot |= IOMMU_WRITE;
+		if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+			/* Exclusion range */
+			type = IOMMU_RESV_RESERVED;
 
 		region = iommu_alloc_resv_region(entry->address_start,
-						 length, prot,
-						 IOMMU_RESV_DIRECT);
+						 length, prot, type);
 		if (!region) {
 			dev_err(dev, "Out of memory allocating dm-regions\n");
 			return;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index f773792..1b13786 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
 	if (e == NULL)
 		return -ENOMEM;
 
+	if (m->flags & IVMD_FLAG_EXCL_RANGE)
+		init_exclusion_range(m);
+
 	switch (m->type) {
 	default:
 		kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 
 	while (p < end) {
 		m = (struct ivmd_header *)p;
-		if (m->flags & IVMD_FLAG_EXCL_RANGE)
-			init_exclusion_range(m);
-		else if (m->flags & IVMD_FLAG_UNITY_MAP)
+		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
 			init_unity_map_range(m);
 
 		p += m->length;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741..87965e4 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -374,6 +374,8 @@
 #define IOMMU_PROT_IR 0x01
 #define IOMMU_PROT_IW 0x02
 
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)
+
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB   24
 #define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f101afc..9a8a887 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -160,6 +160,14 @@
 
 #define ARM_V7S_TCR_PD1			BIT(5)
 
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
 typedef u32 arm_v7s_iopte;
 
 static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
 	void *table = NULL;
 
 	if (lvl == 1)
-		table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+		table = (void *)__get_free_pages(
+			__GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
 	else if (lvl == 2)
-		table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+		table = kmem_cache_zalloc(data->l2_tables, gfp);
 	phys = virt_to_phys(table);
-	if (phys != (arm_v7s_iopte)phys)
+	if (phys != (arm_v7s_iopte)phys) {
 		/* Doesn't fit in PTE */
+		dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
 		goto out_free;
+	}
 	if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
 		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
 	data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
 					    ARM_V7S_TABLE_SIZE(2),
 					    ARM_V7S_TABLE_SIZE(2),
-					    SLAB_CACHE_DMA, NULL);
+					    ARM_V7S_TABLE_SLAB_FLAGS, NULL);
 	if (!data->l2_tables)
 		goto out_free_data;
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33a982e..109de67 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
 		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
 		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
-			dev_warn(dev,
-				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
-				 iommu_def_domain_type);
 			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+			if (dom) {
+				dev_warn(dev,
+					 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+					 iommu_def_domain_type);
+			}
 		}
 
 		group->default_domain = dom;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b..7cb4d68 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
 	const struct i2c_device_id *id)
 {
 	int devid;
+	const struct of_device_id *of_id;
 	struct pca9532_data *data = i2c_get_clientdata(client);
 	struct pca9532_platform_data *pca9532_pdata =
 			dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
 			dev_err(&client->dev, "no platform data\n");
 			return -EINVAL;
 		}
-		devid = (int)(uintptr_t)of_match_device(
-			of_pca9532_leds_match, &client->dev)->data;
+		of_id = of_match_device(of_pca9532_leds_match,
+				&client->dev);
+		if (unlikely(!of_id))
+			return -EINVAL;
+		devid = (int)(uintptr_t) of_id->data;
 	} else {
 		devid = id->driver_data;
 	}
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed4..136f86a 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
 		trigger_data->net_dev = NULL;
 	}
 
-	strncpy(trigger_data->device_name, buf, size);
+	memcpy(trigger_data->device_name, buf, size);
+	trigger_data->device_name[size] = 0;
 	if (size > 0 && trigger_data->device_name[size - 1] == '\n')
 		trigger_data->device_name[size - 1] = 0;
 
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
 		container_of(nb, struct led_netdev_data, notifier);
 
 	if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
-	    && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
-	    && evt != NETDEV_CHANGENAME)
+	    && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
 
-	if (strcmp(dev->name, trigger_data->device_name))
+	if (!(dev == trigger_data->net_dev ||
+	      (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
 		return NOTIFY_DONE;
 
 	cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
 		dev_hold(dev);
 		trigger_data->net_dev = dev;
 		break;
-	case NETDEV_CHANGENAME:
 	case NETDEV_UNREGISTER:
-		if (trigger_data->net_dev) {
-			dev_put(trigger_data->net_dev);
-			trigger_data->net_dev = NULL;
-		}
+		dev_put(trigger_data->net_dev);
+		trigger_data->net_dev = NULL;
 		break;
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236..19c8421 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
 
 	/* We also need to update CI for internal queues */
 	if (cs->submitted) {
+		int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+		WARN_ONCE((cs_cnt < 0),
+			"hl%d: error in CS active cnt %d\n",
+			hdev->id, cs_cnt);
+
 		hl_int_hw_queue_update_ci(cs);
 
 		spin_lock(&hdev->hw_queues_mirror_lock);
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12a..974a877 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
 	enum vm_type_t *vm_type;
 	bool once = true;
+	u64 j;
 	int i;
 
 	if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
 			} else {
 				phys_pg_pack = hnode->ptr;
 				seq_printf(s,
-					"    0x%-14llx      %-10u       %-4u\n",
+					"    0x%-14llx      %-10llu       %-4u\n",
 					hnode->vaddr, phys_pg_pack->total_size,
 					phys_pg_pack->handle);
 			}
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
 						phys_pg_pack->page_size);
 			seq_puts(s, "   physical address\n");
 			seq_puts(s, "---------------------\n");
-			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
 				seq_printf(s, "    0x%-14llx\n",
-						phys_pg_pack->pages[i]);
+						phys_pg_pack->pages[j]);
 			}
 		}
 		spin_unlock(&vm->idr_lock);
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6..77d51be 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 
+#define HL_PLDM_PENDING_RESET_PER_SEC	(HL_PENDING_RESET_PER_SEC * 10)
+
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 {
 	if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
 	spin_lock_init(&hdev->hw_queues_mirror_lock);
 	atomic_set(&hdev->in_reset, 0);
 	atomic_set(&hdev->fd_open_cnt, 0);
+	atomic_set(&hdev->cs_active_cnt, 0);
 
 	return 0;
 
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
 
 	pci_save_state(hdev->pdev);
 
+	/* Block future CS/VM/JOB completion operations */
+	rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+	if (rc) {
+		dev_err(hdev->dev, "Can't suspend while in reset\n");
+		return -EIO;
+	}
+
+	/* This blocks all other operations that are not blocked by in_reset */
+	hdev->disabled = true;
+
+	/*
+	 * Flush anyone that is inside the critical section of enqueuing
+	 * jobs to the H/W
+	 */
+	hdev->asic_funcs->hw_queues_lock(hdev);
+	hdev->asic_funcs->hw_queues_unlock(hdev);
+
+	/* Flush processes that are sending messages to the CPU */
+	mutex_lock(&hdev->send_cpu_message_lock);
+	mutex_unlock(&hdev->send_cpu_message_lock);
+
 	rc = hdev->asic_funcs->suspend(hdev);
 	if (rc)
 		dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
 
 	pci_set_power_state(hdev->pdev, PCI_D0);
 	pci_restore_state(hdev->pdev);
-	rc = pci_enable_device(hdev->pdev);
+	rc = pci_enable_device_mem(hdev->pdev);
 	if (rc) {
 		dev_err(hdev->dev,
 			"Failed to enable PCI device in resume\n");
 		return rc;
 	}
 
+	pci_set_master(hdev->pdev);
+
 	rc = hdev->asic_funcs->resume(hdev);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to enable PCI access from device CPU\n");
-		return rc;
+		dev_err(hdev->dev, "Failed to resume device after suspend\n");
+		goto disable_device;
+	}
+
+
+	hdev->disabled = false;
+	atomic_set(&hdev->in_reset, 0);
+
+	rc = hl_device_reset(hdev, true, false);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to reset device during resume\n");
+		goto disable_device;
 	}
 
 	return 0;
+
+disable_device:
+	pci_clear_master(hdev->pdev);
+	pci_disable_device(hdev->pdev);
+
+	return rc;
 }
 
 static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
 	struct hl_device_reset_work *device_reset_work =
 		container_of(work, struct hl_device_reset_work, reset_work);
 	struct hl_device *hdev = device_reset_work->hdev;
-	u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+	u16 pending_total, pending_cnt;
 	struct task_struct *task = NULL;
 
+	if (hdev->pldm)
+		pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+	else
+		pending_total = HL_PENDING_RESET_PER_SEC;
+
+	pending_cnt = pending_total;
+
 	/* Flush all processes that are inside hl_open */
 	mutex_lock(&hdev->fd_open_cnt_lock);
 
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
 		}
 	}
 
+	pending_cnt = pending_total;
+
+	while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+		pending_cnt--;
+
+		ssleep(1);
+	}
+
+	if (atomic_read(&hdev->fd_open_cnt))
+		dev_crit(hdev->dev,
+			"Going to hard reset with open user contexts\n");
+
 	mutex_unlock(&hdev->fd_open_cnt_lock);
 
 	hl_device_reset(hdev, true, true);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c..ea979eb 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
 	return retval;
 }
 
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
-	WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
 /*
  * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
  *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
 	return retval;
 }
 
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
-	WREG32(mmMME_QM_GLBL_CFG1, 0);
-	WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC0_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC1_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC2_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC3_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC4_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC5_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC6_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC7_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
 static void goya_dma_stall(struct hl_device *hdev)
 {
 	WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
 {
 	int rc;
 
-	rc = goya_stop_internal_queues(hdev);
-
-	if (rc) {
-		dev_err(hdev->dev, "failed to stop internal queues\n");
-		return rc;
-	}
-
-	rc = goya_stop_external_queues(hdev);
-
-	if (rc) {
-		dev_err(hdev->dev, "failed to stop external queues\n");
-		return rc;
-	}
-
 	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
 	if (rc)
 		dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
 
 int goya_resume(struct hl_device *hdev)
 {
-	int rc;
-
-	goya_resume_external_queues(hdev);
-	goya_resume_internal_queues(hdev);
-
-	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
-	if (rc)
-		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
-	return rc;
+	return goya_init_iatu(hdev);
 }
 
 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 
 	*dma_handle = hdev->asic_prop.sram_base_address;
 
-	base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+	base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
 
 	switch (queue_id) {
 	case GOYA_QUEUE_ID_MME:
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9..a8ee52c8 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
  * @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
  * @mapping_cnt: number of shared mappings.
  * @asid: the context related to this list.
- * @npages: num physical pages in the pack.
  * @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
  * @flags: HL_MEM_* flags related to this list.
  * @handle: the provided handle related to this list.
  * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
 struct hl_vm_phys_pg_pack {
 	enum vm_type_t		vm_type; /* must be first */
 	u64			*pages;
+	u64			npages;
+	u64			total_size;
 	atomic_t		mapping_cnt;
 	u32			asid;
-	u32			npages;
 	u32			page_size;
-	u32			total_size;
 	u32			flags;
 	u32			handle;
 	u32			offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
  * @cb_pool_lock: protects the CB pool.
  * @user_ctx: current user context executing.
  * @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
  *             value is saved so in case of hard-reset, KMD will restore this
  *             value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ *                 means already in H/W queues)
  * @major: habanalabs KMD major.
  * @high_pll: high PLL profile frequency.
  * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
 	struct hl_ctx			*user_ctx;
 
 	atomic64_t			dram_used_mem;
+	u64				timeout_jiffies;
+	u64				max_power;
 	atomic_t			in_reset;
 	atomic_t			curr_pll_profile;
 	atomic_t			fd_open_cnt;
-	u64				timeout_jiffies;
-	u64				max_power;
+	atomic_t			cs_active_cnt;
 	u32				major;
 	u32				high_pll;
 	u32				soft_reset_cnt;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece2..ef3bb69 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
 		spin_unlock(&hdev->hw_queues_mirror_lock);
 	}
 
-	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+	atomic_inc(&hdev->cs_active_cnt);
+
+	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
 		if (job->ext_queue)
 			ext_hw_queue_schedule_job(job);
 		else
 			int_hw_queue_schedule_job(job);
-	}
 
 	cs->submitted = true;
 
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1..ce1fda4 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 	struct hl_device *hdev = ctx->hdev;
 	struct hl_vm *vm = &hdev->vm;
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
-	u64 paddr = 0;
-	u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-	int handle, rc, i;
+	u64 paddr = 0, total_size, num_pgs, i;
+	u32 num_curr_pgs, page_size, page_shift;
+	int handle, rc;
 	bool contiguous;
 
 	num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
 		if (!paddr) {
 			dev_err(hdev->dev,
-				"failed to allocate %u huge contiguous pages\n",
+				"failed to allocate %llu huge contiguous pages\n",
 				num_pgs);
 			return -ENOMEM;
 		}
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 	phys_pg_pack->flags = args->flags;
 	phys_pg_pack->contiguous = contiguous;
 
-	phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
 	if (!phys_pg_pack->pages) {
 		rc = -ENOMEM;
 		goto pages_arr_err;
@@ -148,7 +148,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
 					page_size);
 
-	kfree(phys_pg_pack->pages);
+	kvfree(phys_pg_pack->pages);
 pages_arr_err:
 	kfree(phys_pg_pack);
 pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
 		struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
 	struct hl_vm *vm = &hdev->vm;
-	int i;
+	u64 i;
 
 	if (!phys_pg_pack->created_from_userptr) {
 		if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
 		}
 	}
 
-	kfree(phys_pg_pack->pages);
+	kvfree(phys_pg_pack->pages);
 	kfree(phys_pg_pack);
 }
 
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-		struct hl_va_range *va_range, u32 size, u64 hint_addr,
+		struct hl_va_range *va_range, u64 size, u64 hint_addr,
 		bool is_userptr)
 {
 	struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
 	}
 
 	if (!new_va_block) {
-		dev_err(hdev->dev, "no available va block for size %u\n", size);
+		dev_err(hdev->dev, "no available va block for size %llu\n",
+				size);
 		goto out;
 	}
 
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
 	struct scatterlist *sg;
 	dma_addr_t dma_addr;
-	u64 page_mask;
-	u32 npages, total_npages, page_size = PAGE_SIZE;
+	u64 page_mask, total_npages;
+	u32 npages, page_size = PAGE_SIZE;
 	bool first = true, is_huge_page_opt = true;
 	int rc, i, j;
 
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
 	page_mask = ~(((u64) page_size) - 1);
 
-	phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+						GFP_KERNEL);
 	if (!phys_pg_pack->pages) {
 		rc = -ENOMEM;
 		goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 		struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
 	struct hl_device *hdev = ctx->hdev;
-	u64 next_vaddr = vaddr, paddr;
+	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
 	u32 page_size = phys_pg_pack->page_size;
-	int i, rc = 0, mapped_pg_cnt = 0;
+	int rc = 0;
 
 	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
 		paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
 		if (rc) {
 			dev_err(hdev->dev,
-				"map failed for handle %u, npages: %d, mapped: %d",
+				"map failed for handle %u, npages: %llu, mapped: %llu",
 				phys_pg_pack->handle, phys_pg_pack->npages,
 				mapped_pg_cnt);
 			goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 	struct hl_vm_hash_node *hnode = NULL;
 	struct hl_userptr *userptr = NULL;
 	enum vm_type_t *vm_type;
-	u64 next_vaddr;
+	u64 next_vaddr, i;
 	u32 page_size;
 	bool is_userptr;
-	int i, rc;
+	int rc;
 
 	/* protect from double entrance */
 	mutex_lock(&ctx->mem_hash_lock);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99c..3a5a2ce 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 {
 	struct hl_device *hdev = ctx->hdev;
-	u64 real_virt_addr;
+	u64 real_virt_addr, real_phys_addr;
 	u32 real_page_size, npages;
 	int i, rc, mapped_cnt = 0;
 
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 
 	npages = page_size / real_page_size;
 	real_virt_addr = virt_addr;
+	real_phys_addr = phys_addr;
 
 	for (i = 0 ; i < npages ; i++) {
-		rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+		rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
 				real_page_size);
 		if (rc)
 			goto err;
 
 		real_virt_addr += real_page_size;
+		real_phys_addr += real_page_size;
 		mapped_cnt++;
 	}
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2839bb7..f0716f6 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
 		struct nvme_ns *ns)
 {
-	enum nvme_ana_state old;
-
 	mutex_lock(&ns->head->lock);
-	old = ns->ana_state;
 	ns->ana_grpid = le32_to_cpu(desc->grpid);
 	ns->ana_state = desc->state;
 	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
-	if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+	if (nvme_state_is_live(ns->ana_state))
 		nvme_mpath_set_live(ns);
 	mutex_unlock(&ns->head->lock);
 }
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e7e0888..68c49dd 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 	return ret;
 }
 
-static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
 	union nvme_result res = {};
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2d73b66..b3e765a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 
 	ret = nvmet_p2pmem_ns_enable(ns);
 	if (ret)
-		goto out_unlock;
+		goto out_dev_disable;
 
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 out_dev_put:
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
 	nvmet_ns_dev_disable(ns);
 	goto out_unlock;
 }
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 3e43212..bc6ebb5 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -75,11 +75,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 	return ret;
 }
 
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
 {
-	bv->bv_page = sg_page_iter_page(iter);
-	bv->bv_offset = iter->sg->offset;
-	bv->bv_len = PAGE_SIZE - iter->sg->offset;
+	bv->bv_page = sg_page(sg);
+	bv->bv_offset = sg->offset;
+	bv->bv_len = sg->length;
 }
 
 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 
 static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
-	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
-	struct sg_page_iter sg_pg_iter;
+	ssize_t nr_bvec = req->sg_cnt;
 	unsigned long bv_cnt = 0;
 	bool is_sync = false;
 	size_t len = 0, total_len = 0;
 	ssize_t ret = 0;
 	loff_t pos;
-
+	int i;
+	struct scatterlist *sg;
 
 	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
 		is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 	}
 
 	memset(&req->f.iocb, 0, sizeof(struct kiocb));
-	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
-		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+	for_each_sg(req->sg, sg, req->sg_cnt, i) {
+		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
 		len += req->f.bvec[bv_cnt].bv_len;
 		total_len += req->f.bvec[bv_cnt].bv_len;
 		bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
 
 static void nvmet_file_execute_rw(struct nvmet_req *req)
 {
-	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+	ssize_t nr_bvec = req->sg_cnt;
 
 	if (!req->sg_cnt || !nr_bvec) {
 		nvmet_req_complete(req, 0);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 224d886..d994839 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
 			   enum pcie_link_width *width);
 void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
+void pcie_report_downtraining(struct pci_dev *dev);
 
 /* Single Root I/O Virtualization */
 struct pci_sriov {
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index d2eae3b..4fa9e352 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
 {
 	u16 lnk_ctl;
 
+	pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+
 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
 	lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
 }
 
-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
 {
 	struct pcie_device *srv = context;
 	struct pci_dev *port = srv->port;
-	struct pci_dev *dev;
 	u16 link_status, events;
 	int ret;
 
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
 	if (ret != PCIBIOS_SUCCESSFUL || !events)
 		return IRQ_NONE;
 
+	pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+	pcie_update_link_speed(port->subordinate, link_status);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+{
+	struct pcie_device *srv = context;
+	struct pci_dev *port = srv->port;
+	struct pci_dev *dev;
+
 	/*
 	 * Print status from downstream devices, not this root port or
 	 * downstream switch port.
 	 */
 	down_read(&pci_bus_sem);
 	list_for_each_entry(dev, &port->subordinate->devices, bus_list)
-		__pcie_print_link_status(dev, false);
+		pcie_report_downtraining(dev);
 	up_read(&pci_bus_sem);
 
-	pcie_update_link_speed(port->subordinate, link_status);
-	pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
 	return IRQ_HANDLED;
 }
 
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
 	if (!pcie_link_bandwidth_notification_supported(srv->port))
 		return -ENODEV;
 
-	ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler,
+	ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+				   pcie_bw_notification_handler,
 				   IRQF_SHARED, "PCIe BW notif", srv);
 	if (ret)
 		return ret;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2ec0df0..7e12d01 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
 	return dev;
 }
 
-static void pcie_report_downtraining(struct pci_dev *dev)
+void pcie_report_downtraining(struct pci_dev *dev)
 {
 	if (!pci_is_pcie(dev))
 		return;
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5163097..4bbd9ed 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
 	struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
 	int new_mode;
 
-	if (phy->index != 0)
+	if (phy->index != 0) {
+		if (mode == PHY_MODE_USB_HOST)
+			return 0;
 		return -EINVAL;
+	}
 
 	switch (mode) {
 	case PHY_MODE_USB_HOST:
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 744a646..e8fc28d 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 	add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+				     int clear, char *dbftag)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		_zfcp_erp_port_forced_reopen(port, clear, dbftag);
+	read_unlock(&adapter->port_list_lock);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
 				      int clear, char *dbftag)
 {
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
 		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
 		int lun_status;
 
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL)
+			continue;
 		if (zsdev->port != port)
 			continue;
 		/* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3fce47b..c6acca5 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
 				 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+					    int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index db00b5e..33eddb0 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 	list_for_each_entry(port, &adapter->port_list, list) {
 		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
 			zfcp_fc_test_link(port);
-		if (!port->d_id)
-			zfcp_erp_port_reopen(port,
-					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
 	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fc_els_rscn *head;
 	struct fc_els_rscn_page *page;
 	u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 	no_entries = be16_to_cpu(head->rscn_plen) /
 		sizeof(struct fc_els_rscn_page);
 
+	if (no_entries > 1) {
+		/* handle failed ports */
+		unsigned long flags;
+		struct zfcp_port *port;
+
+		read_lock_irqsave(&adapter->port_list_lock, flags);
+		list_for_each_entry(port, &adapter->port_list, list) {
+			if (port->d_id)
+				continue;
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1");
+		}
+		read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	}
+
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
 		page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f4f6a07..221d0df 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret = SUCCESS, fc_ret;
 
+	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+		zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+		zfcp_erp_wait(adapter);
+	}
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 1df5171..11fb68d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
 	return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+	return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-	if (unlikely(pci_channel_offline(dev->pdev)))
+	if (unlikely(aac_pci_offline(dev)))
 		return -1;
 
 	return (dev)->a_ops.adapter_check_health(dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e67e032..78430a7 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 					return -ETIMEDOUT;
 				}
 
-				if (unlikely(pci_channel_offline(dev->pdev)))
+				if (unlikely(aac_pci_offline(dev)))
 					return -EFAULT;
 
 				if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-		if (unlikely(pci_channel_offline(dev->pdev)))
+		if (unlikely(aac_pci_offline(dev)))
 			return -EFAULT;
 
 		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dbaa4f1..3ad997a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -139,6 +139,7 @@ static const struct {
 	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
 
 	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
 };
 
 static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
 	if (rsp->flags & FCP_RSP_LEN_VALID)
 		rsp_code = rsp->data.info.rsp_code;
 
-	scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
-		    cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
 			    fc_rsp->scsi_status);
 		rsp_rc = -EIO;
 	} else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
 			    fc_rsp->scsi_status);
 		rsp_rc = -EIO;
 	} else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
 			/* We need to re-setup the interpartition connection */
-			dev_info(vhost->dev, "Re-enabling adapter\n");
+			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
 			vhost->client_migrated = 1;
 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
-		} else {
-			dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
 			ibmvfc_purge_requests(vhost, DID_ERROR);
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+		} else {
+			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
 		}
 		return;
 	case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 
 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			rsp->status, rsp->error, status);
+			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
 		break;
 	}
 
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 
 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
-			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
-			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
 		break;
 	}
 
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
-			 mad->iu.status, mad->iu.error,
+			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
 		break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 
 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
-			rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
-			rsp->fc_explain, status);
+			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+			status);
 		break;
 	}
 
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
 		level += ibmvfc_retry_host_init(vhost);
 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			   rsp->status, rsp->error);
+			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
 		break;
 	case IBMVFC_MAD_DRIVER_FAILED:
 		break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-						rsp->status, rsp->error);
+						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
 		ibmvfc_free_event(evt);
 		return;
 	case IBMVFC_MAD_CRQ_ERROR:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c..459cc28 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
 	IBMVFC_CRQ_XPORT_EVENT		= 0xFF,
 };
 
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
 	IBMVFC_CRQ_INIT			= 0x01,
 	IBMVFC_CRQ_INIT_COMPLETE	= 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+	IBMVFC_PARTNER_FAILED		= 0x01,
+	IBMVFC_PARTNER_DEREGISTER	= 0x02,
 	IBMVFC_PARTITION_MIGRATED	= 0x06,
 };
 
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e577744..1d8c584 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 
 	if (smid < ioc->hi_priority_smid) {
 		struct scsiio_tracker *st;
+		void *request;
 
 		st = _get_st_from_smid(ioc, smid);
 		if (!st) {
 			_base_recovery_check(ioc);
 			return;
 		}
+
+		/* Clear MPI request frame */
+		request = mpt3sas_base_get_msg_frame(ioc, smid);
+		memset(request, 0, ioc->request_sz);
+
 		mpt3sas_base_clear_st(ioc, st);
 		_base_recovery_check(ioc);
 		return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8bb5b8f..1ccfbc7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
 	struct scsi_cmnd *scmd = NULL;
 	struct scsiio_tracker *st;
+	Mpi25SCSIIORequest_t *mpi_request;
 
 	if (smid > 0  &&
 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
 		u32 unique_tag = smid - 1;
 
+		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+		/*
+		 * If SCSI IO request is outstanding at driver level then
+		 * DevHandle field must be non-zero. If DevHandle is zero
+		 * then it means that this smid is free at driver level,
+		 * so return NULL.
+		 */
+		if (!mpi_request->DevHandle)
+			return scmd;
+
 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
 		if (scmd) {
 			st = scsi_cmd_priv(scmd);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 16a18d5..6e4f493 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
 		return -EINVAL;
 	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
 	conn = cls_conn->dd_data;
 	qla_conn = conn->dd_data;
 	qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6a9040f..3b119ca 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 
 	mutex_lock(&sdev->state_mutex);
 	ret = scsi_device_set_state(sdev, state);
+	/*
+	 * If the device state changes to SDEV_RUNNING, we need to run
+	 * the queue to avoid I/O hang.
+	 */
+	if (ret == 0 && state == SDEV_RUNNING)
+		blk_mq_run_hw_queues(sdev->request_queue, true);
 	mutex_unlock(&sdev->state_mutex);
 
 	return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 251db30..2b2bc4b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
 	}
 
-	/*
-	 * XXX and what if there are packets in flight and this close()
-	 * XXX is followed by a "rmmod sd_mod"?
-	 */
-
 	scsi_disk_put(sdkp);
 }
 
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
 	unsigned int opt_xfer_bytes =
 		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 
+	if (sdkp->opt_xfer_blocks == 0)
+		return false;
+
 	if (sdkp->opt_xfer_blocks > dev_max) {
 		sd_first_printk(KERN_WARNING, sdkp,
 				"Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct gendisk *disk = sdkp->disk;
-	
+	struct request_queue *q = disk->queue;
+
 	ida_free(&sd_index_ida, sdkp->index);
 
+	/*
+	 * Wait until all requests that are in progress have completed.
+	 * This is necessary so that e.g. scsi_end_request() does not crash
+	 * due to clearing the disk->private_data pointer. Wait from inside
+	 * scsi_disk_release() instead of from sd_release() so that freezing
+	 * and unfreezing the request queue does not affect user space I/O
+	 * in case multiple processes open a /dev/sd... node concurrently.
+	 */
+	blk_mq_freeze_queue(q);
+	blk_mq_unfreeze_queue(q);
+
 	disk->private_data = NULL;
 	put_disk(disk);
 	put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c0901b9..62951e8 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,8 +114,6 @@
 
 source "drivers/staging/mt7621-mmc/Kconfig"
 
-source "drivers/staging/mt7621-eth/Kconfig"
-
 source "drivers/staging/mt7621-dts/Kconfig"
 
 source "drivers/staging/gasket/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 57c6bce..d1b17dd 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,7 +47,6 @@
 obj-$(CONFIG_SOC_MT7621)	+= mt7621-dma/
 obj-$(CONFIG_DMA_RALINK)	+= ralink-gdma/
 obj-$(CONFIG_MTK_MMC)		+= mt7621-mmc/
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING)	+= mt7621-eth/
 obj-$(CONFIG_SOC_MT7621)	+= mt7621-dts/
 obj-$(CONFIG_STAGING_GASKET_FRAMEWORK)	+= gasket/
 obj-$(CONFIG_XIL_AXIS_FIFO)	+= axis-fifo/
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 6875372..d972588 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
 #
 config XIL_AXIS_FIFO
 	tristate "Xilinx AXI-Stream FIFO IP core driver"
+	depends on OF
 	default n
 	help
 	  This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index a7d569c..0dff1ac 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
 			   unsigned int mask);
 unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
 				     unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+				       struct comedi_cmd *cmd);
 unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
 unsigned int comedi_nscans_left(struct comedi_subdevice *s,
 				unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index eefa62f..5a32b8f 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
 EXPORT_SYMBOL_GPL(comedi_dio_update_state);
 
 /**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
  * @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
  *
  * Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
  *
  * For digital input, output or input/output subdevices, samples for
  * multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
  *
  * Returns the overall scan length in bytes.
  */
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+				       struct comedi_cmd *cmd)
 {
-	struct comedi_cmd *cmd = &s->async->cmd;
 	unsigned int num_samples;
 	unsigned int bits_per_sample;
 
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
 	}
 	return comedi_samples_to_bytes(s, num_samples);
 }
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag.  For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+	struct comedi_cmd *cmd = &s->async->cmd;
+
+	return comedi_bytes_per_scan_cmd(s, cmd);
+}
 EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
 
 static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5edf59a..b04dad8 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
 			   struct comedi_subdevice *s, struct comedi_cmd *cmd)
 {
 	struct ni_private *devpriv = dev->private;
+	unsigned int bytes_per_scan;
 	int err = 0;
 
 	/* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
 	err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
 	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
 					   cmd->chanlist_len);
-	err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
-					    s->async->prealloc_bufsz /
-					    comedi_bytes_per_scan(s));
+	bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+	if (bytes_per_scan) {
+		err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+						    s->async->prealloc_bufsz /
+						    bytes_per_scan);
+	}
 
 	if (err)
 		return 3;
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 829f7b12..9bbc687 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
 	[EROFS_FT_SYMLINK]	= DT_LNK,
 };
 
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+			     unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+	/* since the on-disk name may not have a trailing '\0' */
+	unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+	memcpy(dbg_namebuf, de_name, de_namelen);
+	dbg_namebuf[de_namelen] = '\0';
+
+	debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
+		de_namelen, d_type);
+#endif
+}
+
 static int erofs_fill_dentries(struct dir_context *ctx,
 			       void *dentry_blk, unsigned int *ofs,
 			       unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
 	de = dentry_blk + *ofs;
 	while (de < end) {
 		const char *de_name;
-		int de_namelen;
+		unsigned int de_namelen;
 		unsigned char d_type;
-#ifdef CONFIG_EROFS_FS_DEBUG
-		unsigned int dbg_namelen;
-		unsigned char dbg_namebuf[EROFS_NAME_LEN];
-#endif
 
-		if (unlikely(de->file_type < EROFS_FT_MAX))
+		if (de->file_type < EROFS_FT_MAX)
 			d_type = erofs_filetype_table[de->file_type];
 		else
 			d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
 		nameoff = le16_to_cpu(de->nameoff);
 		de_name = (char *)dentry_blk + nameoff;
 
-		de_namelen = unlikely(de + 1 >= end) ?
-			/* last directory entry */
-			strnlen(de_name, maxsize - nameoff) :
-			le16_to_cpu(de[1].nameoff) - nameoff;
+		/* the last dirent in the block? */
+		if (de + 1 >= end)
+			de_namelen = strnlen(de_name, maxsize - nameoff);
+		else
+			de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
 
 		/* a corrupted entry is found */
-		if (unlikely(de_namelen < 0)) {
+		if (unlikely(nameoff + de_namelen > maxsize ||
+			     de_namelen > EROFS_NAME_LEN)) {
 			DBG_BUGON(1);
 			return -EIO;
 		}
 
-#ifdef CONFIG_EROFS_FS_DEBUG
-		dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
-		memcpy(dbg_namebuf, de_name, dbg_namelen);
-		dbg_namebuf[dbg_namelen] = '\0';
-
-		debugln("%s, found de_name %s de_len %d d_type %d", __func__,
-			dbg_namebuf, de_namelen, d_type);
-#endif
-
+		debug_one_dentry(d_type, de_name, de_namelen);
 		if (!dir_emit(ctx, de_name, de_namelen,
 			      le64_to_cpu(de->nid), d_type))
 			/* stopped by some reason */
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8715bc5..31eef83 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -972,6 +972,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	overlapped = false;
 	compressed_pages = grp->compressed_pages;
 
+	err = 0;
 	for (i = 0; i < clusterpages; ++i) {
 		unsigned int pagenr;
 
@@ -981,26 +982,39 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 		DBG_BUGON(!page);
 		DBG_BUGON(!page->mapping);
 
-		if (z_erofs_is_stagingpage(page))
-			continue;
+		if (!z_erofs_is_stagingpage(page)) {
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-		if (page->mapping == MNGD_MAPPING(sbi)) {
-			DBG_BUGON(!PageUptodate(page));
-			continue;
-		}
+			if (page->mapping == MNGD_MAPPING(sbi)) {
+				if (unlikely(!PageUptodate(page)))
+					err = -EIO;
+				continue;
+			}
 #endif
 
-		/* only non-head page could be reused as a compressed page */
-		pagenr = z_erofs_onlinepage_index(page);
+			/*
+			 * only a non-head page can be selected
+			 * for inplace decompression
+			 */
+			pagenr = z_erofs_onlinepage_index(page);
 
-		DBG_BUGON(pagenr >= nr_pages);
-		DBG_BUGON(pages[pagenr]);
-		++sparsemem_pages;
-		pages[pagenr] = page;
+			DBG_BUGON(pagenr >= nr_pages);
+			DBG_BUGON(pages[pagenr]);
+			++sparsemem_pages;
+			pages[pagenr] = page;
 
-		overlapped = true;
+			overlapped = true;
+		}
+
+		/* PG_error needs checking for in-place and staging pages */
+		if (unlikely(PageError(page))) {
+			DBG_BUGON(PageUptodate(page));
+			err = -EIO;
+		}
 	}
 
+	if (unlikely(err))
+		goto out;
+
 	llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
 
 	if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 
 skip_allocpage:
 	vout = erofs_vmap(pages, nr_pages);
+	if (!vout) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	err = z_erofs_vle_unzip_vmap(compressed_pages,
 		clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
 	if (page->mapping == mc) {
 		WRITE_ONCE(grp->compressed_pages[nr], page);
 
+		ClearPageError(page);
 		if (!PagePrivate(page)) {
 			/*
 			 * impossible to be !PagePrivate(page) for
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 48b263a..0daac9b98 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
 
 	nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
 
-	if (clusterpages == 1)
+	if (clusterpages == 1) {
 		vin = kmap_atomic(compressed_pages[0]);
-	else
+	} else {
 		vin = erofs_vmap(compressed_pages, clusterpages);
+		if (!vin)
+			return -ENOMEM;
+	}
 
 	preempt_disable();
 	vout = erofs_pcpubuf[smp_processor_id()].data;
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b733855..250c15a 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -117,22 +117,6 @@
 	status = "okay";
 };
 
-&ethernet {
-	//mtd-mac-address = <&factory 0xe000>;
-	gmac1: mac@0 {
-		compatible = "mediatek,eth-mac";
-		reg = <0>;
-		phy-handle = <&phy1>;
-	};
-
-	mdio-bus {
-		phy1: ethernet-phy@1 {
-			reg = <1>;
-			phy-mode = "rgmii";
-		};
-	};
-};
-
 &pinctrl {
 	state_default: pinctrl0 {
 		gpio {
@@ -141,3 +125,16 @@
 		};
 	};
 };
+
+&switch0 {
+	ports {
+		port@0 {
+			label = "ethblack";
+			status = "ok";
+		};
+		port@4 {
+			label = "ethblue";
+			status = "ok";
+		};
+	};
+};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6aff368..17020e2 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -372,16 +372,83 @@
 
 		mediatek,ethsys = <&ethsys>;
 
-		mediatek,switch = <&gsw>;
 
+		gmac0: mac@0 {
+			compatible = "mediatek,eth-mac";
+			reg = <0>;
+			phy-mode = "rgmii";
+			fixed-link {
+				speed = <1000>;
+				full-duplex;
+				pause;
+			};
+		};
+		gmac1: mac@1 {
+			compatible = "mediatek,eth-mac";
+			reg = <1>;
+			status = "off";
+			phy-mode = "rgmii";
+			phy-handle = <&phy5>;
+		};
 		mdio-bus {
 			#address-cells = <1>;
 			#size-cells = <0>;
 
-			phy1f: ethernet-phy@1f {
-				reg = <0x1f>;
+			phy5: ethernet-phy@5 {
+				reg = <5>;
 				phy-mode = "rgmii";
 			};
+
+			switch0: switch0@0 {
+				compatible = "mediatek,mt7621";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0>;
+				mediatek,mcm;
+				resets = <&rstctrl 2>;
+				reset-names = "mcm";
+
+				ports {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+					port@0 {
+						status = "off";
+						reg = <0>;
+						label = "lan0";
+					};
+					port@1 {
+						status = "off";
+						reg = <1>;
+						label = "lan1";
+					};
+					port@2 {
+						status = "off";
+						reg = <2>;
+						label = "lan2";
+					};
+					port@3 {
+						status = "off";
+						reg = <3>;
+						label = "lan3";
+					};
+					port@4 {
+						status = "off";
+						reg = <4>;
+						label = "lan4";
+					};
+					port@6 {
+						reg = <6>;
+						label = "cpu";
+						ethernet = <&gmac0>;
+						phy-mode = "trgmii";
+						fixed-link {
+							speed = <1000>;
+							full-duplex;
+						};
+					};
+				};
+			};
 		};
 	};
 
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644
index 596b385..0000000
--- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs.
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
-  "mediatek,mt7623-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-
-
-Additional required properties for ARM based SoCs:
-- mediatek,reset-pin: phandle describing the reset GPIO
-- clocks: the clocks used by the switch
-- clock-names: the names of the clocks listed in the clocks property
-  these should be "trgpll", "esw", "gp2", "gp1"
-- mt7530-supply: the phandle of the regulator used to power the switch
-- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
-  setup the drive current
-
-
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
-
-Example:
-
-gsw: switch@1b100000 {
-	compatible = "mediatek,mt7623-gsw";
-	reg = <0 0x1b110000 0 0x300000>;
-
-	interrupt-parent = <&pio>;
-	interrupts = <168 IRQ_TYPE_EDGE_RISING>;
-
-	clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
-		 <&ethsys CLK_ETHSYS_ESW>,
-		 <&ethsys CLK_ETHSYS_GP2>,
-		 <&ethsys CLK_ETHSYS_GP1>;
-	clock-names = "trgpll", "esw", "gp2", "gp1";
-
-	mt7530-supply = <&mt6323_vpa_reg>;
-
-	mediatek,pctl-regmap = <&syscfg_pctl_a>;
-	mediatek,reset-pin = <&pio 15 0>;
-
-	status = "okay";
-};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644
index 44ea86c..0000000
--- a/drivers/staging/mt7621-eth/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-config NET_VENDOR_MEDIATEK_STAGING
-	bool "MediaTek ethernet driver - staging version"
-	depends on RALINK
-	---help---
-	  If you have an MT7621 Mediatek SoC with ethernet, say Y.
-
-if NET_VENDOR_MEDIATEK_STAGING
-choice
-	prompt "MAC type"
-
-config NET_MEDIATEK_MT7621
-	bool "MT7621"
-	depends on MIPS && SOC_MT7621
-
-endchoice
-
-config NET_MEDIATEK_SOC_STAGING
-	tristate "MediaTek SoC Gigabit Ethernet support"
-	depends on NET_VENDOR_MEDIATEK_STAGING
-	select PHYLIB
-	---help---
-	  This driver supports the gigabit ethernet MACs in the
-	  MediaTek SoC family.
-
-config NET_MEDIATEK_MDIO
-	def_bool NET_MEDIATEK_SOC_STAGING
-	depends on NET_MEDIATEK_MT7621
-	select PHYLIB
-
-config NET_MEDIATEK_MDIO_MT7620
-	def_bool NET_MEDIATEK_SOC_STAGING
-	depends on NET_MEDIATEK_MT7621
-	select NET_MEDIATEK_MDIO
-
-config NET_MEDIATEK_GSW_MT7621
-	def_tristate NET_MEDIATEK_SOC_STAGING
-	depends on NET_MEDIATEK_MT7621
-
-endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644
index 018bcc3..0000000
--- a/drivers/staging/mt7621-eth/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the Ralink SoCs built-in ethernet macs
-#
-
-mtk-eth-soc-y					+= mtk_eth_soc.o ethtool.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO)		+= mdio.o
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620)	+= mdio_mt7620.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621)	+= soc_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621)		+= gsw_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING)		+= mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644
index f9e47d4..0000000
--- a/drivers/staging/mt7621-eth/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-
-- verify devicetree documentation is consistent with code
-- fix ethtool - currently doesn't return valid data.
-- general code review and clean up
-- add support for second MAC on mt7621
-- convert gsw code to use switchdev interfaces
-- md7620_mmi_write etc should probably be wrapped
-  in a regmap abstraction.
-- Get soc_mt7621 to work with QDMA TX if possible.
-- Ensure phys are correctly configured when a cable
-  is plugged in.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644
index 8c4228e..0000000
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include "mtk_eth_soc.h"
-#include "ethtool.h"
-
-struct mtk_stat {
-	char name[ETH_GSTRING_LEN];
-	unsigned int idx;
-};
-
-#define MTK_HW_STAT(stat) { \
-	.name = #stat, \
-	.idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
-}
-
-static const struct mtk_stat mtk_ethtool_hw_stats[] = {
-	MTK_HW_STAT(tx_bytes),
-	MTK_HW_STAT(tx_packets),
-	MTK_HW_STAT(tx_skip),
-	MTK_HW_STAT(tx_collisions),
-	MTK_HW_STAT(rx_bytes),
-	MTK_HW_STAT(rx_packets),
-	MTK_HW_STAT(rx_overflow),
-	MTK_HW_STAT(rx_fcs_errors),
-	MTK_HW_STAT(rx_short_errors),
-	MTK_HW_STAT(rx_long_errors),
-	MTK_HW_STAT(rx_checksum_errors),
-	MTK_HW_STAT(rx_flow_control_packets),
-};
-
-#define MTK_HW_STATS_LEN	ARRAY_SIZE(mtk_ethtool_hw_stats)
-
-static int mtk_get_link_ksettings(struct net_device *dev,
-				  struct ethtool_link_ksettings *cmd)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	int err;
-
-	if (!mac->phy_dev)
-		return -ENODEV;
-
-	if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-		err = phy_read_status(mac->phy_dev);
-		if (err)
-			return -ENODEV;
-	}
-
-	phy_ethtool_ksettings_get(mac->phy_dev, cmd);
-	return 0;
-}
-
-static int mtk_set_link_ksettings(struct net_device *dev,
-				  const struct ethtool_link_ksettings *cmd)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-
-	if (!mac->phy_dev)
-		return -ENODEV;
-
-	if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
-		if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
-			mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
-			mac->phy_flags = MTK_PHY_FLAG_PORT;
-		} else if (mac->hw->mii_bus) {
-			mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
-						       cmd->base.phy_address);
-			if (!mac->phy_dev)
-				return -ENODEV;
-			mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-		} else {
-			return -ENODEV;
-		}
-	}
-
-	return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-}
-
-static void mtk_get_drvinfo(struct net_device *dev,
-			    struct ethtool_drvinfo *info)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_soc_data *soc = mac->hw->soc;
-
-	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
-	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
-
-	if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
-		info->n_stats = MTK_HW_STATS_LEN;
-}
-
-static u32 mtk_get_msglevel(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-
-	return mac->hw->msg_enable;
-}
-
-static void mtk_set_msglevel(struct net_device *dev, u32 value)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-
-	mac->hw->msg_enable = value;
-}
-
-static int mtk_nway_reset(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-
-	if (!mac->phy_dev)
-		return -EOPNOTSUPP;
-
-	return genphy_restart_aneg(mac->phy_dev);
-}
-
-static u32 mtk_get_link(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	int err;
-
-	if (!mac->phy_dev)
-		goto out_get_link;
-
-	if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-		err = genphy_update_link(mac->phy_dev);
-		if (err)
-			goto out_get_link;
-	}
-
-	return mac->phy_dev->link;
-
-out_get_link:
-	return ethtool_op_get_link(dev);
-}
-
-static int mtk_set_ringparam(struct net_device *dev,
-			     struct ethtool_ringparam *ring)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-
-	if ((ring->tx_pending < 2) ||
-	    (ring->rx_pending < 2) ||
-	    (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
-	    (ring->tx_pending > mac->hw->soc->dma_ring_size))
-		return -EINVAL;
-
-	dev->netdev_ops->ndo_stop(dev);
-
-	mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
-	mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
-
-	return dev->netdev_ops->ndo_open(dev);
-}
-
-static void mtk_get_ringparam(struct net_device *dev,
-			      struct ethtool_ringparam *ring)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-
-	ring->rx_max_pending = mac->hw->soc->dma_ring_size;
-	ring->tx_max_pending = mac->hw->soc->dma_ring_size;
-	ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
-	ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
-}
-
-static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-	int i;
-
-	switch (stringset) {
-	case ETH_SS_STATS:
-		for (i = 0; i < MTK_HW_STATS_LEN; i++) {
-			memcpy(data, mtk_ethtool_hw_stats[i].name,
-			       ETH_GSTRING_LEN);
-			data += ETH_GSTRING_LEN;
-		}
-		break;
-	}
-}
-
-static int mtk_get_sset_count(struct net_device *dev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return MTK_HW_STATS_LEN;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void mtk_get_ethtool_stats(struct net_device *dev,
-				  struct ethtool_stats *stats, u64 *data)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_hw_stats *hwstats = mac->hw_stats;
-	unsigned int start;
-	int i;
-
-	if (netif_running(dev) && netif_device_present(dev)) {
-		if (spin_trylock(&hwstats->stats_lock)) {
-			mtk_stats_update_mac(mac);
-			spin_unlock(&hwstats->stats_lock);
-		}
-	}
-
-	do {
-		start = u64_stats_fetch_begin_irq(&hwstats->syncp);
-		for (i = 0; i < MTK_HW_STATS_LEN; i++)
-			data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
-
-	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-}
-
-static struct ethtool_ops mtk_ethtool_ops = {
-	.get_link_ksettings     = mtk_get_link_ksettings,
-	.set_link_ksettings     = mtk_set_link_ksettings,
-	.get_drvinfo		= mtk_get_drvinfo,
-	.get_msglevel		= mtk_get_msglevel,
-	.set_msglevel		= mtk_set_msglevel,
-	.nway_reset		= mtk_nway_reset,
-	.get_link		= mtk_get_link,
-	.set_ringparam		= mtk_set_ringparam,
-	.get_ringparam		= mtk_get_ringparam,
-};
-
-void mtk_set_ethtool_ops(struct net_device *netdev)
-{
-	struct mtk_mac *mac = netdev_priv(netdev);
-	struct mtk_soc_data *soc = mac->hw->soc;
-
-	if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-		mtk_ethtool_ops.get_strings = mtk_get_strings;
-		mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
-		mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
-	}
-
-	netdev->ethtool_ops = &mtk_ethtool_ops;
-}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644
index 0071469a..0000000
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETHTOOL_H
-#define MTK_ETHTOOL_H
-
-#include <linux/ethtool.h>
-
-void mtk_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644
index 70f7e54..0000000
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_GSW_MT7620_H__
-#define _RALINK_GSW_MT7620_H__
-
-#define GSW_REG_PHY_TIMEOUT	(5 * HZ)
-
-#define MT7620_GSW_REG_PIAC	0x0004
-
-#define GSW_NUM_VLANS		16
-#define GSW_NUM_VIDS		4096
-#define GSW_NUM_PORTS		7
-#define GSW_PORT6		6
-
-#define GSW_MDIO_ACCESS		BIT(31)
-#define GSW_MDIO_READ		BIT(19)
-#define GSW_MDIO_WRITE		BIT(18)
-#define GSW_MDIO_START		BIT(16)
-#define GSW_MDIO_ADDR_SHIFT	20
-#define GSW_MDIO_REG_SHIFT	25
-
-#define GSW_REG_PORT_PMCR(x)	(0x3000 + (x * 0x100))
-#define GSW_REG_PORT_STATUS(x)	(0x3008 + (x * 0x100))
-#define GSW_REG_SMACCR0		0x3fE4
-#define GSW_REG_SMACCR1		0x3fE8
-#define GSW_REG_CKGCR		0x3ff0
-
-#define GSW_REG_IMR		0x7008
-#define GSW_REG_ISR		0x700c
-#define GSW_REG_GPC1		0x7014
-
-#define SYSC_REG_CHIP_REV_ID	0x0c
-#define SYSC_REG_CFG		0x10
-#define SYSC_REG_CFG1		0x14
-#define RST_CTRL_MCM		BIT(2)
-#define SYSC_PAD_RGMII2_MDIO	0x58
-#define SYSC_GPIO_MODE		0x60
-
-#define PORT_IRQ_ST_CHG		0x7f
-
-#define MT7621_ESW_PHY_POLLING	0x0000
-#define MT7620_ESW_PHY_POLLING	0x7000
-
-#define	PMCR_IPG		BIT(18)
-#define	PMCR_MAC_MODE		BIT(16)
-#define	PMCR_FORCE		BIT(15)
-#define	PMCR_TX_EN		BIT(14)
-#define	PMCR_RX_EN		BIT(13)
-#define	PMCR_BACKOFF		BIT(9)
-#define	PMCR_BACKPRES		BIT(8)
-#define	PMCR_RX_FC		BIT(5)
-#define	PMCR_TX_FC		BIT(4)
-#define	PMCR_SPEED(_x)		(_x << 2)
-#define	PMCR_DUPLEX		BIT(1)
-#define	PMCR_LINK		BIT(0)
-
-#define PHY_AN_EN		BIT(31)
-#define PHY_PRE_EN		BIT(30)
-#define PMY_MDC_CONF(_x)	((_x & 0x3f) << 24)
-
-/* ethernet subsystem config register */
-#define ETHSYS_SYSCFG0		0x14
-/* ethernet subsystem clock register */
-#define ETHSYS_CLKCFG0		0x2c
-#define ETHSYS_TRGMII_CLK_SEL362_5	BIT(11)
-
-/* p5 RGMII wrapper TX clock control register */
-#define MT7530_P5RGMIITXCR	0x7b04
-/* p5 RGMII wrapper RX clock control register */
-#define MT7530_P5RGMIIRXCR	0x7b00
-/* TRGMII TDX ODT registers */
-#define MT7530_TRGMII_TD0_ODT	0x7a54
-#define MT7530_TRGMII_TD1_ODT	0x7a5c
-#define MT7530_TRGMII_TD2_ODT	0x7a64
-#define MT7530_TRGMII_TD3_ODT	0x7a6c
-#define MT7530_TRGMII_TD4_ODT	0x7a74
-#define MT7530_TRGMII_TD5_ODT	0x7a7c
-/* TRGMII TCK ctrl register */
-#define MT7530_TRGMII_TCK_CTRL	0x7a78
-/* TRGMII Tx ctrl register */
-#define MT7530_TRGMII_TXCTRL	0x7a40
-/* port 6 extended control register */
-#define MT7530_P6ECR            0x7830
-/* IO driver control register */
-#define MT7530_IO_DRV_CR	0x7810
-/* top signal control register */
-#define MT7530_TOP_SIG_CTRL	0x7808
-/* modified hwtrap register */
-#define MT7530_MHWTRAP		0x7804
-/* hwtrap status register */
-#define MT7530_HWTRAP		0x7800
-/* status interrupt register */
-#define MT7530_SYS_INT_STS	0x700c
-/* system interrupt register */
-#define MT7530_SYS_INT_EN	0x7008
-/* system control register */
-#define MT7530_SYS_CTRL		0x7000
-/* port MAC status register */
-#define MT7530_PMSR_P(x)	(0x3008 + (x * 0x100))
-/* port MAC control register */
-#define MT7530_PMCR_P(x)	(0x3000 + (x * 0x100))
-
-#define MT7621_XTAL_SHIFT	6
-#define MT7621_XTAL_MASK	0x7
-#define MT7621_XTAL_25		6
-#define MT7621_XTAL_40		3
-#define MT7621_MDIO_DRV_MASK	(3 << 4)
-#define MT7621_GE1_MODE_MASK	(3 << 12)
-
-#define TRGMII_TXCTRL_TXC_INV	BIT(30)
-#define P6ECR_INTF_MODE_RGMII	BIT(1)
-#define P5RGMIIRXCR_C_ALIGN	BIT(8)
-#define P5RGMIIRXCR_DELAY_2	BIT(1)
-#define P5RGMIITXCR_DELAY_2	(BIT(8) | BIT(2))
-
-/* TOP_SIG_CTRL bits */
-#define TOP_SIG_CTRL_NORMAL	(BIT(17) | BIT(16))
-
-/* MHWTRAP bits */
-#define MHWTRAP_MANUAL		BIT(16)
-#define MHWTRAP_P5_MAC_SEL	BIT(13)
-#define MHWTRAP_P6_DIS		BIT(8)
-#define MHWTRAP_P5_RGMII_MODE	BIT(7)
-#define MHWTRAP_P5_DIS		BIT(6)
-#define MHWTRAP_PHY_ACCESS	BIT(5)
-
-/* HWTRAP bits */
-#define HWTRAP_XTAL_SHIFT	9
-#define HWTRAP_XTAL_MASK	0x3
-
-/* SYS_CTRL bits */
-#define SYS_CTRL_SW_RST		BIT(1)
-#define SYS_CTRL_REG_RST	BIT(0)
-
-/* PMCR bits */
-#define PMCR_IFG_XMIT_96	BIT(18)
-#define PMCR_MAC_MODE		BIT(16)
-#define PMCR_FORCE_MODE		BIT(15)
-#define PMCR_TX_EN		BIT(14)
-#define PMCR_RX_EN		BIT(13)
-#define PMCR_BACK_PRES_EN	BIT(9)
-#define PMCR_BACKOFF_EN		BIT(8)
-#define PMCR_TX_FC_EN		BIT(5)
-#define PMCR_RX_FC_EN		BIT(4)
-#define PMCR_FORCE_SPEED_1000	BIT(3)
-#define PMCR_FORCE_FDX		BIT(1)
-#define PMCR_FORCE_LNK		BIT(0)
-#define PMCR_FIXED_LINK		(PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
-				 PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
-				 PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
-				 PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
-				 PMCR_FORCE_LNK)
-
-#define PMCR_FIXED_LINK_FC	(PMCR_FIXED_LINK | \
-				 PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-
-/* TRGMII control registers */
-#define GSW_INTF_MODE		0x390
-#define GSW_TRGMII_TD0_ODT	0x354
-#define GSW_TRGMII_TD1_ODT	0x35c
-#define GSW_TRGMII_TD2_ODT	0x364
-#define GSW_TRGMII_TD3_ODT	0x36c
-#define GSW_TRGMII_TXCTL_ODT	0x374
-#define GSW_TRGMII_TCK_ODT	0x37c
-#define GSW_TRGMII_RCK_CTRL	0x300
-
-#define INTF_MODE_TRGMII	BIT(1)
-#define TRGMII_RCK_CTRL_RX_RST	BIT(31)
-
-/* Mac control registers */
-#define MTK_MAC_P2_MCR		0x200
-#define MTK_MAC_P1_MCR		0x100
-
-#define MAC_MCR_MAX_RX_2K	BIT(29)
-#define MAC_MCR_IPG_CFG		(BIT(18) | BIT(16))
-#define MAC_MCR_FORCE_MODE	BIT(15)
-#define MAC_MCR_TX_EN		BIT(14)
-#define MAC_MCR_RX_EN		BIT(13)
-#define MAC_MCR_BACKOFF_EN	BIT(9)
-#define MAC_MCR_BACKPR_EN	BIT(8)
-#define MAC_MCR_FORCE_RX_FC	BIT(5)
-#define MAC_MCR_FORCE_TX_FC	BIT(4)
-#define MAC_MCR_SPEED_1000	BIT(3)
-#define MAC_MCR_FORCE_DPX	BIT(1)
-#define MAC_MCR_FORCE_LINK	BIT(0)
-#define MAC_MCR_FIXED_LINK	(MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-				 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
-				 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
-				 MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
-				 MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
-				 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
-#define MAC_MCR_FIXED_LINK_FC	(MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-				 MAC_MCR_FIXED_LINK)
-
-/* possible XTAL speed */
-#define	MT7623_XTAL_40		0
-#define MT7623_XTAL_20		1
-#define MT7623_XTAL_25		3
-
-/* GPIO port control registers */
-#define	GPIO_OD33_CTRL8		0x4c0
-#define	GPIO_BIAS_CTRL		0xed0
-#define GPIO_DRV_SEL10		0xf00
-
-/* on MT7620 the function of port 4 can be software configured */
-enum {
-	PORT4_EPHY = 0,
-	PORT4_EXT,
-};
-
-/* struct mt7620_gsw -	the structure that holds the SoC specific data
- * @dev:		The Device struct
- * @base:		The base address
- * @piac_offset:	The PIAC base may change depending on SoC
- * @irq:		The IRQ we are using
- * @port4:		The port4 mode on MT7620
- * @autopoll:		Is MDIO autopolling enabled
- * @ethsys:		The ethsys register map
- * @pctl:		The pin control register map
- * @clk_gsw:		The switch clock
- * @clk_gp1:		The gmac1 clock
- * @clk_gp2:		The gmac2 clock
- * @clk_trgpll:		The trgmii pll clock
- */
-struct mt7620_gsw {
-	struct device		*dev;
-	void __iomem		*base;
-	u32			piac_offset;
-	int			irq;
-	int			port4;
-	unsigned long int	autopoll;
-
-	struct regmap		*ethsys;
-	struct regmap		*pctl;
-
-	struct clk		*clk_gsw;
-	struct clk		*clk_gp1;
-	struct clk		*clk_gp2;
-	struct clk		*clk_trgpll;
-};
-
-/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
-
-/* the callback used by the driver core to bringup the switch */
-int mtk_gsw_init(struct mtk_eth *eth);
-
-/* MDIO access wrappers */
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
-int mt7620_has_carrier(struct mtk_eth *eth);
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-			     int speed, int duplex);
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-		      u32 phy_register, u32 write_data);
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
-void mt7620_handle_carrier(struct mtk_eth *eth);
-
-#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644
index 53767b1..0000000
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-
-#include <ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
-{
-	iowrite32(val, gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_w32);
-
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
-{
-	return ioread32(gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_r32);
-
-static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
-{
-	struct mtk_eth *eth = (struct mtk_eth *)_eth;
-	struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-	u32 reg, i;
-
-	reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
-
-	for (i = 0; i < 5; i++) {
-		unsigned int link;
-
-		if ((reg & BIT(i)) == 0)
-			continue;
-
-		link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
-
-		if (link == eth->link[i])
-			continue;
-
-		eth->link[i] = link;
-		if (link)
-			netdev_info(*eth->netdev,
-				    "port %d link up\n", i);
-		else
-			netdev_info(*eth->netdev,
-				    "port %d link down\n", i);
-	}
-
-	mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
-
-	return IRQ_HANDLED;
-}
-
-static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
-			   struct device_node *np)
-{
-	u32 i;
-	u32 val;
-
-	/* hardware reset the switch */
-	mtk_reset(eth, RST_CTRL_MCM);
-	mdelay(10);
-
-	/* reduce RGMII2 PAD driving strength */
-	rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
-
-	/* gpio mux - RGMII1=Normal mode */
-	rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
-
-	/* set GMAC1 RGMII mode */
-	rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
-
-	/* enable MDIO to control MT7530 */
-	rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
-
-	/* turn off all PHYs */
-	for (i = 0; i <= 4; i++) {
-		val = _mt7620_mii_read(gsw, i, 0x0);
-		val |= BIT(11);
-		_mt7620_mii_write(gsw, i, 0x0, val);
-	}
-
-	/* reset the switch */
-	mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
-			SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
-	usleep_range(10, 20);
-
-	if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
-		/* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-		mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
-		mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
-	} else {
-		/* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-		mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
-		mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
-	}
-
-	/* GE2, Link down */
-	mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
-
-	/* Enable Port 6, P5 as GMAC5, P5 disable */
-	val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
-	/* Enable Port 6 */
-	val &= ~MHWTRAP_P6_DIS;
-	/* Disable Port 5 */
-	val |= MHWTRAP_P5_DIS;
-	/* manual override of HW-Trap */
-	val |= MHWTRAP_MANUAL;
-	mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
-
-	val = rt_sysc_r32(SYSC_REG_CFG);
-	val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
-	if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
-		/* 40Mhz */
-
-		/* disable MT7530 core clock */
-		_mt7620_mii_write(gsw, 0, 13, 0x1f);
-		_mt7620_mii_write(gsw, 0, 14, 0x410);
-		_mt7620_mii_write(gsw, 0, 13, 0x401f);
-		_mt7620_mii_write(gsw, 0, 14, 0x0);
-
-		/* disable MT7530 PLL */
-		_mt7620_mii_write(gsw, 0, 13, 0x1f);
-		_mt7620_mii_write(gsw, 0, 14, 0x40d);
-		_mt7620_mii_write(gsw, 0, 13, 0x401f);
-		_mt7620_mii_write(gsw, 0, 14, 0x2020);
-
-		/* for MT7530 core clock = 500Mhz */
-		_mt7620_mii_write(gsw, 0, 13, 0x1f);
-		_mt7620_mii_write(gsw, 0, 14, 0x40e);
-		_mt7620_mii_write(gsw, 0, 13, 0x401f);
-		_mt7620_mii_write(gsw, 0, 14, 0x119);
-
-		/* enable MT7530 PLL */
-		_mt7620_mii_write(gsw, 0, 13, 0x1f);
-		_mt7620_mii_write(gsw, 0, 14, 0x40d);
-		_mt7620_mii_write(gsw, 0, 13, 0x401f);
-		_mt7620_mii_write(gsw, 0, 14, 0x2820);
-
-		usleep_range(20, 40);
-
-		/* enable MT7530 core clock */
-		_mt7620_mii_write(gsw, 0, 13, 0x1f);
-		_mt7620_mii_write(gsw, 0, 14, 0x410);
-		_mt7620_mii_write(gsw, 0, 13, 0x401f);
-	}
-
-	/* RGMII */
-	_mt7620_mii_write(gsw, 0, 14, 0x1);
-
-	/* set MT7530 central align */
-	mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
-	mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
-			MT7530_TRGMII_TXCTRL);
-	mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
-
-	/* delay setting for 10/1000M */
-	mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
-			P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
-	mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
-
-	/* lower Tx Driving*/
-	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
-	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
-	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
-	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
-	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
-	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
-
-	/* turn on all PHYs */
-	for (i = 0; i <= 4; i++) {
-		val = _mt7620_mii_read(gsw, i, 0);
-		val &= ~BIT(11);
-		_mt7620_mii_write(gsw, i, 0, val);
-	}
-
-#define MT7530_NUM_PORTS 8
-#define REG_ESW_PORT_PCR(x)    (0x2004 | ((x) << 8))
-#define REG_ESW_PORT_PVC(x)    (0x2010 | ((x) << 8))
-#define REG_ESW_PORT_PPBV1(x)  (0x2014 | ((x) << 8))
-#define MT7530_CPU_PORT                6
-
-	/* This is copied from mt7530_apply_config in libreCMC driver */
-	{
-		int i;
-
-		for (i = 0; i < MT7530_NUM_PORTS; i++)
-			mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
-
-		mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
-				0x00ff0000);
-
-		for (i = 0; i < MT7530_NUM_PORTS; i++)
-			mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
-	}
-
-	/* enable irq */
-	mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
-	mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-}
-
-static const struct of_device_id mediatek_gsw_match[] = {
-	{ .compatible = "mediatek,mt7621-gsw" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
-
-int mtk_gsw_init(struct mtk_eth *eth)
-{
-	struct device_node *np = eth->switch_np;
-	struct platform_device *pdev = of_find_device_by_node(np);
-	struct mt7620_gsw *gsw;
-
-	if (!pdev)
-		return -ENODEV;
-
-	if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
-		return -EINVAL;
-
-	gsw = platform_get_drvdata(pdev);
-	eth->sw_priv = gsw;
-
-	if (!gsw->irq)
-		return -EINVAL;
-
-	request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
-		    "gsw", eth);
-	disable_irq(gsw->irq);
-
-	mt7621_hw_init(eth, gsw, np);
-
-	enable_irq(gsw->irq);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mtk_gsw_init);
-
-static int mt7621_gsw_probe(struct platform_device *pdev)
-{
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	struct mt7620_gsw *gsw;
-
-	gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
-	if (!gsw)
-		return -ENOMEM;
-
-	gsw->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(gsw->base))
-		return PTR_ERR(gsw->base);
-
-	gsw->dev = &pdev->dev;
-	gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-
-	platform_set_drvdata(pdev, gsw);
-
-	return 0;
-}
-
-static int mt7621_gsw_remove(struct platform_device *pdev)
-{
-	platform_set_drvdata(pdev, NULL);
-
-	return 0;
-}
-
-static struct platform_driver gsw_driver = {
-	.probe = mt7621_gsw_probe,
-	.remove = mt7621_gsw_remove,
-	.driver = {
-		.name = "mt7621-gsw",
-		.of_match_table = mediatek_gsw_match,
-	},
-};
-
-module_platform_driver(gsw_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644
index 5fea6a4..0000000
--- a/drivers/staging/mt7621-eth/mdio.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-
-static int mtk_mdio_reset(struct mii_bus *bus)
-{
-	/* TODO */
-	return 0;
-}
-
-static void mtk_phy_link_adjust(struct net_device *dev)
-{
-	struct mtk_eth *eth = netdev_priv(dev);
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&eth->phy->lock, flags);
-	for (i = 0; i < 8; i++) {
-		if (eth->phy->phy_node[i]) {
-			struct phy_device *phydev = eth->phy->phy[i];
-			int status_change = 0;
-
-			if (phydev->link)
-				if (eth->phy->duplex[i] != phydev->duplex ||
-				    eth->phy->speed[i] != phydev->speed)
-					status_change = 1;
-
-			if (phydev->link != eth->link[i])
-				status_change = 1;
-
-			switch (phydev->speed) {
-			case SPEED_1000:
-			case SPEED_100:
-			case SPEED_10:
-				eth->link[i] = phydev->link;
-				eth->phy->duplex[i] = phydev->duplex;
-				eth->phy->speed[i] = phydev->speed;
-
-				if (status_change &&
-				    eth->soc->mdio_adjust_link)
-					eth->soc->mdio_adjust_link(eth, i);
-				break;
-			}
-		}
-	}
-	spin_unlock_irqrestore(&eth->phy->lock, flags);
-}
-
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-			 struct device_node *phy_node)
-{
-	const __be32 *_port = NULL;
-	struct phy_device *phydev;
-	int phy_mode, port;
-
-	_port = of_get_property(phy_node, "reg", NULL);
-
-	if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
-		pr_err("%pOFn: invalid port id\n", phy_node);
-		return -EINVAL;
-	}
-	port = be32_to_cpu(*_port);
-	phy_mode = of_get_phy_mode(phy_node);
-	if (phy_mode < 0) {
-		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
-		eth->phy->phy_node[port] = NULL;
-		return -EINVAL;
-	}
-
-	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
-				mtk_phy_link_adjust, 0, phy_mode);
-	if (!phydev) {
-		dev_err(eth->dev, "could not connect to PHY\n");
-		eth->phy->phy_node[port] = NULL;
-		return -ENODEV;
-	}
-
-	phydev->supported &= PHY_1000BT_FEATURES;
-	phydev->advertising = phydev->supported;
-
-	dev_info(eth->dev,
-		 "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
-		 port, phydev_name(phydev), phydev->phy_id,
-		 phydev->drv->name);
-
-	eth->phy->phy[port] = phydev;
-	eth->link[port] = 0;
-
-	return 0;
-}
-
-static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
-		     struct phy_device *phy)
-{
-	phy_attach(eth->netdev[mac->id], phydev_name(phy),
-		   PHY_INTERFACE_MODE_MII);
-
-	phy->autoneg = AUTONEG_ENABLE;
-	phy->speed = 0;
-	phy->duplex = 0;
-	phy_set_max_speed(phy, SPEED_100);
-	phy->advertising = phy->supported | ADVERTISED_Autoneg;
-
-	phy_start_aneg(phy);
-}
-
-static int mtk_phy_connect(struct mtk_mac *mac)
-{
-	struct mtk_eth *eth = mac->hw;
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		if (eth->phy->phy_node[i]) {
-			if (!mac->phy_dev) {
-				mac->phy_dev = eth->phy->phy[i];
-				mac->phy_flags = MTK_PHY_FLAG_PORT;
-			}
-		} else if (eth->mii_bus) {
-			struct phy_device *phy;
-
-			phy = mdiobus_get_phy(eth->mii_bus, i);
-			if (phy) {
-				phy_init(eth, mac, phy);
-				if (!mac->phy_dev) {
-					mac->phy_dev = phy;
-					mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void mtk_phy_disconnect(struct mtk_mac *mac)
-{
-	struct mtk_eth *eth = mac->hw;
-	unsigned long flags;
-	int i;
-
-	for (i = 0; i < 8; i++)
-		if (eth->phy->phy_fixed[i]) {
-			spin_lock_irqsave(&eth->phy->lock, flags);
-			eth->link[i] = 0;
-			if (eth->soc->mdio_adjust_link)
-				eth->soc->mdio_adjust_link(eth, i);
-			spin_unlock_irqrestore(&eth->phy->lock, flags);
-		} else if (eth->phy->phy[i]) {
-			phy_disconnect(eth->phy->phy[i]);
-		} else if (eth->mii_bus) {
-			struct phy_device *phy =
-				mdiobus_get_phy(eth->mii_bus, i);
-
-			if (phy)
-				phy_detach(phy);
-		}
-}
-
-static void mtk_phy_start(struct mtk_mac *mac)
-{
-	struct mtk_eth *eth = mac->hw;
-	unsigned long flags;
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		if (eth->phy->phy_fixed[i]) {
-			spin_lock_irqsave(&eth->phy->lock, flags);
-			eth->link[i] = 1;
-			if (eth->soc->mdio_adjust_link)
-				eth->soc->mdio_adjust_link(eth, i);
-			spin_unlock_irqrestore(&eth->phy->lock, flags);
-		} else if (eth->phy->phy[i]) {
-			phy_start(eth->phy->phy[i]);
-		}
-	}
-}
-
-static void mtk_phy_stop(struct mtk_mac *mac)
-{
-	struct mtk_eth *eth = mac->hw;
-	unsigned long flags;
-	int i;
-
-	for (i = 0; i < 8; i++)
-		if (eth->phy->phy_fixed[i]) {
-			spin_lock_irqsave(&eth->phy->lock, flags);
-			eth->link[i] = 0;
-			if (eth->soc->mdio_adjust_link)
-				eth->soc->mdio_adjust_link(eth, i);
-			spin_unlock_irqrestore(&eth->phy->lock, flags);
-		} else if (eth->phy->phy[i]) {
-			phy_stop(eth->phy->phy[i]);
-		}
-}
-
-static struct mtk_phy phy_ralink = {
-	.connect = mtk_phy_connect,
-	.disconnect = mtk_phy_disconnect,
-	.start = mtk_phy_start,
-	.stop = mtk_phy_stop,
-};
-
-int mtk_mdio_init(struct mtk_eth *eth)
-{
-	struct device_node *mii_np;
-	int err;
-
-	if (!eth->soc->mdio_read || !eth->soc->mdio_write)
-		return 0;
-
-	spin_lock_init(&phy_ralink.lock);
-	eth->phy = &phy_ralink;
-
-	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
-	if (!mii_np) {
-		dev_err(eth->dev, "no %s child node found", "mdio-bus");
-		return -ENODEV;
-	}
-
-	if (!of_device_is_available(mii_np)) {
-		err = 0;
-		goto err_put_node;
-	}
-
-	eth->mii_bus = mdiobus_alloc();
-	if (!eth->mii_bus) {
-		err = -ENOMEM;
-		goto err_put_node;
-	}
-
-	eth->mii_bus->name = "mdio";
-	eth->mii_bus->read = eth->soc->mdio_read;
-	eth->mii_bus->write = eth->soc->mdio_write;
-	eth->mii_bus->reset = mtk_mdio_reset;
-	eth->mii_bus->priv = eth;
-	eth->mii_bus->parent = eth->dev;
-
-	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
-	err = of_mdiobus_register(eth->mii_bus, mii_np);
-	if (err)
-		goto err_free_bus;
-
-	return 0;
-
-err_free_bus:
-	kfree(eth->mii_bus);
-err_put_node:
-	of_node_put(mii_np);
-	eth->mii_bus = NULL;
-	return err;
-}
-
-void mtk_mdio_cleanup(struct mtk_eth *eth)
-{
-	if (!eth->mii_bus)
-		return;
-
-	mdiobus_unregister(eth->mii_bus);
-	of_node_put(eth->mii_bus->dev.of_node);
-	kfree(eth->mii_bus);
-}
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644
index b14e238..0000000
--- a/drivers/staging/mt7621-eth/mdio.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_MDIO_H__
-#define _RALINK_MDIO_H__
-
-#ifdef CONFIG_NET_MEDIATEK_MDIO
-int mtk_mdio_init(struct mtk_eth *eth);
-void mtk_mdio_cleanup(struct mtk_eth *eth);
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-			 struct device_node *phy_node);
-#else
-static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
-static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
-#endif
-#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644
index ced605c..0000000
--- a/drivers/staging/mt7621-eth/mdio_mt7620.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
-{
-	unsigned long t_start = jiffies;
-
-	while (1) {
-		if (!(mtk_switch_r32(gsw,
-				     gsw->piac_offset + MT7620_GSW_REG_PIAC) &
-				     GSW_MDIO_ACCESS))
-			return 0;
-		if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
-			break;
-	}
-
-	dev_err(gsw->dev, "mdio: MDIO timeout\n");
-	return -1;
-}
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-		      u32 phy_register, u32 write_data)
-{
-	if (mt7620_mii_busy_wait(gsw))
-		return -1;
-
-	write_data &= 0xffff;
-
-	mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
-		(phy_register << GSW_MDIO_REG_SHIFT) |
-		(phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
-		MT7620_GSW_REG_PIAC);
-
-	if (mt7620_mii_busy_wait(gsw))
-		return -1;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_write);
-
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
-{
-	u32 d;
-
-	if (mt7620_mii_busy_wait(gsw))
-		return 0xffff;
-
-	mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
-		(phy_reg << GSW_MDIO_REG_SHIFT) |
-		(phy_addr << GSW_MDIO_ADDR_SHIFT),
-		MT7620_GSW_REG_PIAC);
-
-	if (mt7620_mii_busy_wait(gsw))
-		return 0xffff;
-
-	d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
-
-	return d;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_read);
-
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
-{
-	struct mtk_eth *eth = bus->priv;
-	struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-	return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
-}
-
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
-{
-	struct mtk_eth *eth = bus->priv;
-	struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-	return _mt7620_mii_read(gsw, phy_addr, phy_reg);
-}
-
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
-{
-	_mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-	_mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf,  val & 0xffff);
-	_mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
-
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
-{
-	u16 high, low;
-
-	_mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-	low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
-	high = _mt7620_mii_read(gsw, 0x1f, 0x10);
-
-	return (high << 16) | (low & 0xffff);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
-
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
-{
-	u32 val = mt7530_mdio_r32(gsw, reg);
-
-	val &= ~mask;
-	val |= set;
-	mt7530_mdio_w32(gsw, reg, val);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
-
-static unsigned char *mtk_speed_str(int speed)
-{
-	switch (speed) {
-	case 2:
-	case SPEED_1000:
-		return "1000";
-	case 1:
-	case SPEED_100:
-		return "100";
-	case 0:
-	case SPEED_10:
-		return "10";
-	}
-
-	return "? ";
-}
-
-int mt7620_has_carrier(struct mtk_eth *eth)
-{
-	struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-	int i;
-
-	for (i = 0; i < GSW_PORT6; i++)
-		if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
-			return 1;
-	return 0;
-}
-
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-			     int speed, int duplex)
-{
-	struct mt7620_gsw *gsw = eth->sw_priv;
-
-	if (link)
-		dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
-			 port, mtk_speed_str(speed),
-			 (duplex) ? "Full" : "Half");
-	else
-		dev_info(gsw->dev, "port %d link down\n", port);
-}
-
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
-{
-	mt7620_print_link_state(eth, port, eth->link[port],
-				eth->phy->speed[port],
-				(eth->phy->duplex[port] == DUPLEX_FULL));
-}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644
index 6027b19..0000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ /dev/null
@@ -1,2176 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/clk.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/if_vlan.h>
-#include <linux/reset.h>
-#include <linux/tcp.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-#include "ethtool.h"
-
-#define	MAX_RX_LENGTH		1536
-#define MTK_RX_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
-#define DMA_DUMMY_DESC		0xffffffff
-#define MTK_DEFAULT_MSG_ENABLE \
-		(NETIF_MSG_DRV | \
-		NETIF_MSG_PROBE | \
-		NETIF_MSG_LINK | \
-		NETIF_MSG_TIMER | \
-		NETIF_MSG_IFDOWN | \
-		NETIF_MSG_IFUP | \
-		NETIF_MSG_RX_ERR | \
-		NETIF_MSG_TX_ERR)
-
-#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
-#define NEXT_TX_DESP_IDX(X)	(((X) + 1) & (ring->tx_ring_size - 1))
-#define NEXT_RX_DESP_IDX(X)	(((X) + 1) & (ring->rx_ring_size - 1))
-
-#define SYSC_REG_RSTCTRL	0x34
-
-static int mtk_msg_level = -1;
-module_param_named(msg_level, mtk_msg_level, int, 0);
-MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
-
-static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
-	[MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
-	[MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
-	[MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
-	[MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
-	[MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
-	[MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
-	[MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
-	[MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
-	[MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
-	[MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
-	[MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
-	[MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
-	[MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
-	[MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
-	[MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
-	[MTK_REG_MTK_RST_GL] = MTK_RST_GL,
-};
-
-static const u16 *mtk_reg_table = mtk_reg_table_default;
-
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
-{
-	__raw_writel(val, eth->base + reg);
-}
-
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
-{
-	return __raw_readl(eth->base + reg);
-}
-
-static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
-{
-	mtk_w32(eth, val, mtk_reg_table[reg]);
-}
-
-static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
-{
-	return mtk_r32(eth, mtk_reg_table[reg]);
-}
-
-/* these bits are also exposed via the reset-controller API. However, the switch
- * and FE need to be brought out of reset at the exact same moment, and the
- * reset-controller API does not provide this feature yet. Do the reset manually
- * until we fix the reset-controller API to be able to do this.
- */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
-{
-	u32 val;
-
-	regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
-	val |= reset_bits;
-	regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-	usleep_range(10, 20);
-	val &= ~reset_bits;
-	regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-	usleep_range(10, 20);
-}
-EXPORT_SYMBOL(mtk_reset);
-
-static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
-{
-	if (eth->soc->dma_type & MTK_PDMA)
-		mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
-	if (eth->soc->dma_type & MTK_QDMA)
-		mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
-}
-
-static inline u32 mtk_irq_pending(struct mtk_eth *eth)
-{
-	u32 status = 0;
-
-	if (eth->soc->dma_type & MTK_PDMA)
-		status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
-	if (eth->soc->dma_type & MTK_QDMA)
-		status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
-
-	return status;
-}
-
-static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
-{
-	u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-	if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-		status_reg = MTK_REG_MTK_INT_STATUS2;
-
-	mtk_reg_w32(eth, mask, status_reg);
-}
-
-static u32 mtk_irq_pending_status(struct mtk_eth *eth)
-{
-	u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-	if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-		status_reg = MTK_REG_MTK_INT_STATUS2;
-
-	return mtk_reg_r32(eth, status_reg);
-}
-
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
-{
-	u32 val;
-
-	if (eth->soc->dma_type & MTK_PDMA) {
-		val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-		mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
-		/* flush write */
-		mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-	}
-	if (eth->soc->dma_type & MTK_QDMA) {
-		val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-		mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
-		/* flush write */
-		mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-	}
-}
-
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
-{
-	u32 val;
-
-	if (eth->soc->dma_type & MTK_PDMA) {
-		val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-		mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
-		/* flush write */
-		mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-	}
-	if (eth->soc->dma_type & MTK_QDMA) {
-		val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-		mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
-		/* flush write */
-		mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-	}
-}
-
-static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
-{
-	u32 enabled = 0;
-
-	if (eth->soc->dma_type & MTK_PDMA)
-		enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-	if (eth->soc->dma_type & MTK_QDMA)
-		enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-
-	return enabled;
-}
-
-static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
-				      unsigned char *macaddr)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&mac->hw->page_lock, flags);
-	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
-	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
-		(macaddr[4] << 8) | macaddr[5],
-		MTK_GDMA1_MAC_ADRL);
-	spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static int mtk_set_mac_address(struct net_device *dev, void *p)
-{
-	int ret = eth_mac_addr(dev, p);
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-
-	if (ret)
-		return ret;
-
-	if (eth->soc->set_mac)
-		eth->soc->set_mac(mac, dev->dev_addr);
-	else
-		mtk_hw_set_macaddr(mac, p);
-
-	return 0;
-}
-
-static inline int mtk_max_frag_size(int mtu)
-{
-	/* make sure buf_size will be at least MAX_RX_LENGTH */
-	if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
-		mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
-
-	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
-static inline int mtk_max_buf_size(int frag_size)
-{
-	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
-		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-	WARN_ON(buf_size < MAX_RX_LENGTH);
-
-	return buf_size;
-}
-
-static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
-			       struct mtk_rx_dma *dma_rxd)
-{
-	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
-	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
-	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
-	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-}
-
-static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
-				    struct mtk_tx_dma *dma_txd)
-{
-	WRITE_ONCE(dma_txd->txd1, txd->txd1);
-	WRITE_ONCE(dma_txd->txd3, txd->txd3);
-	WRITE_ONCE(dma_txd->txd4, txd->txd4);
-	/* clean dma done flag last */
-	WRITE_ONCE(dma_txd->txd2, txd->txd2);
-}
-
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-	int i;
-
-	if (ring->rx_data && ring->rx_dma) {
-		for (i = 0; i < ring->rx_ring_size; i++) {
-			if (!ring->rx_data[i])
-				continue;
-			if (!ring->rx_dma[i].rxd1)
-				continue;
-			dma_unmap_single(eth->dev,
-					 ring->rx_dma[i].rxd1,
-					 ring->rx_buf_size,
-					 DMA_FROM_DEVICE);
-			skb_free_frag(ring->rx_data[i]);
-		}
-		kfree(ring->rx_data);
-		ring->rx_data = NULL;
-	}
-
-	if (ring->rx_dma) {
-		dma_free_coherent(eth->dev,
-				  ring->rx_ring_size * sizeof(*ring->rx_dma),
-				  ring->rx_dma,
-				  ring->rx_phys);
-		ring->rx_dma = NULL;
-	}
-}
-
-static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-	int i, pad = 0;
-
-	ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-	ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
-	ring->rx_ring_size = eth->soc->dma_ring_size;
-	ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
-				GFP_KERNEL);
-	if (!ring->rx_data)
-		goto no_rx_mem;
-
-	for (i = 0; i < ring->rx_ring_size; i++) {
-		ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
-		if (!ring->rx_data[i])
-			goto no_rx_mem;
-	}
-
-	ring->rx_dma =
-		dma_alloc_coherent(eth->dev,
-				   ring->rx_ring_size * sizeof(*ring->rx_dma),
-				   &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
-	if (!ring->rx_dma)
-		goto no_rx_mem;
-
-	if (!eth->soc->rx_2b_offset)
-		pad = NET_IP_ALIGN;
-
-	for (i = 0; i < ring->rx_ring_size; i++) {
-		dma_addr_t dma_addr = dma_map_single(eth->dev,
-				ring->rx_data[i] + NET_SKB_PAD + pad,
-				ring->rx_buf_size,
-				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-			goto no_rx_mem;
-		ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
-
-		if (eth->soc->rx_sg_dma)
-			ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-		else
-			ring->rx_dma[i].rxd2 = RX_DMA_LSO;
-	}
-	ring->rx_calc_idx = ring->rx_ring_size - 1;
-	/* make sure that all changes to the dma ring are flushed before we
-	 * continue
-	 */
-	wmb();
-
-	return 0;
-
-no_rx_mem:
-	return -ENOMEM;
-}
-
-static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
-{
-	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
-				 dma_unmap_addr(tx_buf, dma_addr0),
-				 dma_unmap_len(tx_buf, dma_len0),
-				 DMA_TO_DEVICE);
-	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
-			       dma_unmap_addr(tx_buf, dma_addr0),
-			       dma_unmap_len(tx_buf, dma_len0),
-			       DMA_TO_DEVICE);
-	}
-	if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
-		dma_unmap_page(dev,
-			       dma_unmap_addr(tx_buf, dma_addr1),
-			       dma_unmap_len(tx_buf, dma_len1),
-			       DMA_TO_DEVICE);
-
-	tx_buf->flags = 0;
-	if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
-		dev_kfree_skb_any(tx_buf->skb);
-	tx_buf->skb = NULL;
-}
-
-static void mtk_pdma_tx_clean(struct mtk_eth *eth)
-{
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-	int i;
-
-	if (ring->tx_buf) {
-		for (i = 0; i < ring->tx_ring_size; i++)
-			mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-		kfree(ring->tx_buf);
-		ring->tx_buf = NULL;
-	}
-
-	if (ring->tx_dma) {
-		dma_free_coherent(eth->dev,
-				  ring->tx_ring_size * sizeof(*ring->tx_dma),
-				  ring->tx_dma,
-				  ring->tx_phys);
-		ring->tx_dma = NULL;
-	}
-}
-
-static void mtk_qdma_tx_clean(struct mtk_eth *eth)
-{
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-	int i;
-
-	if (ring->tx_buf) {
-		for (i = 0; i < ring->tx_ring_size; i++)
-			mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-		kfree(ring->tx_buf);
-		ring->tx_buf = NULL;
-	}
-
-	if (ring->tx_dma) {
-		dma_free_coherent(eth->dev,
-				  ring->tx_ring_size * sizeof(*ring->tx_dma),
-				  ring->tx_dma,
-				  ring->tx_phys);
-		ring->tx_dma = NULL;
-	}
-}
-
-void mtk_stats_update_mac(struct mtk_mac *mac)
-{
-	struct mtk_hw_stats *hw_stats = mac->hw_stats;
-	unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-	u64 stats;
-
-	base += hw_stats->reg_offset;
-
-	u64_stats_update_begin(&hw_stats->syncp);
-
-	if (mac->hw->soc->new_stats) {
-		hw_stats->rx_bytes += mtk_r32(mac->hw, base);
-		stats =  mtk_r32(mac->hw, base + 0x04);
-		if (stats)
-			hw_stats->rx_bytes += (stats << 32);
-		hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
-		hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
-		hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
-		hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
-		hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
-		hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
-		hw_stats->rx_flow_control_packets +=
-						mtk_r32(mac->hw, base + 0x24);
-		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
-		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
-		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
-		stats =  mtk_r32(mac->hw, base + 0x34);
-		if (stats)
-			hw_stats->tx_bytes += (stats << 32);
-		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
-	} else {
-		hw_stats->tx_bytes += mtk_r32(mac->hw, base);
-		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
-		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
-		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
-		hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
-		hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
-		hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
-		hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
-		hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
-		hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
-		hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
-		hw_stats->rx_flow_control_packets +=
-						mtk_r32(mac->hw, base + 0x3c);
-	}
-
-	u64_stats_update_end(&hw_stats->syncp);
-}
-
-static void mtk_get_stats64(struct net_device *dev,
-			    struct rtnl_link_stats64 *storage)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_hw_stats *hw_stats = mac->hw_stats;
-	unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-	unsigned int start;
-
-	if (!base) {
-		netdev_stats_to_stats64(storage, &dev->stats);
-		return;
-	}
-
-	if (netif_running(dev) && netif_device_present(dev)) {
-		if (spin_trylock(&hw_stats->stats_lock)) {
-			mtk_stats_update_mac(mac);
-			spin_unlock(&hw_stats->stats_lock);
-		}
-	}
-
-	do {
-		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
-		storage->rx_packets = hw_stats->rx_packets;
-		storage->tx_packets = hw_stats->tx_packets;
-		storage->rx_bytes = hw_stats->rx_bytes;
-		storage->tx_bytes = hw_stats->tx_bytes;
-		storage->collisions = hw_stats->tx_collisions;
-		storage->rx_length_errors = hw_stats->rx_short_errors +
-			hw_stats->rx_long_errors;
-		storage->rx_over_errors = hw_stats->rx_overflow;
-		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
-		storage->rx_errors = hw_stats->rx_checksum_errors;
-		storage->tx_aborted_errors = hw_stats->tx_skip;
-	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-
-	storage->tx_errors = dev->stats.tx_errors;
-	storage->rx_dropped = dev->stats.rx_dropped;
-	storage->tx_dropped = dev->stats.tx_dropped;
-}
-
-static int mtk_vlan_rx_add_vid(struct net_device *dev,
-			       __be16 proto, u16 vid)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	u32 idx = (vid & 0xf);
-	u32 vlan_cfg;
-
-	if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-		return 0;
-
-	if (test_bit(idx, &eth->vlan_map)) {
-		netdev_warn(dev, "disable tx vlan offload\n");
-		dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-		netdev_update_features(dev);
-	} else {
-		vlan_cfg = mtk_r32(eth,
-				   mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-				   ((idx >> 1) << 2));
-		if (idx & 0x1) {
-			vlan_cfg &= 0xffff;
-			vlan_cfg |= (vid << 16);
-		} else {
-			vlan_cfg &= 0xffff0000;
-			vlan_cfg |= vid;
-		}
-		mtk_w32(eth,
-			vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-			((idx >> 1) << 2));
-		set_bit(idx, &eth->vlan_map);
-	}
-
-	return 0;
-}
-
-static int mtk_vlan_rx_kill_vid(struct net_device *dev,
-				__be16 proto, u16 vid)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	u32 idx = (vid & 0xf);
-
-	if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-		return 0;
-
-	clear_bit(idx, &eth->vlan_map);
-
-	return 0;
-}
-
-static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
-{
-	barrier();
-	return (u32)(ring->tx_ring_size -
-		     ((ring->tx_next_idx - ring->tx_free_idx) &
-		      (ring->tx_ring_size - 1)));
-}
-
-static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
-{
-	unsigned int len;
-	int ret;
-
-	if (unlikely(skb->len >= VLAN_ETH_ZLEN))
-		return 0;
-
-	if (eth->soc->padding_64b && !eth->soc->padding_bug)
-		return 0;
-
-	if (skb_vlan_tag_present(skb))
-		len = ETH_ZLEN;
-	else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-		len = VLAN_ETH_ZLEN;
-	else if (!eth->soc->padding_64b)
-		len = ETH_ZLEN;
-	else
-		return 0;
-
-	if (skb->len >= len)
-		return 0;
-
-	ret = skb_pad(skb, len - skb->len);
-	if (ret < 0)
-		return ret;
-	skb->len = len;
-	skb_set_tail_pointer(skb, len);
-
-	return ret;
-}
-
-static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-			   int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	struct skb_frag_struct *frag;
-	struct mtk_tx_dma txd, *ptxd;
-	struct mtk_tx_buf *tx_buf;
-	int i, j, k, frag_size, frag_map_size, offset;
-	dma_addr_t mapped_addr;
-	unsigned int nr_frags;
-	u32 def_txd4;
-
-	if (mtk_skb_padto(skb, eth)) {
-		netif_warn(eth, tx_err, dev, "tx padding failed!\n");
-		return -1;
-	}
-
-	tx_buf = &ring->tx_buf[ring->tx_next_idx];
-	memset(tx_buf, 0, sizeof(*tx_buf));
-	memset(&txd, 0, sizeof(txd));
-	nr_frags = skb_shinfo(skb)->nr_frags;
-
-	/* init tx descriptor */
-	def_txd4 = eth->soc->txd4;
-	txd.txd4 = def_txd4;
-
-	if (eth->soc->mac_count > 1)
-		txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-	if (gso)
-		txd.txd4 |= TX_DMA_TSO;
-
-	/* TX Checksum offload */
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		txd.txd4 |= TX_DMA_CHKSUM;
-
-	/* VLAN header offload */
-	if (skb_vlan_tag_present(skb)) {
-		u16 tag = skb_vlan_tag_get(skb);
-
-		txd.txd4 |= TX_DMA_INS_VLAN |
-			((tag >> VLAN_PRIO_SHIFT) << 4) |
-			(tag & 0xF);
-	}
-
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
-				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-		return -1;
-
-	txd.txd1 = mapped_addr;
-	txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
-
-	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-	/* TX SG offload */
-	j = ring->tx_next_idx;
-	k = 0;
-	for (i = 0; i < nr_frags; i++) {
-		offset = 0;
-		frag = &skb_shinfo(skb)->frags[i];
-		frag_size = skb_frag_size(frag);
-
-		while (frag_size > 0) {
-			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-						       frag_map_size,
-						       DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-				goto err_dma;
-
-			if (k & 0x1) {
-				j = NEXT_TX_DESP_IDX(j);
-				txd.txd1 = mapped_addr;
-				txd.txd2 = TX_DMA_PLEN0(frag_map_size);
-				txd.txd4 = def_txd4;
-
-				tx_buf = &ring->tx_buf[j];
-				memset(tx_buf, 0, sizeof(*tx_buf));
-
-				tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-				dma_unmap_addr_set(tx_buf, dma_addr0,
-						   mapped_addr);
-				dma_unmap_len_set(tx_buf, dma_len0,
-						  frag_map_size);
-			} else {
-				txd.txd3 = mapped_addr;
-				txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
-
-				tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-				tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
-				dma_unmap_addr_set(tx_buf, dma_addr1,
-						   mapped_addr);
-				dma_unmap_len_set(tx_buf, dma_len1,
-						  frag_map_size);
-
-				if (!((i == (nr_frags - 1)) &&
-				      (frag_map_size == frag_size))) {
-					mtk_set_txd_pdma(&txd,
-							 &ring->tx_dma[j]);
-					memset(&txd, 0, sizeof(txd));
-				}
-			}
-			frag_size -= frag_map_size;
-			offset += frag_map_size;
-			k++;
-		}
-	}
-
-	/* set last segment */
-	if (k & 0x1)
-		txd.txd2 |= TX_DMA_LS1;
-	else
-		txd.txd2 |= TX_DMA_LS0;
-	mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
-
-	/* store skb to cleanup */
-	tx_buf->skb = skb;
-
-	netdev_sent_queue(dev, skb->len);
-	skb_tx_timestamp(skb);
-
-	ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
-	/* make sure that all changes to the dma ring are flushed before we
-	 * continue
-	 */
-	wmb();
-	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-		mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
-
-	return 0;
-
-err_dma:
-	j = ring->tx_next_idx;
-	for (i = 0; i < tx_num; i++) {
-		ptxd = &ring->tx_dma[j];
-		tx_buf = &ring->tx_buf[j];
-
-		/* unmap dma */
-		mtk_txd_unmap(&dev->dev, tx_buf);
-
-		ptxd->txd2 = TX_DMA_DESP2_DEF;
-		j = NEXT_TX_DESP_IDX(j);
-	}
-	/* make sure that all changes to the dma ring are flushed before we
-	 * continue
-	 */
-	wmb();
-	return -1;
-}
-
-/* the qdma core needs scratch memory to be set up */
-static int mtk_init_fq_dma(struct mtk_eth *eth)
-{
-	dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
-	int cnt = eth->soc->dma_ring_size;
-	int i;
-
-	eth->scratch_ring = dma_alloc_coherent(eth->dev,
-					       cnt * sizeof(struct mtk_tx_dma),
-					       &phy_ring_head,
-					       GFP_ATOMIC | __GFP_ZERO);
-	if (unlikely(!eth->scratch_ring))
-		return -ENOMEM;
-
-	eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
-				    GFP_KERNEL);
-	dma_addr = dma_map_single(eth->dev,
-				  eth->scratch_head, cnt * QDMA_PAGE_SIZE,
-				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-		return -ENOMEM;
-
-	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-	phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
-
-	for (i = 0; i < cnt; i++) {
-		eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
-		if (i < cnt - 1)
-			eth->scratch_ring[i].txd2 = (phy_ring_head +
-				((i + 1) * sizeof(struct mtk_tx_dma)));
-		eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
-	}
-
-	mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
-	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
-	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
-	mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
-
-	return 0;
-}
-
-static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
-{
-	void *ret = ring->tx_dma;
-
-	return ret + (desc - ring->tx_phys);
-}
-
-static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
-					   struct mtk_tx_dma *txd)
-{
-	return mtk_qdma_phys_to_virt(ring, txd->txd2);
-}
-
-static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-					     struct mtk_tx_dma *txd)
-{
-	int idx = txd - ring->tx_dma;
-
-	return &ring->tx_buf[idx];
-}
-
-static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-			   int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	struct mtk_tx_dma *itxd, *txd;
-	struct mtk_tx_buf *tx_buf;
-	dma_addr_t mapped_addr;
-	unsigned int nr_frags;
-	int i, n_desc = 1;
-	u32 txd4 = eth->soc->txd4;
-
-	itxd = ring->tx_next_free;
-	if (itxd == ring->tx_last_free)
-		return -ENOMEM;
-
-	if (eth->soc->mac_count > 1)
-		txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-	memset(tx_buf, 0, sizeof(*tx_buf));
-
-	if (gso)
-		txd4 |= TX_DMA_TSO;
-
-	/* TX Checksum offload */
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		txd4 |= TX_DMA_CHKSUM;
-
-	/* VLAN header offload */
-	if (skb_vlan_tag_present(skb))
-		txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
-
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
-				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-		return -ENOMEM;
-
-	WRITE_ONCE(itxd->txd1, mapped_addr);
-	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-	/* TX SG offload */
-	txd = itxd;
-	nr_frags = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < nr_frags; i++) {
-		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-		unsigned int offset = 0;
-		int frag_size = skb_frag_size(frag);
-
-		while (frag_size) {
-			bool last_frag = false;
-			unsigned int frag_map_size;
-
-			txd = mtk_tx_next_qdma(ring, txd);
-			if (txd == ring->tx_last_free)
-				goto err_dma;
-
-			n_desc++;
-			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-						       frag_map_size,
-						       DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-				goto err_dma;
-
-			if (i == nr_frags - 1 &&
-			    (frag_size - frag_map_size) == 0)
-				last_frag = true;
-
-			WRITE_ONCE(txd->txd1, mapped_addr);
-			WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
-					       TX_DMA_PLEN0(frag_map_size) |
-					       last_frag * TX_DMA_LS0) |
-					       mac->id);
-			WRITE_ONCE(txd->txd4, 0);
-
-			tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-			tx_buf = mtk_desc_to_tx_buf(ring, txd);
-			memset(tx_buf, 0, sizeof(*tx_buf));
-
-			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
-			frag_size -= frag_map_size;
-			offset += frag_map_size;
-		}
-	}
-
-	/* store skb to cleanup */
-	tx_buf->skb = skb;
-
-	WRITE_ONCE(itxd->txd4, txd4);
-	WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
-				(!nr_frags * TX_DMA_LS0)));
-
-	netdev_sent_queue(dev, skb->len);
-	skb_tx_timestamp(skb);
-
-	ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
-	atomic_sub(n_desc, &ring->tx_free_count);
-
-	/* make sure that all changes to the dma ring are flushed before we
-	 * continue
-	 */
-	wmb();
-
-	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-
-	return 0;
-
-err_dma:
-	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, txd);
-
-		/* unmap dma */
-		mtk_txd_unmap(&dev->dev, tx_buf);
-
-		itxd->txd3 = TX_DMA_DESP2_DEF;
-		itxd = mtk_tx_next_qdma(ring, itxd);
-	} while (itxd != txd);
-
-	return -ENOMEM;
-}
-
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
-{
-	int i, nfrags;
-	struct skb_frag_struct *frag;
-
-	nfrags = 1;
-	if (skb_is_gso(skb)) {
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			frag = &skb_shinfo(skb)->frags[i];
-			nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
-		}
-	} else {
-		nfrags += skb_shinfo(skb)->nr_frags;
-	}
-
-	return DIV_ROUND_UP(nfrags, 2);
-}
-
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-	struct net_device_stats *stats = &dev->stats;
-	int tx_num;
-	int len = skb->len;
-	bool gso = false;
-
-	tx_num = mtk_cal_txd_req(skb);
-	if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
-		netif_stop_queue(dev);
-		netif_err(eth, tx_queued, dev,
-			  "Tx Ring full when queue awake!\n");
-		return NETDEV_TX_BUSY;
-	}
-
-	/* TSO: fill MSS info in tcp checksum field */
-	if (skb_is_gso(skb)) {
-		if (skb_cow_head(skb, 0)) {
-			netif_warn(eth, tx_err, dev,
-				   "GSO expand head fail.\n");
-			goto drop;
-		}
-
-		if (skb_shinfo(skb)->gso_type &
-				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-			gso = true;
-			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
-		}
-	}
-
-	if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
-		goto drop;
-
-	stats->tx_packets++;
-	stats->tx_bytes += len;
-
-	if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
-		netif_stop_queue(dev);
-		smp_mb();
-		if (unlikely(atomic_read(&ring->tx_free_count) >
-			     ring->tx_thresh))
-			netif_wake_queue(dev);
-	}
-
-	return NETDEV_TX_OK;
-
-drop:
-	stats->tx_dropped++;
-	dev_kfree_skb(skb);
-	return NETDEV_TX_OK;
-}
-
-static int mtk_poll_rx(struct napi_struct *napi, int budget,
-		       struct mtk_eth *eth, u32 rx_intr)
-{
-	struct mtk_soc_data *soc = eth->soc;
-	struct mtk_rx_ring *ring = &eth->rx_ring[0];
-	int idx = ring->rx_calc_idx;
-	u32 checksum_bit;
-	struct sk_buff *skb;
-	u8 *data, *new_data;
-	struct mtk_rx_dma *rxd, trxd;
-	int done = 0, pad;
-
-	if (eth->soc->hw_features & NETIF_F_RXCSUM)
-		checksum_bit = soc->checksum_bit;
-	else
-		checksum_bit = 0;
-
-	if (eth->soc->rx_2b_offset)
-		pad = 0;
-	else
-		pad = NET_IP_ALIGN;
-
-	while (done < budget) {
-		struct net_device *netdev;
-		unsigned int pktlen;
-		dma_addr_t dma_addr;
-		int mac = 0;
-
-		idx = NEXT_RX_DESP_IDX(idx);
-		rxd = &ring->rx_dma[idx];
-		data = ring->rx_data[idx];
-
-		mtk_get_rxd(&trxd, rxd);
-		if (!(trxd.rxd2 & RX_DMA_DONE))
-			break;
-
-		/* find out which mac the packet comes from. values start at 1 */
-		if (eth->soc->mac_count > 1) {
-			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
-			      RX_DMA_FPORT_MASK;
-			mac--;
-			if (mac < 0 || mac >= eth->soc->mac_count)
-				goto release_desc;
-		}
-
-		netdev = eth->netdev[mac];
-
-		/* alloc new buffer */
-		new_data = napi_alloc_frag(ring->frag_size);
-		if (unlikely(!new_data || !netdev)) {
-			netdev->stats.rx_dropped++;
-			goto release_desc;
-		}
-		dma_addr = dma_map_single(&netdev->dev,
-					  new_data + NET_SKB_PAD + pad,
-					  ring->rx_buf_size,
-					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
-			skb_free_frag(new_data);
-			goto release_desc;
-		}
-
-		/* receive data */
-		skb = build_skb(data, ring->frag_size);
-		if (unlikely(!skb)) {
-			put_page(virt_to_head_page(new_data));
-			goto release_desc;
-		}
-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
-				 ring->rx_buf_size, DMA_FROM_DEVICE);
-		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
-		skb->dev = netdev;
-		skb_put(skb, pktlen);
-		if (trxd.rxd4 & checksum_bit)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-		else
-			skb_checksum_none_assert(skb);
-		skb->protocol = eth_type_trans(skb, netdev);
-
-		netdev->stats.rx_packets++;
-		netdev->stats.rx_bytes += pktlen;
-
-		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-		    RX_DMA_VID(trxd.rxd3))
-			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-					       RX_DMA_VID(trxd.rxd3));
-		napi_gro_receive(napi, skb);
-
-		ring->rx_data[idx] = new_data;
-		rxd->rxd1 = (unsigned int)dma_addr;
-
-release_desc:
-		if (eth->soc->rx_sg_dma)
-			rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-		else
-			rxd->rxd2 = RX_DMA_LSO;
-
-		ring->rx_calc_idx = idx;
-		/* make sure that all changes to the dma ring are flushed before
-		 * we continue
-		 */
-		wmb();
-		if (eth->soc->dma_type == MTK_QDMA)
-			mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
-		else
-			mtk_reg_w32(eth, ring->rx_calc_idx,
-				    MTK_REG_RX_CALC_IDX0);
-		done++;
-	}
-
-	if (done < budget)
-		mtk_irq_ack(eth, rx_intr);
-
-	return done;
-}
-
-static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-	struct sk_buff *skb;
-	struct mtk_tx_buf *tx_buf;
-	int done = 0;
-	u32 idx, hwidx;
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-	unsigned int bytes = 0;
-
-	idx = ring->tx_free_idx;
-	hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
-
-	while ((idx != hwidx) && budget) {
-		tx_buf = &ring->tx_buf[idx];
-		skb = tx_buf->skb;
-
-		if (!skb)
-			break;
-
-		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-			bytes += skb->len;
-			done++;
-			budget--;
-		}
-		mtk_txd_unmap(eth->dev, tx_buf);
-		idx = NEXT_TX_DESP_IDX(idx);
-	}
-	ring->tx_free_idx = idx;
-	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-	/* read the hw index again to make sure no new tx packets arrived */
-	if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
-		*tx_again = 1;
-
-	if (done)
-		netdev_completed_queue(*eth->netdev, done, bytes);
-
-	return done;
-}
-
-static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-	struct mtk_tx_dma *desc;
-	struct sk_buff *skb;
-	struct mtk_tx_buf *tx_buf;
-	int total = 0, done[MTK_MAX_DEVS];
-	unsigned int bytes[MTK_MAX_DEVS];
-	u32 cpu, dma;
-	int i;
-
-	memset(done, 0, sizeof(done));
-	memset(bytes, 0, sizeof(bytes));
-
-	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
-	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
-
-	desc = mtk_qdma_phys_to_virt(ring, cpu);
-
-	while ((cpu != dma) && budget) {
-		u32 next_cpu = desc->txd2;
-		int mac;
-
-		desc = mtk_tx_next_qdma(ring, desc);
-		if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
-			break;
-
-		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-		       TX_DMA_FPORT_MASK;
-		mac--;
-
-		tx_buf = mtk_desc_to_tx_buf(ring, desc);
-		skb = tx_buf->skb;
-		if (!skb)
-			break;
-
-		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-			bytes[mac] += skb->len;
-			done[mac]++;
-			budget--;
-		}
-		mtk_txd_unmap(eth->dev, tx_buf);
-
-		ring->tx_last_free->txd2 = next_cpu;
-		ring->tx_last_free = desc;
-		atomic_inc(&ring->tx_free_count);
-
-		cpu = next_cpu;
-	}
-
-	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
-
-	/* read the hw index again to make sure no new tx packets arrived */
-	if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
-		*tx_again = true;
-
-	for (i = 0; i < eth->soc->mac_count; i++) {
-		if (!done[i])
-			continue;
-		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
-		total += done[i];
-	}
-
-	return total;
-}
-
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
-		       bool *tx_again)
-{
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-	struct net_device *netdev = eth->netdev[0];
-	int done;
-
-	done = eth->tx_ring.tx_poll(eth, budget, tx_again);
-	if (!*tx_again)
-		mtk_irq_ack(eth, tx_intr);
-
-	if (!done)
-		return 0;
-
-	smp_mb();
-	if (unlikely(!netif_queue_stopped(netdev)))
-		return done;
-
-	if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
-		netif_wake_queue(netdev);
-
-	return done;
-}
-
-static void mtk_stats_update(struct mtk_eth *eth)
-{
-	int i;
-
-	for (i = 0; i < eth->soc->mac_count; i++) {
-		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
-			continue;
-		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
-			mtk_stats_update_mac(eth->mac[i]);
-			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
-		}
-	}
-}
-
-static int mtk_poll(struct napi_struct *napi, int budget)
-{
-	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
-	u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
-	int tx_done, rx_done;
-	bool tx_again = false;
-
-	status = mtk_irq_pending(eth);
-	mtk_status = mtk_irq_pending_status(eth);
-	tx_intr = eth->soc->tx_int;
-	rx_intr = eth->soc->rx_int;
-	status_intr = eth->soc->status_int;
-	tx_done = 0;
-	rx_done = 0;
-	tx_again = 0;
-
-	if (status & tx_intr)
-		tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
-
-	if (status & rx_intr)
-		rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
-
-	if (unlikely(mtk_status & status_intr)) {
-		mtk_stats_update(eth);
-		mtk_irq_ack_status(eth, status_intr);
-	}
-
-	if (unlikely(netif_msg_intr(eth))) {
-		mask = mtk_irq_enabled(eth);
-		netdev_info(eth->netdev[0],
-			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
-			    tx_done, rx_done, status, mask);
-	}
-
-	if (tx_again || rx_done == budget)
-		return budget;
-
-	status = mtk_irq_pending(eth);
-	if (status & (tx_intr | rx_intr))
-		return budget;
-
-	napi_complete(napi);
-	mtk_irq_enable(eth, tx_intr | rx_intr);
-
-	return rx_done;
-}
-
-static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
-{
-	int i;
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-
-	ring->tx_ring_size = eth->soc->dma_ring_size;
-	ring->tx_free_idx = 0;
-	ring->tx_next_idx = 0;
-	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-			      MAX_SKB_FRAGS);
-
-	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-			       GFP_KERNEL);
-	if (!ring->tx_buf)
-		goto no_tx_mem;
-
-	ring->tx_dma =
-		dma_alloc_coherent(eth->dev,
-				   ring->tx_ring_size * sizeof(*ring->tx_dma),
-				   &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
-	if (!ring->tx_dma)
-		goto no_tx_mem;
-
-	for (i = 0; i < ring->tx_ring_size; i++) {
-		ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
-		ring->tx_dma[i].txd4 = eth->soc->txd4;
-	}
-
-	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-	ring->tx_map = mtk_pdma_tx_map;
-	ring->tx_poll = mtk_pdma_tx_poll;
-	ring->tx_clean = mtk_pdma_tx_clean;
-
-	/* make sure that all changes to the dma ring are flushed before we
-	 * continue
-	 */
-	wmb();
-
-	mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
-	mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
-	mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
-	mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-	return 0;
-
-no_tx_mem:
-	return -ENOMEM;
-}
-
-static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
-{
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-	int i, sz = sizeof(*ring->tx_dma);
-
-	ring->tx_ring_size = eth->soc->dma_ring_size;
-	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-			       GFP_KERNEL);
-	if (!ring->tx_buf)
-		goto no_tx_mem;
-
-	ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
-					  &ring->tx_phys,
-					  GFP_ATOMIC | __GFP_ZERO);
-	if (!ring->tx_dma)
-		goto no_tx_mem;
-
-	for (i = 0; i < ring->tx_ring_size; i++) {
-		int next = (i + 1) % ring->tx_ring_size;
-		u32 next_ptr = ring->tx_phys + next * sz;
-
-		ring->tx_dma[i].txd2 = next_ptr;
-		ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
-	}
-
-	atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
-	ring->tx_next_free = &ring->tx_dma[0];
-	ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
-	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-			      MAX_SKB_FRAGS);
-
-	ring->tx_map = mtk_qdma_tx_map;
-	ring->tx_poll = mtk_qdma_tx_poll;
-	ring->tx_clean = mtk_qdma_tx_clean;
-
-	/* make sure that all changes to the dma ring are flushed before we
-	 * continue
-	 */
-	wmb();
-
-	mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
-	mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
-	mtk_w32(eth,
-		ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-		MTK_QTX_CRX_PTR);
-	mtk_w32(eth,
-		ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-		MTK_QTX_DRX_PTR);
-
-	return 0;
-
-no_tx_mem:
-	return -ENOMEM;
-}
-
-static int mtk_qdma_init(struct mtk_eth *eth, int ring)
-{
-	int err;
-
-	err = mtk_init_fq_dma(eth);
-	if (err)
-		return err;
-
-	err = mtk_qdma_tx_alloc_tx(eth);
-	if (err)
-		return err;
-
-	err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
-	if (err)
-		return err;
-
-	mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
-	mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
-	mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
-	mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
-	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
-
-	/* Enable random early drop and set drop threshold automatically */
-	mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
-	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
-
-	return 0;
-}
-
-static int mtk_pdma_qdma_init(struct mtk_eth *eth)
-{
-	int err = mtk_qdma_init(eth, 1);
-
-	if (err)
-		return err;
-
-	err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
-	if (err)
-		return err;
-
-	mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
-	mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
-	mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-	mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-	return 0;
-}
-
-static int mtk_pdma_init(struct mtk_eth *eth)
-{
-	struct mtk_rx_ring *ring = &eth->rx_ring[0];
-	int err;
-
-	err = mtk_pdma_tx_alloc(eth);
-	if (err)
-		return err;
-
-	err = mtk_dma_rx_alloc(eth, ring);
-	if (err)
-		return err;
-
-	mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
-	mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
-	mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-	mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-	return 0;
-}
-
-static void mtk_dma_free(struct mtk_eth *eth)
-{
-	int i;
-
-	for (i = 0; i < eth->soc->mac_count; i++)
-		if (eth->netdev[i])
-			netdev_reset_queue(eth->netdev[i]);
-	eth->tx_ring.tx_clean(eth);
-	mtk_clean_rx(eth, &eth->rx_ring[0]);
-	mtk_clean_rx(eth, &eth->rx_ring[1]);
-	kfree(eth->scratch_head);
-}
-
-static void mtk_tx_timeout(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	struct mtk_tx_ring *ring = &eth->tx_ring;
-
-	eth->netdev[mac->id]->stats.tx_errors++;
-	netif_err(eth, tx_err, dev,
-		  "transmit timed out\n");
-	if (eth->soc->dma_type & MTK_PDMA) {
-		netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
-			   mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
-		netif_info(eth, drv, dev,
-			   "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
-			   0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
-			   mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
-			   mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
-			   mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
-			   ring->tx_free_idx,
-			   ring->tx_next_idx);
-	}
-	if (eth->soc->dma_type & MTK_QDMA) {
-		netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
-			   mtk_r32(eth, MTK_QDMA_GLO_CFG));
-		netif_info(eth, drv, dev,
-			   "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
-			   0, mtk_r32(eth, MTK_QTX_CTX_PTR),
-			   mtk_r32(eth, MTK_QTX_DTX_PTR),
-			   mtk_r32(eth, MTK_QTX_CRX_PTR),
-			   mtk_r32(eth, MTK_QTX_DRX_PTR),
-			   atomic_read(&ring->tx_free_count));
-	}
-	netif_info(eth, drv, dev,
-		   "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
-		   0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
-		   mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
-		   mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
-		   mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
-
-	schedule_work(&mac->pending_work);
-}
-
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
-{
-	struct mtk_eth *eth = _eth;
-	u32 status, int_mask;
-
-	status = mtk_irq_pending(eth);
-	if (unlikely(!status))
-		return IRQ_NONE;
-
-	int_mask = (eth->soc->rx_int | eth->soc->tx_int);
-	if (likely(status & int_mask)) {
-		if (likely(napi_schedule_prep(&eth->rx_napi)))
-			__napi_schedule(&eth->rx_napi);
-	} else {
-		mtk_irq_ack(eth, status);
-	}
-	mtk_irq_disable(eth, int_mask);
-
-	return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mtk_poll_controller(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
-
-	mtk_irq_disable(eth, int_mask);
-	mtk_handle_irq(dev->irq, dev);
-	mtk_irq_enable(eth, int_mask);
-}
-#endif
-
-int mtk_set_clock_cycle(struct mtk_eth *eth)
-{
-	unsigned long sysclk = eth->sysclk;
-
-	sysclk /= MTK_US_CYC_CNT_DIVISOR;
-	sysclk <<= MTK_US_CYC_CNT_SHIFT;
-
-	mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
-			~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
-			sysclk,
-			MTK_GLO_CFG);
-	return 0;
-}
-
-void mtk_fwd_config(struct mtk_eth *eth)
-{
-	u32 fwd_cfg;
-
-	fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-
-	/* disable jumbo frame */
-	if (eth->soc->jumbo_frame)
-		fwd_cfg &= ~MTK_GDM1_JMB_EN;
-
-	/* set unicast/multicast/broadcast frame to cpu */
-	fwd_cfg &= ~0xffff;
-
-	mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-}
-
-void mtk_csum_config(struct mtk_eth *eth)
-{
-	if (eth->soc->hw_features & NETIF_F_RXCSUM)
-		mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
-			(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-			MTK_GDMA1_FWD_CFG);
-	else
-		mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
-			~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-			MTK_GDMA1_FWD_CFG);
-	if (eth->soc->hw_features & NETIF_F_IP_CSUM)
-		mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
-			(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-			MTK_CDMA_CSG_CFG);
-	else
-		mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
-			~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-			MTK_CDMA_CSG_CFG);
-}
-
-static int mtk_start_dma(struct mtk_eth *eth)
-{
-	unsigned long flags;
-	u32 val;
-	int err;
-
-	if (eth->soc->dma_type == MTK_PDMA)
-		err = mtk_pdma_init(eth);
-	else if (eth->soc->dma_type == MTK_QDMA)
-		err = mtk_qdma_init(eth, 0);
-	else
-		err = mtk_pdma_qdma_init(eth);
-	if (err) {
-		mtk_dma_free(eth);
-		return err;
-	}
-
-	spin_lock_irqsave(&eth->page_lock, flags);
-
-	val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
-	if (eth->soc->rx_2b_offset)
-		val |= MTK_RX_2B_OFFSET;
-	val |= eth->soc->pdma_glo_cfg;
-
-	if (eth->soc->dma_type & MTK_PDMA)
-		mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
-
-	if (eth->soc->dma_type & MTK_QDMA)
-		mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
-
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
-	return 0;
-}
-
-static int mtk_open(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-
-	dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
-
-	if (!atomic_read(&eth->dma_refcnt)) {
-		int err = mtk_start_dma(eth);
-
-		if (err)
-			return err;
-
-		napi_enable(&eth->rx_napi);
-		mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
-	}
-	atomic_inc(&eth->dma_refcnt);
-
-	if (eth->phy)
-		eth->phy->start(mac);
-
-	if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
-		netif_carrier_on(dev);
-
-	netif_start_queue(dev);
-	eth->soc->fwd_config(eth);
-
-	return 0;
-}
-
-static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
-{
-	unsigned long flags;
-	u32 val;
-	int i;
-
-	/* stop the dma engine */
-	spin_lock_irqsave(&eth->page_lock, flags);
-	val = mtk_r32(eth, glo_cfg);
-	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
-		glo_cfg);
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
-	/* wait for dma stop */
-	for (i = 0; i < 10; i++) {
-		val = mtk_r32(eth, glo_cfg);
-		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
-			msleep(20);
-			continue;
-		}
-		break;
-	}
-}
-
-static int mtk_stop(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-
-	netif_tx_disable(dev);
-	if (eth->phy)
-		eth->phy->stop(mac);
-
-	if (!atomic_dec_and_test(&eth->dma_refcnt))
-		return 0;
-
-	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-	napi_disable(&eth->rx_napi);
-
-	if (eth->soc->dma_type & MTK_PDMA)
-		mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
-
-	if (eth->soc->dma_type & MTK_QDMA)
-		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-
-	mtk_dma_free(eth);
-
-	return 0;
-}
-
-static int __init mtk_init_hw(struct mtk_eth *eth)
-{
-	int i, err;
-
-	eth->soc->reset_fe(eth);
-
-	if (eth->soc->switch_init)
-		if (eth->soc->switch_init(eth)) {
-			dev_err(eth->dev, "failed to initialize switch core\n");
-			return -ENODEV;
-		}
-
-	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
-			       dev_name(eth->dev), eth);
-	if (err)
-		return err;
-
-	err = mtk_mdio_init(eth);
-	if (err)
-		return err;
-
-	/* disable delay and normal interrupt */
-	mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
-	if (eth->soc->dma_type & MTK_QDMA)
-		mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-
-	/* frame engine will push VLAN tag according to VIDX field in Tx desc */
-	if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-		for (i = 0; i < 16; i += 2)
-			mtk_w32(eth, ((i + 1) << 16) + i,
-				mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-				(i * 2));
-
-	if (eth->soc->fwd_config(eth))
-		dev_err(eth->dev, "unable to get clock\n");
-
-	if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
-		mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
-		mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
-	}
-
-	return 0;
-}
-
-static int __init mtk_init(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	struct device_node *port;
-	const char *mac_addr;
-	int err;
-
-	mac_addr = of_get_mac_address(mac->of_node);
-	if (mac_addr)
-		ether_addr_copy(dev->dev_addr, mac_addr);
-
-	/* If the mac address is invalid, use random mac address  */
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		eth_hw_addr_random(dev);
-		dev_err(eth->dev, "generated random MAC address %pM\n",
-			dev->dev_addr);
-	}
-	mac->hw->soc->set_mac(mac, dev->dev_addr);
-
-	if (eth->soc->port_init)
-		for_each_child_of_node(mac->of_node, port)
-			if (of_device_is_compatible(port,
-						    "mediatek,eth-port") &&
-			    of_device_is_available(port))
-				eth->soc->port_init(eth, mac, port);
-
-	if (eth->phy) {
-		err = eth->phy->connect(mac);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static void mtk_uninit(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-
-	if (eth->phy)
-		eth->phy->disconnect(mac);
-	mtk_mdio_cleanup(eth);
-
-	mtk_irq_disable(eth, ~0);
-	free_irq(dev->irq, dev);
-}
-
-static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-
-	if (!mac->phy_dev)
-		return -ENODEV;
-
-	switch (cmd) {
-	case SIOCGMIIPHY:
-	case SIOCGMIIREG:
-	case SIOCSMIIREG:
-		return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
-	default:
-		break;
-	}
-
-	return -EOPNOTSUPP;
-}
-
-static int mtk_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	int frag_size, old_mtu;
-	u32 fwd_cfg;
-
-	if (!eth->soc->jumbo_frame)
-		return eth_change_mtu(dev, new_mtu);
-
-	frag_size = mtk_max_frag_size(new_mtu);
-	if (new_mtu < 68 || frag_size > PAGE_SIZE)
-		return -EINVAL;
-
-	old_mtu = dev->mtu;
-	dev->mtu = new_mtu;
-
-	/* return early if the buffer sizes will not change */
-	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
-		return 0;
-	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
-		return 0;
-
-	if (new_mtu <= ETH_DATA_LEN)
-		eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-	else
-		eth->rx_ring[0].frag_size = PAGE_SIZE;
-	eth->rx_ring[0].rx_buf_size =
-				mtk_max_buf_size(eth->rx_ring[0].frag_size);
-
-	if (!netif_running(dev))
-		return 0;
-
-	mtk_stop(dev);
-	fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-	if (new_mtu <= ETH_DATA_LEN) {
-		fwd_cfg &= ~MTK_GDM1_JMB_EN;
-	} else {
-		fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
-		fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
-				MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
-	}
-	mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-
-	return mtk_open(dev);
-}
-
-static void mtk_pending_work(struct work_struct *work)
-{
-	struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-	struct mtk_eth *eth = mac->hw;
-	struct net_device *dev = eth->netdev[mac->id];
-	int err;
-
-	rtnl_lock();
-	mtk_stop(dev);
-
-	err = mtk_open(dev);
-	if (err) {
-		netif_alert(eth, ifup, dev,
-			    "Driver up/down cycle failed, closing device.\n");
-		dev_close(dev);
-	}
-	rtnl_unlock();
-}
-
-static int mtk_cleanup(struct mtk_eth *eth)
-{
-	int i;
-
-	for (i = 0; i < eth->soc->mac_count; i++) {
-		struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
-		if (!eth->netdev[i])
-			continue;
-
-		unregister_netdev(eth->netdev[i]);
-		free_netdev(eth->netdev[i]);
-		cancel_work_sync(&mac->pending_work);
-	}
-
-	return 0;
-}
-
-static const struct net_device_ops mtk_netdev_ops = {
-	.ndo_init		= mtk_init,
-	.ndo_uninit		= mtk_uninit,
-	.ndo_open		= mtk_open,
-	.ndo_stop		= mtk_stop,
-	.ndo_start_xmit		= mtk_start_xmit,
-	.ndo_set_mac_address	= mtk_set_mac_address,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_do_ioctl		= mtk_do_ioctl,
-	.ndo_change_mtu		= mtk_change_mtu,
-	.ndo_tx_timeout		= mtk_tx_timeout,
-	.ndo_get_stats64        = mtk_get_stats64,
-	.ndo_vlan_rx_add_vid	= mtk_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= mtk_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= mtk_poll_controller,
-#endif
-};
-
-static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-{
-	struct mtk_mac *mac;
-	const __be32 *_id = of_get_property(np, "reg", NULL);
-	int id, err;
-
-	if (!_id) {
-		dev_err(eth->dev, "missing mac id\n");
-		return -EINVAL;
-	}
-	id = be32_to_cpup(_id);
-	if (id >= eth->soc->mac_count || eth->netdev[id]) {
-		dev_err(eth->dev, "%d is not a valid mac id\n", id);
-		return -EINVAL;
-	}
-
-	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
-	if (!eth->netdev[id]) {
-		dev_err(eth->dev, "alloc_etherdev failed\n");
-		return -ENOMEM;
-	}
-	mac = netdev_priv(eth->netdev[id]);
-	eth->mac[id] = mac;
-	mac->id = id;
-	mac->hw = eth;
-	mac->of_node = np;
-	INIT_WORK(&mac->pending_work, mtk_pending_work);
-
-	if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-		mac->hw_stats = devm_kzalloc(eth->dev,
-					     sizeof(*mac->hw_stats),
-					     GFP_KERNEL);
-		if (!mac->hw_stats) {
-			err = -ENOMEM;
-			goto free_netdev;
-		}
-		spin_lock_init(&mac->hw_stats->stats_lock);
-		mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
-	}
-
-	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
-	eth->netdev[id]->base_addr = (unsigned long)eth->base;
-
-	if (eth->soc->init_data)
-		eth->soc->init_data(eth->soc, eth->netdev[id]);
-
-	eth->netdev[id]->vlan_features = eth->soc->hw_features &
-		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
-	eth->netdev[id]->features |= eth->soc->hw_features;
-
-	if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-		eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-	mtk_set_ethtool_ops(eth->netdev[id]);
-
-	err = register_netdev(eth->netdev[id]);
-	if (err) {
-		dev_err(eth->dev, "error bringing up device\n");
-		err = -ENOMEM;
-		goto free_netdev;
-	}
-	eth->netdev[id]->irq = eth->irq;
-	netif_info(eth, probe, eth->netdev[id],
-		   "mediatek frame engine at 0x%08lx, irq %d\n",
-		   eth->netdev[id]->base_addr, eth->netdev[id]->irq);
-
-	return 0;
-
-free_netdev:
-	free_netdev(eth->netdev[id]);
-	return err;
-}
-
-static int mtk_probe(struct platform_device *pdev)
-{
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	const struct of_device_id *match;
-	struct device_node *mac_np;
-	struct mtk_soc_data *soc;
-	struct mtk_eth *eth;
-	struct clk *sysclk;
-	int err;
-
-	device_reset(&pdev->dev);
-
-	match = of_match_device(of_mtk_match, &pdev->dev);
-	soc = (struct mtk_soc_data *)match->data;
-
-	if (soc->reg_table)
-		mtk_reg_table = soc->reg_table;
-
-	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-	if (!eth)
-		return -ENOMEM;
-
-	eth->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(eth->base))
-		return PTR_ERR(eth->base);
-
-	spin_lock_init(&eth->page_lock);
-
-	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-						      "mediatek,ethsys");
-	if (IS_ERR(eth->ethsys))
-		return PTR_ERR(eth->ethsys);
-
-	eth->irq = platform_get_irq(pdev, 0);
-	if (eth->irq < 0) {
-		dev_err(&pdev->dev, "no IRQ resource found\n");
-		return -ENXIO;
-	}
-
-	sysclk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(sysclk)) {
-		dev_err(&pdev->dev,
-			"the clock is not defined in the devicetree\n");
-		return -ENXIO;
-	}
-	eth->sysclk = clk_get_rate(sysclk);
-
-	eth->switch_np = of_parse_phandle(pdev->dev.of_node,
-					  "mediatek,switch", 0);
-	if (soc->has_switch && !eth->switch_np) {
-		dev_err(&pdev->dev, "failed to read switch phandle\n");
-		return -ENODEV;
-	}
-
-	eth->dev = &pdev->dev;
-	eth->soc = soc;
-	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
-
-	err = mtk_init_hw(eth);
-	if (err)
-		return err;
-
-	if (eth->soc->mac_count > 1) {
-		for_each_child_of_node(pdev->dev.of_node, mac_np) {
-			if (!of_device_is_compatible(mac_np,
-						     "mediatek,eth-mac"))
-				continue;
-
-			if (!of_device_is_available(mac_np))
-				continue;
-
-			err = mtk_add_mac(eth, mac_np);
-			if (err)
-				goto err_free_dev;
-		}
-
-		init_dummy_netdev(&eth->dummy_dev);
-		netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
-			       soc->napi_weight);
-	} else {
-		err = mtk_add_mac(eth, pdev->dev.of_node);
-		if (err)
-			goto err_free_dev;
-		netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
-			       soc->napi_weight);
-	}
-
-	platform_set_drvdata(pdev, eth);
-
-	return 0;
-
-err_free_dev:
-	mtk_cleanup(eth);
-	return err;
-}
-
-static int mtk_remove(struct platform_device *pdev)
-{
-	struct mtk_eth *eth = platform_get_drvdata(pdev);
-
-	netif_napi_del(&eth->rx_napi);
-	mtk_cleanup(eth);
-	platform_set_drvdata(pdev, NULL);
-
-	return 0;
-}
-
-static struct platform_driver mtk_driver = {
-	.probe = mtk_probe,
-	.remove = mtk_remove,
-	.driver = {
-		.name = "mtk_soc_eth",
-		.of_match_table = of_mtk_match,
-	},
-};
-
-module_platform_driver(mtk_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644
index e6ed804..0000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ /dev/null
@@ -1,716 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETH_H
-#define MTK_ETH_H
-
-#include <linux/mii.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/ethtool.h>
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-/* these registers have different offsets depending on the SoC. we use a lookup
- * table for these
- */
-enum mtk_reg {
-	MTK_REG_PDMA_GLO_CFG = 0,
-	MTK_REG_PDMA_RST_CFG,
-	MTK_REG_DLY_INT_CFG,
-	MTK_REG_TX_BASE_PTR0,
-	MTK_REG_TX_MAX_CNT0,
-	MTK_REG_TX_CTX_IDX0,
-	MTK_REG_TX_DTX_IDX0,
-	MTK_REG_RX_BASE_PTR0,
-	MTK_REG_RX_MAX_CNT0,
-	MTK_REG_RX_CALC_IDX0,
-	MTK_REG_RX_DRX_IDX0,
-	MTK_REG_MTK_INT_ENABLE,
-	MTK_REG_MTK_INT_STATUS,
-	MTK_REG_MTK_DMA_VID_BASE,
-	MTK_REG_MTK_COUNTER_BASE,
-	MTK_REG_MTK_RST_GL,
-	MTK_REG_MTK_INT_STATUS2,
-	MTK_REG_COUNT
-};
-
-/* delayed interrupt bits */
-#define MTK_DELAY_EN_INT	0x80
-#define MTK_DELAY_MAX_INT	0x04
-#define MTK_DELAY_MAX_TOUT	0x04
-#define MTK_DELAY_TIME		20
-#define MTK_DELAY_CHAN		(((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
-				 | MTK_DELAY_MAX_TOUT)
-#define MTK_DELAY_INIT		((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
-#define MTK_PSE_FQFC_CFG_INIT	0x80504000
-#define MTK_PSE_FQFC_CFG_256Q	0xff908000
-
-/* interrupt bits */
-#define MTK_CNT_PPE_AF		BIT(31)
-#define MTK_CNT_GDM_AF		BIT(29)
-#define MTK_PSE_P2_FC		BIT(26)
-#define MTK_PSE_BUF_DROP	BIT(24)
-#define MTK_GDM_OTHER_DROP	BIT(23)
-#define MTK_PSE_P1_FC		BIT(22)
-#define MTK_PSE_P0_FC		BIT(21)
-#define MTK_PSE_FQ_EMPTY	BIT(20)
-#define MTK_GE1_STA_CHG		BIT(18)
-#define MTK_TX_COHERENT		BIT(17)
-#define MTK_RX_COHERENT		BIT(16)
-#define MTK_TX_DONE_INT3	BIT(11)
-#define MTK_TX_DONE_INT2	BIT(10)
-#define MTK_TX_DONE_INT1	BIT(9)
-#define MTK_TX_DONE_INT0	BIT(8)
-#define MTK_RX_DONE_INT0	BIT(2)
-#define MTK_TX_DLY_INT		BIT(1)
-#define MTK_RX_DLY_INT		BIT(0)
-
-#define MTK_RX_DONE_INT		MTK_RX_DONE_INT0
-#define MTK_TX_DONE_INT		(MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
-				 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-#define RT5350_RX_DLY_INT	BIT(30)
-#define RT5350_TX_DLY_INT	BIT(28)
-#define RT5350_RX_DONE_INT1	BIT(17)
-#define RT5350_RX_DONE_INT0	BIT(16)
-#define RT5350_TX_DONE_INT3	BIT(3)
-#define RT5350_TX_DONE_INT2	BIT(2)
-#define RT5350_TX_DONE_INT1	BIT(1)
-#define RT5350_TX_DONE_INT0	BIT(0)
-
-#define RT5350_RX_DONE_INT	(RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
-#define RT5350_TX_DONE_INT	(RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
-				 RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
-
-/* registers */
-#define MTK_GDMA_OFFSET		0x0020
-#define MTK_PSE_OFFSET		0x0040
-#define MTK_GDMA2_OFFSET	0x0060
-#define MTK_CDMA_OFFSET		0x0080
-#define MTK_DMA_VID0		0x00a8
-#define MTK_PDMA_OFFSET		0x0100
-#define MTK_PPE_OFFSET		0x0200
-#define MTK_CMTABLE_OFFSET	0x0400
-#define MTK_POLICYTABLE_OFFSET	0x1000
-
-#define MT7621_GDMA_OFFSET	0x0500
-#define MT7620_GDMA_OFFSET	0x0600
-
-#define RT5350_PDMA_OFFSET	0x0800
-#define RT5350_SDM_OFFSET	0x0c00
-
-#define MTK_MDIO_ACCESS		0x00
-#define MTK_MDIO_CFG		0x04
-#define MTK_GLO_CFG		0x08
-#define MTK_RST_GL		0x0C
-#define MTK_INT_STATUS		0x10
-#define MTK_INT_ENABLE		0x14
-#define MTK_MDIO_CFG2		0x18
-#define MTK_FOC_TS_T		0x1C
-
-#define	MTK_GDMA1_FWD_CFG	(MTK_GDMA_OFFSET + 0x00)
-#define MTK_GDMA1_SCH_CFG	(MTK_GDMA_OFFSET + 0x04)
-#define MTK_GDMA1_SHPR_CFG	(MTK_GDMA_OFFSET + 0x08)
-#define MTK_GDMA1_MAC_ADRL	(MTK_GDMA_OFFSET + 0x0C)
-#define MTK_GDMA1_MAC_ADRH	(MTK_GDMA_OFFSET + 0x10)
-
-#define	MTK_GDMA2_FWD_CFG	(MTK_GDMA2_OFFSET + 0x00)
-#define MTK_GDMA2_SCH_CFG	(MTK_GDMA2_OFFSET + 0x04)
-#define MTK_GDMA2_SHPR_CFG	(MTK_GDMA2_OFFSET + 0x08)
-#define MTK_GDMA2_MAC_ADRL	(MTK_GDMA2_OFFSET + 0x0C)
-#define MTK_GDMA2_MAC_ADRH	(MTK_GDMA2_OFFSET + 0x10)
-
-#define MTK_PSE_FQ_CFG		(MTK_PSE_OFFSET + 0x00)
-#define MTK_CDMA_FC_CFG		(MTK_PSE_OFFSET + 0x04)
-#define MTK_GDMA1_FC_CFG	(MTK_PSE_OFFSET + 0x08)
-#define MTK_GDMA2_FC_CFG	(MTK_PSE_OFFSET + 0x0C)
-
-#define MTK_CDMA_CSG_CFG	(MTK_CDMA_OFFSET + 0x00)
-#define MTK_CDMA_SCH_CFG	(MTK_CDMA_OFFSET + 0x04)
-
-#define	MT7621_GDMA_FWD_CFG(x)	(MT7621_GDMA_OFFSET + (x * 0x1000))
-
-/* FIXME this might be different for different SOCs */
-#define	MT7620_GDMA1_FWD_CFG	(MT7621_GDMA_OFFSET + 0x00)
-
-#define RT5350_TX_BASE_PTR0	(RT5350_PDMA_OFFSET + 0x00)
-#define RT5350_TX_MAX_CNT0	(RT5350_PDMA_OFFSET + 0x04)
-#define RT5350_TX_CTX_IDX0	(RT5350_PDMA_OFFSET + 0x08)
-#define RT5350_TX_DTX_IDX0	(RT5350_PDMA_OFFSET + 0x0C)
-#define RT5350_TX_BASE_PTR1	(RT5350_PDMA_OFFSET + 0x10)
-#define RT5350_TX_MAX_CNT1	(RT5350_PDMA_OFFSET + 0x14)
-#define RT5350_TX_CTX_IDX1	(RT5350_PDMA_OFFSET + 0x18)
-#define RT5350_TX_DTX_IDX1	(RT5350_PDMA_OFFSET + 0x1C)
-#define RT5350_TX_BASE_PTR2	(RT5350_PDMA_OFFSET + 0x20)
-#define RT5350_TX_MAX_CNT2	(RT5350_PDMA_OFFSET + 0x24)
-#define RT5350_TX_CTX_IDX2	(RT5350_PDMA_OFFSET + 0x28)
-#define RT5350_TX_DTX_IDX2	(RT5350_PDMA_OFFSET + 0x2C)
-#define RT5350_TX_BASE_PTR3	(RT5350_PDMA_OFFSET + 0x30)
-#define RT5350_TX_MAX_CNT3	(RT5350_PDMA_OFFSET + 0x34)
-#define RT5350_TX_CTX_IDX3	(RT5350_PDMA_OFFSET + 0x38)
-#define RT5350_TX_DTX_IDX3	(RT5350_PDMA_OFFSET + 0x3C)
-#define RT5350_RX_BASE_PTR0	(RT5350_PDMA_OFFSET + 0x100)
-#define RT5350_RX_MAX_CNT0	(RT5350_PDMA_OFFSET + 0x104)
-#define RT5350_RX_CALC_IDX0	(RT5350_PDMA_OFFSET + 0x108)
-#define RT5350_RX_DRX_IDX0	(RT5350_PDMA_OFFSET + 0x10C)
-#define RT5350_RX_BASE_PTR1	(RT5350_PDMA_OFFSET + 0x110)
-#define RT5350_RX_MAX_CNT1	(RT5350_PDMA_OFFSET + 0x114)
-#define RT5350_RX_CALC_IDX1	(RT5350_PDMA_OFFSET + 0x118)
-#define RT5350_RX_DRX_IDX1	(RT5350_PDMA_OFFSET + 0x11C)
-#define RT5350_PDMA_GLO_CFG	(RT5350_PDMA_OFFSET + 0x204)
-#define RT5350_PDMA_RST_CFG	(RT5350_PDMA_OFFSET + 0x208)
-#define RT5350_DLY_INT_CFG	(RT5350_PDMA_OFFSET + 0x20c)
-#define RT5350_MTK_INT_STATUS	(RT5350_PDMA_OFFSET + 0x220)
-#define RT5350_MTK_INT_ENABLE	(RT5350_PDMA_OFFSET + 0x228)
-#define RT5350_PDMA_SCH_CFG	(RT5350_PDMA_OFFSET + 0x280)
-
-#define MTK_PDMA_GLO_CFG	(MTK_PDMA_OFFSET + 0x00)
-#define MTK_PDMA_RST_CFG	(MTK_PDMA_OFFSET + 0x04)
-#define MTK_PDMA_SCH_CFG	(MTK_PDMA_OFFSET + 0x08)
-#define MTK_DLY_INT_CFG		(MTK_PDMA_OFFSET + 0x0C)
-#define MTK_TX_BASE_PTR0	(MTK_PDMA_OFFSET + 0x10)
-#define MTK_TX_MAX_CNT0		(MTK_PDMA_OFFSET + 0x14)
-#define MTK_TX_CTX_IDX0		(MTK_PDMA_OFFSET + 0x18)
-#define MTK_TX_DTX_IDX0		(MTK_PDMA_OFFSET + 0x1C)
-#define MTK_TX_BASE_PTR1	(MTK_PDMA_OFFSET + 0x20)
-#define MTK_TX_MAX_CNT1		(MTK_PDMA_OFFSET + 0x24)
-#define MTK_TX_CTX_IDX1		(MTK_PDMA_OFFSET + 0x28)
-#define MTK_TX_DTX_IDX1		(MTK_PDMA_OFFSET + 0x2C)
-#define MTK_RX_BASE_PTR0	(MTK_PDMA_OFFSET + 0x30)
-#define MTK_RX_MAX_CNT0		(MTK_PDMA_OFFSET + 0x34)
-#define MTK_RX_CALC_IDX0	(MTK_PDMA_OFFSET + 0x38)
-#define MTK_RX_DRX_IDX0		(MTK_PDMA_OFFSET + 0x3C)
-#define MTK_TX_BASE_PTR2	(MTK_PDMA_OFFSET + 0x40)
-#define MTK_TX_MAX_CNT2		(MTK_PDMA_OFFSET + 0x44)
-#define MTK_TX_CTX_IDX2		(MTK_PDMA_OFFSET + 0x48)
-#define MTK_TX_DTX_IDX2		(MTK_PDMA_OFFSET + 0x4C)
-#define MTK_TX_BASE_PTR3	(MTK_PDMA_OFFSET + 0x50)
-#define MTK_TX_MAX_CNT3		(MTK_PDMA_OFFSET + 0x54)
-#define MTK_TX_CTX_IDX3		(MTK_PDMA_OFFSET + 0x58)
-#define MTK_TX_DTX_IDX3		(MTK_PDMA_OFFSET + 0x5C)
-#define MTK_RX_BASE_PTR1	(MTK_PDMA_OFFSET + 0x60)
-#define MTK_RX_MAX_CNT1		(MTK_PDMA_OFFSET + 0x64)
-#define MTK_RX_CALC_IDX1	(MTK_PDMA_OFFSET + 0x68)
-#define MTK_RX_DRX_IDX1		(MTK_PDMA_OFFSET + 0x6C)
-
-/* Switch DMA configuration */
-#define RT5350_SDM_CFG		(RT5350_SDM_OFFSET + 0x00)
-#define RT5350_SDM_RRING	(RT5350_SDM_OFFSET + 0x04)
-#define RT5350_SDM_TRING	(RT5350_SDM_OFFSET + 0x08)
-#define RT5350_SDM_MAC_ADRL	(RT5350_SDM_OFFSET + 0x0C)
-#define RT5350_SDM_MAC_ADRH	(RT5350_SDM_OFFSET + 0x10)
-#define RT5350_SDM_TPCNT	(RT5350_SDM_OFFSET + 0x100)
-#define RT5350_SDM_TBCNT	(RT5350_SDM_OFFSET + 0x104)
-#define RT5350_SDM_RPCNT	(RT5350_SDM_OFFSET + 0x108)
-#define RT5350_SDM_RBCNT	(RT5350_SDM_OFFSET + 0x10C)
-#define RT5350_SDM_CS_ERR	(RT5350_SDM_OFFSET + 0x110)
-
-#define RT5350_SDM_ICS_EN	BIT(16)
-#define RT5350_SDM_TCS_EN	BIT(17)
-#define RT5350_SDM_UCS_EN	BIT(18)
-
-/* QDMA registers */
-#define MTK_QTX_CFG(x)		(0x1800 + (x * 0x10))
-#define MTK_QTX_SCH(x)		(0x1804 + (x * 0x10))
-#define MTK_QRX_BASE_PTR0	0x1900
-#define MTK_QRX_MAX_CNT0	0x1904
-#define MTK_QRX_CRX_IDX0	0x1908
-#define MTK_QRX_DRX_IDX0	0x190C
-#define MTK_QDMA_GLO_CFG	0x1A04
-#define MTK_QDMA_RST_IDX	0x1A08
-#define MTK_QDMA_DELAY_INT	0x1A0C
-#define MTK_QDMA_FC_THRES	0x1A10
-#define MTK_QMTK_INT_STATUS	0x1A18
-#define MTK_QMTK_INT_ENABLE	0x1A1C
-#define MTK_QDMA_HRED2		0x1A44
-
-#define MTK_QTX_CTX_PTR		0x1B00
-#define MTK_QTX_DTX_PTR		0x1B04
-
-#define MTK_QTX_CRX_PTR		0x1B10
-#define MTK_QTX_DRX_PTR		0x1B14
-
-#define MTK_QDMA_FQ_HEAD	0x1B20
-#define MTK_QDMA_FQ_TAIL	0x1B24
-#define MTK_QDMA_FQ_CNT		0x1B28
-#define MTK_QDMA_FQ_BLEN	0x1B2C
-
-#define QDMA_PAGE_SIZE		2048
-#define QDMA_TX_OWNER_CPU	BIT(31)
-#define QDMA_TX_SWC		BIT(14)
-#define TX_QDMA_SDL(_x)		(((_x) & 0x3fff) << 16)
-#define QDMA_RES_THRES		4
-
-/* MDIO_CFG register bits */
-#define MTK_MDIO_CFG_AUTO_POLL_EN	BIT(29)
-#define MTK_MDIO_CFG_GP1_BP_EN		BIT(16)
-#define MTK_MDIO_CFG_GP1_FRC_EN		BIT(15)
-#define MTK_MDIO_CFG_GP1_SPEED_10	(0 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_100	(1 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_1000	(2 << 13)
-#define MTK_MDIO_CFG_GP1_DUPLEX		BIT(12)
-#define MTK_MDIO_CFG_GP1_FC_TX		BIT(11)
-#define MTK_MDIO_CFG_GP1_FC_RX		BIT(10)
-#define MTK_MDIO_CFG_GP1_LNK_DWN	BIT(9)
-#define MTK_MDIO_CFG_GP1_AN_FAIL	BIT(8)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_1	(0 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_2	(1 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_4	(2 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_8	(3 << 6)
-#define MTK_MDIO_CFG_TURBO_MII_FREQ	BIT(5)
-#define MTK_MDIO_CFG_TURBO_MII_MODE	BIT(4)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_0	(0 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_200	(1 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_400	(2 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_INV	(3 << 2)
-#define MTK_MDIO_CFG_TX_CLK_SKEW_0	0
-#define MTK_MDIO_CFG_TX_CLK_SKEW_200	1
-#define MTK_MDIO_CFG_TX_CLK_SKEW_400	2
-#define MTK_MDIO_CFG_TX_CLK_SKEW_INV	3
-
-/* uni-cast port */
-#define MTK_GDM1_JMB_LEN_MASK	0xf
-#define MTK_GDM1_JMB_LEN_SHIFT	28
-#define MTK_GDM1_ICS_EN		BIT(22)
-#define MTK_GDM1_TCS_EN		BIT(21)
-#define MTK_GDM1_UCS_EN		BIT(20)
-#define MTK_GDM1_JMB_EN		BIT(19)
-#define MTK_GDM1_STRPCRC	BIT(16)
-#define MTK_GDM1_UFRC_P_CPU	(0 << 12)
-#define MTK_GDM1_UFRC_P_GDMA1	(1 << 12)
-#define MTK_GDM1_UFRC_P_PPE	(6 << 12)
-
-/* checksums */
-#define MTK_ICS_GEN_EN		BIT(2)
-#define MTK_UCS_GEN_EN		BIT(1)
-#define MTK_TCS_GEN_EN		BIT(0)
-
-/* dma mode */
-#define MTK_PDMA		BIT(0)
-#define MTK_QDMA		BIT(1)
-#define MTK_PDMA_RX_QDMA_TX	(MTK_PDMA | MTK_QDMA)
-
-/* dma ring */
-#define MTK_PST_DRX_IDX0	BIT(16)
-#define MTK_PST_DTX_IDX3	BIT(3)
-#define MTK_PST_DTX_IDX2	BIT(2)
-#define MTK_PST_DTX_IDX1	BIT(1)
-#define MTK_PST_DTX_IDX0	BIT(0)
-
-#define MTK_RX_2B_OFFSET	BIT(31)
-#define MTK_TX_WB_DDONE		BIT(6)
-#define MTK_RX_DMA_BUSY		BIT(3)
-#define MTK_TX_DMA_BUSY		BIT(1)
-#define MTK_RX_DMA_EN		BIT(2)
-#define MTK_TX_DMA_EN		BIT(0)
-
-#define MTK_PDMA_SIZE_4DWORDS	(0 << 4)
-#define MTK_PDMA_SIZE_8DWORDS	(1 << 4)
-#define MTK_PDMA_SIZE_16DWORDS	(2 << 4)
-
-#define MTK_US_CYC_CNT_MASK	0xff
-#define MTK_US_CYC_CNT_SHIFT	0x8
-#define MTK_US_CYC_CNT_DIVISOR	1000000
-
-/* PDMA descriptor rxd2 */
-#define RX_DMA_DONE		BIT(31)
-#define RX_DMA_LSO		BIT(30)
-#define RX_DMA_PLEN0(_x)	(((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x)	(((_x) >> 16) & 0x3fff)
-#define RX_DMA_TAG		BIT(15)
-
-/* PDMA descriptor rxd3 */
-#define RX_DMA_TPID(_x)		(((_x) >> 16) & 0xffff)
-#define RX_DMA_VID(_x)		((_x) & 0xfff)
-
-/* PDMA descriptor rxd4 */
-#define RX_DMA_L4VALID		BIT(30)
-#define RX_DMA_FPORT_SHIFT	19
-#define RX_DMA_FPORT_MASK	0x7
-
-struct mtk_rx_dma {
-	unsigned int rxd1;
-	unsigned int rxd2;
-	unsigned int rxd3;
-	unsigned int rxd4;
-} __packed __aligned(4);
-
-/* PDMA tx descriptor bits */
-#define TX_DMA_BUF_LEN		0x3fff
-#define TX_DMA_PLEN0_MASK	(TX_DMA_BUF_LEN << 16)
-#define TX_DMA_PLEN0(_x)	(((_x) & TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x)	((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN0(_x)    (((_x) >> 16) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN1(_x)    ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_LS1		BIT(14)
-#define TX_DMA_LS0		BIT(30)
-#define TX_DMA_DONE		BIT(31)
-#define TX_DMA_FPORT_SHIFT	25
-#define TX_DMA_FPORT_MASK	0x7
-#define TX_DMA_INS_VLAN_MT7621	BIT(16)
-#define TX_DMA_INS_VLAN		BIT(7)
-#define TX_DMA_INS_PPPOE	BIT(12)
-#define TX_DMA_TAG		BIT(15)
-#define TX_DMA_TAG_MASK		BIT(15)
-#define TX_DMA_QN(_x)		((_x) << 16)
-#define TX_DMA_PN(_x)		((_x) << 24)
-#define TX_DMA_QN_MASK		TX_DMA_QN(0x7)
-#define TX_DMA_PN_MASK		TX_DMA_PN(0x7)
-#define TX_DMA_UDF		BIT(20)
-#define TX_DMA_CHKSUM		(0x7 << 29)
-#define TX_DMA_TSO		BIT(28)
-#define TX_DMA_DESP4_DEF	(TX_DMA_QN(3) | TX_DMA_PN(1))
-
-/* frame engine counters */
-#define MTK_PPE_AC_BCNT0	(MTK_CMTABLE_OFFSET + 0x00)
-#define MTK_GDMA1_TX_GBCNT	(MTK_CMTABLE_OFFSET + 0x300)
-#define MTK_GDMA2_TX_GBCNT	(MTK_GDMA1_TX_GBCNT + 0x40)
-
-/* phy device flags */
-#define MTK_PHY_FLAG_PORT	BIT(0)
-#define MTK_PHY_FLAG_ATTACH	BIT(1)
-
-struct mtk_tx_dma {
-	unsigned int txd1;
-	unsigned int txd2;
-	unsigned int txd3;
-	unsigned int txd4;
-} __packed __aligned(4);
-
-struct mtk_eth;
-struct mtk_mac;
-
-/* manage the attached phys */
-struct mtk_phy {
-	spinlock_t		lock;
-
-	struct phy_device	*phy[8];
-	struct device_node	*phy_node[8];
-	const __be32		*phy_fixed[8];
-	int			duplex[8];
-	int			speed[8];
-	int			tx_fc[8];
-	int			rx_fc[8];
-	int (*connect)(struct mtk_mac *mac);
-	void (*disconnect)(struct mtk_mac *mac);
-	void (*start)(struct mtk_mac *mac);
-	void (*stop)(struct mtk_mac *mac);
-};
-
-/* struct mtk_soc_data - the structure that holds the SoC specific data
- * @reg_table:		Some of the legacy registers changed their location
- *			over time. Their offsets are stored in this table
- *
- * @init_data:		Some features depend on the silicon revision. This
- *			callback allows runtime modification of the content of
- *			this struct
- * @reset_fe:		This callback is used to trigger the reset of the frame
- *			engine
- * @set_mac:		This callback is used to set the unicast mac address
- *			filter
- * @fwd_config:		This callback is used to setup the forward config
- *			register of the MAC
- * @switch_init:	This callback is used to bring up the switch core
- * @port_init:		Some SoCs have ports that can be router to a switch port
- *			or an external PHY. This callback is used to setup these
- *			ports.
- * @has_carrier:	This callback allows driver to check if there is a cable
- *			attached.
- * @mdio_init:		This callbck is used to setup the MDIO bus if one is
- *			present
- * @mdio_cleanup:	This callback is used to cleanup the MDIO state.
- * @mdio_write:		This callback is used to write data to the MDIO bus.
- * @mdio_read:		This callback is used to write data to the MDIO bus.
- * @mdio_adjust_link:	This callback is used to apply the PHY settings.
- * @piac_offset:	the PIAC register has a different base offset
- * @hw_features:	feature set depends on the SoC type
- * @dma_ring_size:	allow GBit SoCs to set bigger rings than FE SoCs
- * @napi_weight:	allow GBit SoCs to set bigger napi weight than FE SoCs
- * @dma_type:		the SoC uses PDMA, QDMA or a mix of the two
- * @pdma_glo_cfg:	the default DMA configuration
- * @rx_int:		the RX interrupt bits used by the SoC
- * @tx_int:		the TX interrupt bits used by the SoC
- * @status_int:		the Status interrupt bits used by the SoC
- * @checksum_bit:	the bits used to turn on HW checksumming
- * @txd4:		default value of the TXD4 descriptor
- * @mac_count:		the number of MACs that the SoC has
- * @new_stats:		there is an old and a new way to read hardware stats
- *			registers
- * @jumbo_frame:	does the SoC support jumbo frames?
- * @rx_2b_offset:	tell the rx dma to offset the data by 2 bytes
- * @rx_sg_dma:		scatter gather support
- * @padding_64b:	enable 64 bit padding
- * @padding_bug:	rt2880 has a padding bug
- * @has_switch:		does the SoC have a built-in switch
- *
- * Although all of the supported SoCs share the same basic functionality, there
- * are several SoC specific functions and features that we need to support. This
- * struct holds the SoC specific data so that the common core can figure out
- * how to setup and use these differences.
- */
-struct mtk_soc_data {
-	const u16 *reg_table;
-
-	void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
-	void (*reset_fe)(struct mtk_eth *eth);
-	void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
-	int (*fwd_config)(struct mtk_eth *eth);
-	int (*switch_init)(struct mtk_eth *eth);
-	void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
-			  struct device_node *port);
-	int (*has_carrier)(struct mtk_eth *eth);
-	int (*mdio_init)(struct mtk_eth *eth);
-	void (*mdio_cleanup)(struct mtk_eth *eth);
-	int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
-			  u16 val);
-	int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
-	void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
-	u32 piac_offset;
-	netdev_features_t hw_features;
-	u32 dma_ring_size;
-	u32 napi_weight;
-	u32 dma_type;
-	u32 pdma_glo_cfg;
-	u32 rx_int;
-	u32 tx_int;
-	u32 status_int;
-	u32 checksum_bit;
-	u32 txd4;
-	u32 mac_count;
-
-	u32 new_stats:1;
-	u32 jumbo_frame:1;
-	u32 rx_2b_offset:1;
-	u32 rx_sg_dma:1;
-	u32 padding_64b:1;
-	u32 padding_bug:1;
-	u32 has_switch:1;
-};
-
-#define MTK_STAT_OFFSET			0x40
-
-/* struct mtk_hw_stats - the structure that holds the traffic statistics.
- * @stats_lock:		make sure that stats operations are atomic
- * @reg_offset:		the status register offset of the SoC
- * @syncp:		the u64 stats sync point for the counters
- *
- * All of the supported SoCs have hardware counters for traffic statistics.
- * Whenever the status IRQ triggers we can read the latest stats from these
- * counters and store them in this struct.
- */
-struct mtk_hw_stats {
-	spinlock_t stats_lock;
-	u32 reg_offset;
-	struct u64_stats_sync syncp;
-
-	u64 tx_bytes;
-	u64 tx_packets;
-	u64 tx_skip;
-	u64 tx_collisions;
-	u64 rx_bytes;
-	u64 rx_packets;
-	u64 rx_overflow;
-	u64 rx_fcs_errors;
-	u64 rx_short_errors;
-	u64 rx_long_errors;
-	u64 rx_checksum_errors;
-	u64 rx_flow_control_packets;
-};
-
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
-enum mtk_tx_flags {
-	MTK_TX_FLAGS_SINGLE0	= 0x01,
-	MTK_TX_FLAGS_PAGE0	= 0x02,
-	MTK_TX_FLAGS_PAGE1	= 0x04,
-};
-
-/* struct mtk_tx_buf -	This struct holds the pointers to the memory pointed at
- *			by the TX descriptors
- * @skb:		The SKB pointer of the packet being sent
- * @dma_addr0:		The base addr of the first segment
- * @dma_len0:		The length of the first segment
- * @dma_addr1:		The base addr of the second segment
- * @dma_len1:		The length of the second segment
- */
-struct mtk_tx_buf {
-	struct sk_buff *skb;
-	u32 flags;
-	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
-	DEFINE_DMA_UNMAP_LEN(dma_len0);
-	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
-	DEFINE_DMA_UNMAP_LEN(dma_len1);
-};
-
-/* struct mtk_tx_ring -	This struct holds info describing a TX ring
- * @tx_dma:		The descriptor ring
- * @tx_buf:		The memory pointed at by the ring
- * @tx_phys:		The physical addr of tx_buf
- * @tx_next_free:	Pointer to the next free descriptor
- * @tx_last_free:	Pointer to the last free descriptor
- * @tx_thresh:		The threshold for the minimum number of free descriptors
- * @tx_map:		Callback to map a new packet into the ring
- * @tx_poll:		Callback for the housekeeping function
- * @tx_clean:		Callback for the cleanup function
- * @tx_ring_size:	How many descriptors are in the ring
- * @tx_free_idx:	The index of the next free descriptor
- * @tx_next_idx:	QDMA uses a linked list. This element points to the next
- *			free descriptor in the list
- * @tx_free_count:	QDMA uses a linked list. Track how many free descriptors
- *			are present
- */
-struct mtk_tx_ring {
-	struct mtk_tx_dma *tx_dma;
-	struct mtk_tx_buf *tx_buf;
-	dma_addr_t tx_phys;
-	struct mtk_tx_dma *tx_next_free;
-	struct mtk_tx_dma *tx_last_free;
-	u16 tx_thresh;
-	int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
-		      struct mtk_tx_ring *ring, bool gso);
-	int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
-	void (*tx_clean)(struct mtk_eth *eth);
-
-	/* PDMA only */
-	u16 tx_ring_size;
-	u16 tx_free_idx;
-
-	/* QDMA only */
-	u16 tx_next_idx;
-	atomic_t tx_free_count;
-};
-
-/* struct mtk_rx_ring -	This struct holds info describing a RX ring
- * @rx_dma:		The descriptor ring
- * @rx_data:		The memory pointed at by the ring
- * @rx_phys:		The physical addr of the RX descriptor ring
- * @rx_ring_size:	How many descriptors are in the ring
- * @rx_buf_size:	The size of each packet buffer
- * @rx_calc_idx:	The current head of ring
- */
-struct mtk_rx_ring {
-	struct mtk_rx_dma *rx_dma;
-	u8 **rx_data;
-	dma_addr_t rx_phys;
-	u16 rx_ring_size;
-	u16 frag_size;
-	u16 rx_buf_size;
-	u16 rx_calc_idx;
-};
-
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS			2
-
-/* struct mtk_eth -	This is the main data structure for holding the state
- *			of the driver
- * @dev:		The device pointer
- * @base:		The mapped register i/o base
- * @page_lock:		Make sure that register operations are atomic
- * @soc:		pointer to our SoC specific data
- * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
- *			dummy for NAPI to work
- * @netdev:		The netdev instances
- * @mac:		Each netdev is linked to a physical MAC
- * @switch_np:		The phandle for the switch
- * @irq:		The IRQ that we are using
- * @msg_enable:		Ethtool msg level
- * @sysclk:		The sysclk rate - needed for calibration
- * @ethsys:		The register map pointing at the range used to setup
- *			MII modes
- * @dma_refcnt:		track how many netdevs are using the DMA engine
- * @tx_ring:		Pointer to the memory holding info about the TX ring
- * @rx_ring:		Pointer to the memory holding info about the RX ring
- * @rx_napi:		The NAPI struct
- * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
- * @scratch_head:	The scratch memory that scratch_ring points to.
- * @phy:		Info about the attached PHYs
- * @mii_bus:		If there is a bus we need to create an instance for it
- * @link:		Track if the ports have a physical link
- * @sw_priv:		Pointer to the switch's private data
- * @vlan_map:		RX VID tracking
- */
-
-struct mtk_eth {
-	struct device			*dev;
-	void __iomem			*base;
-	spinlock_t			page_lock;
-	struct mtk_soc_data		*soc;
-	struct net_device		dummy_dev;
-	struct net_device		*netdev[MTK_MAX_DEVS];
-	struct mtk_mac			*mac[MTK_MAX_DEVS];
-	struct device_node		*switch_np;
-	int				irq;
-	u32				msg_enable;
-	unsigned long			sysclk;
-	struct regmap			*ethsys;
-	atomic_t			dma_refcnt;
-	struct mtk_tx_ring		tx_ring;
-	struct mtk_rx_ring		rx_ring[2];
-	struct napi_struct		rx_napi;
-	struct mtk_tx_dma		*scratch_ring;
-	void				*scratch_head;
-	struct mtk_phy			*phy;
-	struct mii_bus			*mii_bus;
-	int				link[8];
-	void				*sw_priv;
-	unsigned long			vlan_map;
-};
-
-/* struct mtk_mac -	the structure that holds the info about the MACs of the
- *			SoC
- * @id:			The number of the MAC
- * @of_node:		Our devicetree node
- * @hw:			Backpointer to our main data structure
- * @hw_stats:		Packet statistics counter
- * @phy_dev:		The attached PHY if available
- * @phy_flags:		The PHY's flags
- * @pending_work:	The workqueue used to reset the dma ring
- */
-struct mtk_mac {
-	int				id;
-	struct device_node		*of_node;
-	struct mtk_eth			*hw;
-	struct mtk_hw_stats		*hw_stats;
-	struct phy_device		*phy_dev;
-	u32				phy_flags;
-	struct work_struct		pending_work;
-};
-
-/* the struct describing the SoC. These are declared in the soc_xyz.c files */
-extern const struct of_device_id of_mtk_match[];
-
-/* read the hardware status register */
-void mtk_stats_update_mac(struct mtk_mac *mac);
-
-/* frame engine reset handler */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
-
-/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
-
-/* default clock calibration handler */
-int mtk_set_clock_cycle(struct mtk_eth *eth);
-
-/* default checksum setup handler */
-void mtk_csum_config(struct mtk_eth *eth);
-
-/* default forward config handler */
-void mtk_fwd_config(struct mtk_eth *eth);
-
-#endif /* MTK_ETH_H */
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644
index 5d63b5d..0000000
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/if_vlan.h>
-#include <linux/of_net.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-#define MT7620_CDMA_CSG_CFG	0x400
-#define MT7621_CDMP_IG_CTRL	(MT7620_CDMA_CSG_CFG + 0x00)
-#define MT7621_CDMP_EG_CTRL	(MT7620_CDMA_CSG_CFG + 0x04)
-#define MT7621_RESET_FE		BIT(6)
-#define MT7621_L4_VALID		BIT(24)
-
-#define MT7621_TX_DMA_UDF	BIT(19)
-
-#define CDMA_ICS_EN		BIT(2)
-#define CDMA_UCS_EN		BIT(1)
-#define CDMA_TCS_EN		BIT(0)
-
-#define GDMA_ICS_EN		BIT(22)
-#define GDMA_TCS_EN		BIT(21)
-#define GDMA_UCS_EN		BIT(20)
-
-/* frame engine counters */
-#define MT7621_REG_MIB_OFFSET	0x2000
-#define MT7621_PPE_AC_BCNT0	(MT7621_REG_MIB_OFFSET + 0x00)
-#define MT7621_GDM1_TX_GBCNT	(MT7621_REG_MIB_OFFSET + 0x400)
-#define MT7621_GDM2_TX_GBCNT	(MT7621_GDM1_TX_GBCNT + 0x40)
-
-#define GSW_REG_GDMA1_MAC_ADRL	0x508
-#define GSW_REG_GDMA1_MAC_ADRH	0x50C
-#define GSW_REG_GDMA2_MAC_ADRL	0x1508
-#define GSW_REG_GDMA2_MAC_ADRH	0x150C
-
-#define MT7621_MTK_RST_GL	0x04
-#define MT7620_MTK_INT_STATUS2	0x08
-
-/* The MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29),
- * but testing shows it should be BIT(13).
- */
-#define MT7621_MTK_GDM1_AF	BIT(28)
-#define MT7621_MTK_GDM2_AF	BIT(29)
-
-static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
-	[MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
-	[MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
-	[MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
-	[MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
-	[MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
-	[MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
-	[MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
-	[MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
-	[MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
-	[MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
-	[MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
-	[MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
-	[MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
-	[MTK_REG_MTK_DMA_VID_BASE] = 0,
-	[MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
-	[MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
-	[MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
-};
-
-static void mt7621_mtk_reset(struct mtk_eth *eth)
-{
-	mtk_reset(eth, MT7621_RESET_FE);
-}
-
-static int mt7621_fwd_config(struct mtk_eth *eth)
-{
-	/* Setup GMAC1 only, there is no support for GMAC2 yet */
-	mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
-		MT7620_GDMA1_FWD_CFG);
-
-	/* Enable RX checksum */
-	mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
-		       GDMA_TCS_EN | GDMA_UCS_EN),
-		       MT7620_GDMA1_FWD_CFG);
-
-	/* Enable RX VLAN offloading */
-	mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
-
-	return 0;
-}
-
-static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&mac->hw->page_lock, flags);
-	if (mac->id == 0) {
-		mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-			GSW_REG_GDMA1_MAC_ADRH);
-		mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-			(hwaddr[4] << 8) | hwaddr[5],
-			GSW_REG_GDMA1_MAC_ADRL);
-	}
-	if (mac->id == 1) {
-		mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-			GSW_REG_GDMA2_MAC_ADRH);
-		mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-			(hwaddr[4] << 8) | hwaddr[5],
-			GSW_REG_GDMA2_MAC_ADRL);
-	}
-	spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static struct mtk_soc_data mt7621_data = {
-	.hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
-		       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
-		       NETIF_F_IPV6_CSUM,
-	.dma_type = MTK_PDMA,
-	.dma_ring_size = 256,
-	.napi_weight = 64,
-	.new_stats = 1,
-	.padding_64b = 1,
-	.rx_2b_offset = 1,
-	.rx_sg_dma = 1,
-	.has_switch = 1,
-	.mac_count = 2,
-	.reset_fe = mt7621_mtk_reset,
-	.set_mac = mt7621_set_mac,
-	.fwd_config = mt7621_fwd_config,
-	.switch_init = mtk_gsw_init,
-	.reg_table = mt7621_reg_table,
-	.pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
-	.rx_int = RT5350_RX_DONE_INT,
-	.tx_int = RT5350_TX_DONE_INT,
-	.status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
-	.checksum_bit = MT7621_L4_VALID,
-	.has_carrier = mt7620_has_carrier,
-	.mdio_read = mt7620_mdio_read,
-	.mdio_write = mt7620_mdio_write,
-	.mdio_adjust_link = mt7620_mdio_link_adjust,
-};
-
-const struct of_device_id of_mtk_match[] = {
-	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d335338..c8fa17c 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
 config PCI_MT7621
 	tristate "MediaTek MT7621 PCI Controller"
 	depends on RALINK
+	depends on PCI
 	select PCI_DRIVERS_GENERIC
 	help
 	  This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index d6248ee..2aee64f 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
 		goto no_phy;
 
 	phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
-				PHY_INTERFACE_MODE_GMII);
+				priv->phy_mode);
 	of_node_put(phy_node);
 
 	if (!phydev)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ce61c56..986db767 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
 	return np;
 }
 
-static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+				int port)
 {
+	struct device_node *np = priv->of_node;
 	u32 delay_value;
+	bool rx_delay;
+	bool tx_delay;
 
-	if (!of_property_read_u32(np, "rx-delay", &delay_value))
+	/* By default, both RX and TX delays are enabled in
+	 * __cvmx_helper_rgmii_enable().
+	 */
+	rx_delay = true;
+	tx_delay = true;
+
+	if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
 		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
-	if (!of_property_read_u32(np, "tx-delay", &delay_value))
+		rx_delay = delay_value > 0;
+	}
+	if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
 		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+		tx_delay = delay_value > 0;
+	}
+
+	if (!rx_delay && !tx_delay)
+		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+	else if (!rx_delay)
+		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+	else if (!tx_delay)
+		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+	else
+		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
 }
 
 static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
 			priv->port = port;
 			priv->queue = cvmx_pko_get_base_queue(priv->port);
 			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+			priv->phy_mode = PHY_INTERFACE_MODE_NA;
 			for (qos = 0; qos < 16; qos++)
 				skb_queue_head_init(&priv->tx_free_list[qos]);
 			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
 				break;
 
 			case CVMX_HELPER_INTERFACE_MODE_SGMII:
+				priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
 				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
 				strcpy(dev->name, "eth%d");
 				break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
 				strcpy(dev->name, "spi%d");
 				break;
 
-			case CVMX_HELPER_INTERFACE_MODE_RGMII:
 			case CVMX_HELPER_INTERFACE_MODE_GMII:
+				priv->phy_mode = PHY_INTERFACE_MODE_GMII;
 				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
 				strcpy(dev->name, "eth%d");
-				cvm_set_rgmii_delay(priv->of_node, interface,
+				break;
+
+			case CVMX_HELPER_INTERFACE_MODE_RGMII:
+				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+				strcpy(dev->name, "eth%d");
+				cvm_set_rgmii_delay(priv, interface,
 						    port_index);
 				break;
 			}
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 4a07e7f..be570d3 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -12,7 +12,7 @@
 #define OCTEON_ETHERNET_H
 
 #include <linux/of.h>
-
+#include <linux/phy.h>
 #include <asm/octeon/cvmx-helper-board.h>
 
 /**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
 	 * cvmx_helper_interface_mode_t
 	 */
 	int imode;
+	/* PHY mode */
+	phy_interface_t phy_mode;
 	/* List of outstanding tx buffers per queue */
 	struct sk_buff_head tx_free_list[16];
 	unsigned int last_speed;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 80b8d41..a542864 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
 {
 	unsigned char lob;
 	int ret, i;
-	struct dcon_gpio *pin = &gpios_asis[0];
+	const struct dcon_gpio *pin = &gpios_asis[0];
 
 	for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
 		gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 1723a47a..952f2ab 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 
 	pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
 
-	rtw_alloc_hwxmits(padapter);
+	res = rtw_alloc_hwxmits(padapter);
+	if (res == _FAIL)
+		goto exit;
 	rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
 	for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
 	return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
 	struct hw_xmit *hwxmits;
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
 	pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
 				     sizeof(struct hw_xmit), GFP_KERNEL);
+	if (!pxmitpriv->hwxmits)
+		return _FAIL;
 
 	hwxmits = pxmitpriv->hwxmits;
 
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 	hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
 	hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
 	hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+	return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c..ba7e15f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
 void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
 
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 1920d02..8c36ace 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
 
 static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
 {
-	u32 val;
-	void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj	*pcmd);
 	struct cmd_obj *pcmd  = (struct cmd_obj *)pbuf;
 
-	if (pcmd->rsp && pcmd->rspsz > 0)
-		memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
-	pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
-	if (!pcmd_callback)
-		r8712_free_cmd_obj(pcmd);
-	else
-		pcmd_callback(padapter, pcmd);
+	r8712_free_cmd_obj(pcmd);
 	return H2C_SUCCESS;
 }
 
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 92fb776..1ef86b8 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
 static struct _cmd_callback	cmd_callback[] = {
 	{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
 	{GEN_CMD_CODE(_Write_MACREG), NULL},
-	{GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+	{GEN_CMD_CODE(_Read_BBREG), NULL},
 	{GEN_CMD_CODE(_Write_BBREG), NULL},
 	{GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
 	{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 094d61b..b87f13a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 		}
 	}
 
-	rtw_alloc_hwxmits(padapter);
+	res = rtw_alloc_hwxmits(padapter);
+	if (res == _FAIL)
+		goto exit;
 	rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
 	for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
 	return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
 	struct hw_xmit *hwxmits;
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
 	pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
 
-	if (pxmitpriv->hwxmits == NULL) {
-		DBG_871X("alloc hwxmits fail!...\n");
-		return;
-	}
+	if (!pxmitpriv->hwxmits)
+		return _FAIL;
 
 	hwxmits = pxmitpriv->hwxmits;
 
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
 	}
 
-
+	return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 1b38b91..37f42b2 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
 
 
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 
 
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed9..4cc77b2 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
 
 	rtlpriv->phydm.internal =
 		kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+	if (!rtlpriv->phydm.internal)
+		return 0;
 
 	_rtl_phydm_init_com_info(rtlpriv, ic, params);
 
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f061dd1..cf6b7a8 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 		      u1_rsvd_page_loc, 3);
 
 	skb = dev_alloc_skb(totalpacketlen);
+	if (!skb)
+		return;
 	memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
 	       totalpacketlen);
 
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index edff6ce..9d85a3a 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
 		return -EINVAL;
 
 	spin_lock_irqsave(&speakup_info.spinlock, flags);
+	synth_soft.alive = 1;
 	while (1) {
 		prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
-		if (!unicode)
-			synth_buffer_skip_nonlatin1();
-		if (!synth_buffer_empty() || speakup_info.flushing)
-			break;
+		if (synth_current() == &synth_soft) {
+			if (!unicode)
+				synth_buffer_skip_nonlatin1();
+			if (!synth_buffer_empty() || speakup_info.flushing)
+				break;
+		}
 		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 		if (fp->f_flags & O_NONBLOCK) {
 			finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
 
 	/* Keep 3 bytes available for a 16bit UTF-8-encoded character */
 	while (chars_sent <= count - bytes_per_ch) {
+		if (synth_current() != &synth_soft)
+			break;
 		if (speakup_info.flushing) {
 			speakup_info.flushing = 0;
 			ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
 	poll_wait(fp, &speakup_event, wait);
 
 	spin_lock_irqsave(&speakup_info.spinlock, flags);
-	if (!synth_buffer_empty() || speakup_info.flushing)
+	if (synth_current() == &synth_soft &&
+	    (!synth_buffer_empty() || speakup_info.flushing))
 		ret = EPOLLIN | EPOLLRDNORM;
 	spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 	return ret;
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index c8e6888..ac6a748 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
 int synth_release_region(unsigned long start, unsigned long n);
 int synth_add(struct spk_synth *in_synth);
 void synth_remove(struct spk_synth *in_synth);
+struct spk_synth *synth_current(void);
 
 extern struct speakup_info_t speakup_info;
 
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 25f259e..3568bfb 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
 }
 EXPORT_SYMBOL_GPL(synth_remove);
 
+struct spk_synth *synth_current(void)
+{
+	return synth;
+}
+EXPORT_SYMBOL_GPL(synth_current);
+
 short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 804daf8..064d0db 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
 	struct device_node *fw_node;
 	const struct of_device_id *of_id;
 	struct vchiq_drvdata *drvdata;
+	struct device *vchiq_dev;
 	int err;
 
 	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
 		goto failed_platform_init;
 	}
 
-	if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
-				 NULL, "vchiq")))
+	vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
+				  "vchiq");
+	if (IS_ERR(vchiq_dev)) {
+		err = PTR_ERR(vchiq_dev);
 		goto failed_device_create;
+	}
 
 	vchiq_debugfs_init();
 
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b370985..c6bb4aa 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
 		return;
 	}
 
-	MACvIntDisable(priv->PortOffset);
-
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
 
 	if (priv->vif)
 		vnt_interrupt_process(priv);
+
+	MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static irqreturn_t vnt_interrupt(int irq,  void *arg)
 {
 	struct vnt_private *priv = arg;
 
-	if (priv->vif)
-		schedule_work(&priv->interrupt_work);
+	schedule_work(&priv->interrupt_work);
+
+	MACvIntDisable(priv->PortOffset);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d..3bdd56a 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
 	struct clk		*clk;
 };
 
-static inline bool ar933x_uart_console_enabled(void)
-{
-	return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
 static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
 					    int offset)
 {
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
 	.verify_port	= ar933x_uart_verify_port,
 };
 
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
 static struct ar933x_uart_port *
 ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
 
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
 	.index		= -1,
 	.data		= &ar933x_uart_driver,
 };
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
-	if (!ar933x_uart_console_enabled())
-		return;
-
-	ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
 
 static struct uart_driver ar933x_uart_driver = {
 	.owner		= THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
 	baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
 	up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
 
-	ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+	ar933x_console_ports[up->port.line] = up;
+#endif
 
 	ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
 	if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
 {
 	int ret;
 
-	if (ar933x_uart_console_enabled())
-		ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+	ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
 
 	ret = uart_register_driver(&ar933x_uart_driver);
 	if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 05147fe..0b4f369 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,8 @@ struct atmel_uart_port {
 	unsigned int		pending_status;
 	spinlock_t		lock_suspended;
 
+	bool			hd_start_rx;	/* can start RX during half-duplex operation */
+
 	/* ISO7816 */
 	unsigned int		fidi_min;
 	unsigned int		fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
 	__raw_writeb(value, port->membase + ATMEL_US_THR);
 }
 
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+	return ((port->rs485.flags & SER_RS485_ENABLED) &&
+		!(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
+		(port->iso7816.flags & SER_ISO7816_ENABLED);
+}
+
 #ifdef CONFIG_SERIAL_ATMEL_PDC
 static bool atmel_use_pdc_rx(struct uart_port *port)
 {
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
 	/* Disable interrupts */
 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
-	if (((port->rs485.flags & SER_RS485_ENABLED) &&
-	     !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-	    port->iso7816.flags & SER_ISO7816_ENABLED)
+	if (atmel_uart_is_half_duplex(port))
 		atmel_start_rx(port);
+
 }
 
 /*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
 		return;
 
 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
-		if (((port->rs485.flags & SER_RS485_ENABLED) &&
-		     !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-		    port->iso7816.flags & SER_ISO7816_ENABLED)
+		if (atmel_uart_is_half_duplex(port))
 			atmel_stop_rx(port);
 
 	if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
 	 */
 	if (!uart_circ_empty(xmit))
 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
-	else if (((port->rs485.flags & SER_RS485_ENABLED) &&
-		  !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-		 port->iso7816.flags & SER_ISO7816_ENABLED) {
-		/* DMA done, stop TX, start RX for RS485 */
-		atmel_start_rx(port);
+	else if (atmel_uart_is_half_duplex(port)) {
+		/*
+		 * DMA done, re-enable TXEMPTY and signal that we can stop
+		 * TX and start RX for RS485
+		 */
+		atmel_port->hd_start_rx = true;
+		atmel_uart_writel(port, ATMEL_US_IER,
+				  atmel_port->tx_done_mask);
 	}
 
 	spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
 					 sg_dma_len(&atmel_port->sg_rx)/2,
 					 DMA_DEV_TO_MEM,
 					 DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(port->dev, "Preparing DMA cyclic failed\n");
+		goto chan_err;
+	}
 	desc->callback = atmel_complete_rx_dma;
 	desc->callback_param = port;
 	atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 	if (pending & atmel_port->tx_done_mask) {
-		/* Either PDC or interrupt transmission */
 		atmel_uart_writel(port, ATMEL_US_IDR,
 				  atmel_port->tx_done_mask);
+
+		/* Start RX if flag was set and FIFO is empty */
+		if (atmel_port->hd_start_rx) {
+			if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+					& ATMEL_US_TXEMPTY))
+				dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+			atmel_port->hd_start_rx = false;
+			atmel_start_rx(port);
+			return;
+		}
+
 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
 	}
 }
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
 		atmel_uart_writel(port, ATMEL_US_IER,
 				  atmel_port->tx_done_mask);
 	} else {
-		if (((port->rs485.flags & SER_RS485_ENABLED) &&
-		     !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-		    port->iso7816.flags & SER_ISO7816_ENABLED) {
+		if (atmel_uart_is_half_duplex(port)) {
 			/* DMA done, stop TX, start RX for RS485 */
 			atmel_start_rx(port);
 		}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6fb312e..bfe5e9e 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
 	char *cptr = config;
 	struct console *cons;
 
-	if (!strlen(config) || isspace(config[0]))
+	if (!strlen(config) || isspace(config[0])) {
+		err = 0;
 		goto noconfig;
+	}
 
 	kgdboc_io_ops.is_console = 0;
 	kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f5bdde4..450ba6d 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
 	if (spi->dev.of_node) {
 		const struct of_device_id *of_id =
 			of_match_device(max310x_dt_ids, &spi->dev);
+		if (!of_id)
+			return -ENODEV;
 
 		devtype = (struct max310x_devtype *)of_id->data;
 	} else {
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231f751..7e7b1559 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	if (!match)
+		return -ENODEV;
+
 	/* Assume that all UART ports have a DT alias or none has */
 	id = of_alias_get_id(pdev->dev.of_node, "serial");
 	if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 27235a5..4c188f4 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
 
 	s->port.mapbase = r->start;
 	s->port.membase = ioremap(r->start, resource_size(r));
+	if (!s->port.membase) {
+		ret = -ENOMEM;
+		goto out_disable_clks;
+	}
 	s->port.ops = &mxs_auart_ops;
 	s->port.iotype = UPIO_MEM;
 	s->port.fifosize = MXS_AUART_FIFO_SIZE;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3bcec1c..35e5f9c 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
 {
 	struct uart_port *uport;
 	struct qcom_geni_serial_port *port;
-	int baud;
+	int baud = 9600;
 	int bits = 8;
 	int parity = 'n';
 	int flow = 'n';
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 635178c..09a183d 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
 	ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
 	if (ret < 0) {
 		pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
-		return ret;
+		goto err_i2c;
 	}
 #endif
 
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
 	ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
 	if (ret < 0) {
 		pr_err("failed to init sc16is7xx spi --> %d\n", ret);
-		return ret;
+		goto err_spi;
 	}
 #endif
 	return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+	i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+	uart_unregister_driver(&sc16is7xx_uart);
+	return ret;
 }
 module_init(sc16is7xx_init);
 
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 060fcd4..2d1c626 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
-	if (uart_circ_empty(xmit)) {
+	if (uart_circ_empty(xmit))
 		sci_stop_tx(port);
-	} else {
-		ctrl = serial_port_in(port, SCSCR);
 
-		if (port->type != PORT_SCI) {
-			serial_port_in(port, SCxSR); /* Dummy read */
-			sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
-		}
-
-		ctrl |= SCSCR_TIE;
-		serial_port_out(port, SCSCR, ctrl);
-	}
 }
 
 /* On SH3, SCIF may read end-of-break as a space->mark char */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cb..a9e12b3 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
 		if (tty && C_HUPCL(tty))
 			tty_port_lower_dtr_rts(port);
 
-		if (port->ops->shutdown)
+		if (port->ops && port->ops->shutdown)
 			port->ops->shutdown(port);
 	}
 out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
  */
 int tty_port_carrier_raised(struct tty_port *port)
 {
-	if (port->ops->carrier_raised == NULL)
+	if (!port->ops || !port->ops->carrier_raised)
 		return 1;
 	return port->ops->carrier_raised(port);
 }
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
  */
 void tty_port_raise_dtr_rts(struct tty_port *port)
 {
-	if (port->ops->dtr_rts)
+	if (port->ops && port->ops->dtr_rts)
 		port->ops->dtr_rts(port, 1);
 }
 EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
  */
 void tty_port_lower_dtr_rts(struct tty_port *port)
 {
-	if (port->ops->dtr_rts)
+	if (port->ops && port->ops->dtr_rts)
 		port->ops->dtr_rts(port, 0);
 }
 EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
 
 	if (!tty_port_initialized(port)) {
 		clear_bit(TTY_IO_ERROR, &tty->flags);
-		if (port->ops->activate) {
+		if (port->ops && port->ops->activate) {
 			int retval = port->ops->activate(port, tty);
 			if (retval) {
 				mutex_unlock(&port->mutex);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 739f896..ec666eb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
 		clear_bit(EVENT_RX_STALL, &acm->flags);
 	}
 
-	if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+	if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
 		tty_port_tty_wakeup(&acm->port);
-		clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
-	}
 }
 
 /*
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 48277bb..73c8e65 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
 
 	do {
 		controller = of_find_node_with_property(controller, "phys");
+		if (!of_device_is_available(controller))
+			continue;
 		index = 0;
 		do {
 			if (arg0 == -1) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3189181..975d7c1 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
 		retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
 						  PHY_MODE_USB_HOST_SS);
 		if (retval)
+			retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+							  PHY_MODE_USB_HOST);
+		if (retval)
 			goto err_usb_phy_roothub_power_on;
 
 		retval = usb_phy_roothub_power_on(hcd->phy_roothub);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e..8cced36 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT_M		0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL			0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP			0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH		0x02ee
 #define PCI_DEVICE_ID_INTEL_GLK			0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP		0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH		0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
 	  (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
 	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 75b113a..f3816a5 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -391,20 +391,20 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
 	req->complete = f_hidg_req_complete;
 	req->context  = hidg;
 
+	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
 	status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
 	if (status < 0) {
 		ERROR(hidg->func.config->cdev,
 			"usb_ep_queue error on int endpoint %zd\n", status);
-		goto release_write_pending_unlocked;
+		goto release_write_pending;
 	} else {
 		status = count;
 	}
-	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
 	return status;
 release_write_pending:
 	spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
 	hidg->write_pending = 0;
 	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f312..c2011cd 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 			break;
 	}
 	if (&req->req != _req) {
+		ep->stopped = stopped;
 		spin_unlock_irqrestore(&ep->dev->lock, flags);
 		return -EINVAL;
 	}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f63f824..898339e5 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
 	(void) readl(&ep->dev->pci->pcimstctl);
 
 	writel(BIT(DMA_START), &dma->dmastat);
-
-	if (!ep->is_in)
-		stop_out_naking(ep);
 }
 
 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
 			writel(BIT(DMA_START), &dma->dmastat);
 			return;
 		}
+		stop_out_naking(ep);
 	}
 
 	tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 			break;
 	}
 	if (&req->req != _req) {
+		ep->stopped = stopped;
 		spin_unlock_irqrestore(&ep->dev->lock, flags);
-		dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
-								__func__);
+		ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 934584f..6343fba 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
 	printk(KERN_INFO "driver %s\n", hcd_name);
 	workqueue = create_singlethread_workqueue("u132");
 	retval = platform_driver_register(&u132_platform_driver);
+	if (retval)
+		destroy_workqueue(workqueue);
+
 	return retval;
 }
 
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index c78be57..d932cc3 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
 		return -1;
 
 	writel(0, &dbc->regs->control);
-	xhci_dbc_mem_cleanup(xhci);
 	dbc->state = DS_DISABLED;
 
 	return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
 	ret = xhci_do_dbc_stop(xhci);
 	spin_unlock_irqrestore(&dbc->lock, flags);
 
-	if (!ret)
+	if (!ret) {
+		xhci_dbc_mem_cleanup(xhci);
 		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+	}
 }
 
 static void
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e2eece6..96a7405 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 	port_index = max_ports;
 	while (port_index--) {
 		u32 t1, t2;
-
+		int retries = 10;
+retry:
 		t1 = readl(ports[port_index]->addr);
 		t2 = xhci_port_state_to_neutral(t1);
 		portsc_buf[port_index] = 0;
 
-		/* Bail out if a USB3 port has a new device in link training */
-		if ((hcd->speed >= HCD_USB3) &&
+		/*
+		 * Give a USB3 port in link training time to finish, but don't
+		 * prevent suspend as port might be stuck
+		 */
+		if ((hcd->speed >= HCD_USB3) && retries-- &&
 		    (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
-			bus_state->bus_suspended = 0;
 			spin_unlock_irqrestore(&xhci->lock, flags);
-			xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
-			return -EBUSY;
+			msleep(XHCI_PORT_POLLING_LFPS_TIME);
+			spin_lock_irqsave(&xhci->lock, flags);
+			xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
+				 port_index);
+			goto retry;
 		}
-
 		/* suspend ports in U0, or bail out for new connect changes */
 		if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
 			if ((t1 & PORT_CSC) && wake_enabled) {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index a6e4637..671bce1 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
 	if (!xhci_rcar_wait_for_pll_active(hcd))
 		return -ETIMEDOUT;
 
+	xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	return xhci_rcar_download_firmware(hcd);
 }
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 40fa25c..9215a28 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		}
 	}
 
-	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
-			DEV_SUPERSPEED_ANY(portsc)) {
+	if ((portsc & PORT_PLC) &&
+	    DEV_SUPERSPEED_ANY(portsc) &&
+	    ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
+	     (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+	     (portsc & PORT_PLS_MASK) == XDEV_U2)) {
 		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
-		/* We've just brought the device into U0 through either the
+		/* We've just brought the device into U0/1/2 through either the
 		 * Resume state after a device remote wakeup, or through the
 		 * U3Exit state after a host-initiated resume.  If it's a device
 		 * initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 652dc36..9334cde 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -452,6 +452,14 @@ struct xhci_op_regs {
  */
 #define XHCI_DEFAULT_BESL	4
 
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check the status 10 times with a 36ms sleep in places where we need
+ * to wait for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME  36
+
 /**
  * struct xhci_intr_reg - Interrupt Register Set
  * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d72b7d..0468484 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
 	 */
 	hub->port_swap = USB251XB_DEF_PORT_SWAP;
 	of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
-		if ((port >= 0) && (port <= data->port_cnt))
+		if (port <= data->port_cnt)
 			hub->port_swap |= BIT(port);
 	}
 
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
 							   dev);
 	int err;
 
-	if (np) {
+	if (np && of_id) {
 		err = usb251xb_get_ofdata(hub,
 					  (struct usb251xb_data *)of_id->data);
 		if (err) {
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index bcc2348..928c2cd6 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -6,6 +6,7 @@
 	tristate "MediaTek USB3 Dual Role controller"
 	depends on USB || USB_GADGET
 	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on EXTCON || !EXTCON
 	select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
 	help
 	  Say Y or M here if your system runs on MediaTek SoCs with
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fffe23a..979bef9 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
 	{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
 	{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+	{ USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
 	{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
 	{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
 	{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f5b174..1d8461a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b863bed..5755f0d 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -567,7 +567,9 @@
 /*
  * NovaTech product ids (FTDI_VID)
  */
-#define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID		0x7c90	/* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID	0x7c91	/* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID		0x7c92	/* Orion I/O */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fc52ac7..18110225 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
 	if (!urbtrack)
 		return -ENOMEM;
 
-	kref_get(&mos_parport->ref_count);
-	urbtrack->mos_parport = mos_parport;
 	urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urbtrack->urb) {
 		kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
 			     usb_sndctrlpipe(usbdev, 0),
 			     (unsigned char *)urbtrack->setup,
 			     NULL, 0, async_complete, urbtrack);
+	kref_get(&mos_parport->ref_count);
+	urbtrack->mos_parport = mos_parport;
 	kref_init(&urbtrack->ref_count);
 	INIT_LIST_HEAD(&urbtrack->urblist_entry);
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 11b21d9..8386906 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EC25			0x0125
 #define QUECTEL_PRODUCT_BG96			0x0296
 #define QUECTEL_PRODUCT_EP06			0x0306
+#define QUECTEL_PRODUCT_EM12			0x0512
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
-	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+	  .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
 	/* Quectel products using Qualcomm vendor ID */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
 	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
 	  .driver_info = RSVD(4) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
-	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },	/* D-Link DWM-152/C1 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/C1 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/A3 */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),			/* Olicard 600 */
+	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },			/* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0f62db0..a2233d7 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -37,6 +37,7 @@
 	S(SRC_ATTACHED),			\
 	S(SRC_STARTUP),				\
 	S(SRC_SEND_CAPABILITIES),		\
+	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
 	S(SRC_NEGOTIATE_CAPABILITIES),		\
 	S(SRC_TRANSITION_SUPPLY),		\
 	S(SRC_READY),				\
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
 			/* port->hard_reset_count = 0; */
 			port->caps_count = 0;
 			port->pd_capable = true;
-			tcpm_set_state_cond(port, hard_reset_state(port),
+			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
 					    PD_T_SEND_SOURCE_CAP);
 		}
 		break;
+	case SRC_SEND_CAPABILITIES_TIMEOUT:
+		/*
+		 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+		 *
+		 * PD 2.0 sinks are supposed to accept src-capabilities with a
+		 * 3.0 header and simply ignore any src PDOs which the sink does
+		 * not understand, such as PPS, but some 2.0 sinks instead ignore
+		 * the entire PD_DATA_SOURCE_CAP message, causing contract
+		 * negotiation to fail.
+		 *
+		 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+		 * sending src-capabilities with a lower PD revision to
+		 * make these broken sinks work.
+		 */
+		if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+			tcpm_set_state(port, HARD_RESET_SEND, 0);
+		} else if (port->negotiated_rev > PD_REV20) {
+			port->negotiated_rev--;
+			port->hard_reset_count = 0;
+			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+		} else {
+			tcpm_set_state(port, hard_reset_state(port), 0);
+		}
+		break;
 	case SRC_NEGOTIATE_CAPABILITIES:
 		ret = tcpm_pd_check_request(port);
 		if (ret < 0) {
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e..6770afd4 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
 	wcove->dev = &pdev->dev;
 	wcove->regmap = pmic->regmap;
 
-	irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
-				  platform_get_irq(pdev, 0));
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+		return irq;
+	}
+
+	irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
 	if (irq < 0)
 		return irq;
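
Aside from the fix itself, the hunk above follows the standard pattern for consuming platform_get_irq(): its negative errno return must be checked and propagated before the value is used for any further lookup. A minimal sketch of that pattern (hypothetical example_probe(), not part of this patch):

#include <linux/platform_device.h>

/* Hypothetical probe stub illustrating the platform_get_irq() check. */
static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate -EPROBE_DEFER, -ENXIO, ... */

	/* irq is now a valid, non-negative interrupt number. */
	return 0;
}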
 
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d094..8ca333f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
 
 #define GUEST_MAPPINGS_TRIES	5
 
+#define VBG_KERNEL_REQUEST \
+	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
 /**
  * Reserves memory in which the VMM can relocate any guest mappings
  * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
 	int i, rc;
 
 	/* Query the required space. */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return;
 
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
 	 * Tell the host that we're going to free the memory we reserved for
 	 * it, then free it up. (Leak the memory if anything goes wrong here.)
 	 */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return;
 
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
 	struct vmmdev_guest_info2 *req2 = NULL;
 	int rc, ret = -ENOMEM;
 
-	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
-	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+			     VBG_KERNEL_REQUEST);
+	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+			     VBG_KERNEL_REQUEST);
 	if (!req1 || !req2)
 		goto out_free;
 
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
 	req2->additions_minor = VBG_VERSION_MINOR;
 	req2->additions_build = VBG_VERSION_BUILD;
 	req2->additions_revision = VBG_SVN_REV;
-	/* (no features defined yet) */
-	req2->additions_features = 0;
+	req2->additions_features =
+		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
 	strlcpy(req2->name, VBG_VERSION_STRING,
 		sizeof(req2->name));
 
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
 	struct vmmdev_guest_status *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
 	struct vmmdev_heartbeat *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
 
 	gdev->guest_heartbeat_req = vbg_req_alloc(
 					sizeof(*gdev->guest_heartbeat_req),
-					VMMDEVREQ_GUEST_HEARTBEAT);
+					VMMDEVREQ_GUEST_HEARTBEAT,
+					VBG_KERNEL_REQUEST);
 	if (!gdev->guest_heartbeat_req)
 		return -ENOMEM;
 
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
 	struct vmmdev_mask *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
 	u32 changed, previous;
 	int rc, ret = 0;
 
-	/* Allocate a request buffer before taking the spinlock */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+	/*
+	 * Allocate a request buffer before taking the spinlock. When the
+	 * session is being terminated, the requestor is the kernel, as
+	 * we're cleaning up.
+	 */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+			    session_termination ? VBG_KERNEL_REQUEST :
+						  session->requestor);
 	if (!req) {
 		if (!session_termination)
 			return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
 	struct vmmdev_mask *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
 	u32 changed, previous;
 	int rc, ret = 0;
 
-	/* Allocate a request buffer before taking the spinlock */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+	/*
+	 * Allocate a request buffer before taking the spinlock. When the
+	 * session is being terminated, the requestor is the kernel, as
+	 * we're cleaning up.
+	 */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+			    session_termination ? VBG_KERNEL_REQUEST :
+						  session->requestor);
 	if (!req) {
 		if (!session_termination)
 			return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
 	struct vmmdev_host_version *req;
 	int rc, ret;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 
 	gdev->mem_balloon.get_req =
 		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
-			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+			      VBG_KERNEL_REQUEST);
 	gdev->mem_balloon.change_req =
 		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
-			      VMMDEVREQ_CHANGE_MEMBALLOON);
+			      VMMDEVREQ_CHANGE_MEMBALLOON,
+			      VBG_KERNEL_REQUEST);
 	gdev->cancel_req =
 		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
-			      VMMDEVREQ_HGCM_CANCEL2);
+			      VMMDEVREQ_HGCM_CANCEL2,
+			      VBG_KERNEL_REQUEST);
 	gdev->ack_events_req =
 		vbg_req_alloc(sizeof(*gdev->ack_events_req),
-			      VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+			      VBG_KERNEL_REQUEST);
 	gdev->mouse_status_req =
 		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
-			      VMMDEVREQ_GET_MOUSE_STATUS);
+			      VMMDEVREQ_GET_MOUSE_STATUS,
+			      VBG_KERNEL_REQUEST);
 
 	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
 	    !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
  * vboxguest_linux.c calls this when userspace opens the char-device.
  * Return: A pointer to the new session or an ERR_PTR on error.
  * @gdev:		The Guest extension device.
- * @user:		Set if this is a session for the vboxuser device.
+ * @requestor:		VMMDEV_REQUESTOR_* flags
  */
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
 	struct vbg_session *session;
 
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
 		return ERR_PTR(-ENOMEM);
 
 	session->gdev = gdev;
-	session->user_session = user;
+	session->requestor = requestor;
 
 	return session;
 }
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
 		if (!session->hgcm_client_ids[i])
 			continue;
 
-		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+		/* requestor is kernel here, as we're cleaning up. */
+		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+				    session->hgcm_client_ids[i], &rc);
 	}
 
 	kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
 		return -EPERM;
 	}
 
-	if (trusted_apps_only && session->user_session) {
+	if (trusted_apps_only &&
+	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
 		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
 			req->request_type);
 		return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
 		return -EMFILE;
 
-	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
-			       &conn->hdr.rc);
+	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+			       &client_id, &conn->hdr.rc);
 
 	mutex_lock(&gdev->session_mutex);
 	if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
 		return -EINVAL;
 
-	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+				  &disconn->hdr.rc);
 
 	mutex_lock(&gdev->session_mutex);
 	if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
 	}
 
 	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
-		ret = vbg_hgcm_call32(gdev, client_id,
+		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
 				      call->function, call->timeout_ms,
 				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
 				      call->parm_count, &call->hdr.rc);
 	else
-		ret = vbg_hgcm_call(gdev, client_id,
+		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
 				    call->function, call->timeout_ms,
 				    VBG_IOCTL_HGCM_CALL_PARMS(call),
 				    call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
 }
 
 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+				     struct vbg_session *session,
 				     struct vbg_ioctl_write_coredump *dump)
 {
 	struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
 	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
 		return -EINVAL;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+			    session->requestor);
 	if (!req)
 		return -ENOMEM;
 
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 	case VBG_IOCTL_CHECK_BALLOON:
 		return vbg_ioctl_check_balloon(gdev, data);
 	case VBG_IOCTL_WRITE_CORE_DUMP:
-		return vbg_ioctl_write_core_dump(gdev, data);
+		return vbg_ioctl_write_core_dump(gdev, session, data);
 	}
 
 	/* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
 	struct vmmdev_mouse_status *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec4..4188c12 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
 	 * host. Protected by vbg_gdev.session_mutex.
 	 */
 	u32 guest_caps;
-	/** Does this session belong to a root process or a user one? */
-	bool user_session;
+	/** VMMDEV_REQUESTOR_* flags */
+	u32 requestor;
 	/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
 	bool cancel_waiters;
 };
 
 int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
 void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
 void vbg_core_close_session(struct vbg_session *session);
 int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
 int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
 /* Private (non-exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+		    u32 requestor);
 void vbg_req_free(void *req, size_t len);
 int vbg_req_perform(struct vbg_dev *gdev, void *req);
 int vbg_hgcm_call32(
-	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-	int *vbox_status);
+	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+	u32 parm_count, int *vbox_status);
 
 #endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a961..6e8c0f1 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2006-2016 Oracle Corporation
  */
 
+#include <linux/cred.h>
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
 /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
 static struct vbg_dev *vbg_gdev;
 
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+	u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+			VMMDEV_REQUESTOR_CON_DONT_KNOW |
+			VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+	if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+		requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+	else
+		requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+	if (in_egroup_p(inode->i_gid))
+		requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+	return requestor;
+}
+
 static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 {
 	struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 	/* misc_open sets filp->private_data to our misc device */
 	gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
 
-	session = vbg_core_open_session(gdev, false);
+	session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
 	if (IS_ERR(session))
 		return PTR_ERR(session);
 
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
 	gdev = container_of(filp->private_data, struct vbg_dev,
 			    misc_device_user);
 
-	session = vbg_core_open_session(gdev, false);
+	session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+					      VMMDEV_REQUESTOR_USER_DEVICE);
 	if (IS_ERR(session))
 		return PTR_ERR(session);
 
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
 			 req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
 
 	if (is_vmmdev_req)
-		buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+		buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+				    session->requestor);
 	else
 		buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf447421..75fd140 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
 VBG_LOG(vbg_debug, pr_debug);
 #endif
 
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+		    u32 requestor)
 {
 	struct vmmdev_request_header *req;
 	int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
 	req->request_type = req_type;
 	req->rc = VERR_GENERAL_FAILURE;
 	req->reserved1 = 0;
-	req->reserved2 = 0;
+	req->requestor = requestor;
 
 	return req;
 }
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
 	return done;
 }
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
 		     struct vmmdev_hgcm_service_location *loc,
 		     u32 *client_id, int *vbox_status)
 {
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 	int rc;
 
 	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
-				     VMMDEVREQ_HGCM_CONNECT);
+				     VMMDEVREQ_HGCM_CONNECT, requestor);
 	if (!hgcm_connect)
 		return -ENOMEM;
 
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 }
 EXPORT_SYMBOL(vbg_hgcm_connect);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+			u32 client_id, int *vbox_status)
 {
 	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
 	int rc;
 
 	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
-					VMMDEVREQ_HGCM_DISCONNECT);
+					VMMDEVREQ_HGCM_DISCONNECT,
+					requestor);
 	if (!hgcm_disconnect)
 		return -ENOMEM;
 
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
 	return 0;
 }
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-		  u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+		  u32 function, u32 timeout_ms,
+		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+		  int *vbox_status)
 {
 	struct vmmdev_hgcm_call *call;
 	void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
 		goto free_bounce_bufs;
 	}
 
-	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
 	if (!call) {
 		ret = -ENOMEM;
 		goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
 
 #ifdef CONFIG_COMPAT
 int vbg_hgcm_call32(
-	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-	int *vbox_status)
+	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+	u32 parm_count, int *vbox_status)
 {
 	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
 	u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
 			goto out_free;
 	}
 
-	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
 			    parm64, parm_count, vbox_status);
 	if (ret < 0)
 		goto out_free;
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f..84834da 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
 #ifndef __VBOX_VERSION_H__
 #define __VBOX_VERSION_H__
 
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
 #define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
 
 #endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae97..6337b8d 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
 	s32 rc;
 	/** Reserved field no.1. MBZ. */
 	u32 reserved1;
-	/** Reserved field no.2. MBZ. */
-	u32 reserved2;
+	/** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+	u32 requestor;
 };
 VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
 
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
 };
 VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
 
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO	BIT(0)
+
 /** struct vmmdev_guestinfo2 - Guest information report, version 2. */
 struct vmmdev_guest_info2 {
 	/** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
 	u32 additions_build;
 	/** SVN revision. */
 	u32 additions_revision;
-	/** Feature mask, currently unused. */
+	/** Feature mask. */
 	u32 additions_features;
 	/**
 	 * The intentional meaning of this field was:
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e3346628..2d61ddd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
+	kfree(ci->i_symlink);
 	kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode)
 		}
 	}
 
-	kfree(ci->i_symlink);
 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
 		frag = rb_entry(n, struct ceph_inode_frag, node);
 		rb_erase(n, &ci->i_fragtree);
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 842e8f7..570d710 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -410,7 +410,7 @@ bool fs_validate_description(const struct fs_parameter_description *desc)
 			for (param = desc->specs; param->name; param++) {
 				if (param->opt == e->opt &&
 				    param->type != fs_param_is_enum) {
-					pr_err("VALIDATE %s: e[%lu] enum val for %s\n",
+					pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
 					       name, e - desc->enums, param->name);
 					good = false;
 				}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6aaa3058..bbdbd56 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1022,6 +1022,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 
 	ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
 	if (!ret) {
+		ssize_t ret2;
+
 		/*
 		 * Open-code file_start_write here to grab freeze protection,
 		 * which will be released by another thread in
@@ -1036,7 +1038,19 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 						SB_FREEZE_WRITE);
 		}
 		kiocb->ki_flags |= IOCB_WRITE;
-		io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
+
+		ret2 = call_write_iter(file, kiocb, &iter);
+		if (!force_nonblock || ret2 != -EAGAIN) {
+			io_rw_done(kiocb, ret2);
+		} else {
+			/*
+			 * If ->needs_lock is true, we're already in async
+			 * context.
+			 */
+			if (!s->needs_lock)
+				io_async_list_note(WRITE, req, iov_count);
+			ret = -EAGAIN;
+		}
 	}
 out_free:
 	kfree(iovec);
@@ -1968,7 +1982,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		return 0;
 
 	if (sig) {
-		ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
+#ifdef CONFIG_COMPAT
+		if (in_compat_syscall())
+			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+						      &ksigmask, &sigsaved, sigsz);
+		else
+#endif
+			ret = set_user_sigmask(sig, &ksigmask,
+					       &sigsaved, sigsz);
+
 		if (ret)
 			return ret;
 	}
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index a35259e..1dc9a08 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4719,22 +4719,23 @@ loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
 
 /* Lock an inode and grab a bh pointing to the inode. */
 int ocfs2_reflink_inodes_lock(struct inode *s_inode,
-			      struct buffer_head **bh1,
+			      struct buffer_head **bh_s,
 			      struct inode *t_inode,
-			      struct buffer_head **bh2)
+			      struct buffer_head **bh_t)
 {
-	struct inode *inode1;
-	struct inode *inode2;
+	struct inode *inode1 = s_inode;
+	struct inode *inode2 = t_inode;
 	struct ocfs2_inode_info *oi1;
 	struct ocfs2_inode_info *oi2;
+	struct buffer_head *bh1 = NULL;
+	struct buffer_head *bh2 = NULL;
 	bool same_inode = (s_inode == t_inode);
+	bool need_swap = (inode1->i_ino > inode2->i_ino);
 	int status;
 
 	/* First grab the VFS and rw locks. */
 	lock_two_nondirectories(s_inode, t_inode);
-	inode1 = s_inode;
-	inode2 = t_inode;
-	if (inode1->i_ino > inode2->i_ino)
+	if (need_swap)
 		swap(inode1, inode2);
 
 	status = ocfs2_rw_lock(inode1, 1);
@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 	trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
 				(unsigned long long)oi2->ip_blkno);
 
-	if (*bh1)
-		*bh1 = NULL;
-	if (*bh2)
-		*bh2 = NULL;
-
 	/* We always want to lock the one with the lower lockid first. */
 	if (oi1->ip_blkno > oi2->ip_blkno)
 		mlog_errno(-ENOLCK);
 
 	/* lock id1 */
-	status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
+	status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
+					 OI_LS_REFLINK_TARGET);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
 	/* lock id2 */
 	if (!same_inode) {
-		status = ocfs2_inode_lock_nested(inode2, bh2, 1,
+		status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
 						 OI_LS_REFLINK_TARGET);
 		if (status < 0) {
 			if (status != -ENOENT)
 				mlog_errno(status);
 			goto out_cl1;
 		}
-	} else
-		*bh2 = *bh1;
+	} else {
+		bh2 = bh1;
+	}
+
+	/*
+	 * If we swapped inode order above, we have to swap the buffer heads
+	 * before passing them back to the caller.
+	 */
+	if (need_swap)
+		swap(bh1, bh2);
+	*bh_s = bh1;
+	*bh_t = bh2;
 
 	trace_ocfs2_double_lock_end(
 			(unsigned long long)oi1->ip_blkno,
@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
 out_cl1:
 	ocfs2_inode_unlock(inode1, 1);
-	brelse(*bh1);
-	*bh1 = NULL;
+	brelse(bh1);
 out_rw2:
 	ocfs2_rw_unlock(inode2, 1);
 out_i2:
diff --git a/fs/open.c b/fs/open.c
index 0285ce7..f1c2f85 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
 		return 0;
 	}
 
+	/* Any file opened for execve()/uselib() has to be a regular file. */
+	if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+		error = -EACCES;
+		goto cleanup_file;
+	}
+
 	if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
 		error = get_write_access(inode);
 		if (unlikely(error))
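
The new FMODE_EXEC check above surfaces to userspace as a plain execve() failure on anything that is not a regular file (typically EACCES). A tiny hypothetical demonstration, not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "/dev/null", NULL };
	char *envp[] = { NULL };

	/* /dev/null is a character device, so the open-for-exec is rejected. */
	execve("/dev/null", argv, envp);

	/* execve() only returns on failure; EACCES is the expected errno. */
	printf("execve: %s\n", strerror(errno));
	return 0;
}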
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index d29d869a..f583448 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -615,7 +615,7 @@ static void __init proc_kcore_text_init(void)
 /*
  * MODULES_VADDR has no intersection with VMALLOC_ADDR.
  */
-struct kcore_list kcore_modules;
+static struct kcore_list kcore_modules;
 static void __init add_modules_range(void)
 {
 	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 4d598a3..d653907 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
 	if (--header->nreg)
 		return;
 
-	put_links(header);
+	if (parent)
+		put_links(header);
 	start_unregistering(header);
 	if (!--header->count)
 		kfree_rcu(header, rcu);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 48502cb..4637ae1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 	 */
 	level = be16_to_cpu(block->bb_level);
-	ASSERT(level > 0);
+	if (unlikely(level == 0)) {
+		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+		return -EFSCORRUPTED;
+	}
 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
 	bno = be64_to_cpu(*pp);
 
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
 	struct xfs_bmbt_irec	*mval,		/* output: map values */
 	int			*nmap)		/* i/o: mval size/count */
 {
+	struct xfs_bmalloca	bma = {
+		.tp		= tp,
+		.ip		= ip,
+		.total		= total,
+	};
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_ifork	*ifp;
-	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
 	xfs_fileoff_t		end;		/* end of mapped file region */
 	bool			eof = false;	/* after the end of extents */
 	int			error;		/* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
 		eof = true;
 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
 		bma.prev.br_startoff = NULLFILEOFF;
-	bma.tp = tp;
-	bma.ip = ip;
-	bma.total = total;
-	bma.datatype = 0;
 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
 
 	n = 0;
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 6f94d1f..117910d 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
 	struct xfs_btree_cur	*cur = bs->cur;
 	struct check_owner	*co;
 
-	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+	/*
+	 * In theory, xfs_btree_get_block should only give us a null buffer
+	 * pointer for the root of a root-in-inode btree type, but we need
+	 * to check defensively here in case the cursor state is also screwed
+	 * up.
+	 */
+	if (bp == NULL) {
+		if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
+			xchk_btree_set_corrupt(bs->sc, bs->cur, level);
 		return 0;
+	}
 
 	/*
 	 * We want to cross-reference each btree block with the bnobt
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index f1260b4..90527b0 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -574,6 +574,11 @@ xchk_da_btree(
 		/* Drill another level deeper. */
 		blkno = be32_to_cpu(key->before);
 		level++;
+		if (level >= XFS_DA_NODE_MAXDEPTH) {
+			/* Too deep! */
+			xchk_da_set_corrupt(&ds, level - 1);
+			break;
+		}
 		ds.tree_level--;
 		error = xchk_da_btree_block(&ds, level, blkno);
 		if (error)
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 93f07ed..9ee2a7d 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -161,6 +161,14 @@ xfs_ioc_trim(
 		return -EPERM;
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
+
+	/*
+	 * We haven't recovered the log, so we cannot use our bnobt-guided
+	 * storage zapping commands.
+	 */
+	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+		return -EROFS;
+
 	if (copy_from_user(&range, urange, sizeof(range)))
 		return -EFAULT;
 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1f2e284..a7ceae9 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
 	count = iov_iter_count(from);
 
 	/*
-	 * If we are doing unaligned IO, wait for all other IO to drain,
-	 * otherwise demote the lock if we had to take the exclusive lock
-	 * for other reasons in xfs_file_aio_write_checks.
+	 * If we are doing unaligned IO, we can't allow any other overlapping IO
+	 * in-flight at the same time or we risk data corruption. Wait for all
+	 * other IO to drain before we submit. If the IO is aligned, demote the
+	 * iolock if we had to take the exclusive lock in
+	 * xfs_file_aio_write_checks() for other reasons.
 	 */
 	if (unaligned_io) {
-		/* If we are going to wait for other DIO to finish, bail */
-		if (iocb->ki_flags & IOCB_NOWAIT) {
-			if (atomic_read(&inode->i_dio_count))
-				return -EAGAIN;
-		} else {
-			inode_dio_wait(inode);
-		}
+		/* unaligned dio always waits, bail */
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		inode_dio_wait(inode);
 	} else if (iolock == XFS_IOLOCK_EXCL) {
 		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 		iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
 
 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
 	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+	/*
+	 * If unaligned, this is the only IO in-flight. If it has not yet
+	 * completed, wait on it before we release the iolock to prevent
+	 * subsequent overlapping IO.
+	 */
+	if (ret == -EIOCBQUEUED && unaligned_io)
+		inode_dio_wait(inode);
 out:
 	xfs_iunlock(ip, iolock);
 
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 30b1ae53..c50542d 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -150,7 +150,10 @@
 
 /* Defaults for debug_level, debug and normal */
 
+#ifndef ACPI_DEBUG_DEFAULT
 #define ACPI_DEBUG_DEFAULT          (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
+#endif
+
 #define ACPI_NORMAL_DEFAULT         (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
 #define ACPI_DEBUG_ALL              (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 9ff328f..624b90b 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -82,6 +82,11 @@
 #define ACPI_NO_ERROR_MESSAGES
 #undef ACPI_DEBUG_OUTPUT
 
+/* Use a specific debugging default separate from ACPICA */
+
+#undef ACPI_DEBUG_DEFAULT
+#define ACPI_DEBUG_DEFAULT          (ACPI_LV_INFO | ACPI_LV_REPAIR)
+
 /* External interface for __KERNEL__, stub is needed */
 
 #define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
diff --git a/include/linux/device.h b/include/linux/device.h
index b425a7e..4e6987e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -49,8 +49,6 @@ struct bus_attribute {
 	ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
 };
 
-#define BUS_ATTR(_name, _mode, _show, _store)	\
-	struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define BUS_ATTR_RW(_name) \
 	struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
 #define BUS_ATTR_RO(_name) \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ea35263..11943b6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void)
 #define pud_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
 				src_addr, pagep)	({ BUG(); 0; })
 #define huge_pte_offset(mm, address, sz)	0
@@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 {
 	BUG();
 }
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+				struct vm_area_struct *vma, unsigned long address,
+				unsigned int flags)
+{
+	BUG();
+	return 0;
+}
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 /*
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index c843f4a..da676cd 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -38,12 +38,6 @@ struct vmcoredd_node {
 
 #ifdef CONFIG_PROC_KCORE
 void __init kclist_add(struct kcore_list *, void *, size_t, int type);
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-	m->vaddr = (unsigned long)vaddr;
-	kclist_add(m, addr, sz, KCORE_REMAP);
-}
 
 extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
 #else
@@ -51,11 +45,6 @@ static inline
 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
 {
 }
-
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-}
 #endif
 
 #endif /* _LINUX_KCORE_H */
diff --git a/include/linux/list.h b/include/linux/list.h
index 79626b5..58aa3ad 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
 }
 
 /**
- * list_is_first -- tests whether @ list is the first entry in list @head
+ * list_is_first -- tests whether @list is the first entry in list @head
  * @list: the entry to test
  * @head: the head of the list
  */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 4eb26d2..280ae96 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE or CMA,
- * this will fail with -EBUSY.
- *
- * For isolating all pages in the range finally, the caller have to
- * free all pages in the range. test_page_isolated() can be used for
- * test it.
- *
- * The following flags are allowed (they can be combined in a bit mask)
- * SKIP_HWPOISON - ignore hwpoison pages
- * REPORT_FAILURE - report details about the failure to isolate the range
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
index a867637..9e46678 100644
--- a/include/linux/platform_data/gpio/gpio-amd-fch.h
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL+ */
+/* SPDX-License-Identifier: GPL-2.0+ */
 
 /*
  * AMD FCH gpio driver platform-data
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index ae56551..e412c09 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
 	set_thread_flag(TIF_RESTORE_SIGMASK);
 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
 static inline void clear_restore_sigmask(void)
 {
 	clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
 static inline bool test_restore_sigmask(void)
 {
 	return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
 	current->restore_sigmask = true;
 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	tsk->restore_sigmask = false;
+}
 static inline void clear_restore_sigmask(void)
 {
 	current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
 {
 	return current->restore_sigmask;
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+	return tsk->restore_sigmask;
+}
 static inline bool test_and_clear_restore_sigmask(void)
 {
 	if (!current->restore_sigmask)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 11b45f7..9449b19 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -32,6 +32,8 @@
 #define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
 /* Use GFP_DMA memory */
 #define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
+/* Use GFP_DMA32 memory */
+#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
 /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
 /* Panic if kmem_cache_create() fails */
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index a240ed2..ff56c443 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
 #define vbg_debug pr_debug
 #endif
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
 		     struct vmmdev_hgcm_service_location *loc,
 		     u32 *client_id, int *vbox_status);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+			u32 client_id, int *vbox_status);
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-		  u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+		  u32 function, u32 timeout_ms,
+		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+		  int *vbox_status);
 
 /**
  * Convert a VirtualBox status code to a standard Linux kernel return value.
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 0e68024..26f3981 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
 #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
 #endif
 
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN                      0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV                            0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER                      0x00000002
+/* The root or an admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT                           0x00000003
+/* Regular joe user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER                           0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK                           0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL                             0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE                           0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK                          0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW                      0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO                             0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES                            0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK                           0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX                           0x00000080
+
+/* Note: trust level is for Windows guests only, Linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN                    0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED                    0x00001000
+/* Requestor trust level: Untrusted (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW                          0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM                       0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS                  0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH                         0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM                       0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED                    0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK                         0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE                        0x00008000
+
 /** HGCM service location types. */
 enum vmmdev_hgcm_service_location_type {
 	VMMDEV_HGCM_LOC_INVALID    = 0,
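
The requestor word added above is a simple combination of masked fields and single-bit flags; a hypothetical helper (not part of the patch) decoding it could look like:

#include <linux/types.h>
#include <linux/vbox_vmmdev_types.h>	/* UAPI header with the defines above */

/* Hypothetical check: root request arriving via the /dev/vboxuser node? */
static bool example_is_root_via_user_device(u32 requestor)
{
	return (requestor & VMMDEV_REQUESTOR_USR_MASK) ==
			VMMDEV_REQUESTOR_USR_ROOT &&
	       (requestor & VMMDEV_REQUESTOR_USER_DEVICE);
}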
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 025f419..6754f3e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -564,6 +564,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return true;
+	/*
+	 * When CPU hotplug is disabled, taking the CPU down is not
+	 * possible because takedown_cpu() and the architecture and
+	 * subsystem specific mechanisms are not available. So the CPU
+	 * which would be completely unplugged again needs to stay around
+	 * in the current state.
+	 */
+	return st->state <= CPUHP_BRINGUP_CPU;
+}
+
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 			      enum cpuhp_state target)
 {
@@ -574,8 +588,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 		st->state++;
 		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 		if (ret) {
-			st->target = prev_state;
-			undo_cpu_up(cpu, st);
+			if (can_rollback_cpu(st)) {
+				st->target = prev_state;
+				undo_cpu_up(cpu, st);
+			}
 			break;
 		}
 	}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 771e93f..6f357f4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -29,6 +29,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
+#include <linux/sched/signal.h>
 
 /*
  * Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
 			ret = ptrace_setsiginfo(child, &siginfo);
 		break;
 
-	case PTRACE_GETSIGMASK:
+	case PTRACE_GETSIGMASK: {
+		sigset_t *mask;
+
 		if (addr != sizeof(sigset_t)) {
 			ret = -EINVAL;
 			break;
 		}
 
-		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+		if (test_tsk_restore_sigmask(child))
+			mask = &child->saved_sigmask;
+		else
+			mask = &child->blocked;
+
+		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
 			ret = -EFAULT;
 		else
 			ret = 0;
 
 		break;
+	}
 
 	case PTRACE_SETSIGMASK: {
 		sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
 		child->blocked = new_set;
 		spin_unlock_irq(&child->sighand->siglock);
 
+		clear_tsk_restore_sigmask(child);
+
 		ret = 0;
 		break;
 	}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 403c9bd..6a578723 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -554,13 +554,15 @@ static void softlockup_start_all(void)
 
 int lockup_detector_online_cpu(unsigned int cpu)
 {
-	watchdog_enable(cpu);
+	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+		watchdog_enable(cpu);
 	return 0;
 }
 
 int lockup_detector_offline_cpu(unsigned int cpu)
 {
-	watchdog_disable(cpu);
+	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+		watchdog_disable(cpu);
 	return 0;
 }
 
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5b382c1..155fe38 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 			 unsigned int cpu)
 {
+	/*
+	 * Once the clear bit is set, the bit may be allocated out.
+	 *
+	 * Orders READ/WRITE on the associated instance (such as the request
+	 * in blk_mq) by this bit to avoid racing with re-allocation; its
+	 * pair is the memory barrier implied in __sbitmap_get_word.
+	 *
+	 * One invariant is that the clear bit has to be zero when the bit
+	 * is in use.
+	 */
+	smp_mb__before_atomic();
 	sbitmap_deferred_clear_bit(&sbq->sb, nr);
 
 	/*
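
The barrier added above is the usual publish-then-release ordering for non-value-returning atomics: stores that must be visible to the next owner of the bit are ordered before the bit operation, pairing with the full barrier on the allocation side (here implied by __sbitmap_get_word()). A generic, hypothetical sketch of the pattern:

#include <linux/atomic.h>
#include <linux/bitops.h>

/* Hypothetical release path: publish state, then make the bit claimable. */
static void example_release(unsigned long *word, unsigned int bit, int *state)
{
	WRITE_ONCE(*state, 0);		/* stores that must be visible first */
	smp_mb__before_atomic();	/* order them before the atomic bit op */
	clear_bit(bit, word);		/* non-value-returning atomic */
}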
diff --git a/mm/debug.c b/mm/debug.c
index c0b31b6..eee9c22 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
 		pr_warn("ksm ");
 	else if (mapping) {
 		pr_warn("%ps ", mapping->a_ops);
-		if (mapping->host->i_dentry.first) {
+		if (mapping->host && mapping->host->i_dentry.first) {
 			struct dentry *dentry;
 			dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
 			pr_warn("name:\"%pd\" ", dentry);
@@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm)
 		mm_pgtables_bytes(mm),
 		mm->map_count,
 		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
-		atomic64_read(&mm->pinned_vm),
+		(u64)atomic64_read(&mm->pinned_vm),
 		mm->data_vm, mm->exec_vm, mm->stack_vm,
 		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
 		mm->start_brk, mm->brk, mm->start_stack,
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3e0c11f..3ce956e 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
 #endif
 
 #ifndef arch_kasan_set_tag
-#define arch_kasan_set_tag(addr, tag)	((void *)(addr))
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+	return addr;
+}
 #endif
 #ifndef arch_kasan_reset_tag
 #define arch_kasan_reset_tag(addr)	((void *)(addr))
diff --git a/mm/memory.c b/mm/memory.c
index 47fe250..ab650c2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
 				goto out_unlock;
 			}
-			entry = *pte;
-			goto out_mkwrite;
-		} else
-			goto out_unlock;
+			entry = pte_mkyoung(*pte);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
+				update_mmu_cache(vma, addr, pte);
+		}
+		goto out_unlock;
 	}
 
 	/* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	else
 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
 
-out_mkwrite:
 	if (mkwrite) {
 		entry = pte_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f767582..0082d69 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 {
 	unsigned long pfn, nr_pages;
 	long offlined_pages;
-	int ret, node;
+	int ret, node, nr_isolate_pageblock;
 	unsigned long flags;
 	unsigned long valid_start, valid_end;
 	struct zone *zone;
@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	ret = start_isolate_page_range(start_pfn, end_pfn,
 				       MIGRATE_MOVABLE,
 				       SKIP_HWPOISON | REPORT_FAILURE);
-	if (ret) {
+	if (ret < 0) {
 		reason = "failure to isolate range";
 		goto failed_removal;
 	}
+	nr_isolate_pageblock = ret;
 
 	arg.start_pfn = start_pfn;
 	arg.nr_pages = nr_pages;
@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	/* Ok, all of our target is isolated.
 	   We cannot do rollback at this point. */
 	offline_isolated_pages(start_pfn, end_pfn);
-	/* reset pagetype flags and makes migrate type to be MOVABLE */
-	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+
+	 * Onlining will reset pagetype flags and make the migrate type
+	 * MOVABLE, so we just need to decrease the zone's isolated
+	 * pageblock counter here.
+	 * pageblocks zone counter here.
+	 */
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->nr_isolate_pageblock -= nr_isolate_pageblock;
+	spin_unlock_irqrestore(&zone->lock, flags);
+
 	/* removal success */
 	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
 	zone->present_pages -= offlined_pages;
@@ -1690,12 +1699,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 failed_removal_isolated:
 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal:
 	pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
 		 (unsigned long long) start_pfn << PAGE_SHIFT,
 		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
 		 reason);
-	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 	/* pushback to free area */
 	mem_hotplug_done();
 	return ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index af171cc..2219e74 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+/*
+ * queue_pages_pmd() has three possible return values:
+ * 1 - pages are placed on the right node or queued successfully.
+ * 0 - THP was split.
+ * -EIO - the pmd is a migration entry, or MPOL_MF_STRICT was specified and
+ *        an existing page was already on a node that does not follow the policy.
+ */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	unsigned long flags;
 
 	if (unlikely(is_pmd_migration_entry(*pmd))) {
-		ret = 1;
+		ret = -EIO;
 		goto unlock;
 	}
 	page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	ret = 1;
 	flags = qp->flags;
 	/* go to thp migration */
-	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+		if (!vma_migratable(walk->vma)) {
+			ret = -EIO;
+			goto unlock;
+		}
+
 		migrate_page_add(page, qp->pagelist, flags);
+	} else
+		ret = -EIO;
 unlock:
 	spin_unlock(ptl);
 out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-		if (ret)
+		if (ret > 0)
 			return 0;
+		else if (ret < 0)
+			return ret;
 	}
 
 	if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			continue;
 		if (!queue_pages_required(page, qp))
 			continue;
-		migrate_page_add(page, qp->pagelist, flags);
+		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+			if (!vma_migratable(vma))
+				break;
+			migrate_page_add(page, qp->pagelist, flags);
+		} else
+			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
-	return 0;
+	return addr != end ? -EIO : 0;
 }
 
 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	unsigned long endvma = vma->vm_end;
 	unsigned long flags = qp->flags;
 
-	if (!vma_migratable(vma))
+	/*
+	 * Need to check MPOL_MF_STRICT so that -EIO can be returned
+	 * regardless of vma_migratable()
+	 */
+	if (!vma_migratable(vma) &&
+	    !(flags & MPOL_MF_STRICT))
 		return 1;
 
 	if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	}
 
 	/* queue pages from current vma */
-	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+	if (flags & MPOL_MF_VALID)
 		return 0;
 	return 1;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index ac6f493..663a544 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				pte = swp_entry_to_pte(entry);
 			} else if (is_device_public_page(new)) {
 				pte = pte_mkdevmap(pte);
-				flush_dcache_page(new);
 			}
-		} else
-			flush_dcache_page(new);
+		}
 
 #ifdef CONFIG_HUGETLB_PAGE
 		if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		 */
 		if (!PageMappingFlags(page))
 			page->mapping = NULL;
+
+		if (unlikely(is_zone_device_page(newpage))) {
+			if (is_device_public_page(newpage))
+				flush_dcache_page(newpage);
+		} else
+			flush_dcache_page(newpage);
+
 	}
 out:
 	return rc;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03fcf73..d96ca5b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	ret = start_isolate_page_range(pfn_max_align_down(start),
 				       pfn_max_align_up(end), migratetype, 0);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	/*
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index ce323e5..0192807 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
+	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
+				 isol_flags))
 		ret = 0;
 
 	/*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 	return NULL;
 }
 
-/*
- * start_isolate_page_range() -- make page-allocation-type of range of pages
- * to be MIGRATE_ISOLATE.
- * @start_pfn: The lower PFN of the range to be isolated.
- * @end_pfn: The upper PFN of the range to be isolated.
- * @migratetype: migrate type to set in error recovery.
+/**
+ * start_isolate_page_range() - make page-allocation-type of range of pages to
+ * be MIGRATE_ISOLATE.
+ * @start_pfn:		The lower PFN of the range to be isolated.
+ * @end_pfn:		The upper PFN of the range to be isolated.
+ *			start_pfn/end_pfn must be aligned to pageblock_order.
+ * @migratetype:	Migrate type to set in error recovery.
+ * @flags:		The following flags are allowed (they can be combined in
+ *			a bit mask)
+ *			SKIP_HWPOISON - ignore hwpoison pages
+ *			REPORT_FAILURE - report details about the failure to
+ *			isolate the range
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
- * future will not be allocated again.
- *
- * start_pfn/end_pfn must be aligned to pageblock_order.
- * Return 0 on success and -EBUSY if any part of range cannot be isolated.
+ * future will not be allocated again. If the specified range includes migrate
+ * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
+ * isolate all pages in the range, the caller has to free all pages in the
+ * range; test_pages_isolated() can be used to test this.
  *
  * There is no high level synchronization mechanism that prevents two threads
- * from trying to isolate overlapping ranges.  If this happens, one thread
+ * from trying to isolate overlapping ranges. If this happens, one thread
  * will notice pageblocks in the overlapping range already set to isolate.
  * This happens in set_migratetype_isolate, and set_migratetype_isolate
- * returns an error.  We then clean up by restoring the migration type on
- * pageblocks we may have modified and return -EBUSY to caller.  This
+ * returns an error. We then clean up by restoring the migration type on
+ * pageblocks we may have modified and return -EBUSY to caller. This
  * prevents two threads from simultaneously working on overlapping ranges.
+ *
+ * Return: the number of isolated pageblocks on success, or -EBUSY if any part
+ * of the range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			     unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long pfn;
 	unsigned long undo_pfn;
 	struct page *page;
+	int nr_isolate_pageblock = 0;
 
 	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
 	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn < end_pfn;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
-		if (page &&
-		    set_migratetype_isolate(page, migratetype, flags)) {
-			undo_pfn = pfn;
-			goto undo;
+		if (page) {
+			if (set_migratetype_isolate(page, migratetype, flags)) {
+				undo_pfn = pfn;
+				goto undo;
+			}
+			nr_isolate_pageblock++;
 		}
 	}
-	return 0;
+	return nr_isolate_pageblock;
 undo:
 	for (pfn = start_pfn;
 	     pfn < undo_pfn;
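
As context for the page_isolation.c hunks above: start_isolate_page_range() now returns the number of isolated pageblocks instead of 0 on success, so callers should treat only negative values as errors and keep the count for the undo path, as the __offline_pages() hunk earlier in this diff does. A minimal caller sketch (not part of the patch; the helper name and flag choice are illustrative):

	#include <linux/mmzone.h>
	#include <linux/page-isolation.h>

	/* Sketch: isolate a pageblock-aligned range, remember the count, undo on exit. */
	static int example_isolate_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		int nr_isolate_pageblock;
		int ret;

		ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
					       SKIP_HWPOISON | REPORT_FAILURE);
		if (ret < 0)
			return ret;		/* e.g. -EBUSY: part of the range cannot be isolated */
		nr_isolate_pageblock = ret;	/* positive count of isolated pageblocks, not an error */

		/* ... work on the isolated range ... */

		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return nr_isolate_pageblock;
	}
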
diff --git a/mm/slab.c b/mm/slab.c
index 28652e4..329bfe6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2115,6 +2115,8 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 	cachep->allocflags = __GFP_COMP;
 	if (flags & SLAB_CACHE_DMA)
 		cachep->allocflags |= GFP_DMA;
+	if (flags & SLAB_CACHE_DMA32)
+		cachep->allocflags |= GFP_DMA32;
 	if (flags & SLAB_RECLAIM_ACCOUNT)
 		cachep->allocflags |= __GFP_RECLAIMABLE;
 	cachep->size = size;
diff --git a/mm/slab.h b/mm/slab.h
index e5e6658..43ac818 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
+			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
 			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 
 #if defined(CONFIG_DEBUG_SLAB)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 03eeb8b..58251ba 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 		SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-			 SLAB_ACCOUNT)
+			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 1b08fbc..d30ede8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3589,6 +3589,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	if (s->flags & SLAB_CACHE_DMA)
 		s->allocflags |= GFP_DMA;
 
+	if (s->flags & SLAB_CACHE_DMA32)
+		s->allocflags |= GFP_DMA32;
+
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
 		s->allocflags |= __GFP_RECLAIMABLE;
 
@@ -5679,6 +5682,8 @@ static char *create_unique_id(struct kmem_cache *s)
 	 */
 	if (s->flags & SLAB_CACHE_DMA)
 		*p++ = 'd';
+	if (s->flags & SLAB_CACHE_DMA32)
+		*p++ = 'D';
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
 		*p++ = 'a';
 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
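
Taken together, the slab/slub hunks in this series make SLAB_CACHE_DMA32 a recognized cache-creation flag that is translated into GFP_DMA32 page allocations (and reported as 'D' in SLUB's unique id). A brief usage sketch, not from the patch, with a made-up cache name, object size and alignment:

	#include <linux/errno.h>
	#include <linux/slab.h>

	/* Sketch: a cache whose objects must be allocated below 4 GiB. */
	static struct kmem_cache *example_dma32_cache;

	static int example_dma32_cache_init(void)
	{
		example_dma32_cache = kmem_cache_create("example_dma32_cache",
							1024, 1024,
							SLAB_CACHE_DMA32, NULL);
		return example_dma32_cache ? 0 : -ENOMEM;
	}
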
diff --git a/mm/sparse.c b/mm/sparse.c
index 69904aa..56e057c 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-/* Mark all memory sections within the pfn range as online */
+/* Mark all memory sections within the pfn range as offline */
 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 7e71b0d..3083988 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
 					size_t bytes)
 {
 	struct ceph_bio_iter *it = &cursor->bio_iter;
+	struct page *page = bio_iter_page(it->bio, it->iter);
 
 	BUG_ON(bytes > cursor->resid);
 	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
 		return false;   /* no more data */
 	}
 
-	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
+	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
+		       page == bio_iter_page(it->bio, it->iter)))
 		return false;	/* more bytes to process in this segment */
 
 	if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
 					size_t bytes)
 {
 	struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
+	struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
 
 	BUG_ON(bytes > cursor->resid);
 	BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
 		return false;   /* no more data */
 	}
 
-	if (!bytes || cursor->bvec_iter.bi_bvec_done)
+	if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
+		       page == bvec_iter_page(bvecs, cursor->bvec_iter)))
 		return false;	/* more bytes to process in this segment */
 
 	BUG_ON(cursor->last_piece);
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 2554a15..76ca30c 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -199,11 +199,8 @@
 	"$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
 endif # BUILD_C_RECORDMCOUNT
-cmd_record_mcount =						\
-	if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =	\
-	     "$(CC_FLAGS_FTRACE)" ]; then			\
-		$(sub_cmd_record_mcount)			\
-	fi
+cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),	\
+	$(sub_cmd_record_mcount))
 endif # CC_USING_RECORD_MCOUNT
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 5b75627..a09333f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5977,7 +5977,7 @@
 				while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
 					$specifier = $1;
 					$extension = $2;
-					if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) {
+					if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
 						$bad_specifier = $specifier;
 						last;
 					}
diff --git a/scripts/coccinelle/free/put_device.cocci b/scripts/coccinelle/free/put_device.cocci
index 7395697..c9f071b 100644
--- a/scripts/coccinelle/free/put_device.cocci
+++ b/scripts/coccinelle/free/put_device.cocci
@@ -32,6 +32,7 @@
 (    id
 |    (T2)dev_get_drvdata(&id->dev)
 |    (T3)platform_get_drvdata(id)
+|    &id->dev
 );
 | return@p2 ...;
 )
diff --git a/scripts/coccinelle/misc/badty.cocci b/scripts/coccinelle/misc/badty.cocci
index 481cf30..0847036 100644
--- a/scripts/coccinelle/misc/badty.cocci
+++ b/scripts/coccinelle/misc/badty.cocci
@@ -1,4 +1,4 @@
-/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element
+/// Correct the size argument to alloc functions
 ///
 //# This makes an effort to find cases where the argument to sizeof is wrong
 //# in memory allocation functions by checking the type of the allocated memory
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index 6119456..1dcfb28 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -113,7 +113,8 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
 			case KEY_DOWN:
 				break;
 			case KEY_BACKSPACE:
-			case 127:
+			case 8:   /* ^H */
+			case 127: /* ^? */
 				if (pos) {
 					wattrset(dialog, dlg.inputbox.atr);
 					if (input_x == 0) {
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a4670f4..ac92c0d 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
 		state->match_direction = FIND_NEXT_MATCH_UP;
 		*ans = get_mext_match(state->pattern,
 				state->match_direction);
-	} else if (key == KEY_BACKSPACE || key == 127) {
+	} else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
 		state->pattern[strlen(state->pattern)-1] = '\0';
 		adj_match_dir(&state->match_direction);
 	} else
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 7be620a..77f525a 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
 		case KEY_F(F_EXIT):
 		case KEY_F(F_BACK):
 			break;
-		case 127:
+		case 8:   /* ^H */
+		case 127: /* ^? */
 		case KEY_BACKSPACE:
 			if (cursor_position > 0) {
 				memmove(&result[cursor_position-1],
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0b0d108..f277e11 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 			       info->sechdrs[sym->st_shndx].sh_offset -
 			       (info->hdr->e_type != ET_REL ?
 				info->sechdrs[sym->st_shndx].sh_addr : 0);
-			crc = *crcp;
+			crc = TO_NATIVE(*crcp);
 		}
 		sym_update_crc(symname + strlen("__crc_"), mod, crc,
 				export);
diff --git a/security/Kconfig b/security/Kconfig
index 1d6463f..353cfef 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -239,8 +239,46 @@
 
 source "security/integrity/Kconfig"
 
+choice
+	prompt "First legacy 'major LSM' to be initialized"
+	default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+	default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+	default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+	default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+	default DEFAULT_SECURITY_DAC
+
+	help
+	  This choice is there only for converting CONFIG_DEFAULT_SECURITY
+	  in old kernel configs to CONFIG_LSM in new kernel configs. Don't
+	  change this choice unless you are creating a fresh kernel config,
+	  for this choice will be ignored after CONFIG_LSM has been set.
+
+	  Selects the legacy "major security module" that will be
+	  initialized first. Overridden by non-default CONFIG_LSM.
+
+	config DEFAULT_SECURITY_SELINUX
+		bool "SELinux" if SECURITY_SELINUX=y
+
+	config DEFAULT_SECURITY_SMACK
+		bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+	config DEFAULT_SECURITY_TOMOYO
+		bool "TOMOYO" if SECURITY_TOMOYO=y
+
+	config DEFAULT_SECURITY_APPARMOR
+		bool "AppArmor" if SECURITY_APPARMOR=y
+
+	config DEFAULT_SECURITY_DAC
+		bool "Unix Discretionary Access Controls"
+
+endchoice
+
 config LSM
 	string "Ordered list of enabled LSMs"
+	default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
+	default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
+	default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
+	default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
 	default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 	help
 	  A comma-separated list of LSMs, in initialization order.
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 57cc607..efac685 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
  * yama_task_free - check for task_pid to remove from exception list
  * @task: task being removed
  */
-void yama_task_free(struct task_struct *task)
+static void yama_task_free(struct task_struct *task)
 {
 	yama_ptracer_del(task, task);
 }
@@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task)
  * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
  * does not handle the given option.
  */
-int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
 			   unsigned long arg4, unsigned long arg5)
 {
 	int rc = -ENOSYS;
@@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
  *
  * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-int yama_ptrace_traceme(struct task_struct *parent)
+static int yama_ptrace_traceme(struct task_struct *parent)
 {
 	int rc = 0;
 
@@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
 static int zero;
 static int max_scope = YAMA_SCOPE_NO_ATTACH;
 
-struct ctl_path yama_sysctl_path[] = {
+static struct ctl_path yama_sysctl_path[] = {
 	{ .procname = "kernel", },
 	{ .procname = "yama", },
 	{ }
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index d5b0d7b..f6ae680 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
 	oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
 			 params_channels(params) / 8;
 
+	err = snd_pcm_oss_period_size(substream, params, sparams);
+	if (err < 0)
+		goto failure;
+
+	n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+	if (err < 0)
+		goto failure;
+
+	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+				     runtime->oss.periods, NULL);
+	if (err < 0)
+		goto failure;
+
+	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+	if (err < 0) {
+		pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+		goto failure;
+	}
+
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
 	snd_pcm_oss_plugin_clear(substream);
 	if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
 	}
 #endif
 
-	err = snd_pcm_oss_period_size(substream, params, sparams);
-	if (err < 0)
-		goto failure;
-
-	n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
-	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
-	if (err < 0)
-		goto failure;
-
-	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
-				     runtime->oss.periods, NULL);
-	if (err < 0)
-		goto failure;
-
-	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
-	if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
-		pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
-		goto failure;
-	}
-
 	if (runtime->oss.trigger) {
 		sw_params->start_threshold = 1;
 	} else {
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f731f90..1d84529 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+	switch (runtime->status->state) {
+	case SNDRV_PCM_STATE_SUSPENDED:
 		return -EBUSY;
+	/* unresumable PCM state; return -EBUSY to skip the suspend */
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_DISCONNECTED:
+		return -EBUSY;
+	}
 	runtime->trigger_master = substream;
 	return 0;
 }
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index ee601d7..c0690d1 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <sound/rawmidi.h>
 #include <sound/info.h>
 #include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
 		return -ENXIO;
 	if (info->stream < 0 || info->stream > 1)
 		return -EINVAL;
+	info->stream = array_index_nospec(info->stream, 2);
 	pstr = &rmidi->streams[info->stream];
 	if (pstr->substream_count == 0)
 		return -ENOENT;
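
The rawmidi change above is the standard Spectre-v1 hardening pattern: validate the untrusted index first, then clamp it with array_index_nospec() before using it to index the array. Shown in isolation as a sketch (the function and parameter names are illustrative):

	#include <linux/errno.h>
	#include <linux/nospec.h>

	/*
	 * Sketch: bound a user-controlled index, then clamp it so that a
	 * mispredicted bounds check cannot speculatively read out of range.
	 */
	static int example_lookup(const int *table, unsigned int nr_entries,
				  unsigned int idx)
	{
		if (idx >= nr_entries)
			return -EINVAL;
		idx = array_index_nospec(idx, nr_entries);
		return table[idx];
	}
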
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 278ebb99..c939459 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -617,13 +617,14 @@ int
 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
 {
 	struct seq_oss_synth *rec;
+	struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
 
-	if (dev < 0 || dev >= dp->max_synthdev)
+	if (!info)
 		return -ENXIO;
 
-	if (dp->synths[dev].is_midi) {
+	if (info->is_midi) {
 		struct midi_info minf;
-		snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+		snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
 		inf->synth_type = SYNTH_TYPE_MIDI;
 		inf->synth_subtype = 0;
 		inf->nr_voices = 16;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 29882bd..e1ebc6d 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
 	unsigned int scp_resp_header;
 	unsigned int scp_resp_data[4];
 	unsigned int scp_resp_count;
-	bool alt_firmware_present;
 	bool startup_check_entered;
 	bool dsp_reload;
 
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
 	bool dsp_loaded = false;
 	struct ca0132_spec *spec = codec->spec;
 	const struct dsp_image_seg *dsp_os_image;
-	const struct firmware *fw_entry;
+	const struct firmware *fw_entry = NULL;
 	/*
 	 * Alternate firmwares for different variants. The Recon3Di apparently
 	 * can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
 	case QUIRK_R3D:
 	case QUIRK_AE5:
 		if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
-					codec->card->dev) != 0) {
+					codec->card->dev) != 0)
 			codec_dbg(codec, "Desktop firmware not found.");
-			spec->alt_firmware_present = false;
-		} else {
+		else
 			codec_dbg(codec, "Desktop firmware selected.");
-			spec->alt_firmware_present = true;
-		}
 		break;
 	case QUIRK_R3DI:
 		if (request_firmware(&fw_entry, R3DI_EFX_FILE,
-					codec->card->dev) != 0) {
+					codec->card->dev) != 0)
 			codec_dbg(codec, "Recon3Di alt firmware not detected.");
-			spec->alt_firmware_present = false;
-		} else {
+		else
 			codec_dbg(codec, "Recon3Di firmware selected.");
-			spec->alt_firmware_present = true;
-		}
 		break;
 	default:
-		spec->alt_firmware_present = false;
 		break;
 	}
 	/*
 	 * Use default ctefx.bin if no alt firmware is detected, or if none
 	 * exists for your particular codec.
 	 */
-	if (!spec->alt_firmware_present) {
+	if (!fw_entry) {
 		codec_dbg(codec, "Default firmware selected.");
 		if (request_firmware(&fw_entry, EFX_FILE,
 					codec->card->dev) != 0)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 191830d..a3fb3d4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5688,6 +5688,8 @@ enum {
 	ALC225_FIXUP_WYSE_AUTO_MUTE,
 	ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
 	ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+	ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+	ALC299_FIXUP_PREDATOR_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6696,6 +6698,22 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
 	},
+	[ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+	},
+	[ALC299_FIXUP_PREDATOR_SPK] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x21, 0x90170150 }, /* use as internal speaker */
+			{ }
+		}
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6712,9 +6730,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
 	SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+	SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
 	SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7111,6 +7133,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 	{.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
 	{.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
 	{.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
+	{.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
 	{}
 };
 #define ALC225_STANDARD_PINS \
@@ -7331,6 +7354,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x14, 0x90170110},
 		{0x1b, 0x90a70130},
 		{0x21, 0x03211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+		{0x12, 0x90a60130},
+		{0x14, 0x90170110},
+		{0x21, 0x03211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+		{0x12, 0x90a60130},
+		{0x14, 0x90170110},
+		{0x21, 0x04211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+		{0x1a, 0x90a70130},
+		{0x1b, 0x90170110},
+		{0x21, 0x03211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 		{0x12, 0xb7a60130},
 		{0x13, 0xb8a61140},
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index c9d038f..53f8be0 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -25,14 +25,17 @@
 OBJTOOL    := $(OUTPUT)objtool
 OBJTOOL_IN := $(OBJTOOL)-in.o
 
+LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
+LIBELF_LIBS  := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 all: $(OBJTOOL)
 
 INCLUDES := -I$(srctree)/tools/include \
 	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
 	    -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
-LDFLAGS  += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
 
 # Allow old libelf to be used:
 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)