Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2015-07-30

Here's a set of Bluetooth & 802.15.4 patches intended for the 4.3 kernel.

 - Cleanups & fixes to mac802154
 - Refactoring of Intel Bluetooth HCI driver
 - Various coding style fixes to Bluetooth HCI drivers
 - Support for Intel Lightning Peak Bluetooth devices
 - Generic class code in interface descriptor in btusb to match more HW
 - Refactoring of Bluetooth HS code together with a new config option
 - Support for BCM4330B1 Broadcom UART controller

Let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index d0e6fa3..b30ab6b 100644
--- a/Documentation/devicetree/bindings/net/keystone-netcp.txt
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -130,7 +130,11 @@
 
 Optional properties:
 - efuse-mac:	If this is 1, then the MAC address for the interface is
-		obtained from the device efuse mac address register
+		obtained from the device efuse mac address register.
+		If this is 2, the two DWORDs occupied by the MAC address
+		are swapped.  When this property is set to 2, the netcp
+		driver will swap the two DWORDs back to the proper order
+		when it obtains the MAC address from efuse.
 - local-mac-address:	the driver is designed to use the of_get_mac_address api
 			only if efuse-mac is 0. When efuse-mac is 0, the MAC
 			address is obtained from local-mac-address. If this
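(Illustrative note, not part of the patch: the efuse-mac = 2 case described above amounts to exchanging two 32-bit words before the six address bytes are extracted. The helper below is a hypothetical sketch; the function name and byte layout are assumptions, not the actual netcp code.)

	/* Hypothetical sketch of the efuse-mac = 2 case: the two DWORDs
	 * holding the MAC address come back in reverse order, so exchange
	 * them before pulling out the address bytes.
	 */
	static void netcp_swizzle_efuse_mac(u8 mac[6], u32 dword0, u32 dword1)
	{
		u32 tmp = dword0;	/* efuse-mac = 2: DWORDs are swapped */

		dword0 = dword1;
		dword1 = tmp;

		mac[0] = (dword0 >> 8) & 0xff;
		mac[1] = dword0 & 0xff;
		mac[2] = (dword1 >> 24) & 0xff;
		mac[3] = (dword1 >> 16) & 0xff;
		mac[4] = (dword1 >> 8) & 0xff;
		mac[5] = dword1 & 0xff;
	}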
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
new file mode 100644
index 0000000..51f8d2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -0,0 +1,75 @@
+* Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
+
+
+Required properties:
+- compatible: Should be "snps,dwc-qos-ethernet-4.10"
+- reg: Address and length of the register set for the device
+- clocks: Phandles to the reference clock and the bus clock
+- clock-names: Should be "phy_ref_clk" for the reference clock and "apb_pclk"
+  for the bus clock.
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the core's combined interrupt signal
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+- mac-address: See ethernet.txt in the same directory
+- local-mac-address: See ethernet.txt in the same directory
+- snps,en-lpi: If present it enables use of the AXI low-power interface
+- snps,write-requests: Number of write requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,read-requests: Number of read requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,burst-map: Bitmap of allowed AXI burst lengths, with the LSB
+  representing 4, then 8 etc.
+- snps,txpbl: DMA Programmable burst length for the TX DMA
+- snps,rxpbl: DMA Programmable burst length for the RX DMA
+- snps,en-tx-lpi-clockgating: Enable gating of the MAC TX clock during
+  TX low-power mode.
+- phy-handle: See ethernet.txt file in the same directory
+- mdio device tree subnode: When the GMAC has a phy connected to its local
+    mdio, there must be a device tree subnode with the following
+    required properties:
+    - compatible: Must be "snps,dwc-qos-ethernet-mdio".
+    - #address-cells: Must be <1>.
+    - #size-cells: Must be <0>.
+
+    For each phy on the mdio bus, there must be a node with the following
+    fields:
+
+    - reg: phy id used to communicate to phy.
+    - device_type: Must be "ethernet-phy".
+    - fixed-link device tree subnode: see fixed-link.txt in the same directory
+
+Examples:
+ethernet2@40010000 {
+	clock-names = "phy_ref_clk", "apb_pclk";
+	clocks = <&clkc 17>, <&clkc 15>;
+	compatible = "snps,dwc-qos-ethernet-4.10";
+	interrupt-parent = <&intc>;
+	interrupts = <0x0 0x1e 0x4>;
+	reg = <0x40010000 0x4000>;
+	phy-handle = <&phy2>;
+	phy-mode = "gmii";
+
+	snps,en-tx-lpi-clockgating;
+	snps,en-lpi;
+	snps,write-requests = <2>;
+	snps,read-requests = <16>;
+	snps,burst-map = <0x7>;
+	snps,txpbl = <8>;
+	snps,rxpbl = <2>;
+
+	dma-coherent;
+
+	mdio {
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		phy2: phy@1 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			device_type = "ethernet-phy";
+			reg = <0x1>;
+		};
+	};
+};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 1a5ab21b..00d26d9 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1340,6 +1340,14 @@
 	   disabled if accept_ra_from_local is disabled
                on a specific interface.
 
+accept_ra_min_hop_limit - INTEGER
+	Minimum hop limit value accepted in a Router Advertisement.
+
+	A hop limit value in a Router Advertisement lower than this
+	value will be ignored.
+
+	Default: 1
+
 accept_ra_pinfo - BOOLEAN
 	Learn Prefix Information in Router Advertisement.
 
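(Aside, illustrative only: the behaviour described above reduces to a single comparison when a Router Advertisement is processed. The helper below is a sketch; the names are assumptions, not the actual ndisc code.)

	/* An RA hop limit below the configured minimum is simply ignored */
	static bool ra_hop_limit_acceptable(u8 ra_hop_limit, int min_hop_limit)
	{
		return ra_hop_limit >= min_hop_limit;
	}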
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 5fddefa..2903b1c 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -135,8 +135,6 @@
 	int maxmtu;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
 	void *bsp_priv;
@@ -177,12 +175,10 @@
  o bus_setup: perform HW setup of the bus. For example, on some ST platforms
 	     this field is used to configure the AMBA  bridge to generate more
 	     efficient STBus traffic.
- o setup/init/exit: callbacks used for calling a custom initialization;
+ o init/exit: callbacks used for calling a custom initialization;
 	     this is sometime necessary on some platforms (e.g. ST boxes)
 	     where the HW needs to have set some PIO lines or system cfg
-	     registers. setup should return a pointer to private data,
-	     which will be stored in bsp_priv, and then passed to init and
-	     exit callbacks. init/exit callbacks should not use or modify
+	     registers.  init/exit callbacks should not use or modify
 	     platform data.
  o bsp_priv: another private pointer.
 
@@ -274,8 +270,6 @@
 Please see the following document:
 	Documentation/devicetree/bindings/net/stmmac.txt
 
-and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
-
 4.11) This is a summary of the content of some relevant files:
  o stmmac_main.c: to implement the main network device driver;
  o stmmac_mdio.c: to provide mdio functions;
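(Illustrative sketch, not part of the patch: with setup/free gone, a board file assigns bsp_priv directly and only provides init/exit. The names below are made up; plat_stmmacenet_data is the structure shown above, and init/exit must not touch the platform data itself.)

	static struct my_glue {
		int configured;
	} my_glue;

	static int my_board_init(struct platform_device *pdev, void *priv)
	{
		struct my_glue *glue = priv;	/* this is bsp_priv */

		/* e.g. set a PIO line or system-config register here */
		glue->configured = 1;
		return 0;
	}

	static void my_board_exit(struct platform_device *pdev, void *priv)
	{
		struct my_glue *glue = priv;

		glue->configured = 0;		/* undo what init did */
	}

	static struct plat_stmmacenet_data my_plat = {
		.init		= my_board_init,
		.exit		= my_board_exit,
		.bsp_priv	= &my_glue,
	};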
diff --git a/MAINTAINERS b/MAINTAINERS
index a226416..6712fd5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6645,6 +6645,15 @@
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET SWITCH DRIVERS
+M:	Jiri Pirko <jiri@mellanox.com>
+M:	Ido Schimmel <idosch@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+W:	http://www.mellanox.com
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
+F:	drivers/net/ethernet/mellanox/mlxsw/
+
 MEMORY MANAGEMENT
 L:	linux-mm@kvack.org
 W:	http://www.linux-mm.org
@@ -8903,6 +8912,13 @@
 F:	include/linux/platform_data/dma-dw.h
 F:	drivers/dma/dw/
 
+SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a DRIVER
+M:	Lars Persson <lars.persson@axis.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+F:	drivers/net/ethernet/synopsys/dwc_eth_qos.c
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c011e22..876060b 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -857,7 +857,9 @@
 			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 			break;
 		case BPF_ANC | SKF_AD_IFINDEX:
+		case BPF_ANC | SKF_AD_HATYPE:
 			/* A = skb->dev->ifindex */
+			/* A = skb->dev->type */
 			ctx->seen |= SEEN_SKB;
 			off = offsetof(struct sk_buff, dev);
 			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
@@ -867,8 +869,24 @@
 
 			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 						  ifindex) != 4);
-			off = offsetof(struct net_device, ifindex);
-			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+						  type) != 2);
+
+			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+				off = offsetof(struct net_device, ifindex);
+				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			} else {
+				/*
+				 * offset of field "type" in "struct
+				 * net_device" is above what can be
+				 * used in the ldrh rd, [rn, #imm]
+				 * instruction, so load the offset in
+				 * a register and use ldrh rd, [rn, rm]
+				 */
+				off = offsetof(struct net_device, type);
+				emit_mov_i(ARM_R3, off, ctx);
+				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
+			}
 			break;
 		case BPF_ANC | SKF_AD_MARK:
 			ctx->seen |= SEEN_SKB;
@@ -895,6 +913,17 @@
 				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
 			}
 			break;
+		case BPF_ANC | SKF_AD_PKTTYPE:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+						  __pkt_type_offset[0]) != 1);
+			off = PKT_TYPE_OFFSET();
+			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
+			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
+#endif
+			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -904,6 +933,14 @@
 			off = offsetof(struct sk_buff, queue_mapping);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			break;
+		case BPF_ANC | SKF_AD_PAY_OFFSET:
+			ctx->seen |= SEEN_SKB | SEEN_CALL;
+
+			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
+			emit_blx_r(ARM_R3, ctx);
+			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+			break;
 		case BPF_LDX | BPF_W | BPF_ABS:
 			/*
 			 * load a 32bit word from struct seccomp_data.
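(Illustrative note: besides SKF_AD_HATYPE — which needs the new register-offset LDRH because the immediate form cannot reach the offset of net_device's "type" field — the hunk adds SKF_AD_PKTTYPE and SKF_AD_PAY_OFFSET. A hedged C equivalent of the pkt_type extraction, assuming the PKT_TYPE_OFFSET()/PKT_TYPE_MAX helpers visible in the emitted sequence:)

	/* Load the byte holding skb->pkt_type, mask the three bits, and
	 * shift on big-endian bitfield layouts where they sit at the top
	 * of the byte.
	 */
	static u8 bpf_anc_pkt_type(const struct sk_buff *skb)
	{
		u8 b = *((const u8 *)skb + PKT_TYPE_OFFSET());

	#ifdef __BIG_ENDIAN_BITFIELD
		return (b & PKT_TYPE_MAX) >> 5;
	#else
		return b & PKT_TYPE_MAX;
	#endif
	}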
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index b2d7d92..4b17d5ab 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -74,6 +74,7 @@
 #define ARM_INST_LDRB_I		0x05d00000
 #define ARM_INST_LDRB_R		0x07d00000
 #define ARM_INST_LDRH_I		0x01d000b0
+#define ARM_INST_LDRH_R		0x019000b0
 #define ARM_INST_LDR_I		0x05900000
 
 #define ARM_INST_LDM		0x08900000
@@ -160,6 +161,8 @@
 				 | (rm))
 #define ARM_LDRH_I(rt, rn, off)	(ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
 				 | (((off) & 0xf0) << 4) | ((off) & 0xf))
+#define ARM_LDRH_R(rt, rn, rm)	(ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+				 | (rm))
 
 #define ARM_LDM(rn, regs)	(ARM_INST_LDM | (rn) << 16 | (regs))
 
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f6498ee..f010c93 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -36,6 +36,8 @@
  *	      |   BPF stack   |     |
  *	      |		      |     |
  *	      +---------------+     |
+ *	      | 8 byte skbp   |     |
+ * R15+170 -> +---------------+     |
  *	      | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *	      | 4 byte align  |     |
@@ -51,11 +53,12 @@
  * We get 160 bytes stack space from calling function, but only use
  * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
  */
-#define STK_SPACE	(MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_SPACE	(MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
 #define STK_160_UNUSED	(160 - 12 * 8)
 #define STK_OFF		(STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
+#define STK_OFF_SKBP	170	/* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
 #define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
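(Worked example of the new stack arithmetic, for reference — MAX_BPF_STACK is 512 on this kernel:)

	/*
	 *   STK_SPACE      = 512 + 8 + 8 + 4 + 4 + 160 = 696
	 *   STK_160_UNUSED = 160 - 12 * 8              =  64
	 *   STK_OFF        = 696 - 64                  = 632
	 *
	 * i.e. the frame grows by exactly the 8 bytes now reserved for the
	 * saved skb pointer at STK_OFF_SKBP.
	 */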
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 79c731e..9f4bbc0 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@
 	int labels[1];		/* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX	4096	/* Max size for program */
+#define BPF_SIZE_MAX	0x7ffff	/* Max size for program (20 bit signed displ) */
 
 #define SEEN_SKB	1	/* skb access */
 #define SEEN_MEM	2	/* use mem[] for temporary storage */
@@ -53,6 +53,7 @@
 #define SEEN_LITERAL	8	/* code uses literals */
 #define SEEN_FUNC	16	/* calls C functions */
 #define SEEN_TAIL_CALL	32	/* code uses tail calls */
+#define SEEN_SKB_CHANGE	64	/* code changes skb data */
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -203,19 +204,11 @@
 	_EMIT6(op1 | __disp, op2);				\
 })
 
-#define EMIT6_DISP(op1, op2, b1, b2, b3, disp)			\
-({								\
-	_EMIT6_DISP(op1 | reg(b1, b2) << 16 |			\
-		    reg_high(b3) << 8, op2, disp);		\
-	REG_SET_SEEN(b1);					\
-	REG_SET_SEEN(b2);					\
-	REG_SET_SEEN(b3);					\
-})
-
 #define _EMIT6_DISP_LH(op1, op2, disp)				\
 ({								\
-	unsigned int __disp_h = ((u32)disp) & 0xff000;		\
-	unsigned int __disp_l = ((u32)disp) & 0x00fff;		\
+	u32 _disp = (u32) disp;					\
+	unsigned int __disp_h = _disp & 0xff000;		\
+	unsigned int __disp_l = _disp & 0x00fff;		\
 	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
 })
 
@@ -390,12 +383,32 @@
 }
 
 /*
+ * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
+ * we store the SKB header length on the stack and the SKB data
+ * pointer in REG_SKB_DATA.
+ */
+static void emit_load_skb_data_hlen(struct bpf_jit *jit)
+{
+	/* Header length: llgf %w1,<len>(%b1) */
+	EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
+		      offsetof(struct sk_buff, len));
+	/* s %w1,<data_len>(%b1) */
+	EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
+		   offsetof(struct sk_buff, data_len));
+	/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
+	/* lg %skb_data,data_off(%b1) */
+	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+		      BPF_REG_1, offsetof(struct sk_buff, data));
+}
+
+/*
  * Emit function prologue
  *
  * Save registers and create stack frame if necessary.
  * See stack frame layout desription in "bpf_jit.h"!
  */
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
 {
 	if (jit->seen & SEEN_TAIL_CALL) {
 		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -429,32 +442,21 @@
 			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 				      REG_15, 152);
 	}
-	/*
-	 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
-	 * we store the SKB header length on the stack and the SKB data
-	 * pointer in REG_SKB_DATA.
-	 */
-	if (jit->seen & SEEN_SKB) {
-		/* Header length: llgf %w1,<len>(%b1) */
-		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
-			      offsetof(struct sk_buff, len));
-		/* s %w1,<data_len>(%b1) */
-		EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
-			   offsetof(struct sk_buff, data_len));
-		/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+	if (jit->seen & SEEN_SKB)
+		emit_load_skb_data_hlen(jit);
+	if (jit->seen & SEEN_SKB_CHANGE)
+		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
-			      STK_OFF_HLEN);
-		/* lg %skb_data,data_off(%b1) */
-		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-			      BPF_REG_1, offsetof(struct sk_buff, data));
+			      STK_OFF_SKBP);
+	/* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+	if (is_classic) {
+		if (REG_SEEN(BPF_REG_A))
+			/* lghi %ba,0 */
+			EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+		if (REG_SEEN(BPF_REG_X))
+			/* lghi %bx,0 */
+			EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 	}
-	/* BPF compatibility: clear A (%b7) and X (%b8) registers */
-	if (REG_SEEN(BPF_REG_7))
-		/* lghi %b7,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
-	if (REG_SEEN(BPF_REG_8))
-		/* lghi %b8,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
 }
 
 /*
@@ -973,19 +975,22 @@
 		 */
 		const u64 func = (u64)__bpf_call_base + imm;
 
-		if (bpf_helper_changes_skb_data((void *)func))
-			/* TODO reload skb->data, hlen */
-			return -1;
-
 		REG_SET_SEEN(BPF_REG_5);
 		jit->seen |= SEEN_FUNC;
 		/* lg %w1,<d(imm)>(%l) */
-		EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
-			   EMIT_CONST_U64(func));
+		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+			      EMIT_CONST_U64(func));
 		/* basr %r14,%w1 */
 		EMIT2(0x0d00, REG_14, REG_W1);
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
+		if (bpf_helper_changes_skb_data((void *)func)) {
+			jit->seen |= SEEN_SKB_CHANGE;
+			/* lg %b1,ST_OFF_SKBP(%r15) */
+			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
+				      REG_15, STK_OFF_SKBP);
+			emit_load_skb_data_hlen(jit);
+		}
 		break;
 	}
 	case BPF_JMP | BPF_CALL | BPF_X:
@@ -1240,7 +1245,7 @@
 	jit->lit = jit->lit_start;
 	jit->prg = 0;
 
-	bpf_jit_prologue(jit);
+	bpf_jit_prologue(jit, bpf_prog_was_classic(fp));
 	for (i = 0; i < fp->len; i += insn_count) {
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
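(Illustrative only: the point of SEEN_SKB_CHANGE is that helpers flagged by bpf_helper_changes_skb_data() — e.g. the VLAN push/pop helpers — may reallocate the skb's data, so the cached data pointer and header length must be recomputed after the call. The names below are descriptive, not the JIT's registers.)

	/* The invariant restored after an skb-changing helper returns */
	static void recache_skb(const struct sk_buff *skb,
				const u8 **data, u32 *hlen)
	{
		*hlen = skb->len - skb->data_len;	/* linear header length */
		*data = skb->data;			/* cached data pointer */
	}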
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 7931eee..f8b9f71 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -807,7 +807,7 @@
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(flen, proglen, pass, image);
+		bpf_jit_dump(flen, proglen, pass + 1, image);
 
 	if (image) {
 		bpf_flush_icache(image, image + proglen);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 6c335a8..c08000b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1103,7 +1103,7 @@
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(prog->len, proglen, 0, image);
+		bpf_jit_dump(prog->len, proglen, pass + 1, image);
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 36eb3d0..180a8f7 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -871,7 +871,7 @@
 		if (is_eth) {
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
 			if (be32_to_cpu(cqe->vlan_my_qpn) &
-					MLX4_CQE_VLAN_PRESENT_MASK) {
+					MLX4_CQE_CVLAN_PRESENT_MASK) {
 				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
 					MLX4_CQE_VID_MASK;
 			} else {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e9c624d..6dda57e 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -420,6 +420,13 @@
 		.flags = BOND_OPTFLAG_IFDOWN,
 		.values = bond_ad_user_port_key_tbl,
 		.set = bond_option_ad_user_port_key_set,
+	},
+	[BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
+		.id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+		.name = "num_grat_arp",
+		.desc = "Number of peer notifications to send on failover event",
+		.values = bond_num_peer_notif_tbl,
+		.set = bond_option_num_peer_notif_set
 	}
 };
 
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 31835a4..f4ae720 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -380,7 +380,7 @@
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
 		   bonding_show_ad_select, bonding_sysfs_store_option);
 
-/* Show and set the number of peer notifications to send after a failover event. */
+/* Show the number of peer notifications to send after a failover event. */
 static ssize_t bonding_show_num_peer_notif(struct device *d,
 					   struct device_attribute *attr,
 					   char *buf)
@@ -388,24 +388,10 @@
 	struct bonding *bond = to_bond(d);
 	return sprintf(buf, "%d\n", bond->params.num_peer_notif);
 }
-
-static ssize_t bonding_store_num_peer_notif(struct device *d,
-					    struct device_attribute *attr,
-					    const char *buf, size_t count)
-{
-	struct bonding *bond = to_bond(d);
-	int ret;
-
-	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
-	if (!ret)
-		ret = count;
-
-	return ret;
-}
 static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
-		   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
 static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
-		   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
 
 /* Show the MII monitor interval. */
 static ssize_t bonding_show_miimon(struct device *d,
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index cfece5a..af210ef 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -92,70 +92,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
-{
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
-	int ret;
-
-	*temp = 0;
-
-	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
-	if (ret < 0)
-		return ret;
-
-	*temp = (ret & 0xff) - 25;
-
-	return 0;
-}
-
-static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
-{
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
-	int ret;
-
-	*temp = 0;
-
-	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
-	if (ret < 0)
-		return ret;
-
-	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
-
-	return 0;
-}
-
-static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
-{
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
-	int ret;
-
-	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
-	if (ret < 0)
-		return ret;
-	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
-					(ret & 0xe0ff) | (temp << 8));
-}
-
-static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
-{
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
-	int ret;
-
-	*alarm = false;
-
-	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
-	if (ret < 0)
-		return ret;
-
-	*alarm = !!(ret & 0x40);
-
-	return 0;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 static int mv88e6352_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -393,10 +329,10 @@
 	.set_eee		= mv88e6xxx_set_eee,
 	.get_eee		= mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
-	.get_temp		= mv88e6352_get_temp,
-	.get_temp_limit		= mv88e6352_get_temp_limit,
-	.set_temp_limit		= mv88e6352_set_temp_limit,
-	.get_temp_alarm		= mv88e6352_get_temp_alarm,
+	.get_temp		= mv88e6xxx_get_temp,
+	.get_temp_limit		= mv88e6xxx_get_temp_limit,
+	.set_temp_limit		= mv88e6xxx_set_temp_limit,
+	.get_temp_alarm		= mv88e6xxx_get_temp_alarm,
 #endif
 	.get_eeprom		= mv88e6352_get_eeprom,
 	.set_eeprom		= mv88e6352_set_eeprom,
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 5158375..61ce4cf 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -517,7 +517,7 @@
 	return false;
 }
 
-bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
@@ -808,54 +808,6 @@
 	}
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-	int val;
-
-	*temp = 0;
-
-	mutex_lock(&ps->smi_mutex);
-
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
-	if (ret < 0)
-		goto error;
-
-	/* Enable temperature sensor */
-	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-	if (ret < 0)
-		goto error;
-
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
-	if (ret < 0)
-		goto error;
-
-	/* Wait for temperature to stabilize */
-	usleep_range(10000, 12000);
-
-	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-	if (val < 0) {
-		ret = val;
-		goto error;
-	}
-
-	/* Disable temperature sensor */
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
-	if (ret < 0)
-		goto error;
-
-	*temp = ((val & 0x1f) - 5) * 5;
-
-error:
-	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
-	mutex_unlock(&ps->smi_mutex);
-	return ret;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
 			   u16 mask)
@@ -2180,6 +2132,132 @@
 	return ret;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+	int val;
+
+	*temp = 0;
+
+	mutex_lock(&ps->smi_mutex);
+
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+	if (ret < 0)
+		goto error;
+
+	/* Enable temperature sensor */
+	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (ret < 0)
+		goto error;
+
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+	if (ret < 0)
+		goto error;
+
+	/* Wait for temperature to stabilize */
+	usleep_range(10000, 12000);
+
+	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (val < 0) {
+		ret = val;
+		goto error;
+	}
+
+	/* Disable temperature sensor */
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+	if (ret < 0)
+		goto error;
+
+	*temp = ((val & 0x1f) - 5) * 5;
+
+error:
+	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+	mutex_unlock(&ps->smi_mutex);
+	return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	*temp = 0;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+	if (ret < 0)
+		return ret;
+
+	*temp = (ret & 0xff) - 25;
+
+	return 0;
+}
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+		return mv88e63xx_get_temp(ds, temp);
+
+	return mv88e61xx_get_temp(ds, temp);
+}
+
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	*temp = 0;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+	return 0;
+}
+
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
+					(ret & 0xe0ff) | (temp << 8));
+}
+
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	*alarm = false;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*alarm = !!(ret & 0x40);
+
+	return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
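(Hedged worked example of the temperature encodings consolidated above: on the 6352/6320 family the limit field in page 6, register 26 holds (limit/5)+5 in bits 12:8, while the 61xx sensor path computes ((val & 0x1f) - 5) * 5. A round trip for a 75 °C limit:)

	/* 75 °C -> DIV_ROUND_CLOSEST(75, 5) + 5 = 20 (0x14) */
	static u8 temp_limit_to_reg(int temp_c)
	{
		return clamp_val(DIV_ROUND_CLOSEST(temp_c, 5) + 5, 0, 0x1f);
	}

	/* 20 -> 20 * 5 - 25 = 75 °C, matching mv88e6xxx_get_temp_limit() */
	static int reg_to_temp_limit(u8 field)
	{
		return field * 5 - 25;
	}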
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 64786cb..78e3722 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -394,7 +394,10 @@
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
 			struct ethtool_regs *regs, void *_p);
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
@@ -415,7 +418,7 @@
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
 			     int reg, int val);
-bool mv88e6xxx_6320_family(struct dsa_switch *ds);
+
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
 extern struct dsa_switch_driver mv88e6352_switch_driver;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f3bb178..05aa759 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -167,6 +167,7 @@
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
 source "drivers/net/ethernet/ti/Kconfig"
 source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c51014b..f42177b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -77,6 +77,7 @@
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
 obj-$(CONFIG_NET_VENDOR_TI) += ti/
 obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f9785..5762c48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1229,6 +1229,10 @@
 	} mac_rdata;
 
 	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_rdata;
+
+	union {
 		struct tstorm_eth_mac_filter_config	e1x;
 		struct eth_filter_rules_ramrod_data	e2;
 	} rx_mode_rdata;
@@ -1410,6 +1414,9 @@
 
 	/* Queue State object */
 	struct bnx2x_queue_sp_obj q_obj;
+
+	/* VLANs object */
+	struct bnx2x_vlan_mac_obj vlan_obj;
 };
 
 struct bnx2x_fp_stats {
@@ -1427,6 +1434,12 @@
 	SUB_MF_MODE_BD,
 };
 
+struct bnx2x_vlan_entry {
+	struct list_head link;
+	u16 vid;
+	bool hw;
+};
+
 struct bnx2x {
 	/* Fields used in the tx and intr/napi performance paths
 	 * are grouped together in the beginning of the structure
@@ -1865,8 +1878,6 @@
 	int					dcb_version;
 
 	/* CAM credit pools */
-
-	/* used only in sriov */
 	struct bnx2x_credit_pool_obj		vlans_pool;
 
 	struct bnx2x_credit_pool_obj		macs_pool;
@@ -1929,6 +1940,11 @@
 	u16 rx_filter;
 
 	struct bnx2x_link_report_data		vf_link_vars;
+	struct list_head vlan_reg;
+	u16 vlan_cnt;
+	u16 vlan_credit;
+	u16 vxlan_dst_port;
+	bool accept_any_vlan;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -1956,23 +1972,14 @@
 #define RSS_IPV6_TCP_CAP_MASK						\
 	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
-/* func init flags */
-#define FUNC_FLG_RSS		0x0001
-#define FUNC_FLG_STATS		0x0002
-/* removed  FUNC_FLG_UNMATCHED	0x0004 */
-#define FUNC_FLG_TPA		0x0008
-#define FUNC_FLG_SPQ		0x0010
-#define FUNC_FLG_LEADING	0x0020	/* PF only */
-#define FUNC_FLG_LEADING_STATS	0x0040
 struct bnx2x_func_init_params {
 	/* dma */
-	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
-	dma_addr_t	spq_map;	/* valid iff FUNC_FLG_SPQ */
+	bool		spq_active;
+	dma_addr_t	spq_map;
+	u16		spq_prod;
 
-	u16		func_flgs;
 	u16		func_id;	/* abs fid */
 	u16		pf_id;
-	u16		spq_prod;	/* valid iff FUNC_FLG_SPQ */
 };
 
 #define for_each_cnic_queue(bp, var) \
@@ -2082,6 +2089,11 @@
 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
 		      struct bnx2x_vlan_mac_obj *obj, bool set,
 		      int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags);
+
 /**
  * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
  *
@@ -2486,6 +2498,7 @@
 #define VF_ACQUIRE_THRESH		3
 #define VF_ACQUIRE_MAC_FILTERS		1
 #define VF_ACQUIRE_MC_FILTERS		10
+#define VF_ACQUIRE_VLAN_FILTERS		2 /* VLAN0 + 'real' VLAN */
 
 #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
 			    (!((me_reg) & ME_REG_VF_ERR)))
@@ -2596,4 +2609,9 @@
 #define BNX2X_MAX_PHC_DRIFT 31000000
 #define BNX2X_PTP_TX_TIMEOUT
 
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 6088c86..1637de6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2848,6 +2848,11 @@
 
 	/* Start fast path */
 
+	/* Re-configure vlan filters */
+	rc = bnx2x_vlan_reconfigure_vid(bp);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error3);
+
 	/* Initialize Rx filter. */
 	bnx2x_set_rx_mode_inner(bp);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 821346c..fa7c532 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1066,6 +1066,15 @@
 			   BNX2X_FILTER_MAC_PENDING,
 			   &bp->sp_state, obj_type,
 			   &bp->macs_pool);
+
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+				    fp->cl_id, fp->cid, BP_FUNC(bp),
+				    bnx2x_sp(bp, vlan_rdata),
+				    bnx2x_sp_mapping(bp, vlan_rdata),
+				    BNX2X_FILTER_VLAN_PENDING,
+				    &bp->sp_state, obj_type,
+				    &bp->vlans_pool);
 }
 
 /**
@@ -1125,7 +1134,7 @@
 	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
 				   bnx2x_get_path_func_num(bp));
 
-	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
 				    bnx2x_get_path_func_num(bp));
 
 	/* RSS configuration object */
@@ -1135,6 +1144,8 @@
 				  bnx2x_sp_mapping(bp, rss_rdata),
 				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
 				  BNX2X_OBJ_TYPE_RX);
+
+	bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
 }
 
 static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 78e55fe6..31c63aa2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3067,7 +3067,7 @@
 	storm_memset_func_en(bp, p->func_id, 1);
 
 	/* spq */
-	if (p->func_flgs & FUNC_FLG_SPQ) {
+	if (p->spq_active) {
 		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
 		REG_WR(bp, XSEM_REG_FAST_MEMORY +
 		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3283,7 +3283,6 @@
 {
 	struct bnx2x_func_init_params func_init = {0};
 	struct event_ring_data eq_data = { {0} };
-	u16 flags;
 
 	if (!CHIP_IS_E1x(bp)) {
 		/* reset IGU PF statistics: MSIX + ATTN */
@@ -3300,15 +3299,7 @@
 				BP_FUNC(bp) : BP_VN(bp))*4, 0);
 	}
 
-	/* function setup flags */
-	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
-	/* This flag is relevant for E1x only.
-	 * E2 doesn't have a TPA configuration in a function level.
-	 */
-	flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
-	func_init.func_flgs = flags;
+	func_init.spq_active = true;
 	func_init.pf_id = BP_FUNC(bp);
 	func_init.func_id = BP_FUNC(bp);
 	func_init.spq_map = bp->spq_mapping;
@@ -5304,6 +5295,10 @@
 			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
 		break;
+	case BNX2X_FILTER_VLAN_PENDING:
+		DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+		vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+		break;
 	case BNX2X_FILTER_MCAST_PENDING:
 		DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
 		/* This is only relevant for 57710 where multicast MACs are
@@ -5617,7 +5612,7 @@
 		      BNX2X_STATE_DIAG):
 		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 		      BNX2X_STATE_CLOSING_WAIT4_HALT):
-			DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+			DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
 			bnx2x_handle_classification_eqe(bp, elem);
 			break;
 
@@ -6205,6 +6200,11 @@
 		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
 		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
 		break;
 	case BNX2X_RX_MODE_ALLMULTI:
 		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6216,6 +6216,11 @@
 		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
 		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
 		break;
 	case BNX2X_RX_MODE_PROMISC:
 		/* According to definition of SI mode, iface in promisc mode
@@ -6236,18 +6241,15 @@
 		else
 			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
 
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
 		break;
 	default:
 		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
 		return -EINVAL;
 	}
 
-	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
-	if (rx_mode != BNX2X_RX_MODE_NONE) {
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
-	}
-
 	return 0;
 }
 
@@ -8441,6 +8443,42 @@
 	return rc;
 }
 
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags)
+{
+	int rc;
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Fill general parameters */
+	ramrod_param.vlan_mac_obj = obj;
+	ramrod_param.ramrod_flags = *ramrod_flags;
+
+	/* Fill a user request section if needed */
+	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+		ramrod_param.user_req.u.vlan.vlan = vlan;
+		/* Set the command: ADD or DEL */
+		if (set)
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+		else
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+	}
+
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+	if (rc == -EEXIST) {
+		/* Do not treat adding same vlan as error. */
+		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+		rc = 0;
+	} else if (rc < 0) {
+		BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+	}
+
+	return rc;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
 		       struct bnx2x_vlan_mac_obj *mac_obj,
 		       int mac_type, bool wait_for_comp)
@@ -12140,6 +12178,7 @@
 	mutex_init(&bp->drv_info_mutex);
 	sema_init(&bp->stats_lock, 1);
 	bp->drv_info_mng_owner = false;
+	INIT_LIST_HEAD(&bp->vlan_reg);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12658,6 +12697,169 @@
 	return vxlan_features_check(skb, features);
 }
 
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+	int rc;
+
+	if (IS_PF(bp)) {
+		unsigned long ramrod_flags = 0;
+
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+		rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+					add, &ramrod_flags);
+	} else {
+		rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+	}
+
+	return rc;
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!bp->vlan_cnt) {
+		DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+		return 0;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link) {
+		/* Prepare for cleanup in case of errors */
+		if (rc) {
+			vlan->hw = false;
+			continue;
+		}
+
+		if (!vlan->hw)
+			continue;
+
+		DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+		rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+		if (rc) {
+			BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+			vlan->hw = false;
+			rc = -EINVAL;
+			continue;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	bool hw = false;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration; the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+	vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
+		return -ENOMEM;
+
+	bp->vlan_cnt++;
+	if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+		DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+		bp->accept_any_vlan = true;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	} else if (bp->vlan_cnt <= bp->vlan_credit) {
+		rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+		hw = true;
+	}
+
+	vlan->vid = vid;
+	vlan->hw = hw;
+
+	if (!rc) {
+		list_add(&vlan->link, &bp->vlan_reg);
+	} else {
+		bp->vlan_cnt--;
+		kfree(vlan);
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration; the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+	if (!bp->vlan_cnt) {
+		BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		if (vlan->vid == vid)
+			break;
+
+	if (vlan->vid != vid) {
+		BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+		return -EINVAL;
+	}
+
+	if (vlan->hw)
+		rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+	list_del(&vlan->link);
+	kfree(vlan);
+
+	bp->vlan_cnt--;
+
+	if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+		/* Configure all non-configured entries */
+		list_for_each_entry(vlan, &bp->vlan_reg, link) {
+			if (vlan->hw)
+				continue;
+
+			rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+			if (rc) {
+				BNX2X_ERR("Unable to config VLAN %d\n",
+					  vlan->vid);
+				continue;
+			}
+			DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+			   vlan->vid);
+			vlan->hw = true;
+		}
+		DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+		bp->accept_any_vlan = false;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+	return rc;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_open		= bnx2x_open,
 	.ndo_stop		= bnx2x_close,
@@ -12671,6 +12873,8 @@
 	.ndo_fix_features	= bnx2x_fix_features,
 	.ndo_set_features	= bnx2x_set_features,
 	.ndo_tx_timeout		= bnx2x_tx_timeout,
+	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= poll_bnx2x,
 #endif
@@ -12881,6 +13085,18 @@
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
+	/* VF with OLD Hypervisor or old PF do not support filtering */
+	if (IS_PF(bp)) {
+		if (CHIP_IS_E1x(bp))
+			bp->accept_any_vlan = true;
+		else
+			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+	}
+
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 265fe0a..c9bd7f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -357,6 +357,23 @@
 
 	return vp->get(vp, 1);
 }
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
+
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
 {
 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -385,6 +402,22 @@
 	return vp->put(vp, 1);
 }
 
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
+
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
 /**
  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
  *
@@ -638,6 +671,26 @@
 	return 0;
 }
 
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+				    struct bnx2x_vlan_mac_obj *o,
+				   union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+				  ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return -EEXIST;
+
+	return 0;
+}
+
 /* check_del() callbacks */
 static struct bnx2x_vlan_mac_registry_elem *
 	bnx2x_check_mac_del(struct bnx2x *bp,
@@ -672,6 +725,27 @@
 	return NULL;
 }
 
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return pos;
+
+	return NULL;
+}
+
 /* check_move() callback */
 static bool bnx2x_check_move(struct bnx2x *bp,
 			     struct bnx2x_vlan_mac_obj *src_o,
@@ -1038,6 +1112,96 @@
 					rule_cnt);
 }
 
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+				      struct bnx2x_vlan_mac_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+	u16 inner_mac;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+				      &rule_entry->pair.header);
+
+	/* Set VLAN and MAC themselves */
+	rule_entry->pair.vlan = cpu_to_le16(vlan);
+	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+			      &rule_entry->pair.mac_mid,
+			      &rule_entry->pair.mac_lsb, mac);
+	inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+	rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	/* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		struct bnx2x_vlan_mac_obj *target_obj;
+
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		target_obj = elem->cmd_data.vlan_mac.target_obj;
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+					      true, CLASSIFY_RULE_OPCODE_PAIR,
+					      &rule_entry->pair.header);
+
+		/* Set a VLAN itself */
+		rule_entry->pair.vlan = cpu_to_le16(vlan);
+		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+				      &rule_entry->pair.mac_mid,
+				      &rule_entry->pair.mac_lsb, mac);
+		rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	}
+
+	/* Set the ramrod data header */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - configure a single VLAN-MAC pair rule (E1H)
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset:	cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+				       struct bnx2x_vlan_mac_obj *o,
+				       struct bnx2x_exeq_elem *elem,
+				       int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/* 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
  *
@@ -1137,6 +1301,25 @@
 	return NULL;
 }
 
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_mac_ramrod_data *data =
+		&elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+			    sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd ==
+		     elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
 /**
  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
  *
@@ -2044,6 +2227,68 @@
 	}
 }
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj =
+		(union bnx2x_qable_obj *)vlan_mac_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, vlans_pool);
+
+	/* CAM pool handling */
+	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+	/* CAM offset is relevant for 57710 and 57711 chips only which have a
+	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
+	 * will be taken from MACs' pool object only.
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips other than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+}
 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
 			struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3856,8 +4101,8 @@
  * If credit is negative pool operations will always succeed (unlimited pool).
  *
  */
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
-					  int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit)
 {
 	/* Zero the object first */
 	memset(p, 0, sizeof(*p));
@@ -3936,9 +4181,9 @@
 		/* CAM credit is equaly divided between all active functions
 		 * on the PATH.
 		 */
-		if ((func_num > 0)) {
+		if (func_num > 0) {
 			if (!CHIP_REV_IS_SLOW(bp))
-				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+				cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
 			else
 				cam_sz = BNX2X_CAM_SIZE_EMUL;
 
@@ -3968,8 +4213,9 @@
 		 * on the PATH.
 		 */
 		if (func_num > 0) {
-			int credit = MAX_VLAN_CREDIT_E2 / func_num;
-			bnx2x_init_credit_pool(p, func_id * credit, credit);
+			int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
 		} else
 			/* this should never happen! Block VLAN operations. */
 			bnx2x_init_credit_pool(p, 0, 0);
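(For reference, the credit-pool semantics relied on above, as a hedged usage sketch — the -1 base is what the E2 branch now passes, and per the comment in this file a negative credit makes a pool unlimited:)

	bnx2x_init_credit_pool(p, -1, credit);	/* E2: base unused, 'credit' entries */
	bnx2x_init_credit_pool(p, 0, 0);	/* empty pool: VLAN operations blocked */
	bnx2x_init_credit_pool(p, 0, -1);	/* negative credit: unlimited pool */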
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 324e9f9..4048fc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1413,6 +1413,14 @@
 			 unsigned long *pstate, bnx2x_obj_type type,
 			 struct bnx2x_credit_pool_obj *vlans_pool);
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
 					struct bnx2x_vlan_mac_obj *o);
 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1483,6 +1491,8 @@
 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
 				 struct bnx2x_credit_pool_obj *p, u8 func_id,
 				 u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit);
 
 /****************** RSS CONFIGURATION ****************/
 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1510,4 +1520,12 @@
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 			     u8 *ind_table);
 
+#define PF_MAC_CREDIT_E2(bp, func_num)					\
+	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num)					 \
+	((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
 #endif /* BNX2X_SP_VERBS */
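(Hedged reading of the two macros above: VF credits are carved out of the path-wide total first, the remainder is split evenly between the PFs, and each PF adds back the share owned by its own VFs. As a sketch:)

	static int pf_credit_e2(int path_total, int vfs_per_path,
				int vfs_per_pf, int per_vf_cnt, int func_num)
	{
		return (path_total - vfs_per_path * per_vf_cnt) / func_num
			+ vfs_per_pf * per_vf_cnt;
	}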
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5b243bc..ec82831 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -197,14 +197,6 @@
 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
 	setup_p->gen_params.fp_hsi = vf->fp_hsi;
 
-	/* Setup-op pause params:
-	 * Nothing to do, the pause thresholds are set by default to 0 which
-	 * effectively turns off the feature for this queue. We don't want
-	 * one queue (VF) to interfering with another queue (another VF)
-	 */
-	if (vf->cfg_flags & VF_CFG_FW_FC)
-		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
-			  vf->abs_vfid);
 	/* Setup-op flags:
 	 * collect statistics, zero statistics, local-switching, security,
 	 * OV for Flex10, RSS and MCAST for leading
@@ -360,22 +352,24 @@
 }
 
 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
-				   int qid, bool drv_only, bool mac)
+				   int qid, bool drv_only, int type)
 {
 	struct bnx2x_vlan_mac_ramrod_params ramrod;
 	int rc;
 
 	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
-	   mac ? "MACs" : "VLANs");
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 
 	/* Prepare ramrod params */
 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-	if (mac) {
+	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+	} else if (type == BNX2X_VF_FILTER_MAC) {
 		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 	} else {
-		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-			&ramrod.user_req.vlan_mac_flags);
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 	}
 	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -393,14 +387,11 @@
 					     &ramrod.ramrod_flags);
 	if (rc) {
 		BNX2X_ERR("Failed to delete all %s\n",
-			  mac ? "MACs" : "VLANs");
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 		return rc;
 	}
 
-	/* Clear the vlan counters */
-	if (!mac)
-		atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
 	return 0;
 }
 
@@ -414,13 +405,17 @@
 
 	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
 	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
-	   filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
 
 	/* Prepare ramrod params */
 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-	if (filter->type == BNX2X_VF_FILTER_VLAN) {
-		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-			&ramrod.user_req.vlan_mac_flags);
+	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+		ramrod.user_req.u.vlan.vlan = filter->vid;
+		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 		ramrod.user_req.u.vlan.vlan = filter->vid;
 	} else {
@@ -431,16 +426,6 @@
 	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
 					    BNX2X_VLAN_MAC_DEL;
 
-	/* Verify there are available vlan credits */
-	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
-	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
-	     vf_vlan_rules_cnt(vf))) {
-		BNX2X_ERR("No credits for vlan [%d >= %d]\n",
-			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
-			  vf_vlan_rules_cnt(vf));
-		return -ENOMEM;
-	}
-
 	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 	if (drv_only)
 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -452,16 +437,13 @@
 	if (rc && rc != -EEXIST) {
 		BNX2X_ERR("Failed to %s %s\n",
 			  filter->add ? "add" : "delete",
-			  filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
-								"VLAN");
+			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+				"VLAN-MAC" :
+			  (filter->type == BNX2X_VF_FILTER_MAC) ?
+				"MAC" : "VLAN");
 		return rc;
 	}
 
-	/* Update the vlan counters */
-	if (filter->type == BNX2X_VF_FILTER_VLAN)
-		bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
-				     &bnx2x_vfq(vf, qid, vlan_count));
-
 	return 0;
 }
 
@@ -513,21 +495,7 @@
 	if (rc)
 		goto op_err;
 
-	/* Configure vlan0 for leading queue */
-	if (!qid) {
-		struct bnx2x_vf_mac_vlan_filter filter;
-
-		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
-		filter.type = BNX2X_VF_FILTER_VLAN;
-		filter.add = true;
-		filter.vid = 0;
-		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
-		if (rc)
-			goto op_err;
-	}
-
 	/* Schedule the configuration of any pending vlan filters */
-	vf->cfg_flags |= VF_CFG_VLAN;
 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 			       BNX2X_MSG_IOV);
 	return 0;
@@ -546,10 +514,16 @@
 	/* If needed, clean the filtering data base */
 	if ((qid == LEADING_IDX) &&
 	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
-		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN_MAC);
 		if (rc)
 			goto op_err;
-		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN);
+		if (rc)
+			goto op_err;
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_MAC);
 		if (rc)
 			goto op_err;
 	}
@@ -682,11 +656,18 @@
 		/* Remove filtering if feasible */
 		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-						     false, false);
+						     false,
+						     BNX2X_VF_FILTER_VLAN_MAC);
 			if (rc)
 				goto op_err;
 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-						     false, true);
+						     false,
+						     BNX2X_VF_FILTER_VLAN);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false,
+						     BNX2X_VF_FILTER_MAC);
 			if (rc)
 				goto op_err;
 			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -767,8 +748,6 @@
 
 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
-	if (vf->cfg_flags & VF_CFG_INT_SIMD)
-		val |= IGU_VF_CONF_SINGLE_ISR_EN;
 	val &= ~IGU_VF_CONF_PARENT_MASK;
 	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -847,29 +826,6 @@
 	return 0;
 }
 
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
-					  struct bnx2x_virtf *vf,
-					  int new)
-{
-	int num = vf_vlan_rules_cnt(vf);
-	int diff = new - num;
-	bool rc = true;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
-	   vf->abs_vfid, new, num);
-
-	if (diff > 0)
-		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
-	else if (diff < 0)
-		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
-	if (rc)
-		vf_vlan_rules_cnt(vf) = new;
-	else
-		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
-		   vf->abs_vfid);
-}
-
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -877,21 +833,13 @@
 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
 	struct vf_pf_resc_request *resc = &vf->alloc_resc;
-	u16 vlan_count = 0;
 
 	/* will be set only during VF-ACQUIRE */
 	resc->num_rxqs = 0;
 	resc->num_txqs = 0;
 
-	/* no credit calculations for macs (just yet) */
-	resc->num_mac_filters = 1;
-
-	/* divvy up vlan rules */
-	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
-	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
-	vlan_count = 1 << ilog2(vlan_count);
-	bnx2x_iov_re_set_vlan_filters(bp, vf,
-				      vlan_count / BNX2X_NR_VIRTFN(bp));
+	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
 
 	/* no real limitation */
 	resc->num_mc_filters = 0;
@@ -1625,6 +1573,11 @@
 		vf->filter_state = 0;
 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
+		bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+				       vf_vlan_rules_cnt(vf));
+		bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+				       vf_mac_rules_cnt(vf));
+
 		/*  init mcast object - This object will be re-initialized
 		 *  during VF-ACQUIRE with the proper cl_id and cid.
 		 *  It needs to be initialized here so that it can be safely
@@ -2037,12 +1990,11 @@
 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
-	/* Save a vlan filter for the Hypervisor */
 	return ((req_resc->num_rxqs <= rxq_cnt) &&
 		(req_resc->num_txqs <= txq_cnt) &&
 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -2096,16 +2048,12 @@
 	vf_sb_count(vf) = resc->num_sbs;
 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
-	if (resc->num_mac_filters)
-		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-	/* Add an additional vlan filter credit for the hypervisor */
-	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
 	DP(BNX2X_MSG_IOV,
 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
 	   vf_sb_count(vf), vf_rxq_count(vf),
 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
-	   vf_vlan_rules_visible_cnt(vf));
+	   vf_vlan_rules_cnt(vf));
 
 	/* Initialize the queues */
 	if (!vf->vfqs) {
@@ -2138,7 +2086,6 @@
 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 {
 	struct bnx2x_func_init_params func_init = {0};
-	u16 flags = 0;
 	int i;
 
 	/* the sb resources are initialized at this point, do the
@@ -2165,23 +2112,9 @@
 	/* reset IGU VF statistics: MSIX */
 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
 
-	/* vf init */
-	if (vf->cfg_flags & VF_CFG_STATS)
-		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
-	if (vf->cfg_flags & VF_CFG_TPA)
-		flags |= FUNC_FLG_TPA;
-
-	if (is_vf_multi(vf))
-		flags |= FUNC_FLG_RSS;
-
 	/* function setup */
-	func_init.func_flgs = flags;
 	func_init.pf_id = BP_FUNC(bp);
 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
-	func_init.fw_stat_map = vf->fw_stat_map;
-	func_init.spq_map = vf->spq_map;
-	func_init.spq_prod = 0;
 	bnx2x_func_init(bp, &func_init);
 
 	/* Enable the vf */
@@ -2594,8 +2527,8 @@
 
 	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
 	for_each_vf(bp, vfidx) {
-	bulletin = BP_VF_BULLETIN(bp, vfidx);
-		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+		bulletin = BP_VF_BULLETIN(bp, vfidx);
+		if (bulletin->valid_bitmap & VLAN_VALID)
 			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
 	}
 }
@@ -2813,20 +2746,58 @@
 	return rc;
 }
 
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf, bool accept)
+{
+	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+	unsigned long accept_flags;
+
+	/* need to remove/add the VF's accept_any_vlan bit */
+	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+	if (accept)
+		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+	else
+		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+			      accept_flags);
+	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+	bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    u16 vlan, bool add)
+{
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+
+	/* configure the new vlan to device */
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	ramrod_param.ramrod_flags = ramrod_flags;
+	ramrod_param.user_req.u.vlan.vlan = vlan;
+	ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+					: BNX2X_VLAN_MAC_DEL;
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc) {
+		BNX2X_ERR("failed to configure vlan\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
-	struct bnx2x_queue_state_params q_params = {NULL};
-	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-	struct bnx2x_queue_update_params *update_params;
 	struct pf_vf_bulletin_content *bulletin = NULL;
-	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_vlan_mac_obj *vlan_obj;
 	unsigned long vlan_mac_flags = 0;
 	unsigned long ramrod_flags = 0;
 	struct bnx2x_virtf *vf = NULL;
-	unsigned long accept_flags;
-	int rc;
+	int i, rc;
 
 	if (vlan > 4095) {
 		BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2855,6 +2826,10 @@
 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
 	bulletin->vlan = vlan;
 
+	/* Post update on VF's bulletin board */
+	rc = bnx2x_post_vf_bulletin(bp, vfidx);
+	if (rc)
+		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
 	mutex_unlock(&bp->vfdb->bulletin_mutex);
 
 	/* is vf initialized and queue set up? */
@@ -2881,84 +2856,76 @@
 		goto out;
 	}
 
-	/* need to remove/add the VF's accept_any_vlan bit */
-	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
-	if (vlan)
-		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-	else
-		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
-	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
-			      accept_flags);
-	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
-	bnx2x_config_rx_mode(bp, &rx_ramrod);
-
-	/* configure the new vlan to device */
-	memset(&ramrod_param, 0, sizeof(ramrod_param));
-	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-	ramrod_param.vlan_mac_obj = vlan_obj;
-	ramrod_param.ramrod_flags = ramrod_flags;
-	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-		&ramrod_param.user_req.vlan_mac_flags);
-	ramrod_param.user_req.u.vlan.vlan = vlan;
-	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-	if (rc) {
-		BNX2X_ERR("failed to configure vlan\n");
-		rc =  -EINVAL;
-		goto out;
-	}
-
-	/* send queue update ramrod to configure default vlan and silent
-	 * vlan removal
+	/* clear accept_any_vlan when HV forces vlan, otherwise
+	 * according to VF capabilities
 	 */
-	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-	q_params.cmd = BNX2X_Q_CMD_UPDATE;
-	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-	update_params = &q_params.params.update;
-	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
-		  &update_params->update_flags);
-	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-		  &update_params->update_flags);
-	if (vlan == 0) {
-		/* if vlan is 0 then we want to leave the VF traffic
-		 * untagged, and leave the incoming traffic untouched
-		 * (i.e. do not remove any vlan tags).
-		 */
-		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			    &update_params->update_flags);
-		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			    &update_params->update_flags);
-	} else {
-		/* configure default vlan to vf queue and set silent
-		 * vlan removal (the vf remains unaware of this vlan).
-		 */
-		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			  &update_params->update_flags);
-		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			  &update_params->update_flags);
-		update_params->def_vlan = vlan;
-		update_params->silent_removal_value =
-			vlan & VLAN_VID_MASK;
-		update_params->silent_removal_mask = VLAN_VID_MASK;
-	}
+	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
 
-	/* Update the Queue state */
-	rc = bnx2x_queue_state_change(bp, &q_params);
-	if (rc) {
-		BNX2X_ERR("Failed to configure default VLAN\n");
+	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+	if (rc)
 		goto out;
-	}
 
-
-	/* clear the flag indicating that this VF needs its vlan
-	 * (will only be set if the HV configured the Vlan before vf was
-	 * up and we were called because the VF came up later
+	/* send queue update ramrods to configure default vlan and
+	 * silent vlan removal
 	 */
+	for_each_vfq(vf, i) {
+		struct bnx2x_queue_state_params q_params = {NULL};
+		struct bnx2x_queue_update_params *update_params;
+
+		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+		/* validate the Q is UP */
+		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+			continue;
+
+		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+		q_params.cmd = BNX2X_Q_CMD_UPDATE;
+		update_params = &q_params.params.update;
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			  &update_params->update_flags);
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			  &update_params->update_flags);
+		if (vlan == 0) {
+			/* if vlan is 0 then we want to leave the VF traffic
+			 * untagged, and leave the incoming traffic untouched
+			 * (i.e. do not remove any vlan tags).
+			 */
+			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				    &update_params->update_flags);
+			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				    &update_params->update_flags);
+		} else {
+			/* configure default vlan to vf queue and set silent
+			 * vlan removal (the vf remains unaware of this vlan).
+			 */
+			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				  &update_params->update_flags);
+			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				  &update_params->update_flags);
+			update_params->def_vlan = vlan;
+			update_params->silent_removal_value =
+				vlan & VLAN_VID_MASK;
+			update_params->silent_removal_mask = VLAN_VID_MASK;
+		}
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+				  i);
+			goto out;
+		}
+	}
 out:
-	vf->cfg_flags &= ~VF_CFG_VLAN;
 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
+	if (!rc)
+		DP(BNX2X_MSG_IOV,
+		   "updated VF[%d] vlan configuration (vlan = %d)\n",
+		   vfidx, vlan);
+
 	return rc;
 }
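
The queue-update loop above applies the same default-VLAN policy to every
active VF queue rather than only the leading one. The policy itself is
unchanged: a zero VLAN leaves VF traffic untagged and incoming tags
untouched, while a non-zero hypervisor VLAN is inserted on transmit and
silently stripped on receive, so the VF never observes it. A standalone
sketch of how the update parameters derive from the VLAN id (the struct and
helper names here are hypothetical; the driver packs the same values into
bnx2x_queue_update_params):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff	/* 12-bit VLAN id, as in <linux/if_vlan.h> */

struct vf_q_vlan_params {
	bool default_vlan_en;
	bool silent_vlan_rem;
	uint16_t def_vlan;
	uint16_t silent_removal_value;
	uint16_t silent_removal_mask;
};

static struct vf_q_vlan_params vf_q_vlan_params(uint16_t vlan)
{
	struct vf_q_vlan_params p = { 0 };

	if (vlan) {
		/* insert on TX, silently strip on RX; VF never sees it */
		p.default_vlan_en = true;
		p.silent_vlan_rem = true;
		p.def_vlan = vlan;
		p.silent_removal_value = vlan & VLAN_VID_MASK;
		p.silent_removal_mask = VLAN_VID_MASK;
	}
	/* vlan == 0: leave VF traffic untagged and RX tags untouched */
	return p;
}

int main(void)
{
	struct vf_q_vlan_params p = vf_q_vlan_params(100);

	printf("def_vlan=%u strip=%u/0x%x\n", p.def_vlan,
	       p.silent_removal_value, p.silent_removal_mask);
	return 0;
}
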
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 2011205..670a581 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -77,7 +77,10 @@
 
 	/* VLANs object */
 	struct bnx2x_vlan_mac_obj	vlan_obj;
-	atomic_t vlan_count;		/* 0 means vlan-0 is set  ~ untagged */
+
+	/* VLAN-MACs object */
+	struct bnx2x_vlan_mac_obj	vlan_mac_obj;
+
 	unsigned long accept_flags;	/* last accept flags configured */
 
 	/* Queue Slow-path State object */
@@ -105,8 +108,10 @@
 
 struct bnx2x_vf_mac_vlan_filter {
 	int type;
-#define BNX2X_VF_FILTER_MAC	1
-#define BNX2X_VF_FILTER_VLAN	2
+#define BNX2X_VF_FILTER_MAC	BIT(0)
+#define BNX2X_VF_FILTER_VLAN	BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
 	bool add;
 	u8 *mac;
@@ -121,14 +126,9 @@
 /* vf context */
 struct bnx2x_virtf {
 	u16 cfg_flags;
-#define VF_CFG_STATS		0x0001
-#define VF_CFG_FW_FC		0x0002
-#define VF_CFG_TPA		0x0004
-#define VF_CFG_INT_SIMD		0x0008
-#define VF_CACHE_LINE		0x0010
-#define VF_CFG_VLAN		0x0020
-#define VF_CFG_STATS_COALESCE	0x0040
-#define VF_CFG_EXT_BULLETIN	0x0080
+#define VF_CFG_STATS_COALESCE	0x1
+#define VF_CFG_EXT_BULLETIN	0x2
+#define VF_CFG_VLAN_FILTER	0x4
 	u8 link_cfg;		/* IFLA_VF_LINK_STATE_AUTO
 				 * IFLA_VF_LINK_STATE_ENABLE
 				 * IFLA_VF_LINK_STATE_DISABLE
@@ -142,9 +142,8 @@
 	bool flr_clnup_stage;	/* true during flr cleanup */
 
 	/* dma */
-	dma_addr_t fw_stat_map;		/* valid iff VF_CFG_STATS */
+	dma_addr_t fw_stat_map;
 	u16 stats_stride;
-	dma_addr_t spq_map;
 	dma_addr_t bulletin_map;
 
 	/* Allocated resources counters. Before the VF is acquired, the
@@ -165,8 +164,6 @@
 #define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters)
-	/* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf)	(vf_vlan_rules_cnt(vf) - 1)
 
 	u8 sb_count;	/* actual number of SBs */
 	u8 igu_base_id;	/* base igu status block id */
@@ -209,6 +206,9 @@
 	enum channel_tlvs		op_current;
 
 	u8 fp_hsi;
+
+	struct bnx2x_credit_pool_obj	vf_vlans_pool;
+	struct bnx2x_credit_pool_obj	vf_macs_pool;
 };
 
 #define BNX2X_NR_VIRTFN(bp)	((bp)->vfdb->sriov.nr_virtfn)
@@ -232,6 +232,12 @@
 #define FW_VF_HANDLE(abs_vfid)	\
 	(abs_vfid + FW_PF_MAX_HANDLE)
 
+#define GET_NUM_VFS_PER_PATH(bp)	64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp)		((bp)->vfdb ? (bp)->vfdb->sriov.total \
+						    : 0)
+#define VF_MAC_CREDIT_CNT		1
+#define VF_VLAN_CREDIT_CNT		2 /* VLAN0 + 'real' VLAN */
+
 /* locking and unlocking the channel mutex */
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      enum channel_tlvs tlv);
@@ -276,6 +282,10 @@
 	} vlan_rdata;
 
 	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_mac_rdata;
+
+	union {
 		struct eth_filter_rules_ramrod_data	e2;
 	} rx_mode_rdata;
 
@@ -538,8 +548,14 @@
 
 int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
 
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
 #else /* CONFIG_BNX2X_SRIOV */
 
+#define GET_NUM_VFS_PER_PATH(bp)	0
+#define GET_NUM_VFS_PER_PF(bp)		0
+#define VF_MAC_CREDIT_CNT		0
+#define VF_VLAN_CREDIT_CNT		0
+
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
 				struct bnx2x_queue_sp_obj **q_obj) {}
 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -606,5 +622,7 @@
 static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
 					      bool support_long) {}
 
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) { return 0; }
+
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
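
Recasting BNX2X_VF_FILTER_MAC and BNX2X_VF_FILTER_VLAN as bits makes the
paired type a simple OR of the two, but it also means classification order
matters: the combined VLAN-MAC type must be tested before either single
bit, as the driver paths above do. A small self-contained illustration:

#include <stdio.h>

#define BNX2X_VF_FILTER_MAC		(1 << 0)
#define BNX2X_VF_FILTER_VLAN		(1 << 1)
#define BNX2X_VF_FILTER_VLAN_MAC \
	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN)

/* Because VLAN_MAC is the union of both bits, the paired type must be
 * checked first; testing a single bit first would misclassify a paired
 * filter as plain MAC or plain VLAN.
 */
static const char *filter_kind(int type)
{
	if ((type & BNX2X_VF_FILTER_VLAN_MAC) == BNX2X_VF_FILTER_VLAN_MAC)
		return "VLAN-MAC";
	if (type & BNX2X_VF_FILTER_MAC)
		return "MAC";
	return "VLAN";
}

int main(void)
{
	printf("%s\n", filter_kind(BNX2X_VF_FILTER_VLAN_MAC));	/* VLAN-MAC */
	printf("%s\n", filter_kind(BNX2X_VF_FILTER_MAC));	/* MAC */
	printf("%s\n", filter_kind(BNX2X_VF_FILTER_VLAN));	/* VLAN */
	return 0;
}
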
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 31b79bd..1374e53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -247,6 +247,7 @@
 	req->resc_request.num_sbs = bp->igu_sb_cnt;
 	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
 	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
 
 	/* pf 2 vf bulletin board address */
 	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -257,6 +258,8 @@
 
 	/* Bulletin support for bulletin board with length > legacy length */
 	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+	/* vlan filtering is supported */
+	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
 
 	/* add list termination tlv */
 	bnx2x_add_tlv(bp, req,
@@ -375,6 +378,8 @@
 		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
 	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
 	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
 	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
 		sizeof(bp->fw_ver));
 
@@ -548,7 +553,7 @@
 			   BNX2X_FILTER_MAC_PENDING,
 			   &vf->filter_state,
 			   BNX2X_OBJ_TYPE_RX_TX,
-			   &bp->macs_pool);
+			   &vf->vf_macs_pool);
 	/* vlan */
 	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
 			    cl_id, q->cid, func_id,
@@ -557,8 +562,17 @@
 			    BNX2X_FILTER_VLAN_PENDING,
 			    &vf->filter_state,
 			    BNX2X_OBJ_TYPE_RX_TX,
-			    &bp->vlans_pool);
-
+			    &vf->vf_vlans_pool);
+	/* vlan-mac */
+	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+				cl_id, q->cid, func_id,
+				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+				BNX2X_FILTER_VLAN_MAC_PENDING,
+				&vf->filter_state,
+				BNX2X_OBJ_TYPE_RX_TX,
+				&vf->vf_macs_pool,
+				&vf->vf_vlans_pool);
 	/* mcast */
 	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
 			     q->cid, func_id, func_id,
@@ -725,7 +739,7 @@
 
 	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
 	if (set)
-		req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
 
 	/* sample bulletin board for new mac */
 	bnx2x_sample_bulletin(bp);
@@ -913,6 +927,67 @@
 	return 0;
 }
 
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc = 0;
+
+	if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+		DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+		return 0;
+	}
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+			sizeof(*req));
+
+	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+	req->vf_qid = vf_qid;
+	req->n_mac_vlan_filters = 1;
+
+	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+	if (add)
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+	/* sample bulletin board for hypervisor vlan */
+	bnx2x_sample_bulletin(bp);
+
+	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+		BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	req->filters[0].vlan_tag = vid;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+			  vid);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
 {
 	int mode = bp->rx_mode;
@@ -936,8 +1011,13 @@
 		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+		if (mode == BNX2X_RX_MODE_PROMISC)
+			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
 	}
 
+	if (bp->accept_any_vlan)
+		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
 	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
 	req->vf_qid = 0;
 
@@ -1190,7 +1270,8 @@
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
 				   PFVF_CAP_TPA |
-				   PFVF_CAP_TPA_UPDATE);
+				   PFVF_CAP_TPA_UPDATE |
+				   PFVF_CAP_VLAN_FILTER);
 	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
 			  sizeof(resp->pfdev_info.fw_ver));
 
@@ -1205,7 +1286,7 @@
 			bnx2x_vf_max_queue_cnt(bp, vf);
 		resc->num_sbs = vf_sb_count(vf);
 		resc->num_mac_filters = vf_mac_rules_cnt(vf);
-		resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
 		resc->num_mc_filters = 0;
 
 		if (status == PFVF_STATUS_SUCCESS) {
@@ -1372,6 +1453,14 @@
 		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
 	}
 
+	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+		DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+		   vf->abs_vfid);
+		vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+	} else {
+		vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+	}
+
 out:
 	/* response */
 	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1384,7 +1473,6 @@
 	int rc;
 
 	/* record ghost addresses from vf message */
-	vf->spq_map = init->spq_addr;
 	vf->fw_stat_map = init->stats_addr;
 	vf->stats_stride = init->stats_stride;
 	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1580,17 +1668,18 @@
 
 		if ((msg_filter->flags & type_flag) != type_flag)
 			continue;
-		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
 			fl->filters[j].mac = msg_filter->mac;
-			fl->filters[j].type = BNX2X_VF_FILTER_MAC;
-		} else {
-			fl->filters[j].vid = msg_filter->vlan_tag;
-			fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
 		}
-		fl->filters[j].add =
-			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
-			true : false;
+		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
+			fl->filters[j].vid = msg_filter->vlan_tag;
+			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
+		}
+		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
 		fl->count++;
+		j++;
 	}
 	if (!fl->count)
 		kfree(fl);
@@ -1600,6 +1689,18 @@
 	return 0;
 }
 
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+				    u32 flags)
+{
+	int i, cnt = 0;
+
+	for (i = 0; i < filters->n_mac_vlan_filters; i++)
+		if  ((filters->filters[i].flags & flags) == flags)
+			cnt++;
+
+	return cnt;
+}
+
 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
 				       struct vfpf_q_mac_vlan_filter *filter)
 {
@@ -1631,6 +1732,7 @@
 
 #define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
 #define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
 
 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -1641,16 +1743,33 @@
 
 	/* check for any mac/vlan changes */
 	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-		/* build mac list */
 		struct bnx2x_vf_mac_vlan_filters *fl = NULL;
 
+		/* build vlan-mac list */
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_VLAN_MAC_FILTER);
+		if (rc)
+			goto op_err;
+
+		if (fl) {
+			/* set vlan-mac list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
+				goto op_err;
+		}
+
+		/* build mac list */
+		fl = NULL;
+
 		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
 					       VFPF_MAC_FILTER);
 		if (rc)
 			goto op_err;
 
 		if (fl) {
-
 			/* set mac list */
 			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
 							   msg->vf_qid,
@@ -1659,22 +1778,6 @@
 				goto op_err;
 		}
 
-		/* build vlan list */
-		fl = NULL;
-
-		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-					       VFPF_VLAN_FILTER);
-		if (rc)
-			goto op_err;
-
-		if (fl) {
-			/* set vlan list */
-			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
-							   msg->vf_qid,
-							   false);
-			if (rc)
-				goto op_err;
-		}
 	}
 
 	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1689,11 +1792,15 @@
 			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 		}
 
-		/* A packet arriving the vf's mac should be accepted
-		 * with any vlan, unless a vlan has already been
-		 * configured.
+		/* any_vlan is not configured if HV is forcing VLAN
+		 * any_vlan is configured if
+		 *   1. VF does not support vlan filtering
+		 *   OR
+		 *   2. VF supports vlan filtering and explicitly requested it
 		 */
-		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
 			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
 		/* set rx-mode */
@@ -1729,17 +1836,31 @@
 	 * since queue was not set up.
 	 */
 	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
-		/* once a mac was set by ndo can only accept a single mac... */
-		if (filters->n_mac_vlan_filters > 1) {
-			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
-				  vf->abs_vfid);
-			rc = -EPERM;
-			goto response;
+		struct vfpf_q_mac_vlan_filter *filter = NULL;
+		int i;
+
+		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+			if (!(filters->filters[i].flags &
+			      VFPF_Q_FILTER_DEST_MAC_VALID))
+				continue;
+
+			/* once a mac was set by ndo can only accept
+			 * a single mac...
+			 */
+			if (filter) {
+				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+					  vf->abs_vfid,
+					  filters->n_mac_vlan_filters);
+				rc = -EPERM;
+				goto response;
+			}
+
+			filter = &filters->filters[i];
 		}
 
 		/* ...and only the mac set by the ndo */
-		if (filters->n_mac_vlan_filters == 1 &&
-		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+		if (filter &&
+		    !ether_addr_equal(filter->mac, bulletin->mac)) {
 			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
 				  vf->abs_vfid);
 
@@ -1761,17 +1882,14 @@
 
 	/* if vlan was set by hypervisor we don't allow guest to config vlan */
 	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
-		int i;
-
 		/* search for vlan filters */
-		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
-			if (filters->filters[i].flags &
-			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
-				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
-					  vf->abs_vfid);
-				rc = -EPERM;
-				goto response;
-			}
+		if (bnx2x_vf_filters_contain(filters,
+					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+				  vf->abs_vfid);
+			rc = -EPERM;
+			goto response;
 		}
 	}
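
The reworked rx-mode rules above boil down to a small decision:
accept_any_vlan is granted only when the hypervisor has not forced a VLAN
on the VF, and then only if the VF either lacks VLAN filtering support or
explicitly requested any-VLAN reception. Distilled into a standalone
predicate (a sketch, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool vf_accept_any_vlan(bool hv_forced_vlan,
			       bool vf_has_vlan_filtering,
			       bool vf_requested_any_vlan)
{
	if (hv_forced_vlan)
		return false;	/* a hypervisor-forced VLAN always wins */
	return !vf_has_vlan_filtering || vf_requested_any_vlan;
}

int main(void)
{
	printf("%d\n", vf_accept_any_vlan(false, true, true));	/* 1 */
	printf("%d\n", vf_accept_any_vlan(false, false, false));/* 1 */
	printf("%d\n", vf_accept_any_vlan(true, false, true));	/* 0 */
	return 0;
}
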
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f0110f4..64f2b52 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -70,6 +70,8 @@
 #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST		0x00000004
 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST	0x00000008
 #define VFPF_RX_MASK_ACCEPT_BROADCAST		0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN		0x00000020
+
 #define BULLETIN_CONTENT_SIZE		(sizeof(struct pf_vf_bulletin_content))
 #define BULLETIN_CONTENT_LEGACY_SIZE	(32)
 #define BULLETIN_ATTEMPTS	5 /* crc failures before throwing towel */
@@ -133,6 +135,7 @@
 		u8 fp_hsi_ver;
 		u8 caps;
 #define VF_CAP_SUPPORT_EXT_BULLETIN	(1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER	(1 << 1)
 	} vfdev_info;
 
 	struct vf_pf_resc_request resc_request;
@@ -174,10 +177,12 @@
 	struct pf_vf_pfdev_info {
 		u32 chip_num;
 		u32 pf_cap;
-#define PFVF_CAP_RSS		0x00000001
-#define PFVF_CAP_DHC		0x00000002
-#define PFVF_CAP_TPA		0x00000004
-#define PFVF_CAP_TPA_UPDATE	0x00000008
+#define PFVF_CAP_RSS          0x00000001
+#define PFVF_CAP_DHC          0x00000002
+#define PFVF_CAP_TPA          0x00000004
+#define PFVF_CAP_TPA_UPDATE   0x00000008
+#define PFVF_CAP_VLAN_FILTER  0x00000010
+
 		char fw_ver[32];
 		u16 db_size;
 		u8  indices_per_sb;
@@ -294,7 +299,7 @@
 	u32 flags;
 #define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
 #define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
-#define VFPF_Q_FILTER_SET_MAC		0x100	/* set/clear */
+#define VFPF_Q_FILTER_SET		0x100	/* set/clear */
 	u8  mac[ETH_ALEN];
 	u16 vlan_tag;
 };
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5bf7ce0..c6f2d39 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2625,8 +2625,7 @@
 	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
 
 	/* Turn on the clock */
-	if (!IS_ERR(priv->clk))
-		clk_prepare_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	/* If this is an internal GPHY, power it back on now, before UniMAC is
 	 * brought out of reset as absolutely no UniMAC activity is allowed
@@ -2703,8 +2702,7 @@
 err_fini_dma:
 	bcmgenet_fini_dma(priv);
 err_clk_disable:
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 	return ret;
 }
 
@@ -2761,8 +2759,7 @@
 	if (priv->internal_phy)
 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
@@ -3215,11 +3212,12 @@
 		priv->version = pd->genet_version;
 
 	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
-	if (IS_ERR(priv->clk))
+	if (IS_ERR(priv->clk)) {
 		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+		priv->clk = NULL;
+	}
 
-	if (!IS_ERR(priv->clk))
-		clk_prepare_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	bcmgenet_set_hw_params(priv);
 
@@ -3230,8 +3228,10 @@
 	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
 
 	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
-	if (IS_ERR(priv->clk_wol))
+	if (IS_ERR(priv->clk_wol)) {
 		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+		priv->clk_wol = NULL;
+	}
 
 	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
 	if (IS_ERR(priv->clk_eee)) {
@@ -3257,8 +3257,7 @@
 	netif_carrier_off(dev);
 
 	/* Turn off the main clock, WOL clock is handled separately */
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	err = register_netdev(dev);
 	if (err)
@@ -3267,8 +3266,7 @@
 	return err;
 
 err_clk_disable:
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 err:
 	free_netdev(dev);
 	return err;
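
The genet clock cleanups lean on a documented property of the common clock
framework: clk_prepare_enable() and clk_disable_unprepare() treat a NULL
struct clk pointer as a valid optional clock and succeed without doing
anything. Normalizing a failed devm_clk_get() to NULL once at probe time
therefore lets every later call site drop its IS_ERR() guard. A userspace
mock of that contract (the real implementation lives in drivers/clk):

#include <stdio.h>
#include <stddef.h>

struct clk;	/* opaque, as in the kernel */

/* Mock of the contract the patch relies on: a NULL clk is treated as
 * "no clock" and every operation on it silently succeeds.
 */
static int clk_prepare_enable(struct clk *clk)
{
	if (!clk)
		return 0;	/* optional clock: no-op */
	/* a real framework would prepare and enable the clock here */
	return 0;
}

int main(void)
{
	struct clk *clk = NULL;	/* as if devm_clk_get() had failed */

	printf("clk_prepare_enable(NULL) = %d\n", clk_prepare_enable(clk));
	return 0;
}
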
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 0802cd9..b3679ad 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -163,6 +163,15 @@
 	phy_print_status(phydev);
 }
 
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+					  struct fixed_phy_status *status)
+{
+	if (dev && dev->phydev && status)
+		status->link = dev->phydev->link;
+
+	return 0;
+}
+
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -215,6 +224,10 @@
 	reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
 	reg |= LED_ACT_SOURCE_MAC;
 	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+	if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+		fixed_phy_set_link_update(priv->phydev,
+					  bcmgenet_fixed_phy_link_update);
 }
 
 int bcmgenet_mii_config(struct net_device *dev)
@@ -460,6 +473,7 @@
 	struct device_node *dn = priv->pdev->dev.of_node;
 	struct device *kdev = &priv->pdev->dev;
 	const char *phy_mode_str = NULL;
+	struct phy_device *phydev = NULL;
 	char *compat;
 	int phy_mode;
 	int ret;
@@ -515,14 +529,12 @@
 			priv->internal_phy = true;
 	}
 
-	return 0;
-}
-
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
-					  struct fixed_phy_status *status)
-{
-	if (dev && dev->phydev && status)
-		status->link = dev->phydev->link;
+	/* Make sure we initialize MoCA PHYs with a link down */
+	if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+		phydev = of_phy_find_device(dn);
+		if (phydev)
+			phydev->link = 0;
+	}
 
 	return 0;
 }
@@ -579,12 +591,9 @@
 			return -ENODEV;
 		}
 
-		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
-			ret = fixed_phy_set_link_update(
-				phydev, bcmgenet_fixed_phy_link_update);
-			if (!ret)
-				phydev->link = 0;
-		}
+		/* Make sure we initialize MoCA PHYs with a link down */
+		phydev->link = 0;
+
 	}
 
 	priv->phydev = phydev;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d746559..8fb80b2 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -399,7 +399,7 @@
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
 #define MACB_CAPS_MACB_IS_GEM			0x80000000
-#define MACB_CAPS_JUMBO				0x00000008
+#define MACB_CAPS_JUMBO				0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
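
For context on the one-line macb change above: the low bits of this
capability mask are already assigned to other MACB_CAPS_* flags (0x00000008
appears to be taken by MACB_CAPS_NO_GIGABIT_HALF), so MACB_CAPS_JUMBO at
0x00000008 aliased an existing capability; moving it to the unused
0x00000010 restores a unique bit per flag.
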
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe..3584420 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -37,6 +37,8 @@
 	tristate "Thunder MAC interface driver (BGX)"
 	depends on 64BIT
 	default ARCH_THUNDER
+	select PHYLIB
+	select MDIO_OCTEON
 	---help---
 	  This driver supports programming and controlling of MAC
 	  interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 687acf7..5eedb98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -925,6 +925,20 @@
 	const struct firmware *fw;
 	struct adapter *adap = netdev2adap(netdev);
 	unsigned int mbox = PCIE_FW_MASTER_M + 1;
+	u32 pcie_fw;
+	unsigned int master;
+	u8 master_vld = 0;
+
+	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+	master = PCIE_FW_MASTER_G(pcie_fw);
+	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+		master_vld = 1;
+	/* if csiostor is the master return */
+	if (master_vld && (master != adap->pf)) {
+		dev_warn(adap->pdev_dev,
+			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+		return -EOPNOTSUPP;
+	}
 
 	ef->data[sizeof(ef->data) - 1] = '\0';
 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 375a825..ed8a8f35 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -338,6 +338,11 @@
 #define EGRTHRESHOLDPACKING_G(x) \
 	(((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
 
+#define T6_EGRTHRESHOLDPACKING_S    16
+#define T6_EGRTHRESHOLDPACKING_M    0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+	(((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
 #define SGE_TIMESTAMP_LO_A 0x1098
 #define SGE_TIMESTAMP_HI_A 0x109c
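
T4, T5 and T6 place the egress congestion threshold in different bit fields
of the same SGE register, which is why each chip revision gets its own
shift/mask/get macro triplet in the usual cxgb4 style. The cxgb4vf hunk
that follows selects the right accessor per chip and then converts the
threshold, which the hardware counts in units of two free-list pointers,
into free-list entries with the final *2 + 1 step. A standalone sketch
(the register value is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Same extraction idiom as the T6_* macros above: G(x) == (x >> S) & M */
#define T6_EGRTHRESHOLDPACKING_S	16
#define T6_EGRTHRESHOLDPACKING_M	0xffU
#define T6_EGRTHRESHOLDPACKING_G(x) \
	(((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)

int main(void)
{
	uint32_t sge_congestion_control = 0x00230000;	/* example value */
	unsigned int thres = T6_EGRTHRESHOLDPACKING_G(sge_congestion_control);

	/* hardware counts the threshold in pairs of free-list pointers */
	printf("fl_starve_thres = %u\n", thres * 2 + 1);	/* 71 */
	return 0;
}
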
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 1d5e77a..fa3786a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2668,8 +2668,22 @@
 	 * give it more Free List entries.  (Note that the SGE's Egress
 	 * Congestion Threshold is in units of 2 Free List pointers.)
 	 */
-	s->fl_starve_thres
-		= EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+	case CHELSIO_T4:
+		s->fl_starve_thres =
+		   EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T5:
+		s->fl_starve_thres =
+		   EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T6:
+	default:
+		s->fl_starve_thres =
+		   T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	}
+	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
 
 	/*
 	 * Set up tasklet timers.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cb5777b..0a27805 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -105,6 +105,8 @@
 
 #define MAX_VFS			30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN		32
+#define	CNTL_SERIAL_NUM_WORDS	8  /* Controller serial number words */
+#define	CNTL_SERIAL_NUM_WORD_SZ	(sizeof(u16)) /* Byte-sz of serial num word */
 
 #define	RSS_INDIR_TABLE_LEN	128
 #define RSS_HASH_KEY_LEN	40
@@ -228,6 +230,7 @@
 struct be_tx_stats {
 	u64 tx_bytes;
 	u64 tx_pkts;
+	u64 tx_vxlan_offload_pkts;
 	u64 tx_reqs;
 	u64 tx_compl;
 	ulong tx_jiffies;
@@ -275,6 +278,7 @@
 struct be_rx_stats {
 	u64 rx_bytes;
 	u64 rx_pkts;
+	u64 rx_vxlan_offload_pkts;
 	u32 rx_drops_no_skbs;	/* skb allocation errors */
 	u32 rx_drops_no_frags;	/* HW has no fetched frags */
 	u32 rx_post_fail;	/* page post alloc failures */
@@ -590,6 +594,7 @@
 	struct rss_info rss_info;
 	/* Filters for packets that need to be sent to BMC */
 	u32 bmc_filt_mask;
+	u16 serial_num[CNTL_SERIAL_NUM_WORDS];
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ecad46f..3be1fbd 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2852,10 +2852,11 @@
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_cntl_attribs *req;
 	struct be_cmd_resp_cntl_attribs *resp;
-	int status;
+	int status, i;
 	int payload_len = max(sizeof(*req), sizeof(*resp));
 	struct mgmt_controller_attrib *attribs;
 	struct be_dma_mem attribs_cmd;
+	u32 *serial_num;
 
 	if (mutex_lock_interruptible(&adapter->mbox_lock))
 		return -1;
@@ -2886,6 +2887,10 @@
 	if (!status) {
 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
+		serial_num = attribs->hba_attribs.controller_serial_number;
+		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+				(BIT_MASK(16) - 1);
 	}
 
 err:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a4479f7..36d835b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1637,10 +1637,12 @@
 struct mgmt_hba_attribs {
 	u32 rsvd0[24];
 	u8 controller_model_number[32];
-	u32 rsvd1[79];
-	u8 rsvd2[3];
+	u32 rsvd1[16];
+	u32 controller_serial_number[8];
+	u32 rsvd2[55];
+	u8 rsvd3[3];
 	u8 phy_port;
-	u32 rsvd3[13];
+	u32 rsvd4[13];
 } __packed;
 
 struct mgmt_controller_attrib {
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index d20ff05..2c9ed17 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -138,6 +138,7 @@
 static const struct be_ethtool_stat et_rx_stats[] = {
 	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+	{DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
 	{DRVSTAT_RX_INFO(rx_compl)},
 	{DRVSTAT_RX_INFO(rx_compl_err)},
 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@
 	{DRVSTAT_TX_INFO(tx_internal_parity_err)},
 	{DRVSTAT_TX_INFO(tx_bytes)},
 	{DRVSTAT_TX_INFO(tx_pkts)},
+	{DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
 	/* Number of skbs queued for transmission by the driver */
 	{DRVSTAT_TX_INFO(tx_reqs)},
 	/* Number of times the TX queue was stopped due to lack
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c996dd7..d86bc5d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -677,11 +677,14 @@
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
 	struct be_tx_stats *stats = tx_stats(txo);
+	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
 
 	u64_stats_update_begin(&stats->sync);
 	stats->tx_reqs++;
 	stats->tx_bytes += skb->len;
-	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+	stats->tx_pkts += tx_pkts;
+	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+		stats->tx_vxlan_offload_pkts += tx_pkts;
 	u64_stats_update_end(&stats->sync);
 }
 
@@ -1957,6 +1960,8 @@
 	stats->rx_compl++;
 	stats->rx_bytes += rxcp->pkt_size;
 	stats->rx_pkts++;
+	if (rxcp->tunneled)
+		stats->rx_vxlan_offload_pkts++;
 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
 		stats->rx_mcast_pkts++;
 	if (rxcp->err)
@@ -5219,6 +5224,27 @@
 }
 #endif
 
+static int be_get_phys_port_id(struct net_device *dev,
+			       struct netdev_phys_item_id *ppid)
+{
+	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+	struct be_adapter *adapter = netdev_priv(dev);
+	u8 *id;
+
+	if (MAX_PHYS_ITEM_ID_LEN < id_len)
+		return -ENOSPC;
+
+	ppid->id[0] = adapter->hba_port_num + 1;
+	id = &ppid->id[1];
+	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+	ppid->id_len = id_len;
+
+	return 0;
+}
+
 static const struct net_device_ops be_netdev_ops = {
 	.ndo_open		= be_open,
 	.ndo_stop		= be_close,
@@ -5249,6 +5275,7 @@
 	.ndo_del_vxlan_port	= be_del_vxlan_port,
 	.ndo_features_check	= be_features_check,
 #endif
+	.ndo_get_phys_port_id   = be_get_phys_port_id,
 };
 
 static void be_netdev_init(struct net_device *netdev)
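
be_get_phys_port_id() composes the physical port ID from one byte of port
number (hba_port_num + 1) followed by the controller serial-number words
copied highest-index first, so functions on the same controller report IDs
differing only in the leading byte. A self-contained sketch of the layout
(the serial values below are made up):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CNTL_SERIAL_NUM_WORDS	8
#define CNTL_SERIAL_NUM_WORD_SZ	(sizeof(uint16_t))

int main(void)
{
	uint16_t serial[CNTL_SERIAL_NUM_WORDS] = {
		0x1111, 0x2222, 0x3333, 0x4444,
		0x5555, 0x6666, 0x7777, 0x8888,	/* made-up serial */
	};
	uint8_t id[1 + CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ];
	uint8_t *p = &id[1];
	int i;

	id[0] = 1 + 1;	/* hba_port_num + 1, example port 1 */

	/* serial words are appended highest-index first, as above */
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, p += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(p, &serial[i], CNTL_SERIAL_NUM_WORD_SZ);

	for (i = 0; i < (int)sizeof(id); i++)
		printf("%02x", id[i]);
	printf("\n");
	return 0;
}
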
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 648ca85..afa3ea7 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2611,7 +2611,8 @@
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			struct skb_shared_hwtstamps shhwtstamps;
-			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+					  ~0x7UL);
 
 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@ -3043,8 +3044,9 @@
 
 	/* Update Last Free RxBD pointer for LFC */
 	if (unlikely(priv->tx_actual_en)) {
-		bdp = gfar_rxbd_lastfree(rx_queue);
-		gfar_write(rx_queue->rfbptr, (u32)bdp);
+		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+		gfar_write(rx_queue->rfbptr, bdp_dma);
 	}
 
 	return howmany;
@@ -3563,7 +3565,6 @@
 	struct phy_device *phydev = priv->phydev;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i;
-	struct rxbd8 *bdp;
 
 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
 		return;
@@ -3620,9 +3621,11 @@
 		/* Turn last free buffer recording on */
 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
 			for (i = 0; i < priv->num_rx_queues; i++) {
+				u32 bdp_dma;
+
 				rx_queue = priv->rx_queue[i];
-				bdp = gfar_rxbd_lastfree(rx_queue);
-				gfar_write(rx_queue->rfbptr, (u32)bdp);
+				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+				gfar_write(rx_queue->rfbptr, bdp_dma);
 			}
 
 			priv->tx_actual_en = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4402124..8a5f4de9 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1310,13 +1310,18 @@
 	return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
 }
 
-static inline struct rxbd8 *gfar_rxbd_lastfree(struct gfar_priv_rx_q *rxq)
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
 {
+	struct rxbd8 *bdp;
+	u32 bdp_dma;
 	int i;
 
 	i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+	bdp = &rxq->rx_bd_base[i];
+	bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+	bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
 
-	return &rxq->rx_bd_base[i];
+	return bdp_dma;
 }
 
 irqreturn_t gfar_receive(int irq, void *dev_id);
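
The gianfar rfbptr change fixes a real bug on 64-bit: the old code cast a
kernel virtual descriptor pointer to u32 and wrote it to a device register,
which truncates the pointer and in any case hands the hardware an address
it cannot use. The replacement computes the bus address as the ring's DMA
base plus the descriptor's byte offset within the ring. The offset
arithmetic in isolation (addresses are illustrative):

#include <stdio.h>
#include <stdint.h>

struct rxbd8 {
	uint16_t status;
	uint16_t length;
	uint32_t bufptr;
};

int main(void)
{
	struct rxbd8 ring[256];
	uint64_t ring_dma_base = 0x2f000000;	/* example bus address */
	struct rxbd8 *bdp = &ring[41];		/* "last free" descriptor */

	/* device-visible address = DMA base + byte offset in the ring;
	 * the CPU virtual address (u32)bdp is meaningless to the device.
	 */
	uint32_t bdp_dma = (uint32_t)ring_dma_base +
			   (uint32_t)((uintptr_t)bdp - (uintptr_t)ring);

	printf("bdp_dma = 0x%08x\n", bdp_dma);	/* 0x2f000148 */
	return 0;
}
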
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 89d788d..fea1601 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4588,6 +4588,7 @@
 	return 0;
 
 err_req_irq:
+	pm_qos_remove_request(&adapter->pm_qos_req);
 	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter->rx_ring);
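
The one-line e1000e change applies the standard error-unwinding rule:
everything acquired before the failing step must be released on the error
path, in reverse order of acquisition; here the pm_qos request leaked
whenever a later step in open failed. The generic shape, as a runnable
sketch with purely illustrative names:

#include <stdio.h>

static int get_resource_a(void) { return 0; }	/* e.g. pm_qos request */
static void put_resource_a(void) { }
static int get_resource_b(void) { return -1; }	/* e.g. request_irq, failing */

static int open_device(void)
{
	int err = get_resource_a();

	if (err)
		return err;

	err = get_resource_b();
	if (err)
		goto err_put_a;		/* release what was already taken */

	return 0;

err_put_a:
	put_resource_a();		/* the call the patch adds */
	return err;
}

int main(void)
{
	printf("open_device() = %d\n", open_device());
	return 0;
}
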
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ec76c3f..281fd84 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -98,7 +98,7 @@
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG	(1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG	BIT(0)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +289,35 @@
 	struct work_struct service_task;
 
 	u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED              (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED                  (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED                 (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED		BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED			BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE		BIT_ULL(9)
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22)
-#define I40E_FLAG_PTP                          (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL			BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED	BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT		BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT		BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(22)
+#define I40E_FLAG_PTP				BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED			BIT_ULL(26)
 #ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC		BIT_ULL(27)
 #endif
-#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID			BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(29)
 #define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
 
 	/* tracks features that get auto disabled by errors */
@@ -443,8 +443,8 @@
 
 	u32 current_netdev_flags;
 	unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED  (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER       (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED	BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER		BIT(1)
 	unsigned long flags;
 
 	struct list_head mac_filter_list;
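
The i40e flag conversion is more than style: in the open-coded form
(u64)(1 << n) the shift happens in 32-bit int before the cast, so bit 31
and above is undefined behavior, and this file already carries a flag at
bit 40 (I40E_FLAG_VEB_MODE_ENABLED) that needed BIT_ULL(). Converting the
rest keeps the whole table safe and uniform. A minimal demonstration:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr)	(1ULL << (nr))	/* as in the kernel's bit helpers */

int main(void)
{
	/* (u64)(1 << 40) would shift a 32-bit int and cast afterwards:
	 * undefined behavior, and certainly not bit 40. BIT_ULL()
	 * performs the shift in 64 bits from the start.
	 */
	uint64_t flag = BIT_ULL(40);

	printf("BIT_ULL(40) = 0x%016llx\n", (unsigned long long)flag);
	return 0;
}
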
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0703222..167ca0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -72,6 +72,212 @@
 }
 
 /**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+	switch (stat_err) {
+	case 0:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	}
+
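+	/* no case matched: report the raw status code as a decimal string */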
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
+
+/**
  * i40e_debug_aq
  * @hw: debug mask related to admin queue
  * @mask: debug mask
@@ -1187,9 +1393,9 @@
 			blink = false;
 
 		if (blink)
-			gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 		else
-			gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 
 		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
 		break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index e137e3f..50fc894 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -58,9 +58,9 @@
 #define I40E_IEEE_ETS_MAXTC_SHIFT	0
 #define I40E_IEEE_ETS_MAXTC_MASK	(0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
 #define I40E_IEEE_ETS_CBS_SHIFT		6
-#define I40E_IEEE_ETS_CBS_MASK		(0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK		BIT(I40E_IEEE_ETS_CBS_SHIFT)
 #define I40E_IEEE_ETS_WILLING_SHIFT	7
-#define I40E_IEEE_ETS_WILLING_MASK	(0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK	BIT(I40E_IEEE_ETS_WILLING_SHIFT)
 #define I40E_IEEE_ETS_PRIO_0_SHIFT	0
 #define I40E_IEEE_ETS_PRIO_0_MASK	(0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
 #define I40E_IEEE_ETS_PRIO_1_SHIFT	4
@@ -79,9 +79,9 @@
 #define I40E_IEEE_PFC_CAP_SHIFT		0
 #define I40E_IEEE_PFC_CAP_MASK		(0xF << I40E_IEEE_PFC_CAP_SHIFT)
 #define I40E_IEEE_PFC_MBC_SHIFT		6
-#define I40E_IEEE_PFC_MBC_MASK		(0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK		BIT(I40E_IEEE_PFC_MBC_SHIFT)
 #define I40E_IEEE_PFC_WILLING_SHIFT	7
-#define I40E_IEEE_PFC_WILLING_MASK	(0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK	BIT(I40E_IEEE_PFC_WILLING_SHIFT)
 
 /* Defines for IEEE APP TLV */
 #define I40E_IEEE_APP_SEL_SHIFT		0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index bd5079d..1c51f73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -187,7 +187,7 @@
 	/* Set up all the App TLVs if DCBx is negotiated */
 	for (i = 0; i < dcbxcfg->numapps; i++) {
 		prio = dcbxcfg->app[i].priority;
-		tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+		tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
 
 		/* Add APP only if the TC is enabled for this VSI */
 		if (tc_map & vsi->tc_config.enabled_tc) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da0faf4..d7c15d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -964,7 +964,7 @@
 		pf->auto_disable_flags |= flag;
 	}
 	dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+	i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 }
 
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@
 		}
 	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "empr", 4) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "read", 4) == 0) {
 		u32 address;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 56438bd..f141e78 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -144,11 +144,8 @@
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
 	if (!ret_code &&
 	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-	     (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
-		ret_code = i40e_validate_nvm_checksum(hw, NULL);
-	} else {
-		ret_code = I40E_ERR_DIAG_TEST_FAILED;
-	}
-
-	return ret_code;
+	     BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+		return i40e_validate_nvm_checksum(hw, NULL);
+	else
+		return I40E_ERR_DIAG_TEST_FAILED;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f2075d5b..83d41c2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -681,15 +681,17 @@
 		/* make the aq call */
 		status = i40e_aq_set_phy_config(hw, &config, NULL);
 		if (status) {
-			netdev_info(netdev, "Set phy config failed with error %d.\n",
-				    status);
+			netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
 			return -EAGAIN;
 		}
 
 		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 		if (status)
-			netdev_info(netdev, "Updating link info failed with error %d\n",
-				    status);
+			netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
 
 	} else {
 		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -709,8 +711,9 @@
 
 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
 	if (ret) {
-		netdev_info(netdev, "link restart failed, aq_err=%d\n",
-			    pf->hw.aq.asq_last_status);
+		netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+			    i40e_stat_str(hw, ret),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		return -EIO;
 	}
 
@@ -822,18 +825,21 @@
 	status = i40e_set_fc(hw, &aq_failures, link_up);
 
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
-		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-		netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 
@@ -1011,7 +1017,7 @@
 		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
 		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
 	/* register returns value in power of 2, 64Kbyte chunks. */
-	val = (64 * 1024) * (1 << val);
+	val = (64 * 1024) * BIT(val);
 	return val;
 }
 
@@ -1464,11 +1470,11 @@
 	else
 		info->phc_index = -1;
 
-	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
 
-	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 	return 0;
 }
@@ -1584,7 +1590,7 @@
 			/* indicate we're in test mode */
 			dev_close(netdev);
 		else
-			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+			i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 		/* Link test performed before hardware reset
 		 * so autoneg doesn't interfere with test result
@@ -1606,7 +1612,7 @@
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__I40E_TESTING, &pf->state);
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 		if (if_running)
 			dev_open(netdev);
@@ -1639,7 +1645,7 @@
 
 	/* NVM bit on means WoL disabled for the port */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+	if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
 		wol->supported = 0;
 		wol->wolopts = 0;
 	} else {
@@ -1672,7 +1678,7 @@
 
 	/* NVM bit on means WoL disabled for the port */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-	if (((1 << hw->port) & wol_nvm_bits))
+	if (BIT(hw->port) & wol_nvm_bits)
 		return -EOPNOTSUPP;
 
 	/* only magic packet is supported */
@@ -2018,10 +2024,10 @@
 	case TCP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -2030,10 +2036,10 @@
 	case TCP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -2042,12 +2048,12 @@
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		default:
 			return -EINVAL;
@@ -2056,12 +2062,12 @@
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		default:
 			return -EINVAL;
@@ -2074,7 +2080,7 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
 		break;
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
@@ -2083,15 +2089,15 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
 		break;
 	case IPV4_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
 		break;
 	case IPV6_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index c8b621e..5ea75dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -298,8 +298,8 @@
 
 	/* enable FCoE hash filter */
 	val = rd32(hw, I40E_PFQF_HENA(1));
-	val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
-	val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+	val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+	val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
 	val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
 	wr32(hw, I40E_PFQF_HENA(1), val);
 
@@ -308,10 +308,10 @@
 	pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
 
 	/* Reserve 4K DDP contexts and 20K filter size for FCoE */
-	pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
-				 I40E_DMA_CNTX_BASE_SIZE;
+	pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+				I40E_DMA_CNTX_BASE_SIZE;
 	pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
-				(1 << I40E_HASH_FILTER_SIZE_16K) *
+				BIT(I40E_HASH_FILTER_SIZE_16K) *
 				I40E_HASH_FILTER_BASE_SIZE;
 
 	/* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -348,7 +348,7 @@
 		if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
 		    app.protocolid == ETH_P_FCOE) {
 			tc = dcbcfg->etscfg.prioritytable[app.priority];
-			enabled_tc |= (1 << tc);
+			enabled_tc |= BIT(tc);
 			break;
 		}
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 0d49e2d..a93174d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -59,9 +59,9 @@
 	(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
 
 #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT	\
-	(1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+	BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
 #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT	\
-	(1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+	BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
 
 #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)	\
 	I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index b89856a..5ebe12d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -297,21 +297,15 @@
 					    u32 idx, bool is_pf)
 {
 	struct i40e_hmc_sd_entry *sd_entry;
-	i40e_status ret_code = 0;
+
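+	/* only the PF may remove a backing-page SD entry */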
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
 
 	/* get the entry and decrease its ref counter */
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
 }
 
 /**
@@ -351,20 +345,13 @@
 					      struct i40e_hmc_info *hmc_info,
 					      u32 idx, bool is_pf)
 {
-	i40e_status ret_code = 0;
 	struct i40e_hmc_sd_entry *sd_entry;
 
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
+
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	/* free memory here */
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 386416b..d906692 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -127,8 +127,8 @@
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -147,7 +147,7 @@
 		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d399eaf..fa371a2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -129,7 +129,7 @@
 	obj->cnt = txq_num;
 	obj->base = 0;
 	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (fcoe_filt_num > obj->max_cnt) {
@@ -763,7 +763,7 @@
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = ((u8)1 << ce_info->width) - 1;
+	mask = BIT(ce_info->width) - 1;
 
 	src_byte = *from;
 	src_byte &= mask;
@@ -804,7 +804,7 @@
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = ((u16)1 << ce_info->width) - 1;
+	mask = BIT(ce_info->width) - 1;
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
@@ -854,7 +854,7 @@
 	 * to 5 bits so the shift will do nothing
 	 */
 	if (ce_info->width < 32)
-		mask = ((u32)1 << ce_info->width) - 1;
+		mask = BIT(ce_info->width) - 1;
 	else
 		mask = ~(u32)0;
 
@@ -906,7 +906,7 @@
 	 * to 6 bits so the shift will do nothing
 	 */
 	if (ce_info->width < 64)
-		mask = ((u64)1 << ce_info->width) - 1;
+		mask = BIT_ULL(ce_info->width) - 1;
 	else
 		mask = ~(u64)0;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6ce9086..857d294 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -520,7 +520,7 @@
 	if (likely(new_data >= *offset))
 		*stat = new_data - *offset;
 	else
-		*stat = (new_data + ((u64)1 << 48)) - *offset;
+		*stat = (new_data + BIT_ULL(48)) - *offset;
 	*stat &= 0xFFFFFFFFFFFFULL;
 }
 
@@ -543,7 +543,7 @@
 	if (likely(new_data >= *offset))
 		*stat = (u32)(new_data - *offset);
 	else
-		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }
 
 /**
@@ -1276,7 +1276,7 @@
 {
 	struct i40e_aqc_remove_macvlan_element_data element;
 	struct i40e_pf *pf = vsi->back;
-	i40e_status aq_ret;
+	i40e_status ret;
 
 	/* Only appropriate for the PF main VSI */
 	if (vsi->type != I40E_VSI_MAIN)
@@ -1287,8 +1287,8 @@
 	element.vlan_tag = 0;
 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
 			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
-	if (aq_ret)
+	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+	if (ret)
 		return -ENOENT;
 
 	return 0;
@@ -1526,7 +1526,7 @@
 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
 		/* Find numtc from enabled TC bitmap */
 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-			if (enabled_tc & (1 << i)) /* TC is enabled */
+			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
 				numtc++;
 		}
 		if (!numtc) {
@@ -1552,7 +1552,8 @@
 	/* Setup queue offset/count for all TCs for given VSI */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		/* See if the given TC is enabled for the given VSI */
-		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+			/* TC is enabled */
 			int pow, num_qps;
 
 			switch (vsi->type) {
@@ -1578,7 +1579,7 @@
 			/* find the next higher power-of-2 of num queue pairs */
 			num_qps = qcount;
 			pow = 0;
-			while (num_qps && ((1 << pow) < qcount)) {
+			while (num_qps && (BIT_ULL(pow) < qcount)) {
 				pow++;
 				num_qps >>= 1;
 			}
@@ -1728,10 +1729,11 @@
 	bool add_happened = false;
 	int filter_list_len = 0;
 	u32 changed_flags = 0;
-	i40e_status aq_ret = 0;
+	i40e_status ret = 0;
 	struct i40e_pf *pf;
 	int num_add = 0;
 	int num_del = 0;
+	int aq_err = 0;
 	u16 cmd_flags;
 
 	/* empty array typed pointers, kcalloc later */
@@ -1783,31 +1785,31 @@
 
 			/* flush a full buffer */
 			if (num_del == filter_list_len) {
-				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
-					    vsi->seid, del_list, num_del,
-					    NULL);
+				ret = i40e_aq_remove_macvlan(&pf->hw,
+						  vsi->seid, del_list, num_del,
+						  NULL);
+				aq_err = pf->hw.aq.asq_last_status;
 				num_del = 0;
 				memset(del_list, 0, sizeof(*del_list));
 
-				if (aq_ret &&
-				    pf->hw.aq.asq_last_status !=
-							      I40E_AQ_RC_ENOENT)
+				if (ret && aq_err != I40E_AQ_RC_ENOENT)
 					dev_info(&pf->pdev->dev,
-						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-						 aq_ret,
-						 pf->hw.aq.asq_last_status);
+						 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+						 i40e_stat_str(&pf->hw, ret),
+						 i40e_aq_str(&pf->hw, aq_err));
 			}
 		}
 		if (num_del) {
-			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
 						     del_list, num_del, NULL);
+			aq_err = pf->hw.aq.asq_last_status;
 			num_del = 0;
 
-			if (aq_ret &&
-			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+			if (ret && aq_err != I40E_AQ_RC_ENOENT)
 				dev_info(&pf->pdev->dev,
-					 "ignoring delete macvlan error, err %d, aq_err %d\n",
-					 aq_ret, pf->hw.aq.asq_last_status);
+					 "ignoring delete macvlan error, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw, aq_err));
 		}
 
 		kfree(del_list);
@@ -1845,29 +1847,31 @@
 
 			/* flush a full buffer */
 			if (num_add == filter_list_len) {
-				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-							     add_list, num_add,
-							     NULL);
+				ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+							  add_list, num_add,
+							  NULL);
+				aq_err = pf->hw.aq.asq_last_status;
 				num_add = 0;
 
-				if (aq_ret)
+				if (ret)
 					break;
 				memset(add_list, 0, sizeof(*add_list));
 			}
 		}
 		if (num_add) {
-			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-						     add_list, num_add, NULL);
+			ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+						  add_list, num_add, NULL);
+			aq_err = pf->hw.aq.asq_last_status;
 			num_add = 0;
 		}
 		kfree(add_list);
 		add_list = NULL;
 
-		if (add_happened && aq_ret &&
-		    pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+		if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
 			dev_info(&pf->pdev->dev,
-				 "add filter failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "add filter failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw, aq_err));
 			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
 			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 				      &vsi->state)) {
@@ -1883,34 +1887,40 @@
 	if (changed_flags & IFF_ALLMULTI) {
 		bool cur_multipromisc;
 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-							       vsi->seid,
-							       cur_multipromisc,
-							       NULL);
-		if (aq_ret)
+		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+							    vsi->seid,
+							    cur_multipromisc,
+							    NULL);
+		if (ret)
 			dev_info(&pf->pdev->dev,
-				 "set multi promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "set multi promisc failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
 		bool cur_promisc;
 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 					&vsi->state));
-		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
-							     vsi->seid,
-							     cur_promisc, NULL);
-		if (aq_ret)
+		ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+							  vsi->seid,
+							  cur_promisc, NULL);
+		if (ret)
 			dev_info(&pf->pdev->dev,
-				 "set uni promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
-		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-						   vsi->seid,
-						   cur_promisc, NULL);
-		if (aq_ret)
+				 "set uni promisc failed, err %s, aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+		ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+						vsi->seid,
+						cur_promisc, NULL);
+		if (ret)
 			dev_info(&pf->pdev->dev,
-				 "set brdcast promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "set brdcast promisc failed, err %s, aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 
 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -2006,8 +2016,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vlan stripping failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2035,8 +2047,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vlan stripping failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2306,7 +2320,7 @@
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
 	struct i40e_vsi_context ctxt;
-	i40e_status aq_ret;
+	i40e_status ret;
 
 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 	vsi->info.pvid = cpu_to_le16(vid);
@@ -2316,11 +2330,13 @@
 
 	ctxt.seid = vsi->seid;
 	ctxt.info = vsi->info;
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "add pvid failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		return -ENOENT;
 	}
 
@@ -2708,9 +2724,9 @@
 #endif /* I40E_FCOE */
 	/* round up for the chip's needs */
 	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-				(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-				(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2740,7 +2756,7 @@
 	}
 
 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
-		if (!(vsi->tc_config.enabled_tc & (1 << n)))
+		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
 			continue;
 
 		qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -4085,7 +4101,7 @@
 		if (app.selector == I40E_APP_SEL_TCPIP &&
 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
 			tc = dcbcfg->etscfg.prioritytable[app.priority];
-			enabled_tc |= (1 << tc);
+			enabled_tc |= BIT_ULL(tc);
 			break;
 		}
 	}
@@ -4134,7 +4150,7 @@
 	u8 i;
 
 	for (i = 0; i < num_tc; i++)
-		enabled_tc |= 1 << i;
+		enabled_tc |= BIT(i);
 
 	return enabled_tc;
 }
@@ -4169,7 +4185,7 @@
 	/* At least have TC0 */
 	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			num_tc++;
 	}
 	return num_tc;
@@ -4191,11 +4207,11 @@
 
 	/* Find the first enabled TC */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			break;
 	}
 
-	return 1 << i;
+	return BIT(i);
 }
 
 /**
@@ -4233,26 +4249,28 @@
 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status aq_ret;
+	i40e_status ret;
 	u32 tc_bw_max;
 	int i;
 
 	/* Get the VSI level BW configuration */
-	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi bw config, err %d, aq_err %d\n",
-			 aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EINVAL;
 	}
 
 	/* Get the VSI level BW configuration per TC */
-	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
-						  NULL);
-	if (aq_ret) {
+	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+					       NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
-			 aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EINVAL;
 	}
 
@@ -4291,16 +4309,16 @@
 				       u8 *bw_share)
 {
 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-	i40e_status aq_ret;
+	i40e_status ret;
 	int i;
 
 	bw_data.tc_valid_bits = enabled_tc;
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
 		bw_data.tc_bw_credits[i] = bw_share[i];
 
-	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-					  NULL);
-	if (aq_ret) {
+	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+				       NULL);
+	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
 			 vsi->back->hw.aq.asq_last_status);
@@ -4349,7 +4367,7 @@
 		 * will set the numtc for netdev as 2 that will be
 		 * referenced by the netdev layer as TC 0 and 1.
 		 */
-		if (vsi->tc_config.enabled_tc & (1 << i))
+		if (vsi->tc_config.enabled_tc & BIT_ULL(i))
 			netdev_set_tc_queue(netdev,
 					vsi->tc_config.tc_info[i].netdev_tc,
 					vsi->tc_config.tc_info[i].qcount,
@@ -4411,7 +4429,7 @@
 
 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			bw_share[i] = 1;
 	}
 
@@ -4435,8 +4453,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "update vsi failed, aq_err=%d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 "Update vsi tc config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		goto out;
 	}
 	/* update the local VSI info with updated queue map */
@@ -4447,8 +4467,10 @@
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "Failed updating vsi bw info, aq_err=%d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 "Failed updating vsi bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		goto out;
 	}
 
@@ -4481,7 +4503,7 @@
 
 	/* Enable ETS TCs with equal BW Share for now */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			bw_data.tc_bw_share_credits[i] = 1;
 	}
 
@@ -4489,8 +4511,9 @@
 						   &bw_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "veb bw config failed, aq_err=%d\n",
-			 pf->hw.aq.asq_last_status);
+			 "VEB bw config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto out;
 	}
 
@@ -4498,8 +4521,9 @@
 	ret = i40e_veb_get_bw_info(veb);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Failed getting veb bw config, aq_err=%d\n",
-			 pf->hw.aq.asq_last_status);
+			 "Failed getting veb bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 
 out:
@@ -4586,8 +4610,9 @@
 	ret = i40e_aq_resume_port_tx(hw, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "AQ command Resume Port Tx failed = %d\n",
-			  pf->hw.aq.asq_last_status);
+			 "Resume Port Tx failed, err %s aq_err %s\n",
+			  i40e_stat_str(&pf->hw, ret),
+			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		/* Schedule PF reset to recover */
 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 		i40e_service_event_schedule(pf);
@@ -4639,8 +4664,9 @@
 		}
 	} else {
 		dev_info(&pf->pdev->dev,
-			 "AQ Querying DCB configuration failed: aq_err %d\n",
-			 pf->hw.aq.asq_last_status);
+			 "Query for DCB configuration failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 
 out:
@@ -4871,7 +4897,7 @@
 
 	/* Generate TC map for number of tc requested */
 	for (i = 0; i < tc; i++)
-		enabled_tc |= (1 << i);
+		enabled_tc |= BIT_ULL(i);
 
 	/* Requesting same TC configuration as already enabled */
 	if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -5010,7 +5036,7 @@
 err_setup_tx:
 	i40e_vsi_free_tx_resources(vsi);
 	if (vsi == pf->vsi[pf->lan_vsi])
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
 	return err;
 }
@@ -5078,7 +5104,7 @@
 		i40e_vc_notify_reset(pf);
 
 	/* do the biggest reset indicated */
-	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
 		/* Request a Global Reset
 		 *
@@ -5093,7 +5119,7 @@
 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 
-	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
 
 		/* Request a Core Reset
 		 *
@@ -5105,7 +5131,7 @@
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 		i40e_flush(&pf->hw);
 
-	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
 
 		/* Request a PF Reset
 		 *
@@ -5118,7 +5144,7 @@
 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
 		i40e_handle_reset_warning(pf);
 
-	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
 		int v;
 
 		/* Find the VSI(s) that requested a re-init */
@@ -5135,7 +5161,7 @@
 
 		/* no further action needed, so return now */
 		return;
-	} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
 		int v;
 
 		/* Find the VSI(s) that needs to be brought down */
@@ -5265,7 +5291,10 @@
 	/* Get updated DCBX data from firmware */
 	ret = i40e_get_dcb_config(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+		dev_info(&pf->pdev->dev,
+			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto exit;
 	}
 
@@ -5773,23 +5802,23 @@
 
 	rtnl_lock();
 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
 	}
 
@@ -5995,27 +6024,29 @@
 {
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_vsi_context ctxt;
-	int aq_ret;
+	int ret;
 
 	ctxt.seid = pf->main_vsi_seid;
 	ctxt.pf_num = pf->hw.pf_id;
 	ctxt.vf_num = 0;
-	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return;
 	}
 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s: update vsi switch failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 }
 
@@ -6029,27 +6060,29 @@
 {
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_vsi_context ctxt;
-	int aq_ret;
+	int ret;
 
 	ctxt.seid = pf->main_vsi_seid;
 	ctxt.pf_num = pf->hw.pf_id;
 	ctxt.vf_num = 0;
-	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return;
 	}
 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s: update vsi switch failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 }
 
@@ -6109,7 +6142,8 @@
 	ret = i40e_add_vsi(ctl_vsi);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "rebuild of owner VSI failed: %d\n", ret);
+			 "rebuild of veb_idx %d owner VSI failed: %d\n",
+			 veb->idx, ret);
 		goto end_reconstitute;
 	}
 	i40e_vsi_reset_stats(ctl_vsi);
@@ -6188,8 +6222,10 @@
 			buf_len = data_size;
 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
 			dev_info(&pf->pdev->dev,
-				 "capability discovery failed: aq=%d\n",
-				 pf->hw.aq.asq_last_status);
+				 "capability discovery failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, err),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			return -ENODEV;
 		}
 	} while (err);
@@ -6375,7 +6411,9 @@
 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
 	ret = i40e_init_adminq(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto clear_recovery;
 	}
 
@@ -6385,11 +6423,8 @@
 
 	i40e_clear_pxe_mode(hw);
 	ret = i40e_get_capabilities(pf);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
-			 ret);
+	if (ret)
 		goto end_core_reset;
-	}
 
 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 				hw->func_caps.num_rx_qp,
@@ -6430,12 +6465,16 @@
 				       I40E_AQ_EVENT_LINK_UPDOWN |
 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
 	if (ret)
-		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	/* make sure our flow control settings are restored */
 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
 	if (ret)
-		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+		dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	/* Rebuild the VSIs and VEBs that existed before reset.
 	 * They are still in our local switch element arrays, so only
@@ -6496,8 +6535,10 @@
 		msleep(75);
 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
 		if (ret)
-			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-				 pf->hw.aq.asq_last_status);
+			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	/* reinit the misc interrupt */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -6659,8 +6700,8 @@
 	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
 
 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-		if (pf->pending_vxlan_bitmap & (1 << i)) {
-			pf->pending_vxlan_bitmap &= ~(1 << i);
+		if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+			pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
 			port = pf->vxlan_ports[i];
 			if (port)
 				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6671,10 +6712,12 @@
 
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+					 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
 					 port ? "add" : "delete",
-					 ntohs(port), i, ret,
-					 pf->hw.aq.asq_last_status);
+					 ntohs(port), i,
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				pf->vxlan_ports[i] = 0;
 			}
 		}
@@ -7471,7 +7514,7 @@
 			j = 0;
 		/* lut = 4-byte sliding window of 4 lut entries */
 		lut = (lut << 8) | (j &
-			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+			 (BIT(pf->hw.func_caps.rss_table_entry_width) - 1));
 		/* On i = 3, we have 4 entries in lut; write to the register */
 		if ((i & 3) == 3)
 			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
@@ -7545,7 +7588,7 @@
 	i40e_status status;
 
 	/* Set the valid bit for this PF */
-	bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
 	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
 	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
 
@@ -7579,8 +7622,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for read access, err %d: aq_err %d\n",
-			 ret, last_aq_status);
+			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 
@@ -7595,8 +7639,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	i40e_release_nvm(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
-			 ret, last_aq_status);
+		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 
@@ -7608,8 +7653,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for write access, err %d: aq_err %d\n",
-			 ret, last_aq_status);
+			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 	/* Write it back out unchanged to initiate update NVM,
@@ -7627,8 +7673,9 @@
 	i40e_release_nvm(&pf->hw);
 	if (ret)
 		dev_info(&pf->pdev->dev,
-			 "BW settings NOT SAVED, err %d aq_err %d\n",
-			 ret, last_aq_status);
+			 "BW settings NOT SAVED, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 bw_commit_out:
 
 	return ret;
@@ -7674,7 +7721,7 @@
 	/* Depending on PF configurations, it is possible that the RSS
 	 * maximum might end up larger than the available queues
 	 */
-	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
 	pf->rss_size = 1;
 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
 	pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7824,7 +7871,7 @@
 	need_reset = i40e_set_ntuple(pf, features);
 
 	if (need_reset)
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
 	return 0;
 }
@@ -7887,7 +7934,7 @@
 
 	/* New port: add it and mark its index in the bitmap */
 	pf->vxlan_ports[next_idx] = port;
-	pf->pending_vxlan_bitmap |= (1 << next_idx);
+	pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
 	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
 	dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
@@ -7918,7 +7965,7 @@
 		 * and make it pending
 		 */
 		pf->vxlan_ports[idx] = 0;
-		pf->pending_vxlan_bitmap |= (1 << idx);
+		pf->pending_vxlan_bitmap |= BIT_ULL(idx);
 		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
 		dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
@@ -8328,8 +8375,10 @@
 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "couldn't get PF vsi config, err %d, aq_err %d\n",
-				 ret, pf->hw.aq.asq_last_status);
+				 "couldn't get PF vsi config, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			return -ENOENT;
 		}
 		vsi->info = ctxt.info;
@@ -8351,8 +8400,10 @@
 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "update vsi failed, aq_err=%d\n",
-					 pf->hw.aq.asq_last_status);
+					 "update vsi failed, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				ret = -ENOENT;
 				goto err;
 			}
@@ -8369,9 +8420,11 @@
 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
-					 enabled_tc, ret,
-					 pf->hw.aq.asq_last_status);
+					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+					 enabled_tc,
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				ret = -ENOENT;
 			}
 		}
@@ -8462,8 +8515,10 @@
 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
 		if (ret) {
 			dev_info(&vsi->back->pdev->dev,
-				 "add vsi failed, aq_err=%d\n",
-				 vsi->back->hw.aq.asq_last_status);
+				 "add vsi failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			ret = -ENOENT;
 			goto err;
 		}
@@ -8508,8 +8563,9 @@
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get vsi bw info, err %d, aq_err %d\n",
-			 ret, pf->hw.aq.asq_last_status);
+			 "couldn't get vsi bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		/* VSI is already added so not tearing that up */
 		ret = 0;
 	}
@@ -8682,7 +8738,7 @@
 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 	if (ret < 0) {
 		dev_info(&pf->pdev->dev,
-			 "failed to get tracking for %d queues for VSI %d err=%d\n",
+			 "failed to get tracking for %d queues for VSI %d err %d\n",
 			 vsi->alloc_queue_pairs, vsi->seid, ret);
 		goto err_vsi;
 	}
@@ -8920,8 +8976,9 @@
 						  &bw_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "query veb bw config failed, aq_err=%d\n",
-			 hw->aq.asq_last_status);
+			 "query veb bw config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -8929,8 +8986,9 @@
 						   &ets_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "query veb bw ets config failed, aq_err=%d\n",
-			 hw->aq.asq_last_status);
+			 "query veb bw ets config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -9114,36 +9172,40 @@
  **/
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
+	struct i40e_pf *pf = veb->pf;
 	bool is_default = false;
 	bool is_cloud = false;
 	int ret;
 
 	/* get a VEB from the hardware */
-	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
 			      veb->enabled_tc, is_default,
 			      is_cloud, &veb->seid, NULL);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't add VEB, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
+		dev_info(&pf->pdev->dev,
+			 "couldn't add VEB, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EPERM;
 	}
 
 	/* get statistics counter */
-	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
 					 &veb->stats_idx, NULL, NULL, NULL);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
+		dev_info(&pf->pdev->dev,
+			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EPERM;
 	}
 	ret = i40e_veb_get_bw_info(veb);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't get VEB bw info, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
-		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+		dev_info(&pf->pdev->dev,
+			 "couldn't get VEB bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
 		return -ENOENT;
 	}
 
@@ -9349,8 +9411,10 @@
 						&next_seid, NULL);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "get switch config failed %d aq_err=%x\n",
-				 ret, pf->hw.aq.asq_last_status);
+				 "get switch config failed err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			kfree(aq_buf);
 			return -ENOENT;
 		}
@@ -9391,8 +9455,9 @@
 	ret = i40e_fetch_switch_configuration(pf, false);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't fetch switch config, err %d, aq_err %d\n",
-			 ret, pf->hw.aq.asq_last_status);
+			 "couldn't fetch switch config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return ret;
 	}
 	i40e_pf_reset_stats(pf);
@@ -9935,15 +10000,19 @@
 				       I40E_AQ_EVENT_LINK_UPDOWN |
 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
 	if (err)
-		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
 	    (pf->hw.aq.fw_maj_ver < 4)) {
 		msleep(75);
 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
 		if (err)
-			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-				 pf->hw.aq.asq_last_status);
+			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, err),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	/* The main driver is (mostly) up and happy. We need to set this state
 	 * before setting up the misc vector or we get a race and the vector
@@ -10031,8 +10100,10 @@
 	/* get the requested speeds from the fw */
 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
 	if (err)
-		dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
-			 err);
+		dev_info(&pf->pdev->dev,
+			 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
 	/* print a string summarizing features */
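
All of the i40e_main.c message conversions above follow the same shape: the raw driver status and the last admin-queue return code are both decoded to strings before logging. A minimal restatement of the pattern, assuming a populated pf and a status code ret as at the call sites above:

	/* sketch of the converted logging pattern; not a new call site */
	dev_info(&pf->pdev->dev, "operation failed, err %s aq_err %s\n",
		 i40e_stat_str(&pf->hw, ret),
		 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
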
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 554e49d..ce986af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -50,7 +50,7 @@
 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
 	/* Switching to words (sr_size contains power of 2KB) */
-	nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
 
 	/* Check if we are in the normal or blank NVM programming mode */
 	fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@
 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
 	if (!ret_code) {
 		/* Write the address and start reading */
-		sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
-			 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 
 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
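
The 1-to-BIT() conversions in this hunk (and throughout the series) rely on the kernel's standard bit helpers from linux/bitops.h, which expand essentially as:

	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))

BIT_ULL is the one that matters for correctness wherever the shift count can reach 32 or more, such as the 64-bit RSS HENA pctype bits converted later in this series, since 1UL is only 32 bits wide on 32-bit builds.
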
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 7b34f1e..d52a9f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,6 +58,8 @@
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b772..8c40d6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -43,9 +43,8 @@
 #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
 #define I40E_PTP_1GB_INCVAL  0x2000000000ULL
 
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \
-					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
 					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 
 /**
@@ -357,7 +356,7 @@
 
 	prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
 
-	if (!(prttsyn_stat & (1 << index)))
+	if (!(prttsyn_stat & BIT(index)))
 		return;
 
 	lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
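
One subtlety in the i40e_ptp.c hunk above: TSYNTYPE is a two-bit field, not a single flag, so only the value-1 encoding collapses to BIT(); the value-2 encoding has to remain an explicit shift. As a general rule for these conversions (illustrative macro, not from the driver):

	/* BIT(shift) only matches a field write when the value is 1 */
	#define EXAMPLE_FIELD_VAL(v, shift)	((v) << (shift))
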
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1fe230d..330e4ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -464,7 +464,7 @@
 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@
 			dev_info(&pdev->dev,
 				"FD filter programming failed due to incorrect filter parameters\n");
 		}
-	} else if (error ==
-			  (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
 				 rx_desc->wb.qword0.hi_dword.fd_id);
@@ -892,7 +891,7 @@
 	 *  20-1249MB/s bulk   (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +904,14 @@
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -923,42 +927,14 @@
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
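
To make the reworked i40e_set_new_dynamic_itr() above easier to follow: the classifying switch now keys off the previously stored latency range instead of the raw ITR value, the chosen range is written back through rc->latency_range, and the exponential-smoothing step is removed so a newly computed ITR takes effect immediately. A hypothetical condensation of just the range transition (the example_ name is not from the driver):

static enum i40e_latency_range
example_next_range(enum i40e_latency_range cur, unsigned int bytes_per_int)
{
	switch (cur) {
	case I40E_LOWEST_LATENCY:
		return bytes_per_int > 10 ? I40E_LOW_LATENCY : cur;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			return I40E_BULK_LATENCY;
		return bytes_per_int <= 10 ? I40E_LOWEST_LATENCY : cur;
	default:	/* I40E_BULK_LATENCY and anything unexpected */
		return bytes_per_int <= 20 ? I40E_LOW_LATENCY : cur;
	}
}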
 
 /**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
  * i40e_clean_programming_status - clean the programming status descriptor
  * @rx_ring: the rx ring that has this descriptor
  * @rx_desc: the rx descriptor written back by HW
@@ -1386,7 +1362,7 @@
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1377,25 @@
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed, in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1543,7 +1519,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1584,8 +1560,8 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1613,7 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1623,7 @@
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1669,7 +1645,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1730,7 +1706,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1753,7 +1729,7 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1747,13 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
 			 * drop?
@@ -1802,7 +1778,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1827,6 +1803,68 @@
 }
 
 /**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_RX_ITR <<
+				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+			(q_vector->rx.itr <<
+				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_ITR_NONE <<
+				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_TX_ITR <<
+				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+				(q_vector->tx.itr <<
+				   I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_ITR_NONE <<
+				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+			      vsi->base_vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+}
+
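The register value assembled in each branch above decomposes the same way every time: INTENA re-arms the vector, CLEARPBA clears its pending-bit-array entry, ITR_INDX selects which per-vector ITR register the write updates (RX, TX, or NONE to leave them untouched), and INTERVAL carries the new ITR value. A hypothetical helper showing the composition, using the masks from this file:

static u32 example_dyn_ctln(u32 itr_indx, u32 interval)
{
	return I40E_PFINT_DYN_CTLN_INTENA_MASK |
	       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	       (itr_indx << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	       (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
}
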
+/**
  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
@@ -1882,33 +1920,24 @@
 
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
+	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		i40e_update_enable_itr(vsi, q_vector);
+	} else { /* Legacy mode */
+		struct i40e_hw *hw = &vsi->back->hw;
+		/* We re-enable the queue 0 cause, but
+		 * don't worry about dynamic_enable
+		 * because we left it on for the other
+		 * possible interrupts during napi
+		 */
+		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 
-	if (!test_bit(__I40E_DOWN, &vsi->state)) {
-		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-			i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
-		} else {
-			struct i40e_hw *hw = &vsi->back->hw;
-			/* We re-enable the queue 0 cause, but
-			 * don't worry about dynamic_enable
-			 * because we left it on for the other
-			 * possible interrupts during napi
-			 */
-			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-			qval = rd32(hw, I40E_QINT_TQCTL(0));
-			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_TQCTL(0), qval);
-
-			i40e_irq_dynamic_enable_icr0(vsi->back);
-		}
+		wr32(hw, I40E_QINT_RQCTL(0), qval);
+		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+		wr32(hw, I40E_QINT_TQCTL(0), qval);
+		i40e_irq_dynamic_enable_icr0(vsi->back);
 	}
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 0dc48dc..429833c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -66,17 +66,17 @@
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,17 +129,17 @@
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		(u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN		(u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM		BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
+#define I40E_TX_FLAGS_TSO		BIT(3)
+#define I40E_TX_FLAGS_IPV4		BIT(4)
+#define I40E_TX_FLAGS_IPV6		BIT(5)
+#define I40E_TX_FLAGS_FCCRC		BIT(6)
+#define I40E_TX_FLAGS_FSO		BIT(7)
+#define I40E_TX_FLAGS_TSYN		BIT(8)
+#define I40E_TX_FLAGS_FD_SB		BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
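
The HENA conversion above is where BIT_ULL rather than BIT is load-bearing: the hash-enable bitmap is a u64 and the filter pctype indexes run past bit 31, so a 32-bit 1UL shift would be undefined there. Toggling an individual hash type then matches the ethtool paths converted later in this series:

	hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
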
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 220371e..a20128b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -497,6 +497,7 @@
 
 	/* debug mask */
 	u32 debug_mask;
+	char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -610,7 +611,7 @@
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
 					 << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -618,8 +619,8 @@
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
-					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
@@ -753,8 +754,7 @@
 					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
-					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
 	/* Note: These are predefined bit offsets */
@@ -930,12 +930,12 @@
 #define I40E_TXD_CTX_QW0_NATT_SHIFT	9
 #define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
-					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -1000,8 +1000,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+				(0x1FFULL << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1019,8 +1019,7 @@
 #define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
-					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
 						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 2d20af2..a7ab463 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -110,7 +110,9 @@
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR		1
-#define I40E_VIRTCHNL_VERSION_MINOR		0
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
 struct i40e_virtchnl_version_info {
 	u32 major;
 	u32 minor;
@@ -129,7 +131,8 @@
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@
 	u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index fdd7f5e..d29d406 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -277,16 +277,14 @@
 	}
 	tempmap = vecmap->rxq_map;
 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-		linklistmap |= (1 <<
-				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
-				 vsi_queue_id));
+		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				    vsi_queue_id));
 	}
 
 	tempmap = vecmap->txq_map;
 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-		linklistmap |= (1 <<
-				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
-				 + 1));
+		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				     vsi_queue_id + 1));
 	}
 
 	next_q = find_first_bit(&linklistmap,
@@ -332,7 +330,7 @@
 		reg = (vector_id) |
 		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
-		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 		wr32(hw, reg_idx, reg);
 	}
@@ -897,7 +895,7 @@
 		for (vf_id = 0; vf_id < tmp; vf_id++) {
 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 		}
 	}
 	clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -1121,12 +1119,16 @@
  *
  * called from the VF to request the API version used by the PF
  **/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_virtchnl_version_info info = {
 		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
 	};
 
+	vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+	if (VF_IS_V10(vf))
+		info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
 	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
 				      I40E_SUCCESS, (u8 *)&info,
 				      sizeof(struct
@@ -1141,7 +1143,7 @@
  *
  * called from the VF to request its resources
  **/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_virtchnl_vf_resource *vfres = NULL;
 	struct i40e_pf *pf = vf->pf;
@@ -1165,11 +1167,18 @@
 		len = 0;
 		goto err;
 	}
+	if (VF_IS_V11(vf))
+		vf->driver_caps = *(u32 *)msg;
+	else
+		vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+				  I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+				  I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
 	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!vsi->info.pvid)
-		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+					   I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
 
 	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
@@ -1771,9 +1780,14 @@
 		valid_len = sizeof(struct i40e_virtchnl_version_info);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
-	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 		valid_len = 0;
 		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		if (VF_IS_V11(vf))
+			valid_len = sizeof(u32);
+		else
+			valid_len = 0;
+		break;
 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
 		valid_len = sizeof(struct i40e_virtchnl_txq_info);
 		break;
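
The GET_VF_RESOURCES case above is the only opcode whose expected length depends on the negotiated API: a 1.1 VF sends its u32 capability bitmap as the message body, while a 1.0 VF sends nothing. A hypothetical standalone restatement of that check (the driver's real validation stays in the surrounding switch):

static bool example_vf_res_len_ok(struct i40e_vf *vf, u16 msglen)
{
	u16 valid_len = VF_IS_V11(vf) ? sizeof(u32) : 0;

	return msglen >= valid_len;
}
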
@@ -1886,10 +1900,10 @@
 
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_VERSION:
-		ret = i40e_vc_get_version_msg(vf);
+		ret = i40e_vc_get_version_msg(vf, msg);
 		break;
 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
-		ret = i40e_vc_get_vf_resources_msg(vf);
+		ret = i40e_vc_get_vf_resources_msg(vf, msg);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
 		i40e_vc_reset_vf_msg(vf);
@@ -1967,9 +1981,9 @@
 		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
 		vf = &pf->vf[vf_id];
 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-		if (reg & (1 << bit_idx)) {
+		if (reg & BIT(bit_idx)) {
 			/* clear the bit in GLGEN_VFLRSTAT */
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 
 			if (!test_bit(__I40E_DOWN, &pf->state))
 				i40e_reset_vf(vf, true);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 09043c1..736f6f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -42,6 +42,9 @@
 #define I40E_VLAN_MASK			0xFFF
 #define I40E_PRIORITY_MASK		0x7000
 
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
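
These two macros carry the whole negotiation: a 1.0 VF must be answered with minor 0 (see i40e_vc_get_version_msg() above), while a 1.1 VF additionally passes a capability bitmap in GET_VF_RESOURCES. A hypothetical helper showing the reply-minor selection:

static u32 example_reply_minor(struct i40e_vf *vf)
{
	return VF_IS_V10(vf) ? I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS
			     : I40E_VIRTCHNL_VERSION_MINOR;
}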
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
 	I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@
 	u16 vf_id;
 	/* all VF vsis connect to the same parent */
 	enum i40e_switch_element_types parent_type;
+	struct i40e_virtchnl_version_info vf_ver;
+	u32 driver_caps; /* reported by VF driver */
 
 	/* VF Port Extender (PE) stag if used */
 	u16 stag;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 39fcb1d..56c7e75 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -72,6 +72,212 @@
 }
 
 /**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
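The fallthrough at the end is the notable part of the pattern: known codes return string literals, while anything unrecognized is formatted into the hw->err_str scratch buffer this series adds to struct i40e_hw, so the pointer returned for an unknown code is only stable until the next conversion on the same hw. That is fine for the immediate consumption the helpers are built for, e.g. (hypothetical call site, assuming an i40evf adapter):

	dev_info(&adapter->pdev->dev, "aq reports %s\n",
		 i40evf_aq_str(&adapter->hw, adapter->hw.aq.asq_last_status));
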
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+	switch (stat_err) {
+	case 0:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
+
+/**
  * i40evf_debug_aq
  * @hw: pointer to the HW structure
  * @mask: debug mask
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index adc6f71..00ed24b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -127,8 +127,8 @@
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -147,7 +147,7 @@
 		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 58e37a4..856eb9d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,6 +60,8 @@
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 0f0e185..60f88e4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -404,7 +404,7 @@
 	 *  20-1249MB/s bulk   (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +417,14 @@
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -435,42 +440,14 @@
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/**
  * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
  * @tx_ring: the tx ring to set up
  *
@@ -873,7 +850,7 @@
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
@@ -888,25 +865,25 @@
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed, in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1004,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1063,8 +1040,8 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1093,7 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1103,7 @@
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1141,7 +1118,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1202,7 +1179,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1220,7 +1197,7 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1215,13 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
 			 * drop?
@@ -1262,7 +1239,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1281,6 +1258,67 @@
 }
 
 /**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_RX_ITR <<
+				I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+			(q_vector->rx.itr <<
+				I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_ITR_NONE <<
+				I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_TX_ITR <<
+				   I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+				(q_vector->tx.itr <<
+				   I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_ITR_NONE <<
+				   I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+	}
+}
+
+/**
  * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
@@ -1336,13 +1374,7 @@
 
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
-
-	if (!test_bit(__I40E_DOWN, &vsi->state))
-		i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+	i40e_update_enable_itr(vsi, q_vector);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e7a34f89..6b47c81 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -66,17 +66,17 @@
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,16 +129,16 @@
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		(u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM		BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
+#define I40E_TX_FLAGS_TSO		BIT(3)
+#define I40E_TX_FLAGS_IPV4		BIT(4)
+#define I40E_TX_FLAGS_IPV6		BIT(5)
+#define I40E_TX_FLAGS_FCCRC		BIT(6)
+#define I40E_TX_FLAGS_FSO		BIT(7)
+#define I40E_TX_FLAGS_FD_SB		BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3969c65..4ba9a012 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -491,6 +491,7 @@
 
 	/* debug mask */
 	u32 debug_mask;
+	char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -604,7 +605,7 @@
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
 					 << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -612,8 +613,8 @@
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
-					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
@@ -747,8 +748,7 @@
 					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
-					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
 	/* Note: These are predefined bit offsets */
@@ -924,12 +924,12 @@
 #define I40E_TXD_CTX_QW0_NATT_SHIFT	9
 #define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
-					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -994,8 +994,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+				(0x1FFULL << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1013,8 +1013,7 @@
 #define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
-					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
 						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 59f62f0..1e89dea 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -110,7 +110,9 @@
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR		1
-#define I40E_VIRTCHNL_VERSION_MINOR		0
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
 struct i40e_virtchnl_version_info {
 	u32 major;
 	u32 minor;
@@ -129,7 +131,8 @@
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@
 	u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fea3b75..c33c7cc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -207,17 +207,17 @@
 	struct msix_entry *msix_entries;
 
 	u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED              (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL                   (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED              (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING                (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED                 (u32)(1 << 10)
+#define I40EVF_FLAG_RX_CSUM_ENABLED              BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
+#define I40EVF_FLAG_IN_NETPOLL                   BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED              BIT(8)
+#define I40EVF_FLAG_RESET_PENDING                BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		 0
 #define I40E_FLAG_DCB_ENABLED			 0
@@ -225,15 +225,16 @@
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
 	/* flags for admin queue service task */
 	u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES		(u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES		(u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		(u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		(u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		(u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		(u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		(u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS		(u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET		(u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES		BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS		BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET		BIT(8)
+#define I40EVF_FLAG_AQ_GET_CONFIG		BIT(10)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -249,8 +250,17 @@
 	bool netdev_registered;
 	bool link_up;
 	enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+			    I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+		    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+			  I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
 	struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
 	struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+	struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+		       ((_a)->pf_version.minor == 1))
 	u16 msg_enable;
 	struct i40e_eth_stats current_stats;
 	struct i40e_vsi vsi;
@@ -264,6 +274,7 @@
 
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
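
The conversions above swap open-coded (u32)(1 << n) expressions for the kernel's
BIT() helper; the ethtool hunks that follow do the same with BIT_ULL() for the
64-bit hash-enable (hena) mask. Both helpers are plain shifts, quoted here for
reference from include/linux/bitops.h as of this series:

    #define BIT(nr)        (1UL << (nr))
    #define BIT_ULL(nr)    (1ULL << (nr))

BIT(0) is 1, so the first flag in each group keeps its old value; the macro form
just makes the bit index explicit and guarantees a wide enough operand.
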
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 2b53c87..4790437 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -381,11 +381,11 @@
 
 	switch (cmd->flow_type) {
 	case TCP_V4_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V4_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 
@@ -397,11 +397,11 @@
 		break;
 
 	case TCP_V6_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V6_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 
@@ -479,10 +479,10 @@
 	case TCP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -491,10 +491,10 @@
 	case TCP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -503,12 +503,12 @@
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		default:
 			return -EINVAL;
@@ -517,12 +517,12 @@
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		default:
 			return -EINVAL;
@@ -535,7 +535,7 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
 		break;
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
@@ -544,15 +544,15 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
 		break;
 	case IPV4_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 		break;
 	case IPV6_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index c698523..1503cad 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -240,7 +240,7 @@
 	int i;
 
 	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & (1 << (i - 1))) {
+		if (mask & BIT(i - 1)) {
 			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
 			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
 			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -268,7 +268,7 @@
 		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
 	}
 	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & (1 << i)) {
+		if (mask & BIT(i)) {
 			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
 			dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
 				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -377,7 +377,7 @@
 	q_vector->tx.count++;
 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
 	q_vector->num_ringpairs++;
-	q_vector->ring_mask |= (1 << t_idx);
+	q_vector->ring_mask |= BIT(t_idx);
 }
 
 /**
@@ -1371,6 +1371,10 @@
 		}
 		goto watchdog_done;
 	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+		i40evf_send_vf_config_msg(adapter);
+		goto watchdog_done;
+	}
 
 	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
 		i40evf_disable_queues(adapter);
@@ -1606,7 +1610,8 @@
 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
 			 err);
 
-	i40evf_map_queues(adapter);
+	adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 
 	/* re-add all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -1616,7 +1621,7 @@
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		f->add = true;
 	}
-	adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 	i40evf_misc_irq_enable(adapter);
@@ -1982,6 +1987,62 @@
 }
 
 /**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	/* got VF config message back from PF, now we can parse it */
+	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+	}
+	if (!adapter->vsi_res) {
+		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+		return -ENODEV;
+	}
+
+	if (adapter->vf_res->vf_offload_flags
+	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+		netdev->vlan_features = netdev->features;
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_CTAG_FILTER;
+	}
+	netdev->features |= NETIF_F_HIGHDMA |
+			    NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_SCTP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_GRO;
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features;
+	netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+	adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+	adapter->vsi.back = adapter;
+	adapter->vsi.base_vector = 1;
+	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+	adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+	adapter->vsi.netdev = adapter->netdev;
+	return 0;
+}
+
+/**
  * i40evf_init_task - worker thread to perform delayed initialization
  * @work: pointer to work_struct containing our data
  *
@@ -2001,7 +2062,7 @@
 	struct net_device *netdev = adapter->netdev;
 	struct i40e_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
-	int i, err, bufsz;
+	int err, bufsz;
 
 	switch (adapter->state) {
 	case __I40EVF_STARTUP:
@@ -2052,6 +2113,12 @@
 		if (err) {
 			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 				err = i40evf_send_api_ver(adapter);
+			else
+				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+					adapter->pf_version.major,
+					adapter->pf_version.minor,
+					I40E_VIRTCHNL_VERSION_MAJOR,
+					I40E_VIRTCHNL_VERSION_MINOR);
 			goto err;
 		}
 		err = i40evf_send_vf_config_msg(adapter);
@@ -2087,42 +2154,15 @@
 	default:
 		goto err_alloc;
 	}
-	/* got VF config message back from PF, now we can parse it */
-	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
-		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
-			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
-	}
-	if (!adapter->vsi_res) {
-		dev_err(&pdev->dev, "No LAN VSI found\n");
+	if (i40evf_process_config(adapter))
 		goto err_alloc;
-	}
+	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
 	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
 	netdev->netdev_ops = &i40evf_netdev_ops;
 	i40evf_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
-	netdev->features |= NETIF_F_HIGHDMA |
-			    NETIF_F_SG |
-			    NETIF_F_IP_CSUM |
-			    NETIF_F_SCTP_CSUM |
-			    NETIF_F_IPV6_CSUM |
-			    NETIF_F_TSO |
-			    NETIF_F_TSO6 |
-			    NETIF_F_RXCSUM |
-			    NETIF_F_GRO;
-
-	if (adapter->vf_res->vf_offload_flags
-	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-		netdev->vlan_features = netdev->features;
-		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
-				    NETIF_F_HW_VLAN_CTAG_RX |
-				    NETIF_F_HW_VLAN_CTAG_FILTER;
-	}
-
-	/* copy netdev features into list of user selectable features */
-	netdev->hw_features |= netdev->features;
-	netdev->hw_features &= ~NETIF_F_RXCSUM;
 
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2153,17 +2193,6 @@
 
 	netif_carrier_off(netdev);
 
-	adapter->vsi.id = adapter->vsi_res->vsi_id;
-	adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
-	adapter->vsi.back = adapter;
-	adapter->vsi.base_vector = 1;
-	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-	adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-	adapter->vsi.netdev = adapter->netdev;
-
 	if (!adapter->netdev_registered) {
 		err = register_netdev(netdev);
 		if (err)
@@ -2291,7 +2320,7 @@
 	hw = &adapter->hw;
 	hw->back = adapter;
 
-	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 	adapter->state = __I40EVF_STARTUP;
 
 	/* Call save state here because it relies on the adapter struct. */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 61e0905..d4eb1a5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -51,8 +51,9 @@
 
 	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
 	if (err)
-		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
-			op, err, hw->aq.asq_last_status);
+		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+			op, i40evf_stat_str(hw, err),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
 	return err;
 }
 
@@ -125,8 +126,11 @@
 	}
 
 	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
-	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
-	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+	adapter->pf_version = *pf_vvi;
+
+	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+	     (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
 		err = -EIO;
 
 out_alloc:
@@ -145,8 +149,22 @@
  **/
 int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
 {
-	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
-				  NULL, 0);
+	u32 caps;
+
+	adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+	       I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+	if (PF_IS_V11(adapter))
+		return i40evf_send_pf_msg(adapter,
+					  I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+					  (u8 *)&caps, sizeof(caps));
+	else
+		return i40evf_send_pf_msg(adapter,
+					  I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+					  NULL, 0);
 }
 
 /**
@@ -274,7 +294,7 @@
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -299,7 +319,7 @@
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -708,8 +728,9 @@
 		return;
 	}
 	if (v_retval) {
-		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
-			__func__, v_retval, v_opcode);
+		dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
+			__func__, v_retval,
+			i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
 	}
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +750,15 @@
 		adapter->current_stats = *stats;
 		}
 		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+		u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+			  I40E_MAX_VF_VSI *
+			  sizeof(struct i40e_virtchnl_vsi_resource);
+		memcpy(adapter->vf_res, msg, min(msglen, len));
+		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+		i40evf_process_config(adapter);
+		}
+		break;
 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
 		/* enable transmits */
 		i40evf_irq_enable(adapter, true);
@@ -740,7 +770,6 @@
 		i40evf_free_all_rx_resources(adapter);
 		break;
 	case I40E_VIRTCHNL_OP_VERSION:
-	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
 		/* Don't display an error if we get these out of sequence.
 		 * If the firmware needed to get kicked, we'll get these and
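
A worked example of the relaxed version check above: a PF reporting 1.0 is now
accepted (PF_IS_V11() then evaluates false, so i40evf_send_vf_config_msg()
falls back to the old zero-length GET_VF_RESOURCES request), while a PF
reporting 2.0 is still rejected with -EIO because its major version is ahead
of the driver's 1.1.
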
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c1bb64d..987c9de 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,5 +1,5 @@
 /* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] = {
 	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
-	(sizeof(e1000_m88_cable_length_table) / \
-	sizeof(e1000_m88_cable_length_table[0]))
 
 static const u16 e1000_igp_2_cable_length_table[] = {
 	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@
 	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
 	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
 	104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
-	(sizeof(e1000_igp_2_cable_length_table) / \
-	 sizeof(e1000_igp_2_cable_length_table[0]))
 
 /**
  *  igb_check_reset_block - Check if PHY reset is blocked
@@ -1700,7 +1694,7 @@
 
 	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
 		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+	if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
 		ret_val = -E1000_ERR_PHY;
 		goto out;
 	}
@@ -1796,7 +1790,7 @@
 
 		index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
 			M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-		if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
 			ret_val = -E1000_ERR_PHY;
 			goto out;
 		}
@@ -1840,7 +1834,7 @@
 	s32 ret_val = 0;
 	u16 phy_data, i, agc_value = 0;
 	u16 cur_agc_index, max_agc_index = 0;
-	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
 	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
 		IGP02E1000_PHY_AGC_A,
 		IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1857,7 @@
 				IGP02E1000_AGC_LENGTH_MASK;
 
 		/* Array index bound check. */
-		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
 		    (cur_agc_index == 0)) {
 			ret_val = -E1000_ERR_PHY;
 			goto out;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 109cad9..b7b9c67 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2159,6 +2159,27 @@
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int i;
 
+	if (ec->rx_max_coalesced_frames ||
+	    ec->rx_coalesce_usecs_irq ||
+	    ec->rx_max_coalesced_frames_irq ||
+	    ec->tx_max_coalesced_frames ||
+	    ec->tx_coalesce_usecs_irq ||
+	    ec->stats_block_coalesce_usecs ||
+	    ec->use_adaptive_rx_coalesce ||
+	    ec->use_adaptive_tx_coalesce ||
+	    ec->pkt_rate_low ||
+	    ec->rx_coalesce_usecs_low ||
+	    ec->rx_max_coalesced_frames_low ||
+	    ec->tx_coalesce_usecs_low ||
+	    ec->tx_max_coalesced_frames_low ||
+	    ec->pkt_rate_high ||
+	    ec->rx_coalesce_usecs_high ||
+	    ec->rx_max_coalesced_frames_high ||
+	    ec->tx_coalesce_usecs_high ||
+	    ec->tx_max_coalesced_frames_high ||
+	    ec->rate_sample_interval)
+		return -ENOTSUPP;
+
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
 	    ((ec->rx_coalesce_usecs > 3) &&
 	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
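
With the check above, igb rejects every ethtool -C knob it does not implement
instead of silently ignoring it: assuming an igb port named eth0,
"ethtool -C eth0 rx-usecs 100" still succeeds, while something like
"ethtool -C eth0 adaptive-rx on" (use_adaptive_rx_coalesce) now fails with an
error rather than being accepted and dropped on the floor.
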
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fc7729e..41e2740 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -57,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index b2f5b16..d3e5f5b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -813,22 +813,15 @@
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-	/* We support this operation only for 82599 and x540 at the moment */
-	if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-		return IXGBEVF_82599_RETA_SIZE;
+	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+		return IXGBEVF_X550_VFRETA_SIZE;
 
-	return 0;
+	return IXGBEVF_82599_RETA_SIZE;
 }
 
 static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
 {
-	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
-	/* We support this operation only for 82599 and x540 at the moment */
-	if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-		return IXGBEVF_RSS_HASH_KEY_SIZE;
-
-	return 0;
+	return IXGBEVF_RSS_HASH_KEY_SIZE;
 }
 
 static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;
 
-	/* If neither indirection table nor hash key was requested - just
-	 * return a success avoiding taking any locks.
-	 */
-	if (!indir && !key)
-		return 0;
+	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+		if (key)
+			memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
 
-	spin_lock_bh(&adapter->mbx_lock);
-	if (indir)
-		err = ixgbevf_get_reta_locked(&adapter->hw, indir,
-					      adapter->num_rx_queues);
+		if (indir) {
+			int i;
 
-	if (!err && key)
-		err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+				indir[i] = adapter->rss_indir_tbl[i];
+		}
+	} else {
+		/* If neither indirection table nor hash key was requested,
+		 * just return success without taking any locks.
+		 */
+		if (!indir && !key)
+			return 0;
 
-	spin_unlock_bh(&adapter->mbx_lock);
+		spin_lock_bh(&adapter->mbx_lock);
+		if (indir)
+			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+						      adapter->num_rx_queues);
+
+		if (!err && key)
+			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+		spin_unlock_bh(&adapter->mbx_lock);
+	}
 
 	return err;
 }
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 775d089..04c7ec84 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -144,9 +144,11 @@
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES	2
-#define IXGBEVF_82599_RETA_SIZE	128
+#define IXGBEVF_MAX_RSS_QUEUES		2
+#define IXGBEVF_82599_RETA_SIZE		128	/* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE	64	/* 64 entries */
 #define IXGBEVF_RSS_HASH_KEY_SIZE	40
+#define IXGBEVF_VFRSSRK_REGS		10	/* 10 registers for RSS key */
 
 #define IXGBEVF_DEFAULT_TXD	1024
 #define IXGBEVF_DEFAULT_RXD	512
@@ -447,6 +449,9 @@
 
 	spinlock_t mbx_lock;
 	unsigned long last_reset;
+
+	u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+	u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
 };
 
 enum ixbgevf_state_t {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b2c86f1..88298a3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1696,22 +1696,25 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vfmrqc = 0, vfreta = 0;
-	u32 rss_key[10];
 	u16 rss_i = adapter->num_rx_queues;
-	int i, j;
+	u8 i, j;
 
 	/* Fill out hash function seeds */
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-	for (i = 0; i < 10; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+	netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
 
-	/* Fill out redirection table */
-	for (i = 0, j = 0; i < 64; i++, j++) {
+	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
 		if (j == rss_i)
 			j = 0;
-		vfreta = (vfreta << 8) | (j * 0x1);
-		if ((i & 3) == 3)
+
+		adapter->rss_indir_tbl[i] = j;
+
+		vfreta |= j << (i & 0x3) * 8;
+		if ((i & 3) == 3) {
 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+			vfreta = 0;
+		}
 	}
 
 	/* Perform hash on these packet types */
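
The rewritten loop above caches every redirection-table entry in
adapter->rss_indir_tbl, so ethtool can report it without a mailbox round trip,
while still packing four one-byte queue indices into each 32-bit VFRETA
register. Worked through for the default two-queue case (rss_i == 2):

    /* entry i lands in byte (i & 3) of register VFRETA(i >> 2);
     * with rss_i == 2 the table alternates 0,1,0,1,... so:
     *
     *   i = 0..3  ->  vfreta = 0x01000100  ->  IXGBE_VFRETA(0)
     *   i = 4..7  ->  vfreta = 0x01000100  ->  IXGBE_VFRETA(1)
     *   ...and so on for all 64 entries (16 registers)
     */
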
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 52a6665..d547010 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -18,5 +18,6 @@
 
 source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 38fe32ef..2e2a5ec 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 99ba1c5..f79d812 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -102,6 +102,7 @@
 
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
 	"blueflame",
+	"phv-bit"
 };
 
 static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@
 static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
 	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
 	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+	bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+	bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
 	int i;
+	int ret = 0;
 
-	if (bf_enabled_new == bf_enabled_old)
-		return 0; /* Nothing to do */
+	if (bf_enabled_new != bf_enabled_old) {
+		if (bf_enabled_new) {
+			bool bf_supported = true;
 
-	if (bf_enabled_new) {
-		bool bf_supported = true;
+			for (i = 0; i < priv->tx_ring_num; i++)
+				bf_supported &= priv->tx_ring[i]->bf_alloced;
 
-		for (i = 0; i < priv->tx_ring_num; i++)
-			bf_supported &= priv->tx_ring[i]->bf_alloced;
+			if (!bf_supported) {
+				en_err(priv, "BlueFlame is not supported\n");
+				return -EINVAL;
+			}
 
-		if (!bf_supported) {
-			en_err(priv, "BlueFlame is not supported\n");
-			return -EINVAL;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+		} else {
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
 		}
 
-		priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-	} else {
-		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+		for (i = 0; i < priv->tx_ring_num; i++)
+			priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+
+		en_info(priv, "BlueFlame %s\n",
+			bf_enabled_new ?  "Enabled" : "Disabled");
 	}
 
-	for (i = 0; i < priv->tx_ring_num; i++)
-		priv->tx_ring[i]->bf_enabled = bf_enabled_new;
-
-	en_info(priv, "BlueFlame %s\n",
-		bf_enabled_new ?  "Enabled" : "Disabled");
-
+	if (phv_enabled_new != phv_enabled_old) {
+		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+		if (ret)
+			return ret;
+		else if (phv_enabled_new)
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		else
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+		en_info(priv, "PHV bit %s\n",
+			phv_enabled_new ?  "Enabled" : "Disabled");
+	}
 	return 0;
 }
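
Once exposed, the new flag can be toggled per port through the standard
private-flags interface, e.g. "ethtool --set-priv-flags eth2 phv-bit on"
(interface name assumed); set_phv_bit() then pushes the change to firmware via
SET_PORT, and returns -EPERM on a VF, where only the PF is allowed to flip
the bit.
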
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd..4726122 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2184,6 +2184,25 @@
 	}
 }
 
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+					      netdev_features_t features)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+	 * enable/disable, make sure the S-TAG flag is always in the same
+	 * state as the C-TAG.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+		features |= NETIF_F_HW_VLAN_STAG_RX;
+	else
+		features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+	return features;
+}
+
 static int mlx4_en_set_features(struct net_device *netdev,
 		netdev_features_t features)
 {
@@ -2218,6 +2237,10 @@
 		en_info(priv, "Turn %s TX vlan strip offload\n",
 			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
 
+	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
 	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
 		en_info(priv, "Turn %s loopback\n",
 			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@
 	dev->hw_features |= NETIF_F_LOOPBACK |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+			NETIF_F_HW_VLAN_STAG_FILTER;
+		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+	}
+
+	if (mlx4_is_slave(mdev->dev)) {
+		int phv;
+
+		err = get_phv_bit(mdev->dev, port, &phv);
+		if (!err && phv) {
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		}
+	} else {
+		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+		    !(mdev->dev->caps.flags2 &
+		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+	}
+
 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
 		dev->hw_features |= NETIF_F_RXFCS;
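
With the .ndo_fix_features hook above, the two RX strip flags travel together:
turning C-TAG RX offload off ("ethtool -K eth2 rxvlan off"; interface name
assumed) also clears NETIF_F_HW_VLAN_STAG_RX, and turning it on sets it,
because the hardware offers no separate enable for the two strip modes (and on
devices with SKIP_OUTER_VLAN, S-TAG stripping stays off entirely).
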
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 12c65e1..a67fbb90d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -726,7 +726,7 @@
 
 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
-	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
 	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
 		hdr += sizeof(struct vlan_hdr);
@@ -907,11 +907,17 @@
 				gro_skb->csum_level = 1;
 
 			if ((cqe->vlan_my_qpn &
-			    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+			    cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
 			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 				u16 vid = be16_to_cpu(cqe->sl_vid);
 
 				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+				  MLX4_CQE_SVLAN_PRESENT_MASK) &&
+				 (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+				__vlan_hwaccel_put_tag(gro_skb,
+						       htons(ETH_P_8021AD),
+						       be16_to_cpu(cqe->sl_vid));
 			}
 
 			if (dev->features & NETIF_F_RXHASH)
@@ -970,9 +976,14 @@
 					PKT_HASH_TYPE_L3);
 
 		if ((be32_to_cpu(cqe->vlan_my_qpn) &
-		    MLX4_CQE_VLAN_PRESENT_MASK) &&
+		    MLX4_CQE_CVLAN_PRESENT_MASK) &&
 		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
+			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+					       be16_to_cpu(cqe->sl_vid));
 
 		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
 			timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1070,7 +1081,10 @@
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+	/* VLAN_HLEN is added twice, to support skbs VLAN-tagged with
+	 * multiple headers (for example: ETH_P_8021Q and ETH_P_8021AD).
+	 */
+	int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
 	int buf_size = 0;
 	int i = 0;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f..494e776 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@
 	u32 index, bf_index;
 	__be32 op_own;
 	u16 vlan_tag = 0;
+	u16 vlan_proto = 0;
 	int i_frag;
 	int lso_header_size;
 	void *fragptr = NULL;
@@ -750,9 +751,10 @@
 		goto tx_drop;
 	}
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		vlan_tag = skb_vlan_tag_get(skb);
-
+		vlan_proto = be16_to_cpu(skb->vlan_proto);
+	}
 
 	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
 
@@ -958,8 +960,11 @@
 		ring->bf.offset ^= ring->bf.buf_size;
 	} else {
 		tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-			!!skb_vlan_tag_present(skb);
+		if (vlan_proto == ETH_P_8021AD)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+		else if (vlan_proto == ETH_P_8021Q)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+
 		tx_desc->ctrl.fence_size = real_size;
 
 		/* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57..e8ec1de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,7 @@
 		[26] = "Port ETS Scheduler support",
 		[27] = "Port beacon support",
 		[28] = "RX-ALL support",
+		[29] = "802.1ad offload support",
 	};
 	int i;
 
@@ -307,6 +308,7 @@
 
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT			0x40
 
 	if (vhcr->op_modifier == 1) {
 		struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@
 		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+		if (dev->caps.phv_bit[port]) {
+			field = QUERY_FUNC_CAP_PHV_BIT;
+			MLX4_PUT(outbox->buf, field,
+				 QUERY_FUNC_CAP_FLAGS0_OFFSET);
+		}
+
 	} else if (vhcr->op_modifier == 0) {
 		struct mlx4_active_ports actv_ports =
 			mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@
 		MLX4_GET(func_cap->phys_port_id, outbox,
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+	func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
 	/* All other resources are allocated by the master, but we still report
 	 * 'num' and 'reserved' capabilities as follows:
 	 * - num remains the maximum resource index
@@ -700,6 +711,7 @@
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
@@ -898,6 +910,12 @@
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
 	if (field & (1 << 2))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+	if (field & 0x80)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+	if (field & 0x40)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@
 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
 	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
 
+	/* phv_check enable */
+	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+	if (byte_field & 0x2)
+		param->phv_check_en = 1;
 out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -2758,3 +2780,63 @@
 			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
 			    MLX4_CMD_NATIVE);
 }
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID	0x10
+#define SET_PORT_GEN_PHV_EN	0x80
+
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_general_context *context;
+	u32 in_mod;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+
+	context->v_ignore_fcs |=  SET_PORT_GEN_PHV_VALID;
+	if (phv_bit)
+		context->phv_en |=  SET_PORT_GEN_PHV_EN;
+
+	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+	int err;
+	struct mlx4_func_cap func_cap;
+
+	memset(&func_cap, 0, sizeof(func_cap));
+	err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+	if (!err)
+		*phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+	return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+	int ret;
+
+	if (mlx4_is_slave(dev))
+		return -EPERM;
+
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+		if (!ret)
+			dev->caps.phv_bit[port] = new_val;
+		return ret;
+	}
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c2..08de5555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@
 	u16 cqe_size; /* For use only when CQE stride feature enabled */
 	u16 eqe_size; /* For use only when EQE stride feature enabled */
 	u8 rss_ip_frags;
+	u8 phv_check_en; /* for QUERY_HCA */
 };
 
 struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d76f425..6f35b6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
 
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+		struct mlx4_init_hca_param hca_param;
+
+		memset(&hca_param, 0, sizeof(hca_param));
+		err = mlx4_QUERY_HCA(dev, &hca_param);
+		/* Turn off the PHV_EN flag in case phv_check_en is set.
+		 * phv_check_en is a HW check that parses the packet and verifies
+		 * that the phv bit was reported correctly in the wqe. To allow
+		 * QinQ, the PHV_EN flag should be set and phv_check_en must be
+		 * cleared; otherwise QinQ packets will be dropped by the HW.
+		 */
+		if (err || hca_param.phv_check_en)
+			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+	}
+
 	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
 	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a092c5c..232b2b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -787,6 +787,9 @@
 	u8 pprx;
 	u8 pfcrx;
 	u16 reserved4;
+	u32 reserved5;
+	u8 phv_en;
+	u8 reserved6[3];
 };
 
 struct mlx4_set_port_rqp_calc_context {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d166..defcf8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
  */
 
 #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV	     2
 
 #define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 0715b49..6cb3830 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
  * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+					   size_t size, dma_addr_t *dma_handle,
+					   int node)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	int original_node;
+	void *cpu_handle;
+
+	mutex_lock(&priv->alloc_mutex);
+	original_node = dev_to_node(&dev->pdev->dev);
+	set_dev_node(&dev->pdev->dev, node);
+	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+					 dma_handle, GFP_KERNEL);
+	set_dev_node(&dev->pdev->dev, original_node);
+	mutex_unlock(&priv->alloc_mutex);
+	return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node)
 {
 	dma_addr_t t;
 
 	buf->size = size;
 	buf->npages       = 1;
 	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-	buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-						size, &t, GFP_KERNEL);
+	buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
+							  &t, node);
 	if (!buf->direct.buf)
 		return -ENOMEM;
 
@@ -66,6 +85,11 @@
 
 	return 0;
 }
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+						 int node)
 {
 	struct mlx5_db_pgdir *pgdir;
 
@@ -84,8 +109,9 @@
 		return NULL;
 
 	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
-	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					    &pgdir->db_dma, GFP_KERNEL);
+
+	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+						       &pgdir->db_dma, node);
 	if (!pgdir->db_page) {
 		kfree(pgdir);
 		return NULL;
@@ -118,7 +144,7 @@
 	return 0;
 }
 
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
 {
 	struct mlx5_db_pgdir *pgdir;
 	int ret = 0;
@@ -129,7 +155,7 @@
 		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
 			goto out;
 
-	pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+	pgdir = mlx5_alloc_db_pgdir(dev, node);
 	if (!pgdir) {
 		ret = -ENOMEM;
 		goto out;
@@ -145,6 +171,12 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
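
mlx5_dma_zalloc_coherent_node() above works by briefly retargeting the PCI
device's NUMA node around dma_zalloc_coherent(), serialized by alloc_mutex so
concurrent allocators cannot observe the wrong node. Existing callers are
unchanged, since mlx5_buf_alloc()/mlx5_db_alloc() now just forward the
device's own priv.numa_node. A minimal sketch of a NUMA-aware caller (buf_size
and channel_cpu are hypothetical names):

    /* place a channel's buffer on the node of the CPU that services it */
    err = mlx5_buf_alloc_node(mdev, buf_size, &cq->buf,
                              cpu_to_node(channel_cpu));
    if (err)
            return err;
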
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d23bd6..45f6dc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -60,6 +60,7 @@
 
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET             16
 
 static const char vport_strings[][ETH_GSTRING_LEN] = {
 	/* vport statistics */
@@ -195,6 +196,8 @@
 	u16 rx_hash_log_tbl_sz;
 	bool lro_en;
 	u32 lro_wqe_sz;
+	u8  rss_hfunc;
+	u16 tx_max_inline;
 };
 
 enum {
@@ -266,7 +269,9 @@
 	/* dirtied @xmit */
 	u16                        pc ____cacheline_aligned_in_smp;
 	u32                        dma_fifo_pc;
-	u32                        bf_offset;
+	u16                        bf_offset;
+	u16                        prev_cc;
+	u8                         bf_budget;
 	struct mlx5e_sq_stats      stats;
 
 	struct mlx5e_cq            cq;
@@ -279,9 +284,10 @@
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
 	void __iomem              *uar_map;
+	void __iomem              *uar_bf_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
-	u32                        bf_buf_size;
+	u16                        bf_buf_size;
 	u16                        max_inline;
 	u16                        edge;
 	struct device             *pdev;
@@ -324,14 +330,18 @@
 };
 
 enum mlx5e_traffic_types {
-	MLX5E_TT_IPV4_TCP = 0,
-	MLX5E_TT_IPV6_TCP = 1,
-	MLX5E_TT_IPV4_UDP = 2,
-	MLX5E_TT_IPV6_UDP = 3,
-	MLX5E_TT_IPV4     = 4,
-	MLX5E_TT_IPV6     = 5,
-	MLX5E_TT_ANY      = 6,
-	MLX5E_NUM_TT      = 7,
+	MLX5E_TT_IPV4_TCP,
+	MLX5E_TT_IPV6_TCP,
+	MLX5E_TT_IPV4_UDP,
+	MLX5E_TT_IPV6_UDP,
+	MLX5E_TT_IPV4_IPSEC_AH,
+	MLX5E_TT_IPV6_IPSEC_AH,
+	MLX5E_TT_IPV4_IPSEC_ESP,
+	MLX5E_TT_IPV6_IPSEC_ESP,
+	MLX5E_TT_IPV4,
+	MLX5E_TT_IPV6,
+	MLX5E_TT_ANY,
+	MLX5E_NUM_TT,
 };
 
 enum {
@@ -379,7 +389,6 @@
 
 struct mlx5e_priv {
 	/* priv data path fields - start */
-	int                        num_tc;
 	int                        default_vlan_prio;
 	struct mlx5e_sq            **txq_to_sq_map;
 	/* priv data path fields - end */
@@ -487,12 +496,12 @@
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-			     struct mlx5e_params *new_params);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5e_tx_wqe *wqe)
+				      struct mlx5e_tx_wqe *wqe, int bf_sz)
 {
+	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
 	/* ensure wqe is visible to device before updating doorbell record */
 	dma_wmb();
 
@@ -503,9 +512,15 @@
 	 */
 	wmb();
 
-	mlx5_write64((__be32 *)&wqe->ctrl,
-		     sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
-		     NULL);
+	if (bf_sz) {
+		__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+		/* flush the write-combining mapped buffer */
+		wmb();
+
+	} else {
+		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+	}
 
 	sq->bf_offset ^= sq->bf_buf_size;
 }
@@ -519,3 +534,4 @@
 }
 
 extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
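
In the reworked mlx5e_tx_notify_hw() above, a nonzero bf_sz selects the
BlueFlame path: the WQE itself is copied through the write-combining
uar_bf_map, so the device receives the descriptor together with the doorbell
and can skip the DMA read it would otherwise issue; bf_sz == 0 falls back to
the plain doorbell write through uar_map. The closing
bf_offset ^= bf_buf_size ping-pongs between the two halves of the BlueFlame
buffer so a new write never lands in a half the hardware may still be
draining.
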
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3889384..b95aa33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -173,7 +173,7 @@
 	case ETH_SS_STATS:
 		return NUM_VPORT_COUNTERS +
 		       priv->params.num_channels * NUM_RQ_STATS +
-		       priv->params.num_channels * priv->num_tc *
+		       priv->params.num_channels * priv->params.num_tc *
 						   NUM_SQ_STATS;
 	/* fallthrough */
 	default:
@@ -207,7 +207,7 @@
 					"rx%d_%s", i, rq_stats_strings[j]);
 
 		for (i = 0; i < priv->params.num_channels; i++)
-			for (tc = 0; tc < priv->num_tc; tc++)
+			for (tc = 0; tc < priv->params.num_tc; tc++)
 				for (j = 0; j < NUM_SQ_STATS; j++)
 					sprintf(data +
 						(idx++) * ETH_GSTRING_LEN,
@@ -242,7 +242,7 @@
 				       ((u64 *)&priv->channel[i]->rq.stats)[j];
 
 	for (i = 0; i < priv->params.num_channels; i++)
-		for (tc = 0; tc < priv->num_tc; tc++)
+		for (tc = 0; tc < priv->params.num_tc; tc++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				data[idx++] = !test_bit(MLX5E_STATE_OPENED,
 							&priv->state) ? 0 :
@@ -264,7 +264,7 @@
 			       struct ethtool_ringparam *param)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_params new_params;
+	bool was_opened;
 	u16 min_rx_wqes;
 	u8 log_rq_size;
 	u8 log_sq_size;
@@ -316,11 +316,18 @@
 		return 0;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
-	new_params.log_rq_size = log_rq_size;
-	new_params.log_sq_size = log_sq_size;
-	new_params.min_rx_wqes = min_rx_wqes;
-	err = mlx5e_update_priv_params(priv, &new_params);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	priv->params.log_rq_size = log_rq_size;
+	priv->params.log_sq_size = log_sq_size;
+	priv->params.min_rx_wqes = min_rx_wqes;
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -342,7 +349,7 @@
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
 	unsigned int count = ch->combined_count;
-	struct mlx5e_params new_params;
+	bool was_opened;
 	int err = 0;
 
 	if (!count) {
@@ -365,9 +372,16 @@
 		return 0;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
-	new_params.num_channels = count;
-	err = mlx5e_update_priv_params(priv, &new_params);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	priv->params.num_channels = count;
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -662,6 +676,101 @@
 	return err;
 }
 
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (hfunc)
+		*hfunc = priv->params.rss_hfunc;
+
+	return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+		return 0;
+
+	if ((hfunc != ETH_RSS_HASH_XOR) &&
+	    (hfunc != ETH_RSS_HASH_TOP))
+		return -EINVAL;
+
+	mutex_lock(&priv->state_lock);
+
+	priv->params.rss_hfunc = hfunc;
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		mlx5e_close_locked(dev);
+		err = mlx5e_open_locked(dev);
+	}
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     void *data)
+{
+	const struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		*(u32 *)data = priv->params.tx_max_inline;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     const void *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	bool was_opened;
+	u32 val;
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val > mlx5e_get_max_inline_cap(mdev)) {
+			err = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&priv->state_lock);
+
+		was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+		if (was_opened)
+			mlx5e_close_locked(dev);
+
+		priv->params.tx_max_inline = val;
+
+		if (was_opened)
+			err = mlx5e_open_locked(dev);
+
+		mutex_unlock(&priv->state_lock);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_drvinfo       = mlx5e_get_drvinfo,
 	.get_link          = ethtool_op_get_link,
@@ -676,4 +785,8 @@
 	.set_coalesce      = mlx5e_set_coalesce,
 	.get_settings      = mlx5e_get_settings,
 	.set_settings      = mlx5e_set_settings,
+	.get_rxfh          = mlx5e_get_rxfh,
+	.set_rxfh          = mlx5e_set_rxfh,
+	.get_tunable       = mlx5e_get_tunable,
+	.set_tunable       = mlx5e_set_tunable,
 };
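
The tunable hooks give userspace a generic way to read and bound
tx_max_inline; with an ethtool new enough to know the tunable API this is,
e.g., "ethtool --set-tunable eth0 tx-copybreak 128" (interface name assumed).
mlx5e_set_tunable() validates the value against mlx5e_get_max_inline_cap()
and, if the device is up, bounces the channels so the new inline threshold
takes effect.
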
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 120db80..70ec31b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -105,25 +105,41 @@
 {
 	void *ft = priv->ft.main;
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
 }
 
@@ -156,33 +172,37 @@
 		switch (eth_addr_type) {
 		case MLX5E_UC:
 			ret =
-				(1 << MLX5E_TT_IPV4_TCP) |
-				(1 << MLX5E_TT_IPV6_TCP) |
-				(1 << MLX5E_TT_IPV4_UDP) |
-				(1 << MLX5E_TT_IPV6_UDP) |
-				(1 << MLX5E_TT_IPV4)     |
-				(1 << MLX5E_TT_IPV6)     |
-				(1 << MLX5E_TT_ANY)      |
+				BIT(MLX5E_TT_IPV4_TCP)       |
+				BIT(MLX5E_TT_IPV6_TCP)       |
+				BIT(MLX5E_TT_IPV4_UDP)       |
+				BIT(MLX5E_TT_IPV6_UDP)       |
+				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+				BIT(MLX5E_TT_IPV4)           |
+				BIT(MLX5E_TT_IPV6)           |
+				BIT(MLX5E_TT_ANY)            |
 				0;
 			break;
 
 		case MLX5E_MC_IPV4:
 			ret =
-				(1 << MLX5E_TT_IPV4_UDP) |
-				(1 << MLX5E_TT_IPV4)     |
+				BIT(MLX5E_TT_IPV4_UDP)       |
+				BIT(MLX5E_TT_IPV4)           |
 				0;
 			break;
 
 		case MLX5E_MC_IPV6:
 			ret =
-				(1 << MLX5E_TT_IPV6_UDP) |
-				(1 << MLX5E_TT_IPV6)     |
+				BIT(MLX5E_TT_IPV6_UDP)       |
+				BIT(MLX5E_TT_IPV6)           |
 				0;
 			break;
 
 		case MLX5E_MC_OTHER:
 			ret =
-				(1 << MLX5E_TT_ANY)      |
+				BIT(MLX5E_TT_ANY)            |
 				0;
 			break;
 		}
@@ -191,23 +211,27 @@
 
 	case MLX5E_ALLMULTI:
 		ret =
-			(1 << MLX5E_TT_IPV4_UDP) |
-			(1 << MLX5E_TT_IPV6_UDP) |
-			(1 << MLX5E_TT_IPV4)     |
-			(1 << MLX5E_TT_IPV6)     |
-			(1 << MLX5E_TT_ANY)      |
+			BIT(MLX5E_TT_IPV4_UDP) |
+			BIT(MLX5E_TT_IPV6_UDP) |
+			BIT(MLX5E_TT_IPV4)     |
+			BIT(MLX5E_TT_IPV6)     |
+			BIT(MLX5E_TT_ANY)      |
 			0;
 		break;
 
 	default: /* MLX5E_PROMISC */
 		ret =
-			(1 << MLX5E_TT_IPV4_TCP) |
-			(1 << MLX5E_TT_IPV6_TCP) |
-			(1 << MLX5E_TT_IPV4_UDP) |
-			(1 << MLX5E_TT_IPV6_UDP) |
-			(1 << MLX5E_TT_IPV4)     |
-			(1 << MLX5E_TT_IPV6)     |
-			(1 << MLX5E_TT_ANY)      |
+			BIT(MLX5E_TT_IPV4_TCP)       |
+			BIT(MLX5E_TT_IPV6_TCP)       |
+			BIT(MLX5E_TT_IPV4_UDP)       |
+			BIT(MLX5E_TT_IPV6_UDP)       |
+			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+			BIT(MLX5E_TT_IPV4)           |
+			BIT(MLX5E_TT_IPV6)           |
+			BIT(MLX5E_TT_ANY)            |
 			0;
 		break;
 	}
@@ -226,6 +250,7 @@
 	u8   *match_criteria_dmac;
 	void *ft   = priv->ft.main;
 	u32  *tirn = priv->tirn;
+	u32  *ft_ix;
 	u32  tt_vec;
 	int  err;
 
@@ -261,51 +286,51 @@
 
 	tt_vec = mlx5e_get_tt_vec(ai, type);
 
-	if (tt_vec & (1 << MLX5E_TT_ANY)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+	if (tt_vec & BIT(MLX5E_TT_ANY)) {
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_ANY]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_ANY]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_ANY);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_ANY);
 	}
 
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
 			 outer_headers.ethertype);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
 	}
 
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@
 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
 		 IPPROTO_UDP);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4_UDP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4_UDP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6_UDP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6_UDP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
 	}
 
 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
 		 IPPROTO_TCP);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4_TCP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4_TCP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6_TCP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6_TCP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+	}
+
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_AH);
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+	}
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+	}
+
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_ESP);
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+	}
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
 	}
 
 	return 0;
+
+err_del_ai:
+	mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+	return err;
 }
 
 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -725,7 +821,7 @@
 	if (!g)
 		return -ENOMEM;
 
-	g[0].log_sz = 2;
+	g[0].log_sz = 3;
 	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
 			 outer_headers.ethertype);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 40206da..bb815893 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@
 struct mlx5e_sq_param {
 	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
 	struct mlx5_wq_param       wq;
+	u16                        max_inline;
 };
 
 struct mlx5e_cq_param {
@@ -116,7 +117,7 @@
 		s->rx_csum_none	+= rq_stats->csum_none;
 		s->rx_wqe_err   += rq_stats->wqe_err;
 
-		for (j = 0; j < priv->num_tc; j++) {
+		for (j = 0; j < priv->params.num_tc; j++) {
 			sq_stats = &priv->channel[i]->sq[j].stats;
 
 			s->tso_packets		+= sq_stats->tso_packets;
@@ -272,6 +273,8 @@
 	int err;
 	int i;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
 				&rq->wq_ctrl);
 	if (err)
@@ -342,11 +345,11 @@
 
 	memcpy(rqc, param->rqc, sizeof(param->rqc));
 
-	MLX5_SET(rqc,  rqc, cqn,		c->rq.cq.mcq.cqn);
+	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
 	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
 	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
 	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
-						PAGE_SHIFT);
+						MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
 
 	mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -502,6 +505,8 @@
 	if (err)
 		return err;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
 				 &sq->wq_ctrl);
 	if (err)
@@ -509,7 +514,9 @@
 
 	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
 	sq->uar_map     = sq->uar.map;
+	sq->uar_bf_map  = sq->uar.bf_map;
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+	sq->max_inline  = param->max_inline;
 
 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
 	if (err)
@@ -518,11 +525,12 @@
 	txq_ix = c->ix + tc * priv->params.num_channels;
 	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
 
-	sq->pdev    = c->pdev;
-	sq->mkey_be = c->mkey_be;
-	sq->channel = c;
-	sq->tc      = tc;
-	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	sq->pdev      = c->pdev;
+	sq->mkey_be   = c->mkey_be;
+	sq->channel   = c;
+	sq->tc        = tc;
+	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
 	priv->txq_to_sq_map[txq_ix] = sq;
 
 	return 0;
@@ -569,7 +577,6 @@
 
 	memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-	MLX5_SET(sqc,  sqc, user_index,		sq->tc);
 	MLX5_SET(sqc,  sqc, tis_num_0,		priv->tisn[sq->tc]);
 	MLX5_SET(sqc,  sqc, cqn,		c->sq[sq->tc].cq.mcq.cqn);
 	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
@@ -579,7 +586,7 @@
 	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
 	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
-					  PAGE_SHIFT);
+					  MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
 
 	mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +709,8 @@
 	int err;
 	u32 i;
 
-	param->wq.numa = cpu_to_node(c->cpu);
+	param->wq.buf_numa_node = cpu_to_node(c->cpu);
+	param->wq.db_numa_node  = cpu_to_node(c->cpu);
 	param->eq_ix   = c->ix;
 
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -773,7 +781,7 @@
 	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
 	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
 	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
-					    PAGE_SHIFT);
+					    MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
 
 	err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -929,7 +937,7 @@
 	c->pdev     = &priv->mdev->pdev->dev;
 	c->netdev   = priv->netdev;
 	c->mkey_be  = cpu_to_be32(priv->mr.key);
-	c->num_tc   = priv->num_tc;
+	c->num_tc   = priv->params.num_tc;
 
 	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
 
@@ -1000,7 +1008,7 @@
 	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
 	MLX5_SET(wq, wq, pd,               priv->pdn);
 
-	param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 	param->wq.linear = 1;
 }
 
@@ -1014,7 +1022,8 @@
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
 	MLX5_SET(wq, wq, pd,            priv->pdn);
 
-	param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+	param->max_inline = priv->params.tx_max_inline;
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1068,28 @@
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
 	struct mlx5e_channel_param cparam;
+	int nch = priv->params.num_channels;
 	int err = -ENOMEM;
 	int i;
 	int j;
 
-	priv->channel = kcalloc(priv->params.num_channels,
-				sizeof(struct mlx5e_channel *), GFP_KERNEL);
+	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+				GFP_KERNEL);
 
-	priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
 				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
 	if (!priv->channel || !priv->txq_to_sq_map)
 		goto err_free_txq_to_sq_map;
 
 	mlx5e_build_channel_param(priv, &cparam);
-	for (i = 0; i < priv->params.num_channels; i++) {
+	for (i = 0; i < nch; i++) {
 		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
 		if (err)
 			goto err_close_channels;
 	}
 
-	for (j = 0; j < priv->params.num_channels; j++) {
+	for (j = 0; j < nch; j++) {
 		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
 		if (err)
 			goto err_close_channels;
@@ -1130,11 +1140,10 @@
 
 static int mlx5e_open_tises(struct mlx5e_priv *priv)
 {
-	int num_tc = priv->num_tc;
 	int err;
 	int tc;
 
-	for (tc = 0; tc < num_tc; tc++) {
+	for (tc = 0; tc < priv->params.num_tc; tc++) {
 		err = mlx5e_open_tis(priv, tc);
 		if (err)
 			goto err_close_tises;
@@ -1151,26 +1160,41 @@
 
 static void mlx5e_close_tises(struct mlx5e_priv *priv)
 {
-	int num_tc = priv->num_tc;
 	int tc;
 
-	for (tc = 0; tc < num_tc; tc++)
+	for (tc = 0; tc < priv->params.num_tc; tc++)
 		mlx5e_close_tis(priv, tc);
 }
 
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+	return (hfunc == ETH_RSS_HASH_TOP) ?
+	       MLX5_RX_HASH_FN_TOEPLITZ :
+	       MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+	int inv = 0;
+	int i;
+
+	for (i = 0; i < size; i++)
+		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+	return inv;
+}
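
mlx5e_bits_invert() reverses the low `size` bits of `a`; with the XOR hash function the RQT indirection table below is filled in bit-reversed order so adjacent table entries map to well-spread channels. A standalone sketch of the same transform (the helper name is illustrative; it avoids the kernel's test_bit() for clarity):

	/* Reverse the low `size` bits of `a`: bit (size - 1 - i) of the
	 * input becomes bit i of the result.  E.g. size = 3, a = 0b110
	 * yields 0b011.
	 */
	static int bits_invert_sketch(unsigned long a, int size)
	{
		int inv = 0;
		int i;

		for (i = 0; i < size; i++)
			inv |= ((a >> (size - i - 1)) & 1) << i;

		return inv;
	}
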
+
 static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u32 *in;
-	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
 	void *rqtc;
 	int inlen;
 	int err;
-	int sz;
+	int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
+	int sz = 1 << log_tbl_sz;
 	int i;
 
-	sz = 1 << priv->params.rx_hash_log_tbl_sz;
-
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
 	if (!in)
@@ -1182,17 +1206,16 @@
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
 	for (i = 0; i < sz; i++) {
-		int ix = i % priv->params.num_channels;
+		int ix = i;
 
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+			ix = mlx5e_bits_invert(i, log_tbl_sz);
+
+		ix = ix % priv->params.num_channels;
 		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
 	}
 
-	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
-	if (!err)
-		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn);
 
 	kvfree(in);
 
@@ -1201,16 +1224,7 @@
 
 static void mlx5e_close_rqt(struct mlx5e_priv *priv)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
-	memset(in, 0, sizeof(in));
-
-	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
-	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
-
-	mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
-				   sizeof(out));
+	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn);
 }
 
 static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
@@ -1221,13 +1235,17 @@
 
 #define ROUGH_MAX_L2_L3_HDR_SZ 256
 
-#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-			  MLX5_HASH_FIELD_SEL_DST_IP)
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
 
-#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-			  MLX5_HASH_FIELD_SEL_DST_IP   |\
-			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
-			  MLX5_HASH_FIELD_SEL_L4_DPORT)
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
 	if (priv->params.lro_en) {
 		MLX5_SET(tirc, tirc, lro_enable_mask,
@@ -1254,12 +1272,16 @@
 		MLX5_SET(tirc, tirc, indirect_table,
 			 priv->rqtn);
 		MLX5_SET(tirc, tirc, rx_hash_fn,
-			 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
-		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-		netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
-						 rx_hash_toeplitz_key),
-				    MLX5_FLD_SZ_BYTES(tirc,
-						      rx_hash_toeplitz_key));
+			 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+			void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+						     rx_hash_toeplitz_key);
+			size_t len = MLX5_FLD_SZ_BYTES(tirc,
+						       rx_hash_toeplitz_key);
+
+			MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+			netdev_rss_key_fill(rss_key, len);
+		}
 		break;
 	}
 
@@ -1270,7 +1292,7 @@
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_TCP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
 		break;
 
 	case MLX5E_TT_IPV6_TCP:
@@ -1279,7 +1301,7 @@
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_TCP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
 		break;
 
 	case MLX5E_TT_IPV4_UDP:
@@ -1288,7 +1310,7 @@
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_UDP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
 		break;
 
 	case MLX5E_TT_IPV6_UDP:
@@ -1297,7 +1319,35 @@
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_UDP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
 		break;
 
 	case MLX5E_TT_IPV4:
@@ -1520,26 +1570,6 @@
 	return err;
 }
 
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-			     struct mlx5e_params *new_params)
-{
-	int err = 0;
-	int was_opened;
-
-	WARN_ON(!mutex_is_locked(&priv->state_lock));
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
-		mlx5e_close_locked(priv->netdev);
-
-	priv->params = *new_params;
-
-	if (was_opened)
-		err = mlx5e_open_locked(priv->netdev);
-
-	return err;
-}
-
 static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
@@ -1589,20 +1619,22 @@
 			      netdev_features_t features)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
 	netdev_features_t changes = features ^ netdev->features;
-	struct mlx5e_params new_params;
-	bool update_params = false;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
 
 	if (changes & NETIF_F_LRO) {
-		new_params.lro_en = !!(features & NETIF_F_LRO);
-		update_params = true;
-	}
+		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
 
-	if (update_params)
-		mlx5e_update_priv_params(priv, &new_params);
+		if (was_opened)
+			mlx5e_close_locked(priv->netdev);
+
+		priv->params.lro_en = !!(features & NETIF_F_LRO);
+
+		if (was_opened)
+			err = mlx5e_open_locked(priv->netdev);
+	}
 
 	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1620,8 +1652,9 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	bool was_opened;
 	int max_mtu;
-	int err;
+	int err = 0;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -1633,8 +1666,16 @@
 	}
 
 	mutex_lock(&priv->state_lock);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(netdev);
+
 	netdev->mtu = new_mtu;
-	err = mlx5e_update_priv_params(priv, &priv->params);
+
+	if (was_opened)
+		err = mlx5e_open_locked(netdev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -1673,6 +1714,15 @@
 	return 0;
 }
 
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+	return bf_buf_size -
+	       sizeof(struct mlx5e_tx_wqe) +
+	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
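
A worked example of the arithmetic above, with illustrative rather than device-specific numbers:

	/* Hypothetical sizing, assuming log_bf_reg_size = 10 and a 64-byte
	 * struct mlx5e_tx_wqe (both values are illustrative):
	 *
	 *   bf register size = 1 << 10      = 1024 bytes
	 *   bf_buf_size      = 1024 / 2     =  512 bytes
	 *   tx_max_inline    = 512 - 64 + 2 =  450 bytes
	 *
	 * The "+ 2" re-adds the two inline_hdr_start bytes, which are counted
	 * in sizeof(struct mlx5e_tx_wqe) but may still carry inlined headers.
	 */
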
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 				    struct net_device *netdev,
 				    int num_comp_vectors)
@@ -1691,6 +1741,7 @@
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
 	priv->params.tx_cq_moderation_pkts =
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
 	priv->params.min_rx_wqes           =
 		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
 	priv->params.rx_hash_log_tbl_sz    =
@@ -1700,6 +1751,7 @@
 		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
 	priv->params.num_tc                = 1;
 	priv->params.default_vlan_prio     = 0;
+	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
 	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
 	priv->params.lro_wqe_sz            =
@@ -1708,7 +1760,6 @@
 	priv->mdev                         = mdev;
 	priv->netdev                       = netdev;
 	priv->params.num_channels          = num_comp_vectors;
-	priv->num_tc                       = priv->params.num_tc;
 	priv->default_vlan_prio            = priv->params.default_vlan_prio;
 
 	spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1784,8 @@
 
 	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
-	if (priv->num_tc > 1) {
+	if (priv->params.num_tc > 1)
 		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
-	}
 
 	netdev->netdev_ops        = &mlx5e_netdev_ops;
 	netdev->watchdog_timeo    = 15 * HZ;
@@ -1819,36 +1869,31 @@
 
 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
 		goto err_free_netdev;
 	}
 
 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
 		goto err_unmap_free_uar;
 	}
 
 	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
 		goto err_dealloc_pd;
 	}
 
 	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
 		goto err_dealloc_transport_domain;
 	}
 
 	err = register_netdev(netdev);
 	if (err) {
-		netdev_err(netdev, "%s: register_netdev failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
 		goto err_destroy_mkey;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 03f28f4..64380bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@
 
 	if (notify_hw) {
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, 0);
 	}
 }
 
@@ -110,9 +110,17 @@
 }
 
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-					    struct sk_buff *skb)
+					    struct sk_buff *skb, bool bf)
 {
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+	/* Some NIC TX decisions, e.g. loopback, are based on the packet
+	 * headers and occur before the data gather.
+	 * Therefore these headers must be copied into the WQE.
+	 */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+	if (bf && (skb_headlen(skb) <= sq->max_inline))
+		return skb_headlen(skb);
+
 	return MLX5E_MIN_INLINE;
 }
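
For intuition, one case the new blue-flame path covers (a sketch of the conditions, not taken verbatim from the patch):

	/* Example: a 60-byte ARP request with nr_frags == 0, xmit_more
	 * clear, bf budget available and skb_headlen(skb) <= sq->max_inline
	 * -> the whole frame is inlined into the WQE, so the device never
	 * fetches the headers by DMA and the doorbell can be rung with a
	 * blue-flame write (see bf_sz in the transmit path below).
	 */
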
 
@@ -129,6 +137,7 @@
 
 	u8  opcode = MLX5_OPCODE_SEND;
 	dma_addr_t dma_addr = 0;
+	bool bf = false;
 	u16 headlen;
 	u16 ds_cnt;
 	u16 ihs;
@@ -141,6 +150,11 @@
 	else
 		sq->stats.csum_offload_none++;
 
+	if (sq->cc != sq->prev_cc) {
+		sq->prev_cc = sq->cc;
+		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+	}
+
 	if (skb_is_gso(skb)) {
 		u32 payload_len;
 
@@ -153,7 +167,10 @@
 		sq->stats.tso_packets++;
 		sq->stats.tso_bytes += payload_len;
 	} else {
-		ihs = mlx5e_get_inline_hdr_size(sq, skb);
+		bf = sq->bf_budget &&
+		     !skb->xmit_more &&
+		     !skb_shinfo(skb)->nr_frags;
+		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
 		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
 							ETH_ZLEN);
 	}
@@ -225,14 +242,21 @@
 	}
 
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+		int bf_sz = 0;
+
+		if (bf && sq->uar_bf_map)
+			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
 	}
 
 	/* fill sq edge with nops to avoid wqe wrap around */
 	while ((sq->pc & wq->sz_m1) > sq->edge)
 		mlx5e_send_nop(sq, false);
 
+	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
 	sq->stats.packets++;
 	return NETDEV_TX_OK;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529..603a8b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -455,7 +455,7 @@
 	struct mlx5_priv *priv  = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node           = dev_to_node(&mdev->pdev->dev);
+	int numa_node           = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -654,6 +654,22 @@
 }
 #endif
 
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+	return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+	if (dev->priv.bf_mapping)
+		io_mapping_free(dev->priv.bf_mapping);
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -668,6 +684,10 @@
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	spin_lock_init(&priv->mkey_lock);
 
+	mutex_init(&priv->alloc_mutex);
+
+	priv->numa_node = dev_to_node(&dev->pdev->dev);
+
 	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
 	if (!priv->dbg_root)
 		return -ENOMEM;
@@ -804,10 +824,13 @@
 		goto err_stop_eqs;
 	}
 
+	if (map_bf_area(dev))
+		dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
 	err = mlx5_irq_set_affinity_hints(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-		goto err_free_comp_eqs;
+		goto err_unmap_bf_area;
 	}
 
 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -819,7 +842,9 @@
 
 	return 0;
 
-err_free_comp_eqs:
+err_unmap_bf_area:
+	unmap_bf_area(dev);
+
 	free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -877,6 +902,7 @@
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
 	mlx5_irq_clear_affinity_hints(dev);
+	unmap_bf_area(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc88eca..566a704 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -73,7 +73,12 @@
 					     int in_size, u32 *out,
 					     int out_size)
 {
-	mlx5_cmd_exec(dev, in, in_size, out, out_size);
+	int err;
+
+	err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+	if (err)
+		return err;
+
 	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 8d98b03..c4f3f74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -358,3 +358,32 @@
 	return  mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
 					   sizeof(out));
 }
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+	int err;
+
+	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+	return err;
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index f9ef244..10bd75e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -61,4 +61,8 @@
 int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
 int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
 #endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 9ef8587..eb05c84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -246,6 +247,10 @@
 		goto err_free_uar;
 	}
 
+	if (mdev->priv.bf_mapping)
+		uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+						uar->index << PAGE_SHIFT);
+
 	return 0;
 
 err_free_uar:
@@ -257,6 +262,7 @@
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
+	io_mapping_unmap(uar->bf_map);
 	iounmap(uar->map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 8388411..ce21ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -108,13 +109,14 @@
 	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
 	wq->sz_m1 = (1 << wq->log_sz) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -144,7 +146,7 @@
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e0ddd69..6c2a8f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
 
 struct mlx5_wq_param {
 	int		linear;
-	int		numa;
+	int		buf_numa_node;
+	int		db_numa_node;
 };
 
 struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 0000000..8d1080d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+	tristate "Mellanox Technologies Switch ASICs support"
+	---help---
+	  This driver supports the Mellanox Technologies Switch ASICs family.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_core.
+
+config MLXSW_PCI
+	tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+	depends on PCI && MLXSW_CORE
+	default m
+	---help---
+	  This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+	tristate "Mellanox Technologies SwitchX-2 support"
+	depends on MLXSW_CORE && NET_SWITCHDEV
+	default m
+	---help---
+	  This driver supports Mellanox Technologies SwitchX-2 Ethernet
+	  Switch ASICs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_switchx2.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 0000000..0a05f65
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE)	+= mlxsw_core.o
+mlxsw_core-objs			:= core.o
+obj-$(CONFIG_MLXSW_PCI)		+= mlxsw_pci.o
+mlxsw_pci-objs			:= pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2)	+= mlxsw_switchx2.o
+mlxsw_switchx2-objs		:= switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 0000000..770db17
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE	4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+	return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+	kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+	memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+		   u32 in_mod, bool out_mbox_direct,
+		   char *in_mbox, size_t in_mbox_size,
+		   char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+				    u8 opcode_mod, u32 in_mod, char *in_mbox,
+				    size_t in_mbox_size)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+			      in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+				     u8 opcode_mod, u32 in_mod,
+				     bool out_mbox_direct,
+				     char *out_mbox, size_t out_mbox_size)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+			      out_mbox_direct, NULL, 0,
+			      out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+				      u8 opcode_mod, u32 in_mod)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+			      NULL, 0, NULL, 0);
+}
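
A minimal sketch of how these wrappers combine with the mailbox helpers above; the function name is hypothetical and error handling is reduced to the essentials:

	/* Sketch: execute an out-mailbox command end to end.  A real caller
	 * would also translate the command status into a meaningful error.
	 */
	static int example_exec_out_cmd(struct mlxsw_core *mlxsw_core, u16 opcode)
	{
		char *out_mbox;
		int err;

		out_mbox = mlxsw_cmd_mbox_alloc();
		if (!out_mbox)
			return -ENOMEM;

		err = mlxsw_cmd_exec_out(mlxsw_core, opcode, 0, 0, false,
					 out_mbox, MLXSW_CMD_MBOX_SIZE);
		/* ... parse out_mbox with the item getters defined below ... */

		mlxsw_cmd_mbox_free(out_mbox);
		return err;
	}
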
+
+enum mlxsw_cmd_opcode {
+	MLXSW_CMD_OPCODE_QUERY_FW		= 0x004,
+	MLXSW_CMD_OPCODE_QUERY_BOARDINFO	= 0x006,
+	MLXSW_CMD_OPCODE_QUERY_AQ_CAP		= 0x003,
+	MLXSW_CMD_OPCODE_MAP_FA			= 0xFFF,
+	MLXSW_CMD_OPCODE_UNMAP_FA		= 0xFFE,
+	MLXSW_CMD_OPCODE_CONFIG_PROFILE		= 0x100,
+	MLXSW_CMD_OPCODE_ACCESS_REG		= 0x040,
+	MLXSW_CMD_OPCODE_SW2HW_DQ		= 0x201,
+	MLXSW_CMD_OPCODE_HW2SW_DQ		= 0x202,
+	MLXSW_CMD_OPCODE_2ERR_DQ		= 0x01E,
+	MLXSW_CMD_OPCODE_QUERY_DQ		= 0x022,
+	MLXSW_CMD_OPCODE_SW2HW_CQ		= 0x016,
+	MLXSW_CMD_OPCODE_HW2SW_CQ		= 0x017,
+	MLXSW_CMD_OPCODE_QUERY_CQ		= 0x018,
+	MLXSW_CMD_OPCODE_SW2HW_EQ		= 0x013,
+	MLXSW_CMD_OPCODE_HW2SW_EQ		= 0x014,
+	MLXSW_CMD_OPCODE_QUERY_EQ		= 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+	switch (opcode) {
+	case MLXSW_CMD_OPCODE_QUERY_FW:
+		return "QUERY_FW";
+	case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+		return "QUERY_BOARDINFO";
+	case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+		return "QUERY_AQ_CAP";
+	case MLXSW_CMD_OPCODE_MAP_FA:
+		return "MAP_FA";
+	case MLXSW_CMD_OPCODE_UNMAP_FA:
+		return "UNMAP_FA";
+	case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+		return "CONFIG_PROFILE";
+	case MLXSW_CMD_OPCODE_ACCESS_REG:
+		return "ACCESS_REG";
+	case MLXSW_CMD_OPCODE_SW2HW_DQ:
+		return "SW2HW_DQ";
+	case MLXSW_CMD_OPCODE_HW2SW_DQ:
+		return "HW2SW_DQ";
+	case MLXSW_CMD_OPCODE_2ERR_DQ:
+		return "2ERR_DQ";
+	case MLXSW_CMD_OPCODE_QUERY_DQ:
+		return "QUERY_DQ";
+	case MLXSW_CMD_OPCODE_SW2HW_CQ:
+		return "SW2HW_CQ";
+	case MLXSW_CMD_OPCODE_HW2SW_CQ:
+		return "HW2SW_CQ";
+	case MLXSW_CMD_OPCODE_QUERY_CQ:
+		return "QUERY_CQ";
+	case MLXSW_CMD_OPCODE_SW2HW_EQ:
+		return "SW2HW_EQ";
+	case MLXSW_CMD_OPCODE_HW2SW_EQ:
+		return "HW2SW_EQ";
+	case MLXSW_CMD_OPCODE_QUERY_EQ:
+		return "QUERY_EQ";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+enum mlxsw_cmd_status {
+	/* Command execution succeeded. */
+	MLXSW_CMD_STATUS_OK		= 0x00,
+	/* Internal error (e.g. bus error) occurred while processing command. */
+	MLXSW_CMD_STATUS_INTERNAL_ERR	= 0x01,
+	/* Operation/command not supported or opcode modifier not supported. */
+	MLXSW_CMD_STATUS_BAD_OP		= 0x02,
+	/* Parameter not supported, parameter out of range. */
+	MLXSW_CMD_STATUS_BAD_PARAM	= 0x03,
+	/* System was not enabled or bad system state. */
+	MLXSW_CMD_STATUS_BAD_SYS_STATE	= 0x04,
+	/* Attempt to access a reserved or unallocated resource, or a resource
+	 * in inappropriate ownership.
+	 */
+	MLXSW_CMD_STATUS_BAD_RESOURCE	= 0x05,
+	/* Requested resource is currently executing a command. */
+	MLXSW_CMD_STATUS_RESOURCE_BUSY	= 0x06,
+	/* Required capability exceeds device limits. */
+	MLXSW_CMD_STATUS_EXCEED_LIM	= 0x08,
+	/* Resource is not in the appropriate state or ownership. */
+	MLXSW_CMD_STATUS_BAD_RES_STATE	= 0x09,
+	/* Index out of range (might be beyond table size or an attempt to
+	 * access a reserved resource).
+	 */
+	MLXSW_CMD_STATUS_BAD_INDEX	= 0x0A,
+	/* NVMEM checksum/CRC failed. */
+	MLXSW_CMD_STATUS_BAD_NVMEM	= 0x0B,
+	/* Bad management packet (silently discarded). */
+	MLXSW_CMD_STATUS_BAD_PKT	= 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+	switch (status) {
+	case MLXSW_CMD_STATUS_OK:
+		return "OK";
+	case MLXSW_CMD_STATUS_INTERNAL_ERR:
+		return "INTERNAL_ERR";
+	case MLXSW_CMD_STATUS_BAD_OP:
+		return "BAD_OP";
+	case MLXSW_CMD_STATUS_BAD_PARAM:
+		return "BAD_PARAM";
+	case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+		return "BAD_SYS_STATE";
+	case MLXSW_CMD_STATUS_BAD_RESOURCE:
+		return "BAD_RESOURCE";
+	case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+		return "RESOURCE_BUSY";
+	case MLXSW_CMD_STATUS_EXCEED_LIM:
+		return "EXCEED_LIM";
+	case MLXSW_CMD_STATUS_BAD_RES_STATE:
+		return "BAD_RES_STATE";
+	case MLXSW_CMD_STATUS_BAD_INDEX:
+		return "BAD_INDEX";
+	case MLXSW_CMD_STATUS_BAD_NVMEM:
+		return "BAD_NVMEM";
+	case MLXSW_CMD_STATUS_BAD_PKT:
+		return "BAD_PKT";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is made to the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports. Offset
+ * from the error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
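
A hedged usage sketch, assuming the MLXSW_ITEM32()/MLXSW_ITEM64() macros from item.h expand to mlxsw_cmd_mbox_query_fw_<field>_get() accessors for the fields above (the function name is hypothetical):

	/* Sketch: query the firmware and print a few of the fields
	 * defined above.
	 */
	static int example_print_fw_rev(struct mlxsw_core *mlxsw_core)
	{
		char *mbox;
		int err;

		mbox = mlxsw_cmd_mbox_alloc();
		if (!mbox)
			return -ENOMEM;

		err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
		if (!err)
			pr_info("fw rev %u.%u.%u, %u fw pages\n",
				mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox),
				mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox),
				mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox),
				mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox));

		mlxsw_cmd_mbox_free(mbox);
		return err;
	}
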
+
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the asynchronous queue capabilities
+ * supported by the device.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+					 char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum number of S/G list elements in an SDQ. An SDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum number of S/G list elements in an RDQ. An RDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+				   char *in_mbox, u32 vpm_entries_count)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+				 0, vpm_entries_count,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
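
Tying the MAP_FA pieces together, a hedged sketch of mapping pinned pages one VPM entry at a time. The helper is hypothetical; it assumes the indexed item macros generate mlxsw_cmd_mbox_map_fa_<field>_set(buf, index, val) setters and that all entries fit in a single mailbox (8 bytes per entry, so at most MLXSW_CMD_MBOX_SIZE / 8 per call):

	/* Sketch: map `num` pinned 4KB pages to the firmware area, one VPM
	 * entry per page (log2size = 0 means 2^0 = one 4KB page).
	 */
	static int example_map_fa(struct mlxsw_core *mlxsw_core,
				  dma_addr_t *pages, u32 num)
	{
		char *in_mbox;
		u32 i;
		int err;

		in_mbox = mlxsw_cmd_mbox_alloc();
		if (!in_mbox)
			return -ENOMEM;

		for (i = 0; i < num; i++) {
			mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, i, pages[i]);
			mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, i, 0);
		}

		err = mlxsw_cmd_map_fa(mlxsw_core, in_mbox, num);

		mlxsw_cmd_mbox_free(in_mbox);
		return err;
	}
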
+
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be done prior to executing the
+ * MAP_FA command again.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an
+ * indeterminate state; a software reset of the device is therefore required
+ * following an unsuccessful completion of the command. A software reset is
+ * also required in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+					       char *in_mbox)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+				 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
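
A hedged usage sketch: a field only takes effect when its set_* capability bit (defined below) is raised. The helper is hypothetical and assumes the item macros generate mlxsw_cmd_mbox_config_profile_<field>_set() setters; here only max_lag is configured, leaving every other field at its device default:

	/* Sketch: request a 64-entry LAG table; set_* bits left at zero
	 * mean the corresponding mailbox fields are ignored.
	 */
	static int example_set_profile(struct mlxsw_core *mlxsw_core)
	{
		char *in_mbox;
		int err;

		in_mbox = mlxsw_cmd_mbox_alloc();
		if (!in_mbox)
			return -ENOMEM;

		mlxsw_cmd_mbox_config_profile_set_max_lag_set(in_mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(in_mbox, 64);

		err = mlxsw_cmd_config_profile_set(mlxsw_core, in_mbox);

		mlxsw_cmd_mbox_free(in_mbox);
		return err;
	}
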
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+	     set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated to
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_fid_based
+ * FID Based Flood Mode
+ * 00 - Do not use FID to offset the index into the Port Group
+ *      Table/Multicast ID
+ * 01 - Use FID to offset the index to the Port Group Table (pgi)
+ * 10 - Use FID to offset the index to the Port Group Table (pgi) and
+ *      the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the
+ * configuration values for the Switch Partition are taken from the
+ * mailbox. When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+		     0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+		     0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+		     0x60, 0, 8, 0x08, 0x00, false);
+
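+/* The three swid_config items above are indexed: each generated accessor
+ * takes a switch partition index and steps 0x08 bytes per entry from the
+ * 0x60 base, so swid i lives at mailbox offset 0x60 + i * 0x08. A minimal
+ * illustrative sketch (accessor names follow the MLXSW_ITEM32_INDEXED
+ * naming convention; the loop is an example, not part of this header):
+ *
+ *	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+ *		mlxsw_cmd_mbox_config_profile_swid_config_type_set(mbox, i,
+ *								   type);
+ */
+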
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, char *out_mbox)
+{
+	return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+			      0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+			      out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
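+/* A minimal illustrative sketch of an ACCESS_REG round trip, assuming the
+ * mailbox helpers used by mlxsw_core_reg_access_cmd() in core.c (error
+ * handling omitted):
+ *
+ *	in_mbox = mlxsw_cmd_mbox_alloc();
+ *	out_mbox = mlxsw_cmd_mbox_alloc();
+ *	(pack an EMAD-style operation TLV and register TLV into in_mbox)
+ *	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+ *	(on success, copy the register payload back out of out_mbox)
+ *	mlxsw_cmd_mbox_free(out_mbox);
+ *	mlxsw_cmd_mbox_free(in_mbox);
+ */
+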
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+				 opcode_mod, dq_number,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+	MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+	MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
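+/* A minimal illustrative sketch of bringing up a send DQ with the items
+ * above (setter names follow the MLXSW_ITEM32/MLXSW_ITEM64_INDEXED naming
+ * convention; the locals are hypothetical and error handling is omitted):
+ *
+ *	memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+ *	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_number);
+ *	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
+ *	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, log2_dq_sz);
+ *	for (i = 0; i < num_pages; i++)
+ *		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, page_dma_addr[i]);
+ *	err = mlxsw_cmd_sw2hw_sdq(mlxsw_core, mbox, dq_number);
+ */
+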
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+				       u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from the state
+ * in which it has been. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+				       char *out_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+				  opcode_mod, dq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+				 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled: CQ consumer counter updates
+ * (poll for completion) and request completion notification (arm CQ)
+ * DoorBells should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
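+/* A minimal illustrative sketch of handing a CQ to the hardware with the
+ * items above (setter names follow the MLXSW_ITEM32 naming convention;
+ * the locals are hypothetical and error handling is omitted):
+ *
+ *	memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+ *	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0);
+ *	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, eq_number);
+ *	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ *	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, log_cq_size);
+ *	for (i = 0; i < num_pages; i++)
+ *		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, page_dma_addr[i]);
+ *	err = mlxsw_cmd_sw2hw_cq(mlxsw_core, mbox, cq_number);
+ */
+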
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+				     u32 cq_number)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+				   0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+				  0, cq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 eq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+				 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The HW2SW_EQ command transfers ownership of an EQ context entry from
+ * hardware to software.
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+				     u32 eq_number)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+				   0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The QUERY_EQ command retrieves a snapshot of the current EQ context
+ * entry.
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox, u32 eq_number)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+				  0, eq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 0000000..ad66ae4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,1286 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
+	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
+	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+	struct u64_stats_sync	syncp;
+	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+	u32			trap_rx_invalid;
+	u32			port_rx_invalid;
+};
+
+struct mlxsw_core {
+	struct mlxsw_driver *driver;
+	const struct mlxsw_bus *bus;
+	void *bus_priv;
+	const struct mlxsw_bus_info *bus_info;
+	struct list_head rx_listener_list;
+	struct list_head event_listener_list;
+	struct {
+		struct sk_buff *resp_skb;
+		u64 tid;
+		wait_queue_head_t wait;
+		bool trans_active;
+		struct mutex lock; /* One EMAD transaction at a time. */
+		bool use_emad;
+	} emad;
+	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+	struct dentry *dbg_dir;
+	struct {
+		struct debugfs_blob_wrapper vsd_blob;
+		struct debugfs_blob_wrapper psid_blob;
+	} dbg;
+	unsigned long driver_priv[0];
+	/* driver_priv must always be the last member */
+};
+
+struct mlxsw_rx_listener_item {
+	struct list_head list;
+	struct mlxsw_rx_listener rxl;
+	void *priv;
+};
+
+struct mlxsw_event_listener_item {
+	struct list_head list;
+	struct mlxsw_event_listener el;
+	void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Set to 1 for a response EMAD, 0 for a request EMAD.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
+
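+/* Putting the items above together, an EMAD request frame is laid out as
+ * follows (lengths follow from the definitions above; the register TLV
+ * size depends on the register being accessed):
+ *
+ *	+------------------+
+ *	| Ethernet header  |  16 bytes: dmac, smac, ethertype, mlx_proto, ver
+ *	+------------------+
+ *	| operation TLV    |  4 u32: type/len/dr/status, register_id/r/
+ *	+------------------+  method/class, 64-bit tid
+ *	| register TLV     |  1 u32 header + register payload
+ *	+------------------+
+ *	| end TLV          |  1 u32
+ *	+------------------+
+ *
+ * mlxsw_emad_construct() below builds the frame back to front using
+ * skb_push().
+ */
+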
+enum mlxsw_core_reg_access_type {
+	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+	switch (type) {
+	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+		return "query";
+	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+		return "write";
+	}
+	BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+				    const struct mlxsw_reg_info *reg,
+				    char *payload)
+{
+	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+				   const struct mlxsw_reg_info *reg,
+				   enum mlxsw_core_reg_access_type type,
+				   struct mlxsw_core *mlxsw_core)
+{
+	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+	if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+		mlxsw_emad_op_tlv_method_set(op_tlv,
+					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+	else
+		mlxsw_emad_op_tlv_method_set(op_tlv,
+					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+	mlxsw_emad_op_tlv_class_set(op_tlv,
+				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+	skb_reset_mac_header(skb);
+
+	return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type,
+				 struct mlxsw_core *mlxsw_core)
+{
+	char *buf;
+
+	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+	mlxsw_emad_pack_end_tlv(buf);
+
+	buf = skb_push(skb, reg->len + sizeof(u32));
+	mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+	mlxsw_emad_construct_eth_hdr(skb);
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+				      MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+	char *op_tlv;
+
+	op_tlv = mlxsw_emad_op_tlv(skb);
+	return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+	char *op_tlv;
+
+	op_tlv = mlxsw_emad_op_tlv(skb);
+	return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+				 struct sk_buff *skb,
+				 const struct mlxsw_tx_info *tx_info)
+{
+	int err;
+	int ret;
+
+	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+	if (err) {
+		dev_warn(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+			 mlxsw_core->emad.tid);
+		dev_kfree_skb(skb);
+		return err;
+	}
+
+	mlxsw_core->emad.trans_active = true;
+	ret = wait_event_timeout(mlxsw_core->emad.wait,
+				 !(mlxsw_core->emad.trans_active),
+				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+	if (!ret) {
+		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+			 mlxsw_core->emad.tid);
+		mlxsw_core->emad.trans_active = false;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+				     char *op_tlv)
+{
+	enum mlxsw_emad_op_tlv_status status;
+	u64 tid;
+
+	status = mlxsw_emad_op_tlv_status_get(op_tlv);
+	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+	switch (status) {
+	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+		return 0;
+	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+			 tid, status, mlxsw_emad_op_tlv_status_str(status));
+		return -EAGAIN;
+	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+	default:
+		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+			tid, status, mlxsw_emad_op_tlv_status_str(status));
+		return -EIO;
+	}
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+					 struct sk_buff *skb)
+{
+	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+			       struct sk_buff *skb,
+			       const struct mlxsw_tx_info *tx_info)
+{
+	struct sk_buff *trans_skb;
+	int n_retry;
+	int err;
+
+	n_retry = 0;
+retry:
+	/* We copy the EMAD to a new skb, since we might need
+	 * to retransmit it in case of failure.
+	 */
+	trans_skb = skb_copy(skb, GFP_KERNEL);
+	if (!trans_skb) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+	if (!err) {
+		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+		if (err)
+			dev_kfree_skb(resp_skb);
+		if (err != -EAGAIN)
+			goto out;
+	}
+	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+		goto retry;
+
+out:
+	dev_kfree_skb(skb);
+	mlxsw_core->emad.tid++;
+	return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+					void *priv)
+{
+	struct mlxsw_core *mlxsw_core = priv;
+
+	if (mlxsw_emad_is_resp(skb) &&
+	    mlxsw_core->emad.trans_active &&
+	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+		mlxsw_core->emad.resp_skb = skb;
+		mlxsw_core->emad.trans_active = false;
+		wake_up(&mlxsw_core->emad.wait);
+	} else {
+		dev_kfree_skb(skb);
+	}
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+	.func = mlxsw_emad_rx_listener_func,
+	.local_port = MLXSW_PORT_DONT_CARE,
+	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+	char htgt_pl[MLXSW_REG_HTGT_LEN];
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int err;
+
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+			    MLXSW_TRAP_ID_ETHEMAD);
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+	int err;
+
+	/* Set the upper 32 bits of the transaction ID field to a random
+	 * number. This allows us to discard EMADs addressed to other
+	 * devices.
+	 */
+	get_random_bytes(&mlxsw_core->emad.tid, 4);
+	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+
+	init_waitqueue_head(&mlxsw_core->emad.wait);
+	mlxsw_core->emad.trans_active = false;
+	mutex_init(&mlxsw_core->emad.lock);
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core,
+					      &mlxsw_emad_rx_listener,
+					      mlxsw_core);
+	if (err)
+		return err;
+
+	err = mlxsw_emad_traps_set(mlxsw_core);
+	if (err)
+		goto err_emad_trap_set;
+
+	mlxsw_core->emad.use_emad = true;
+
+	return 0;
+
+err_emad_trap_set:
+	mlxsw_core_rx_listener_unregister(mlxsw_core,
+					  &mlxsw_emad_rx_listener,
+					  mlxsw_core);
+	return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+			    MLXSW_TRAP_ID_ETHEMAD);
+	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+	mlxsw_core_rx_listener_unregister(mlxsw_core,
+					  &mlxsw_emad_rx_listener,
+					  mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+					u16 reg_len)
+{
+	struct sk_buff *skb;
+	u16 emad_len;
+
+	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
+	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+		return NULL;
+
+	skb = netdev_alloc_skb(NULL, emad_len);
+	if (!skb)
+		return NULL;
+	memset(skb->data, 0, emad_len);
+	skb_reserve(skb, emad_len);
+
+	return skb;
+}
+
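+/* Worked example of the length computed above: for a 0x20-byte register
+ * payload, emad_len is 0x20 (reg payload) + 4 (reg TLV header) + 16 (eth
+ * header) + (4 + 1) * 4 (op + end TLVs) + txhdr_len. Assuming an
+ * illustrative txhdr_len of 16 bytes, that is 88 bytes in total.
+ */
+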
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_core *mlxsw_core = file->private;
+	struct mlxsw_core_pcpu_stats *p;
+	u64 rx_packets, rx_bytes;
+	u64 tmp_rx_packets, tmp_rx_bytes;
+	u32 rx_dropped, rx_invalid;
+	unsigned int start;
+	int i;
+	int j;
+	static const char hdr[] =
+		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";
+
+	seq_printf(file, hdr);
+	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+		rx_packets = 0;
+		rx_bytes = 0;
+		rx_dropped = 0;
+		for_each_possible_cpu(j) {
+			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+			do {
+				start = u64_stats_fetch_begin(&p->syncp);
+				tmp_rx_packets = p->trap_rx_packets[i];
+				tmp_rx_bytes = p->trap_rx_bytes[i];
+			} while (u64_stats_fetch_retry(&p->syncp, start));
+
+			rx_packets += tmp_rx_packets;
+			rx_bytes += tmp_rx_bytes;
+			rx_dropped += p->trap_rx_dropped[i];
+		}
+		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+			   i, rx_packets, rx_bytes, rx_dropped);
+	}
+	rx_invalid = 0;
+	for_each_possible_cpu(j) {
+		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+		rx_invalid += p->trap_rx_invalid;
+	}
+	seq_printf(file, "trap INV                           %10u\n",
+		   rx_invalid);
+
+	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+		rx_packets = 0;
+		rx_bytes = 0;
+		rx_dropped = 0;
+		for_each_possible_cpu(j) {
+			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+			do {
+				start = u64_stats_fetch_begin(&p->syncp);
+				tmp_rx_packets = p->port_rx_packets[i];
+				tmp_rx_bytes = p->port_rx_bytes[i];
+			} while (u64_stats_fetch_retry(&p->syncp, start));
+
+			rx_packets += tmp_rx_packets;
+			rx_bytes += tmp_rx_bytes;
+			rx_dropped += p->port_rx_dropped[i];
+		}
+		seq_printf(file, "port %3d %12llu %12llu %10u\n",
+			   i, rx_packets, rx_bytes, rx_dropped);
+	}
+	rx_invalid = 0;
+	for_each_possible_cpu(j) {
+		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+		rx_invalid += p->port_rx_invalid;
+	}
+	seq_printf(file, "port INV                           %10u\n",
+		   rx_invalid);
+	return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+	struct mlxsw_core *mlxsw_core = inode->i_private;
+
+	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+	.owner = THIS_MODULE,
+	.open = mlxsw_core_rx_stats_dbg_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+				    const char *buf, size_t size)
+{
+	__be32 *m = (__be32 *) buf;
+	int i;
+	int count = size / sizeof(__be32);
+
+	for (i = count - 1; i >= 0; i--)
+		if (m[i])
+			break;
+	i++;
+	count = i ? i : 1;
+	for (i = 0; i < count; i += 4)
+		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+	spin_lock(&mlxsw_core_driver_list_lock);
+	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+	spin_lock(&mlxsw_core_driver_list_lock);
+	list_del(&mlxsw_driver->list);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+		if (strcmp(mlxsw_driver->kind, kind) == 0)
+			return mlxsw_driver;
+	}
+	return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	spin_lock(&mlxsw_core_driver_list_lock);
+	mlxsw_driver = __driver_find(kind);
+	if (!mlxsw_driver) {
+		spin_unlock(&mlxsw_core_driver_list_lock);
+		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+		spin_lock(&mlxsw_core_driver_list_lock);
+		mlxsw_driver = __driver_find(kind);
+	}
+	if (mlxsw_driver) {
+		if (!try_module_get(mlxsw_driver->owner))
+			mlxsw_driver = NULL;
+	}
+
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	spin_lock(&mlxsw_core_driver_list_lock);
+	mlxsw_driver = __driver_find(kind);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	if (!mlxsw_driver)
+		return;
+	module_put(mlxsw_driver->owner);
+}
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+						 mlxsw_core_dbg_root);
+	if (!mlxsw_core->dbg_dir)
+		return -ENOMEM;
+	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+			    &mlxsw_core->dbg.vsd_blob);
+	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+			    &mlxsw_core->dbg.psid_blob);
+	return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+	debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+				   const struct mlxsw_bus *mlxsw_bus,
+				   void *bus_priv)
+{
+	const char *device_kind = mlxsw_bus_info->device_kind;
+	struct mlxsw_core *mlxsw_core;
+	struct mlxsw_driver *mlxsw_driver;
+	size_t alloc_size;
+	int err;
+
+	mlxsw_driver = mlxsw_core_driver_get(device_kind);
+	if (!mlxsw_driver)
+		return -EINVAL;
+	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+	if (!mlxsw_core) {
+		err = -ENOMEM;
+		goto err_core_alloc;
+	}
+
+	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+	mlxsw_core->driver = mlxsw_driver;
+	mlxsw_core->bus = mlxsw_bus;
+	mlxsw_core->bus_priv = bus_priv;
+	mlxsw_core->bus_info = mlxsw_bus_info;
+
+	mlxsw_core->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+	if (!mlxsw_core->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+	if (err)
+		goto err_bus_init;
+
+	err = mlxsw_emad_init(mlxsw_core);
+	if (err)
+		goto err_emad_init;
+
+	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+				 mlxsw_bus_info);
+	if (err)
+		goto err_driver_init;
+
+	err = mlxsw_core_debugfs_init(mlxsw_core);
+	if (err)
+		goto err_debugfs_init;
+
+	return 0;
+
+err_debugfs_init:
+	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+	mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+	mlxsw_bus->fini(bus_priv);
+err_bus_init:
+	free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+	kfree(mlxsw_core);
+err_core_alloc:
+	mlxsw_core_driver_put(device_kind);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+	const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+	mlxsw_core_debugfs_fini(mlxsw_core);
+	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+	mlxsw_emad_fini(mlxsw_core);
+	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+	free_percpu(mlxsw_core->pcpu_stats);
+	kfree(mlxsw_core);
+	mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
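+/* driver_priv is the zero-length array at the tail of struct mlxsw_core,
+ * so container_of() below recovers the core instance from the private
+ * data pointer that was handed to the driver.
+ */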
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+	return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+					     tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+				   const struct mlxsw_rx_listener *rxl_b)
+{
+	return (rxl_a->func == rxl_b->func &&
+		rxl_a->local_port == rxl_b->local_port &&
+		rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+			const struct mlxsw_rx_listener *rxl,
+			void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+		    rxl_item->priv == priv)
+			return rxl_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+				    const struct mlxsw_rx_listener *rxl,
+				    void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (rxl_item)
+		return -EEXIST;
+	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+	if (!rxl_item)
+		return -ENOMEM;
+	rxl_item->rxl = *rxl;
+	rxl_item->priv = priv;
+
+	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (!rxl_item)
+		return;
+	list_del_rcu(&rxl_item->list);
+	synchronize_rcu();
+	kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+					   void *priv)
+{
+	struct mlxsw_event_listener_item *event_listener_item = priv;
+	struct mlxsw_reg_info reg;
+	char *payload;
+	char *op_tlv = mlxsw_emad_op_tlv(skb);
+	char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+	payload = mlxsw_emad_reg_payload(op_tlv);
+	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+	dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+				      const struct mlxsw_event_listener *el_b)
+{
+	return (el_a->func == el_b->func &&
+		el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+			   const struct mlxsw_event_listener *el,
+			   void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+
+	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+		if (__is_event_listener_equal(&el_item->el, el) &&
+		    el_item->priv == priv)
+			return el_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv)
+{
+	int err;
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (el_item)
+		return -EEXIST;
+	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+	if (!el_item)
+		return -ENOMEM;
+	el_item->el = *el;
+	el_item->priv = priv;
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+	if (err)
+		goto err_rx_listener_register;
+
+	/* Only save the item once an RX listener was successfully
+	 * registered for it.
+	 */
+	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+	return 0;
+
+err_rx_listener_register:
+	kfree(el_item);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (!el_item)
+		return;
+	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+	list_del(&el_item->list);
+	kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
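+/* A minimal illustrative sketch of consuming device events through the
+ * listener API above (the handler and private data are hypothetical;
+ * MLXSW_TRAP_ID_PUDE stands in for any value of enum mlxsw_event_trap_id
+ * from trap.h):
+ *
+ *	static void my_event_func(const struct mlxsw_reg_info *reg,
+ *				  char *payload, void *priv)
+ *	{
+ *		(decode payload with the register's accessors)
+ *	}
+ *
+ *	static const struct mlxsw_event_listener my_el = {
+ *		.func		= my_event_func,
+ *		.trap_id	= MLXSW_TRAP_ID_PUDE,
+ *	};
+ *
+ *	err = mlxsw_core_event_listener_register(mlxsw_core, &my_el, priv);
+ */
+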
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+				      const struct mlxsw_reg_info *reg,
+				      char *payload,
+				      enum mlxsw_core_reg_access_type type)
+{
+	int err;
+	char *op_tlv;
+	struct sk_buff *skb;
+	struct mlxsw_tx_info tx_info = {
+		.local_port = MLXSW_PORT_CPU_PORT,
+		.is_emad = true,
+	};
+
+	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+	if (!skb)
+		return -ENOMEM;
+
+	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+	mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+		mlxsw_core->emad.tid);
+	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+	if (!err) {
+		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+		       reg->len);
+
+		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+			mlxsw_core->emad.tid - 1);
+		mlxsw_core_buf_dump_dbg(mlxsw_core,
+					mlxsw_core->emad.resp_skb->data,
+					skb->len);
+
+		dev_kfree_skb(mlxsw_core->emad.resp_skb);
+	}
+
+	return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+				     const struct mlxsw_reg_info *reg,
+				     char *payload,
+				     enum mlxsw_core_reg_access_type type)
+{
+	int err, n_retry;
+	char *in_mbox, *out_mbox, *tmp;
+
+	in_mbox = mlxsw_cmd_mbox_alloc();
+	if (!in_mbox)
+		return -ENOMEM;
+
+	out_mbox = mlxsw_cmd_mbox_alloc();
+	if (!out_mbox) {
+		err = -ENOMEM;
+		goto free_in_mbox;
+	}
+
+	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+	n_retry = 0;
+retry:
+	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+	if (!err) {
+		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+			goto retry;
+	}
+
+	if (!err)
+		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+		       reg->len);
+
+	mlxsw_core->emad.tid++;
+	mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+	mlxsw_cmd_mbox_free(in_mbox);
+	return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type)
+{
+	u64 cur_tid;
+	int err;
+
+	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+			reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
+		return -EINTR;
+	}
+
+	cur_tid = mlxsw_core->emad.tid;
+	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+		mlxsw_core_reg_access_type_str(type));
+
+	/* During initialization EMAD interface is not available to us,
+	 * so we default to command interface. We switch to EMAD interface
+	 * after setting the appropriate traps.
+	 */
+	if (!mlxsw_core->emad.use_emad)
+		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+						payload, type);
+	else
+		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+						 payload, type);
+
+	if (err)
+		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
+
+	mutex_unlock(&mlxsw_core->emad.lock);
+	return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload)
+{
+	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload)
+{
+	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
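+/* A minimal illustrative sketch of register access from a driver,
+ * following the pattern of mlxsw_emad_traps_set() above: declare a
+ * payload buffer sized for the register, pack it, then write or query:
+ *
+ *	char htgt_pl[MLXSW_REG_HTGT_LEN];
+ *
+ *	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ */
+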
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+			    struct mlxsw_rx_info *rx_info)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+	const struct mlxsw_rx_listener *rxl;
+	struct mlxsw_core_pcpu_stats *pcpu_stats;
+	u8 local_port = rx_info->sys_port;
+	bool found = false;
+
+	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+			    __func__, rx_info->sys_port, rx_info->trap_id);
+
+	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+	    (local_port >= MLXSW_PORT_MAX_PORTS))
+		goto drop;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+		rxl = &rxl_item->rxl;
+		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+		     rxl->local_port == local_port) &&
+		    rxl->trap_id == rx_info->trap_id) {
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	if (!found)
+		goto drop;
+
+	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+	u64_stats_update_begin(&pcpu_stats->syncp);
+	pcpu_stats->port_rx_packets[local_port]++;
+	pcpu_stats->port_rx_bytes[local_port] += skb->len;
+	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+	u64_stats_update_end(&pcpu_stats->syncp);
+
+	rxl->func(skb, local_port, rxl_item->priv);
+	return;
+
+drop:
+	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+	else
+		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+	if (local_port >= MLXSW_PORT_MAX_PORTS)
+		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+	else
+		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+	dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+		   u32 in_mod, bool out_mbox_direct,
+		   char *in_mbox, size_t in_mbox_size,
+		   char *out_mbox, size_t out_mbox_size)
+{
+	u8 status;
+	int err;
+
+	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+	if (!mlxsw_core->bus->cmd_exec)
+		return -EOPNOTSUPP;
+
+	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+	if (in_mbox) {
+		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+	}
+
+	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+					opcode_mod, in_mod, out_mbox_direct,
+					in_mbox, in_mbox_size,
+					out_mbox, out_mbox_size, &status);
+
+	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod, status, mlxsw_cmd_status_str(status));
+	} else if (err == -ETIMEDOUT) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod);
+	}
+
+	if (!err && out_mbox) {
+		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+	if (!mlxsw_core_dbg_root)
+		return -ENOMEM;
+	return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+	debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 0000000..2280b31
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,202 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind)	\
+	MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+				   const struct mlxsw_bus *mlxsw_bus,
+				   void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+	u8 local_port;
+	bool is_emad;
+};
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+	void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+	u8 local_port;
+	u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+	void (*func)(const struct mlxsw_reg_info *reg,
+		     char *payload, void *priv);
+	enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+				    const struct mlxsw_rx_listener *rxl,
+				    void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+	u16 sys_port;
+	int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+			    struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+	u8	used_type:1,
+		used_properties:1;
+	u8	type;
+	u8	properties;
+};
+
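+/* Each used_* flag below marks the corresponding field as valid; only
+ * flagged fields are programmed into the device when the profile is
+ * applied via CONFIG_PROFILE (see mlxsw_pci_config_profile()).
+ */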
+struct mlxsw_config_profile {
+	u16	used_max_vepa_channels:1,
+		used_max_lag:1,
+		used_max_port_per_lag:1,
+		used_max_mid:1,
+		used_max_pgt:1,
+		used_max_system_port:1,
+		used_max_vlan_groups:1,
+		used_max_regions:1,
+		used_flood_tables:1,
+		used_flood_mode:1,
+		used_max_ib_mc:1,
+		used_max_pkey:1,
+		used_ar_sec:1,
+		used_adaptive_routing_group_cap:1;
+	u8	max_vepa_channels;
+	u16	max_lag;
+	u16	max_port_per_lag;
+	u16	max_mid;
+	u16	max_pgt;
+	u16	max_system_port;
+	u16	max_vlan_groups;
+	u16	max_regions;
+	u8	max_flood_tables;
+	u8	max_vid_flood_tables;
+	u8	flood_mode;
+	u16	max_ib_mc;
+	u16	max_pkey;
+	u8	ar_sec;
+	u16	adaptive_routing_group_cap;
+	u8	arn;
+	struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
+
+struct mlxsw_driver {
+	struct list_head list;
+	const char *kind;
+	struct module *owner;
+	size_t priv_size;
+	int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_bus_info *mlxsw_bus_info);
+	void (*fini)(void *driver_priv);
+	void (*txhdr_construct)(struct sk_buff *skb,
+				const struct mlxsw_tx_info *tx_info);
+	u8 txhdr_len;
+	const struct mlxsw_config_profile *profile;
+};
+
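+/* Expected flow (a sketch, inferred from the API above): a switch driver
+ * fills in struct mlxsw_driver and registers it with
+ * mlxsw_core_driver_register(); a bus implementation calls
+ * mlxsw_core_bus_device_register() for each probed device, and the core
+ * matches the device kind against the registered drivers and calls init().
+ */
+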
+struct mlxsw_bus {
+	const char *kind;
+	int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_config_profile *profile);
+	void (*fini)(void *bus_priv);
+	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+	int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+			u32 in_mod, bool out_mbox_direct,
+			char *in_mbox, size_t in_mbox_size,
+			char *out_mbox, size_t out_mbox_size,
+			u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+	const char *device_kind;
+	const char *device_name;
+	struct device *dev;
+	struct {
+		u16 major;
+		u16 minor;
+		u16 subminor;
+	} fw_rev;
+	u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+	u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 0000000..97b6bb5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518	/* Length in bytes */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10	/* Length in bytes */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+	MLXSW_EMAD_TLV_TYPE_END,
+	MLXSW_EMAD_TLV_TYPE_OP,
+	MLXSW_EMAD_TLV_TYPE_DR,
+	MLXSW_EMAD_TLV_TYPE_REG,
+	MLXSW_EMAD_TLV_TYPE_USERDATA,
+	MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
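+
+/* A register-access EMAD frame, as built by the core, consists of the
+ * Ethernet header above, an OP TLV, a REG TLV carrying the register
+ * payload, and a terminating END TLV.
+ */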
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4		/* Length in 32-bit words */
+
+enum {
+	MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+	MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+	MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+	MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+	MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+	MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+	MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+	MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+	MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+	switch (status) {
+	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+		return "operation performed";
+	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+		return "device is busy";
+	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+		return "version not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+		return "unknown TLV";
+	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+		return "register not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+		return "class not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+		return "method not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+		return "bad parameter";
+	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+		return "resource not available";
+	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+		return "acknowledged. retransmit";
+	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+		return "internal error";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+enum {
+	MLXSW_EMAD_OP_TLV_REQUEST,
+	MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+	MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+	MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+	MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+	MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1	/* Length in 32-bit words */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 0000000..4d0ac88
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,405 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+	unsigned short	offset;		/* bytes in container */
+	unsigned short	step;		/* step in bytes for indexed items */
+	unsigned short	in_step_offset; /* offset within one step */
+	unsigned char	shift;		/* shift in bits */
+	unsigned char	element_size;	/* size of element in bit array */
+	bool		no_real_shift;
+	union {
+		unsigned char	bits;
+		unsigned short	bytes;
+	} size;
+	const char	*name;
+};
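+
+/* For indexed items, the byte offset of element @index within the
+ * container is: offset + step * index + in_step_offset (see
+ * __mlxsw_item_offset() below).
+ */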
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+		    size_t typesize)
+{
+	BUG_ON(index && !item->step);
+	if (item->offset % typesize != 0 ||
+	    item->step % typesize != 0 ||
+	    item->in_step_offset % typesize != 0) {
+		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+		       item->name, item->offset, item->step,
+		       item->in_step_offset, typesize);
+		BUG();
+	}
+
+	return ((item->offset + item->step * index + item->in_step_offset) /
+		typesize);
+}
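+
+/* The getters below load a big-endian word, shift the item into place and
+ * mask it; the setters do the converse read-modify-write so that other
+ * items sharing the same word are preserved.
+ */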
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 tmp;
+
+	tmp = be16_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u16 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index,
+						  sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+	u16 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be16_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+	__be32 *b = (__be32 *) buf;
+	u32 tmp;
+
+	tmp = be32_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u32 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index,
+						  sizeof(u32));
+	__be32 *b = (__be32 *) buf;
+	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+	u32 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be32_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be32(tmp);
+}
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+	__be64 *b = (__be64 *) buf;
+	u64 tmp;
+
+	tmp = be64_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u64 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+	__be64 *b = (__be64 *) buf;
+	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+	u64 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be64_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+					    struct mlxsw_item *item)
+{
+	memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+					  struct mlxsw_item *item)
+{
+	memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+	u16 max_index, be_index;
+	u16 offset;		/* byte offset inside the array */
+
+	BUG_ON(index && !item->element_size);
+	if (item->offset % sizeof(u32) != 0 ||
+	    BITS_PER_BYTE % item->element_size != 0) {
+		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+		       item->name, item->offset, item->element_size);
+		BUG();
+	}
+
+	max_index = (item->size.bytes << 3) / item->element_size - 1;
+	be_index = max_index - index;
+	offset = be_index * item->element_size >> 3;
+	/* bit position of the element within its byte */
+	*shift = index % (BITS_PER_BYTE / item->element_size) *
+		 item->element_size;
+
+	return item->offset + offset;
+}
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+					    u16 index)
+{
+	u8 shift, tmp;
+	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+	tmp = buf[offset];
+	tmp >>= shift;
+	tmp &= GENMASK(item->element_size - 1, 0);
+	return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+					      u16 index, u8 val)
+{
+	u8 shift, tmp;
+	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+	u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+	val <<= shift;
+	val &= mask;
+	tmp = buf[offset];
+	tmp &= ~mask;
+	tmp |= val;
+	buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname)					\
+	mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
+
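+/* Illustrative use (hypothetical item, not part of this patch):
+ *
+ *	MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+ *
+ * defines mlxsw_reg_sspr_m_get(buf) and mlxsw_reg_sspr_m_set(buf, val),
+ * accessing a 1-bit field at bit 31 of the first 32-bit word of the
+ * register payload.
+ */
+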
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{										\
+	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
+			     _step, _instepoffset, _norealshift)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u16								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u16 val)				\
+{										\
+	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{										\
+	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
+}
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
+			     _step, _instepoffset, _norealshift)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u32								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u32 val)				\
+{										\
+	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{										\
+	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0,	val);	\
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,		\
+			     _sizebits, _step, _instepoffset, _norealshift)	\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u64								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u64 val)				\
+{										\
+	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.size = {.bytes = _sizebytes,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)		\
+{										\
+	__mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)		\
+{										\
+	__mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));	\
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
+			     _element_size)					\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.element_size = _element_size,						\
+	.size = {.bytes = _sizebytes,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u8								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index)			\
+{										\
+	return __mlxsw_item_bit_array_get(buf,					\
+					  &__ITEM_NAME(_type, _cname, _iname),	\
+					  index);				\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)		\
+{										\
+	__mlxsw_item_bit_array_set(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				   index, val);					\
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 0000000..298ead5b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,1794 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+	{0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+	switch (id->device) {
+	case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+		return MLXSW_DEVICE_KIND_SWITCHX2;
+	default:
+		BUG();
+	}
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+	MLXSW_PCI_QUEUE_TYPE_SDQ,
+	MLXSW_PCI_QUEUE_TYPE_RDQ,
+	MLXSW_PCI_QUEUE_TYPE_CQ,
+	MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+	switch (q_type) {
+	case MLXSW_PCI_QUEUE_TYPE_SDQ:
+		return "sdq";
+	case MLXSW_PCI_QUEUE_TYPE_RDQ:
+		return "rdq";
+	case MLXSW_PCI_QUEUE_TYPE_CQ:
+		return "cq";
+	case MLXSW_PCI_QUEUE_TYPE_EQ:
+		return "eq";
+	}
+	BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+	0, /* unused */
+	0, /* unused */
+	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+	char *buf;
+	dma_addr_t mapaddr;
+	size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+	char *elem; /* pointer to the element within the DMA-mapped memory */
+	union {
+		struct {
+			struct sk_buff *skb;
+		} sdq;
+		struct {
+			struct sk_buff *skb;
+		} rdq;
+	} u;
+};
+
+struct mlxsw_pci_queue {
+	spinlock_t lock; /* for queue accesses */
+	struct mlxsw_pci_mem_item mem_item;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	u16 producer_counter;
+	u16 consumer_counter;
+	u16 count; /* number of elements in queue */
+	u8 num; /* queue number */
+	u8 elem_size; /* size of one element */
+	enum mlxsw_pci_queue_type type;
+	struct tasklet_struct tasklet; /* queue processing tasklet */
+	struct mlxsw_pci *pci;
+	union {
+		struct {
+			u32 comp_sdq_count;
+			u32 comp_rdq_count;
+		} cq;
+		struct {
+			u32 ev_cmd_count;
+			u32 ev_comp_count;
+			u32 ev_other_count;
+		} eq;
+	} u;
+};
+
+struct mlxsw_pci_queue_type_group {
+	struct mlxsw_pci_queue *q;
+	u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+	struct pci_dev *pdev;
+	u8 __iomem *hw_addr;
+	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+	u32 doorbell_offset;
+	struct msix_entry msix_entry;
+	struct mlxsw_core *core;
+	struct {
+		u16 num_pages;
+		struct mlxsw_pci_mem_item *items;
+	} fw_area;
+	struct {
+		struct mutex lock; /* Lock access to command registers */
+		bool nopoll;
+		wait_queue_head_t wait;
+		bool wait_done;
+		struct {
+			u8 status;
+			u64 out_param;
+		} comp;
+	} cmd;
+	struct mlxsw_bus_info bus_info;
+	struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+	tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+					size_t elem_size, int elem_index)
+{
+	return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+	return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+	int index = q->producer_counter & (q->count - 1);
+
+	if ((q->producer_counter - q->consumer_counter) == q->count)
+		return NULL;
+	return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+	int index = q->consumer_counter & (q->count - 1);
+
+	return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
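+/* The ring size is a power of two (indices are taken modulo q->count), and
+ * the expected value of an element's owner bit flips on every pass through
+ * the ring; (consumer_counter & q->count) extracts the current pass parity
+ * to compare against.
+ */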
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+	return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+					 u32 (*get_elem_owner_func)(char *))
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *elem;
+	bool owner_bit;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	elem = elem_info->elem;
+	owner_bit = get_elem_owner_func(elem);
+	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+		return NULL;
+	q->consumer_counter++;
+	rmb(); /* make sure we read owned bit before the rest of elem */
+	return elem;
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+			       enum mlxsw_pci_queue_type q_type)
+{
+	return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+				  enum mlxsw_pci_queue_type q_type)
+{
+	struct mlxsw_pci_queue_type_group *queue_group;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+	return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+		      enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+	return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+						 u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci,
+				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+						 u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci,
+				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+						u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+						u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+					   struct mlxsw_pci_queue *q,
+					   u16 val)
+{
+	mlxsw_pci_write32(mlxsw_pci,
+			  DOORBELL(mlxsw_pci->doorbell_offset,
+				   mlxsw_pci_doorbell_type_offset[q->type],
+				   q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+					       struct mlxsw_pci_queue *q,
+					       u16 val)
+{
+	mlxsw_pci_write32(mlxsw_pci,
+			  DOORBELL(mlxsw_pci->doorbell_offset,
+				   mlxsw_pci_doorbell_arm_type_offset[q->type],
+				   q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+						   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+						   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+				       q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+					   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+					     int page_index)
+{
+	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			      struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->producer_counter = 0;
+	q->consumer_counter = 0;
+
+	/* Use the CQ with the same number as this SDQ. */
+	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+	}
+
+	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+			       struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %5d\n",
+			   i, q->producer_counter, q->consumer_counter,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+				  int index, char *frag_data, size_t frag_len,
+				  int direction)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	dma_addr_t mapaddr;
+
+	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+		if (net_ratelimit())
+			dev_err(&pdev->dev, "failed to dma map tx frag\n");
+		return -EIO;
+	}
+	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+	return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+				     int index, int direction)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+	if (!frag_len)
+		return;
+	pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+				   struct mlxsw_pci_queue_elem_info *elem_info)
+{
+	size_t buf_len = MLXSW_PORT_MAX_MTU;
+	char *wqe = elem_info->elem;
+	struct sk_buff *skb;
+	int err;
+
+	elem_info->u.rdq.skb = NULL;
+	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+	if (!skb)
+		return -ENOMEM;
+
+	/* Assume that wqe was previously zeroed. */
+
+	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+				     buf_len, DMA_FROM_DEVICE);
+	if (err)
+		goto err_frag_map;
+
+	elem_info->u.rdq.skb = skb;
+	return 0;
+
+err_frag_map:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+				   struct mlxsw_pci_queue_elem_info *elem_info)
+{
+	struct sk_buff *skb;
+	char *wqe;
+
+	skb = elem_info->u.rdq.skb;
+	wqe = elem_info->elem;
+
+	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			      struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	int i;
+	int err;
+
+	q->producer_counter = 0;
+	q->consumer_counter = 0;
+
+	/* Use the CQ with the same number as this RDQ, offset by
+	 * MLXSW_PCI_SDQS_COUNT, since the lower CQ numbers are
+	 * assigned to the SDQs.
+	 */
+	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+	}
+
+	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+	for (i = 0; i < q->count; i++) {
+		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+		BUG_ON(!elem_info);
+		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+		if (err)
+			goto rollback;
+		/* Everything is set up, ring doorbell to pass elem to HW */
+		q->producer_counter++;
+		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	}
+
+	return 0;
+
+rollback:
+	for (i--; i >= 0; i--) {
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+	}
+	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+	return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+			       struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	int i;
+
+	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+	for (i = 0; i < q->count; i++) {
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+	}
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %5d\n",
+			   i, q->producer_counter, q->consumer_counter,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			     struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->consumer_counter = 0;
+
+	for (i = 0; i < q->count; i++) {
+		char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+		mlxsw_pci_cqe_owner_set(elem, 1);
+	}
+
+	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+	mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+	}
+	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+			      struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM CONS_INDEX  SDQ_COUNT  RDQ_COUNT COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_cq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %10d %5d\n",
+			   i, q->consumer_counter, q->u.cq.comp_sdq_count,
+			   q->u.cq.comp_rdq_count, q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+				     struct mlxsw_pci_queue *q,
+				     u16 consumer_counter_limit,
+				     char *cqe)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock(&q->lock);
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	skb = elem_info->u.sdq.skb;
+	wqe = elem_info->elem;
+	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+	dev_kfree_skb_any(skb);
+	elem_info->u.sdq.skb = NULL;
+
+	if (q->consumer_counter++ != consumer_counter_limit)
+		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+	spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+				     struct mlxsw_pci_queue *q,
+				     u16 consumer_counter_limit,
+				     char *cqe)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	struct sk_buff *skb;
+	struct mlxsw_rx_info rx_info;
+	int err;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	skb = elem_info->u.rdq.skb;
+	if (!skb)
+		return;
+	wqe = elem_info->elem;
+	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+	if (q->consumer_counter++ != consumer_counter_limit)
+		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+	/* LAG is not supported yet */
+	if (mlxsw_pci_cqe_lag_get(cqe))
+		goto drop;
+
+	rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+	skb_put(skb, mlxsw_pci_cqe_byte_count_get(cqe));
+	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+	memset(wqe, 0, q->elem_size);
+	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+	if (err && net_ratelimit())
+		dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+	/* Everything is set up, ring doorbell to pass elem to HW */
+	q->producer_counter++;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	return;
+
+drop:
+	dev_kfree_skb_any(skb);
+	goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+	struct mlxsw_pci *mlxsw_pci = q->pci;
+	char *cqe;
+	int items = 0;
+	int credits = q->count >> 1;
+
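+	/* The credits limit bounds the number of completions handled in a
+	 * single tasklet run to half the ring.
+	 */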
+	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+		if (sendq) {
+			struct mlxsw_pci_queue *sdq;
+
+			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+						 wqe_counter, cqe);
+			q->u.cq.comp_sdq_count++;
+		} else {
+			struct mlxsw_pci_queue *rdq;
+
+			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+						 wqe_counter, cqe);
+			q->u.cq.comp_rdq_count++;
+		}
+		if (++items == credits)
+			break;
+	}
+	if (items) {
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	}
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			     struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->consumer_counter = 0;
+
+	for (i = 0; i < q->count; i++) {
+		char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+		mlxsw_pci_eqe_owner_set(elem, 1);
+	}
+
+	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+	mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+	}
+	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+			      struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM CONS_COUNT     EV_CMD    EV_COMP   EV_OTHER COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_eq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+			   i, q->consumer_counter, q->u.eq.ev_cmd_count,
+			   q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+	mlxsw_pci->cmd.comp.out_param =
+		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+	mlxsw_pci->cmd.wait_done = true;
+	wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+	struct mlxsw_pci *mlxsw_pci = q->pci;
+	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+	char *eqe;
+	u8 cqn;
+	bool cq_handle = false;
+	int items = 0;
+	int credits = q->count >> 1;
+
+	memset(&active_cqns, 0, sizeof(active_cqns));
+
+	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+		switch (event_type) {
+		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+			q->u.eq.ev_cmd_count++;
+			break;
+		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+			cqn = mlxsw_pci_eqe_cqn_get(eqe);
+			set_bit(cqn, active_cqns);
+			cq_handle = true;
+			q->u.eq.ev_comp_count++;
+			break;
+		default:
+			q->u.eq.ev_other_count++;
+		}
+		if (++items == credits)
+			break;
+	}
+	if (items) {
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	}
+
+	if (!cq_handle)
+		return;
+	for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+		mlxsw_pci_queue_tasklet_schedule(q);
+	}
+}
+
+struct mlxsw_pci_queue_ops {
+	const char *name;
+	enum mlxsw_pci_queue_type type;
+	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+		    struct mlxsw_pci_queue *q);
+	void (*fini)(struct mlxsw_pci *mlxsw_pci,
+		     struct mlxsw_pci_queue *q);
+	void (*tasklet)(unsigned long data);
+	int (*dbg_read)(struct seq_file *s, void *data);
+	u16 elem_count;
+	u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
+	.init		= mlxsw_pci_sdq_init,
+	.fini		= mlxsw_pci_sdq_fini,
+	.dbg_read	= mlxsw_pci_sdq_dbg_read,
+	.elem_count	= MLXSW_PCI_WQE_COUNT,
+	.elem_size	= MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
+	.init		= mlxsw_pci_rdq_init,
+	.fini		= mlxsw_pci_rdq_fini,
+	.dbg_read	= mlxsw_pci_rdq_dbg_read,
+	.elem_count	= MLXSW_PCI_WQE_COUNT,
+	.elem_size	= MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
+	.init		= mlxsw_pci_cq_init,
+	.fini		= mlxsw_pci_cq_fini,
+	.tasklet	= mlxsw_pci_cq_tasklet,
+	.dbg_read	= mlxsw_pci_cq_dbg_read,
+	.elem_count	= MLXSW_PCI_CQE_COUNT,
+	.elem_size	= MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
+	.init		= mlxsw_pci_eq_init,
+	.fini		= mlxsw_pci_eq_fini,
+	.tasklet	= mlxsw_pci_eq_tasklet,
+	.dbg_read	= mlxsw_pci_eq_dbg_read,
+	.elem_count	= MLXSW_PCI_EQE_COUNT,
+	.elem_size	= MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				const struct mlxsw_pci_queue_ops *q_ops,
+				struct mlxsw_pci_queue *q, u8 q_num)
+{
+	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+	int i;
+	int err;
+
+	spin_lock_init(&q->lock);
+	q->num = q_num;
+	q->count = q_ops->elem_count;
+	q->elem_size = q_ops->elem_size;
+	q->type = q_ops->type;
+	q->pci = mlxsw_pci;
+
+	if (q_ops->tasklet)
+		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+	mem_item->size = MLXSW_PCI_AQ_SIZE;
+	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+					     mem_item->size,
+					     &mem_item->mapaddr);
+	if (!mem_item->buf)
+		return -ENOMEM;
+	memset(mem_item->buf, 0, mem_item->size);
+
+	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+	if (!q->elem_info) {
+		err = -ENOMEM;
+		goto err_elem_info_alloc;
+	}
+
+	/* Set up the elem_info entries with pointers into the DMA-mapped
+	 * element memory for easy access later.
+	 */
+	for (i = 0; i < q->count; i++) {
+		struct mlxsw_pci_queue_elem_info *elem_info;
+
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		elem_info->elem =
+			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+	}
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = q_ops->init(mlxsw_pci, mbox, q);
+	if (err)
+		goto err_q_ops_init;
+	return 0;
+
+err_q_ops_init:
+	kfree(q->elem_info);
+err_elem_info_alloc:
+	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+			    mem_item->buf, mem_item->mapaddr);
+	return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+				 const struct mlxsw_pci_queue_ops *q_ops,
+				 struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+	q_ops->fini(mlxsw_pci, q);
+	kfree(q->elem_info);
+	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+			    mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				      const struct mlxsw_pci_queue_ops *q_ops,
+				      u8 num_qs)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_type_group *queue_group;
+	char tmp[16];
+	int i;
+	int err;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+	if (!queue_group->q)
+		return -ENOMEM;
+
+	for (i = 0; i < num_qs; i++) {
+		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+					   &queue_group->q[i], i);
+		if (err)
+			goto err_queue_init;
+	}
+	queue_group->count = num_qs;
+
+	sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+	debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+				    q_ops->dbg_read);
+
+	return 0;
+
+err_queue_init:
+	for (i--; i >= 0; i--)
+		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+	kfree(queue_group->q);
+	return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+				       const struct mlxsw_pci_queue_ops *q_ops)
+{
+	struct mlxsw_pci_queue_type_group *queue_group;
+	int i;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+	for (i = 0; i < queue_group->count; i++)
+		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+	kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	u8 num_sdqs;
+	u8 sdq_log2sz;
+	u8 num_rdqs;
+	u8 rdq_log2sz;
+	u8 num_cqs;
+	u8 cq_log2sz;
+	u8 num_eqs;
+	u8 eq_log2sz;
+	int err;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+	if (err)
+		return err;
+
+	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+	if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+	    (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+	    (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+	    (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+		dev_err(&pdev->dev, "Unsupported number of queues\n");
+		return -EINVAL;
+	}
+
+	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+		return -EINVAL;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+					 num_eqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize event queues\n");
+		return err;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+					 num_cqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+		goto err_cqs_init;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+					 num_sdqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+		goto err_sdqs_init;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+					 num_rdqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+		goto err_rdqs_init;
+	}
+
+	/* Until now the command interface had to be polled; with the event
+	 * queue up, command completions arrive as events, so polling can be
+	 * switched off.
+	 */
+	mlxsw_pci->cmd.nopoll = true;
+	return 0;
+
+err_rdqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+	return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+	mlxsw_pci->cmd.nopoll = false;
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+				     char *mbox, int index,
+				     const struct mlxsw_swid_config *swid)
+{
+	u8 mask = 0;
+
+	if (swid->used_type) {
+		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+			mbox, index, swid->type);
+		mask |= 1;
+	}
+	if (swid->used_properties) {
+		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+			mbox, index, swid->properties);
+		mask |= 2;
+	}
+	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				    const struct mlxsw_config_profile *profile)
+{
+	int i;
+
+	mlxsw_cmd_mbox_zero(mbox);
+
+	if (profile->used_max_vepa_channels) {
+		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+			mbox, profile->max_vepa_channels);
+	}
+	if (profile->used_max_lag) {
+		mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_lag_set(
+			mbox, profile->max_lag);
+	}
+	if (profile->used_max_port_per_lag) {
+		mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+			mbox, profile->max_port_per_lag);
+	}
+	if (profile->used_max_mid) {
+		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_mid_set(
+			mbox, profile->max_mid);
+	}
+	if (profile->used_max_pgt) {
+		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_pgt_set(
+			mbox, profile->max_pgt);
+	}
+	if (profile->used_max_system_port) {
+		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_system_port_set(
+			mbox, profile->max_system_port);
+	}
+	if (profile->used_max_vlan_groups) {
+		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+			mbox, profile->max_vlan_groups);
+	}
+	if (profile->used_max_regions) {
+		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_regions_set(
+			mbox, profile->max_regions);
+	}
+	if (profile->used_flood_tables) {
+		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+			mbox, profile->max_flood_tables);
+		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+			mbox, profile->max_vid_flood_tables);
+	}
+	if (profile->used_flood_mode) {
+		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_flood_mode_set(
+			mbox, profile->flood_mode);
+	}
+	if (profile->used_max_ib_mc) {
+		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+			mbox, profile->max_ib_mc);
+	}
+	if (profile->used_max_pkey) {
+		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_pkey_set(
+			mbox, profile->max_pkey);
+	}
+	if (profile->used_ar_sec) {
+		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_ar_sec_set(
+			mbox, profile->ar_sec);
+	}
+	if (profile->used_adaptive_routing_group_cap) {
+		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+			mbox, profile->adaptive_routing_group_cap);
+	}
+
+	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+						     &profile->swid_config[i]);
+
+	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+	int err;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+	if (err)
+		return err;
+	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+	return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				  u16 num_pages)
+{
+	struct mlxsw_pci_mem_item *mem_item;
+	int i;
+	int err;
+
+	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+					   GFP_KERNEL);
+	if (!mlxsw_pci->fw_area.items)
+		return -ENOMEM;
+	mlxsw_pci->fw_area.num_pages = num_pages;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	for (i = 0; i < num_pages; i++) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		mem_item->size = MLXSW_PCI_PAGE_SIZE;
+		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+						     mem_item->size,
+						     &mem_item->mapaddr);
+		if (!mem_item->buf) {
+			err = -ENOMEM;
+			goto err_alloc;
+		}
+		mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+	}
+
+	err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+	if (err)
+		goto err_cmd_map_fa;
+
+	return 0;
+
+err_cmd_map_fa:
+err_alloc:
+	for (i--; i >= 0; i--) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+				    mem_item->buf, mem_item->mapaddr);
+	}
+	kfree(mlxsw_pci->fw_area.items);
+	return err;
+}
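+
+/* Note that both error labels above share a single unwind loop: the index
+ * i doubles as the count of successfully mapped pages, and on the
+ * err_cmd_map_fa path i == num_pages, so every mapped page is freed.
+ */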
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+	struct mlxsw_pci_mem_item *mem_item;
+	int i;
+
+	mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+	for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+				    mem_item->buf, mem_item->mapaddr);
+	}
+	kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_id;
+	struct mlxsw_pci_queue *q;
+	int i;
+
+	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+		q = mlxsw_pci_eq_get(mlxsw_pci, i);
+		mlxsw_pci_queue_tasklet_schedule(q);
+	}
+	return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+			  const struct mlxsw_config_profile *profile)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	char *mbox;
+	u16 num_pages;
+	int err;
+
+	mutex_init(&mlxsw_pci->cmd.lock);
+	init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+	mlxsw_pci->core = mlxsw_core;
+
+	mbox = mlxsw_cmd_mbox_alloc();
+	if (!mbox)
+		return -ENOMEM;
+	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+	if (err)
+		goto err_query_fw;
+
+	mlxsw_pci->bus_info.fw_rev.major =
+		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+	mlxsw_pci->bus_info.fw_rev.minor =
+		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+	mlxsw_pci->bus_info.fw_rev.subminor =
+		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+		err = -EINVAL;
+		goto err_iface_rev;
+	}
+	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+		err = -EINVAL;
+		goto err_doorbell_page_bar;
+	}
+
+	mlxsw_pci->doorbell_offset =
+		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+	if (err)
+		goto err_fw_area_init;
+
+	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+	if (err)
+		goto err_boardinfo;
+
+	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+	if (err)
+		goto err_config_profile;
+
+	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+	if (err)
+		goto err_aqs_init;
+
+	err = request_irq(mlxsw_pci->msix_entry.vector,
+			  mlxsw_pci_eq_irq_handler, 0,
+			  mlxsw_pci_driver_name, mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "IRQ request failed\n");
+		goto err_request_eq_irq;
+	}
+
+	goto mbox_put;
+
+err_request_eq_irq:
+	mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+	mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+mbox_put:
+	mlxsw_cmd_mbox_free(mbox);
+	return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+	free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+	mlxsw_pci_aqs_fini(mlxsw_pci);
+	mlxsw_pci_fw_area_fini(mlxsw_pci);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+		   const struct mlxsw_tx_info *tx_info)
+{
+	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
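+
+/* For illustration: SDQ selection is a plain modulo spread - with, say,
+ * 24 SDQs, local port 1 maps to SDQ 1, port 24 to SDQ 0 and port 25 back
+ * to SDQ 1, distributing transmit load across the send queues.
+ */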
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+				  const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct mlxsw_pci_queue *q;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	int i;
+	int err;
+
+	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+		err = skb_linearize(skb);
+		if (err)
+			return err;
+	}
+
+	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+	spin_lock_bh(&q->lock);
+	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+	if (!elem_info) {
+		/* queue is full */
+		err = -EAGAIN;
+		goto unlock;
+	}
+	elem_info->u.sdq.skb = skb;
+
+	wqe = elem_info->elem;
+	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+				     skb_headlen(skb), DMA_TO_DEVICE);
+	if (err)
+		goto unlock;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+					     skb_frag_address(frag),
+					     skb_frag_size(frag),
+					     DMA_TO_DEVICE);
+		if (err)
+			goto unmap_frags;
+	}
+
+	/* Set unused sq entries byte count to zero. */
+	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+	/* Everything is set up, ring producer doorbell to get HW going */
+	q->producer_counter++;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+	goto unlock;
+
+unmap_frags:
+	for (; i >= 0; i--)
+		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+	spin_unlock_bh(&q->lock);
+	return err;
+}
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+			      u32 in_mod, bool out_mbox_direct,
+			      char *in_mbox, size_t in_mbox_size,
+			      char *out_mbox, size_t out_mbox_size,
+			      u8 *p_status)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	dma_addr_t in_mapaddr = 0;
+	dma_addr_t out_mapaddr = 0;
+	bool evreq = mlxsw_pci->cmd.nopoll;
+	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+	int err;
+
+	*p_status = MLXSW_CMD_STATUS_OK;
+
+	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+	if (err)
+		return err;
+
+	if (in_mbox) {
+		in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
+					    in_mbox_size, PCI_DMA_TODEVICE);
+		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+						   in_mapaddr))) {
+			err = -EIO;
+			goto err_in_mbox_map;
+		}
+	}
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+	if (out_mbox) {
+		out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
+					     out_mbox_size, PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+						   out_mapaddr))) {
+			err = -EIO;
+			goto err_out_mbox_map;
+		}
+	}
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+	*p_wait_done = false;
+
+	wmb(); /* all must be written before we write the control register */
+	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+			  MLXSW_PCI_CIR_CTRL_GO_BIT |
+			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+			  opcode);
+
+	if (!evreq) {
+		unsigned long end;
+
+		end = jiffies + timeout;
+		do {
+			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+				*p_wait_done = true;
+				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+				break;
+			}
+			cond_resched();
+		} while (time_before(jiffies, end));
+	} else {
+		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+		*p_status = mlxsw_pci->cmd.comp.status;
+	}
+
+	err = 0;
+	if (*p_wait_done) {
+		if (*p_status)
+			err = -EIO;
+	} else {
+		err = -ETIMEDOUT;
+	}
+
+	if (!err && out_mbox && out_mbox_direct) {
+		/* Some commands do not use the output param as an address to
+		 * a mailbox but store their output directly in registers. In
+		 * that case, copy the registers into the mbox buffer.
+		 */
+		__be32 tmp;
+
+		if (!evreq) {
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_HI));
+			memcpy(out_mbox, &tmp, sizeof(tmp));
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_LO));
+			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+		}
+	}
+
+	if (out_mapaddr)
+		pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
+				 PCI_DMA_FROMDEVICE);
+
+	/* fall through */
+
+err_out_mbox_map:
+	if (in_mapaddr)
+		pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
+				 PCI_DMA_TODEVICE);
+err_in_mbox_map:
+	mutex_unlock(&mlxsw_pci->cmd.lock);
+
+	return err;
+}
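+
+/* For context: until mlxsw_pci_aqs_init() sets cmd.nopoll,
+ * mlxsw_pci_cmd_exec() busy-polls the GO bit in CIR_CTRL; afterwards the
+ * EVREQ bit is set instead and completion arrives via the command EQE
+ * handled by the event queue tasklet.
+ */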
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+	.kind		= "pci",
+	.init		= mlxsw_pci_init,
+	.fini		= mlxsw_pci_fini,
+	.skb_transmit	= mlxsw_pci_skb_transmit,
+	.cmd_exec	= mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+	/* Current firmware does not let us know when the reset is done.
+	 * So we just wait for a fixed amount of time and hope for the best.
+	 */
+	msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+	return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mlxsw_pci *mlxsw_pci;
+	int err;
+
+	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+	if (!mlxsw_pci)
+		return -ENOMEM;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed\n");
+		goto err_pci_enable_device;
+	}
+
+	err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed\n");
+		goto err_pci_request_regions;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	}
+
+	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+		dev_err(&pdev->dev, "invalid PCI region size\n");
+		err = -EINVAL;
+		goto err_pci_resource_len_check;
+	}
+
+	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+				     pci_resource_len(pdev, 0));
+	if (!mlxsw_pci->hw_addr) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -EIO;
+		goto err_ioremap;
+	}
+	pci_set_master(pdev);
+
+	mlxsw_pci->pdev = pdev;
+	pci_set_drvdata(pdev, mlxsw_pci);
+
+	err = mlxsw_pci_sw_reset(mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "Software reset failed\n");
+		goto err_sw_reset;
+	}
+
+	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+	if (err) {
+		dev_err(&pdev->dev, "MSI-X init failed\n");
+		goto err_msix_init;
+	}
+
+	mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+	mlxsw_pci->bus_info.dev = &pdev->dev;
+
+	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+						mlxsw_pci_dbg_root);
+	if (!mlxsw_pci->dbg_dir) {
+		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+		goto err_dbg_create_dir;
+	}
+
+	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+					     &mlxsw_pci_bus, mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register bus device\n");
+		goto err_bus_device_register;
+	}
+
+	return 0;
+
+err_bus_device_register:
+	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+	pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+	iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+	pci_release_regions(pdev);
+err_pci_request_regions:
+	pci_disable_device(pdev);
+err_pci_enable_device:
+	kfree(mlxsw_pci);
+	return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+	pci_disable_msix(mlxsw_pci->pdev);
+	iounmap(mlxsw_pci->hw_addr);
+	pci_release_regions(mlxsw_pci->pdev);
+	pci_disable_device(mlxsw_pci->pdev);
+	kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+	.name		= mlxsw_pci_driver_name,
+	.id_table	= mlxsw_pci_id_table,
+	.probe		= mlxsw_pci_probe,
+	.remove		= mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+	int err;
+
+	mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+	if (!mlxsw_pci_dbg_root)
+		return -ENOMEM;
+	err = pci_register_driver(&mlxsw_pci_driver);
+	if (err)
+		goto err_register_driver;
+	return 0;
+
+err_register_driver:
+	debugfs_remove_recursive(mlxsw_pci_dbg_root);
+	return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+	pci_unregister_driver(&mlxsw_pci_driver);
+	debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 0000000..887af84
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,221 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2	0xc738
+#define MLXSW_PCI_BAR0_SIZE		(1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE		4096
+
+#define MLXSW_PCI_CIR_BASE			0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI		MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER		(MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI		(MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN			(MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL			(MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT		BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT		BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT	12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT		24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS		1000
+
+#define MLXSW_PCI_SW_RESET			0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET		0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET		0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET		0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET		0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET	0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET	0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num)	\
+	((offset) + (type_offset) + (num) * 4)
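+
+/* Worked example: ringing SDQ number 3 computes
+ * MLXSW_PCI_DOORBELL(off, MLXSW_PCI_DOORBELL_SDQ_OFFSET, 3)
+ *	== off + 0x000 + 3 * 4 == off + 0x00C,
+ * where "off" is the doorbell page offset reported by the QUERY_FW command.
+ */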
+
+#define MLXSW_PCI_RDQS_COUNT	24
+#define MLXSW_PCI_SDQS_COUNT	24
+#define MLXSW_PCI_CQS_COUNT	(MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT	2
+#define MLXSW_PCI_EQ_ASYNC_NUM	0
+#define MLXSW_PCI_EQ_COMP_NUM	1
+
+#define MLXSW_PCI_AQ_PAGES	8
+#define MLXSW_PCI_AQ_SIZE	(MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT	0x80
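+
+/* Worked numbers: each queue spans MLXSW_PCI_AQ_SIZE = 8 * 4096 = 32768
+ * bytes, which yields 32768 / 32 = 1024 WQEs per descriptor queue and
+ * 32768 / 16 = 2048 CQEs/EQEs per completion/event queue.
+ */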
+
+#define MLXSW_PCI_WQE_SG_ENTRIES	3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET	0xA
+
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
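+
+/* A note on the MLXSW_ITEM* macros (defined in item.h, included above):
+ * each invocation expands into typed inline accessors named after the
+ * arguments, e.g. MLXSW_ITEM32(pci, wqe, c, ...) yields
+ * mlxsw_pci_wqe_c_get() and mlxsw_pci_wqe_c_set(), which is why the
+ * helpers used from pci.c have no explicit prototypes here.
+ */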
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP	0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD	0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif /* _MLXSW_PCI_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 0000000..726f543
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU		10000
+
+#define MLXSW_PORT_DEFAULT_VID		1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT	255
+#define MLXSW_PORT_SWID_ALL_SWIDS	254
+#define MLXSW_PORT_SWID_TYPE_ETH	2
+
+#define MLXSW_PORT_MID			0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS	0x40
+#define MLXSW_PORT_MAX_PORTS		MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET	10
+#define MLXSW_PORT_PHY_BITS_OFFSET	4
+#define MLXSW_PORT_PHY_BITS_MASK	(MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT		0x0
+
+#define MLXSW_PORT_DONT_CARE		(MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+	MLXSW_PORT_ADMIN_STATUS_UP = 1,
+	MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+	MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+	MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+	MLXSW_PORT_OPER_STATUS_UP = 1,
+	MLXSW_PORT_OPER_STATUS_DOWN = 2,
+	MLXSW_PORT_OPER_STATUS_FAILURE = 4,	/* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 0000000..b5a72f8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,1289 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+	u16 id;
+	u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+	.id = MLXSW_REG_SGCR_ID,
+	.len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+	MLXSW_REG_ZERO(sgcr, payload);
+	mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
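+
+/* A minimal usage sketch, assuming the core's register access helper
+ * mlxsw_reg_write() (declared outside this file):
+ *
+ *	char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ *
+ *	mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
+ *
+ * The same pack-then-write pattern applies to the registers below.
+ */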
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+	.id = MLXSW_REG_SPAD_ID,
+	.len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In a multi-chip configuration, each device should maintain a mapping from
+ * Multicast ID (MID) to a list of local ports. This mapping is used in all
+ * the devices other than the ingress device, and is implemented as part of
+ * the FDB. The MID record maps from a MID, which is a unique identifier of
+ * the multicast group within the stacking domain, into a list of local ports
+ * into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+	.id = MLXSW_REG_SMID_ID,
+	.len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+	MLXSW_REG_ZERO(smid, payload);
+	mlxsw_reg_smid_swid_set(payload, 0);
+	mlxsw_reg_smid_mid_set(payload, mid);
+	mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+	.id = MLXSW_REG_SPMS_ID,
+	.len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+	MLXSW_REG_SPMS_STATE_NO_CHANGE,
+	MLXSW_REG_SPMS_STATE_DISCARDING,
+	MLXSW_REG_SPMS_STATE_LEARNING,
+	MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+				       enum mlxsw_reg_spms_state state)
+{
+	MLXSW_REG_ZERO(spms, payload);
+	mlxsw_reg_spms_local_port_set(payload, local_port);
+	mlxsw_reg_spms_state_set(payload, vid, state);
+}
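+
+/* For illustration: the state field is a 2-bit per-VID array, so a single
+ * payload can carry states for several VIDs - pack once and call
+ * mlxsw_reg_spms_state_set(payload, vid, state) for each additional VID
+ * before writing the register.
+ */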
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID  0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+	.id = MLXSW_REG_SFGC_ID,
+	.len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+	MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+	MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+	MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+	MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+	MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+	MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+	MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+	MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+	MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+	MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+	MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+		    enum mlxsw_reg_sfgc_bridge_type bridge_type,
+		    enum mlxsw_flood_table_type table_type,
+		    unsigned int flood_table)
+{
+	MLXSW_REG_ZERO(sfgc, payload);
+	mlxsw_reg_sfgc_type_set(payload, type);
+	mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+	mlxsw_reg_sfgc_table_type_set(payload, table_type);
+	mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+	mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+	.id = MLXSW_REG_SFTR_ID,
+	.len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_cpu_port_mask
+ * CPU port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+				       unsigned int flood_table,
+				       unsigned int index,
+				       enum mlxsw_flood_table_type table_type,
+				       unsigned int range)
+{
+	MLXSW_REG_ZERO(sftr, payload);
+	mlxsw_reg_sftr_swid_set(payload, 0);
+	mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+	mlxsw_reg_sftr_index_set(payload, index);
+	mlxsw_reg_sftr_table_type_set(payload, table_type);
+	mlxsw_reg_sftr_range_set(payload, range);
+	mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+	.id = MLXSW_REG_SPMLR_ID,
+	.len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+	MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+	MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+	MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+					enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	MLXSW_REG_ZERO(spmlr, payload);
+	mlxsw_reg_spmlr_local_port_set(payload, local_port);
+	mlxsw_reg_spmlr_sub_port_set(payload, 0);
+	mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+	.id = MLXSW_REG_PMLP_ID,
+	.len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(pmlp, payload);
+	mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+	.id = MLXSW_REG_PMTU_ID,
+	.len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum of the max_mtu values of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller or equal to max_mtu.
+ * Note: If port type is InfiniBand, then the port must be disabled when
+ * its MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC, oper_mtu == admin_mtu; in InfiniBand, however,
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+				       u16 new_mtu)
+{
+	MLXSW_REG_ZERO(pmtu, payload);
+	mlxsw_reg_pmtu_local_port_set(payload, local_port);
+	mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+	mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+	mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
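+
+/* For illustration, assuming the core's mlxsw_reg_query()/mlxsw_reg_write()
+ * helpers: an MTU change is naturally a read-modify-write - query PMTU to
+ * learn max_mtu, validate the requested value against it, then pack with
+ * the new admin_mtu and write the register back.
+ */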
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+	.id = MLXSW_REG_PTYS_ID,
+	.len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH	BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - InfiniBand.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII			BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX		BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4		BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4		BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR		BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2		BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4		BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4		BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4		BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR		BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR		BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR		BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4		BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4	BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4		BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4		BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4		BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4		BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4	BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX		BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T		BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T		BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR		BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR		BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR		BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2		BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2		BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+				       u32 proto_admin)
+{
+	MLXSW_REG_ZERO(ptys, payload);
+	mlxsw_reg_ptys_local_port_set(payload, local_port);
+	mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+	mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+					 u32 *p_eth_proto_adm,
+					 u32 *p_eth_proto_oper)
+{
+	if (p_eth_proto_cap)
+		*p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+	if (p_eth_proto_adm)
+		*p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+	if (p_eth_proto_oper)
+		*p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
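+
+/* A minimal query sketch, assuming the core's mlxsw_reg_query() helper:
+ *
+ *	char ptys_pl[MLXSW_REG_PTYS_LEN];
+ *	u32 eth_proto_cap, eth_proto_adm, eth_proto_oper;
+ *
+ *	mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
+ *	if (!err)
+ *		mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ *				      &eth_proto_adm, &eth_proto_oper);
+ */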
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+	.id = MLXSW_REG_PPAD_ID,
+	.len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0: base_mac - local_port should be 0 and mac[7:0] is
+ * reserved; HW will set the MAC incrementally.
+ * 1: single_mac - the MAC of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number; if single_base_mac = 0 then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+				       u8 local_port)
+{
+	MLXSW_REG_ZERO(ppad, payload);
+	mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+	mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+	.id = MLXSW_REG_PAOS_ID,
+	.len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+				       enum mlxsw_port_admin_status status)
+{
+	MLXSW_REG_ZERO(paos, payload);
+	mlxsw_reg_paos_swid_set(payload, 0);
+	mlxsw_reg_paos_local_port_set(payload, local_port);
+	mlxsw_reg_paos_admin_status_set(payload, status);
+	mlxsw_reg_paos_oper_status_set(payload, 0);
+	mlxsw_reg_paos_ase_set(payload, 1);
+	mlxsw_reg_paos_ee_set(payload, 1);
+	mlxsw_reg_paos_e_set(payload, 1);
+}
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+	.id = MLXSW_REG_PPCNT_ID,
+	.len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate the port with.
+ * Switch partitions are numbered from 0 to 7 inclusive.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counters,
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counters,
+ * valid values: 0 to cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+	     0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+	     0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+	     0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+	     0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+	     0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+	     0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+	     0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+	     0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+	     0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+	     0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+	     0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+	     0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+	     0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+	     0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+	     0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+	     0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+	     0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+	     0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+	     0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(ppcnt, payload);
+	mlxsw_reg_ppcnt_swid_set(payload, 0);
+	mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+	mlxsw_reg_ppcnt_pnat_set(payload, 0);
+	mlxsw_reg_ppcnt_grp_set(payload, 0);
+	mlxsw_reg_ppcnt_clr_set(payload, 0);
+	mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
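+
+/* For illustration: after packing for a port and querying the register,
+ * individual counters are read with the generated getters, e.g.
+ * mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl); querying with the
+ * clr bit set would additionally zero the whole counter group.
+ */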
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+	.id = MLXSW_REG_PSPA_ID,
+	.len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+	MLXSW_REG_ZERO(pspa, payload);
+	mlxsw_reg_pspa_swid_set(payload, swid);
+	mlxsw_reg_pspa_local_port_set(payload, local_port);
+	mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
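+
+/* Usage sketch (illustrative only; in this driver PSPA is only ever
+ * written, see mlxsw_sx_port_swid_set() in switchx2.c):
+ *
+ *	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(pspa), pspa_pl);
+ */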
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to the CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+	.id = MLXSW_REG_HTGT_ID,
+	.len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0	/* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD	0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX	0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+	MLXSW_REG_HTGT_POLICER_DISABLE,
+	MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD	0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX	0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+	u8 swid, rdq;
+
+	MLXSW_REG_ZERO(htgt, payload);
+	if (trap_group == MLXSW_REG_HTGT_TRAP_GROUP_EMAD) {
+		swid = MLXSW_PORT_SWID_ALL_SWIDS;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+	} else {
+		swid = 0;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+	}
+	mlxsw_reg_htgt_swid_set(payload, swid);
+	mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+	mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+	mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+	mlxsw_reg_htgt_pid_set(payload, 0);
+	mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+	mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+	mlxsw_reg_htgt_priority_set(payload, 0);
+	mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+	mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
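+
+/* Usage sketch (illustrative only): a trap group is typically configured
+ * once, before any HPKT configuration that refers to it:
+ *
+ *	char htgt_pl[MLXSW_REG_HTGT_LEN];
+ *
+ *	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(htgt), htgt_pl);
+ */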
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+	.id = MLXSW_REG_HPKT_ID,
+	.len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+	MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+	MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+	MLXSW_REG_HPKT_ACTION_FORWARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_DISCARD,
+	MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+	MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+	MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+	MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+				       u8 trap_group, u16 trap_id)
+{
+	MLXSW_REG_ZERO(hpkt, payload);
+	mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+	mlxsw_reg_hpkt_action_set(payload, action);
+	mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+	mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+	mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
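+
+/* Usage sketch (illustrative only): to trap, say, LLDP packets to the CPU
+ * via the RX trap group:
+ *
+ *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *
+ *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ *			    MLXSW_REG_HTGT_TRAP_GROUP_RX, MLXSW_TRAP_ID_LLDP);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
+ */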
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+	switch (reg_id) {
+	case MLXSW_REG_SGCR_ID:
+		return "SGCR";
+	case MLXSW_REG_SPAD_ID:
+		return "SPAD";
+	case MLXSW_REG_SMID_ID:
+		return "SMID";
+	case MLXSW_REG_SPMS_ID:
+		return "SPMS";
+	case MLXSW_REG_SFGC_ID:
+		return "SFGC";
+	case MLXSW_REG_SFTR_ID:
+		return "SFTR";
+	case MLXSW_REG_SPMLR_ID:
+		return "SPMLR";
+	case MLXSW_REG_PMLP_ID:
+		return "PMLP";
+	case MLXSW_REG_PMTU_ID:
+		return "PMTU";
+	case MLXSW_REG_PTYS_ID:
+		return "PTYS";
+	case MLXSW_REG_PPAD_ID:
+		return "PPAD";
+	case MLXSW_REG_PAOS_ID:
+		return "PAOS";
+	case MLXSW_REG_PPCNT_ID:
+		return "PPCNT";
+	case MLXSW_REG_PSPA_ID:
+		return "PSPA";
+	case MLXSW_REG_HTGT_ID:
+		return "HTGT";
+	case MLXSW_REG_HPKT_ID:
+		return "HPKT";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not bring
+ *     the port up again until explicitly instructed by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 0000000..29b46ee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1552 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+	struct mlxsw_sx_port **ports;
+	struct mlxsw_core *core;
+	const struct mlxsw_bus_info *bus_info;
+	u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
+	u32			tx_dropped;
+};
+
+struct mlxsw_sx_port {
+	struct net_device *dev;
+	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+	struct mlxsw_sx *mlxsw_sx;
+	u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress port of the egress device.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+				     const struct mlxsw_tx_info *tx_info)
+{
+	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+	bool is_emad = tx_info->is_emad;
+
+	memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+	/* We currently set default values for the egress tclass (QoS). */
+	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+						  MLXSW_TXHDR_ETCLASS_5);
+	mlxsw_tx_hdr_swid_set(txhdr, 0);
+	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+					      MLXSW_TXHDR_RDQ_OTHER);
+	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+					       MLXSW_TXHDR_NOT_EMAD);
+	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+					  bool is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+			    MLXSW_PORT_ADMIN_STATUS_DOWN);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+					 bool *p_is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+	u8 oper_status;
+	int err;
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+	if (err)
+		return err;
+	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+	return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+	int max_mtu;
+	int err;
+
+	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+	if (err)
+		return err;
+	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+	if (mtu > max_mtu)
+		return -EINVAL;
+
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+				      bool *p_usable)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+	int err;
+
+	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+	if (err)
+		return err;
+	*p_usable = !!mlxsw_reg_pmlp_width_get(pmlp_pl);
+	return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+	if (err)
+		return err;
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
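+/* On -EAGAIN the reallocated copy is freed and NETDEV_TX_BUSY is returned,
+ * so that the stack retries with the original skb, which it still owns.
+ */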
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+	const struct mlxsw_tx_info tx_info = {
+		.local_port = mlxsw_sx_port->local_port,
+		.is_emad = false,
+	};
+	struct sk_buff *skb_old = NULL;
+	int err;
+
+	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+		struct sk_buff *skb_new;
+
+		skb_old = skb;
+		skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+		if (!skb_new) {
+			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+			dev_kfree_skb_any(skb_old);
+			return NETDEV_TX_OK;
+		}
+		skb = skb_new;
+	}
+	mlxsw_sx_txhdr_construct(skb, &tx_info);
+	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+	if (err == -EAGAIN) {
+		if (skb_old)
+			dev_kfree_skb_any(skb);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb_old)
+		dev_kfree_skb_any(skb_old);
+
+	if (!err) {
+		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->tx_packets++;
+		pcpu_stats->tx_bytes += skb->len;
+		u64_stats_update_end(&pcpu_stats->syncp);
+	} else {
+		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+		dev_kfree_skb_any(skb);
+	}
+	return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+	if (err)
+		return err;
+	dev->mtu = mtu;
+	return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+			  struct rtnl_link_stats64 *stats)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx_port_pcpu_stats *p;
+	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+	u32 tx_dropped = 0;
+	unsigned int start;
+	int i;
+
+	for_each_possible_cpu(i) {
+		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rx_packets	= p->rx_packets;
+			rx_bytes	= p->rx_bytes;
+			tx_packets	= p->tx_packets;
+			tx_bytes	= p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets	+= rx_packets;
+		stats->rx_bytes		+= rx_bytes;
+		stats->tx_packets	+= tx_packets;
+		stats->tx_bytes		+= tx_bytes;
+		/* tx_dropped is u32, updated without syncp protection. */
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->tx_dropped	= tx_dropped;
+	return stats;
+}
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+	.ndo_open		= mlxsw_sx_port_open,
+	.ndo_stop		= mlxsw_sx_port_stop,
+	.ndo_start_xmit		= mlxsw_sx_port_xmit,
+	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
+	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+				      struct ethtool_drvinfo *drvinfo)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d",
+		 mlxsw_sx->bus_info->fw_rev.major,
+		 mlxsw_sx->bus_info->fw_rev.minor,
+		 mlxsw_sx->bus_info->fw_rev.subminor);
+	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+		sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+	char str[ETH_GSTRING_LEN];
+	u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+	{
+		.str = "a_frames_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+	},
+	{
+		.str = "a_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+	},
+	{
+		.str = "a_frame_check_sequence_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+	},
+	{
+		.str = "a_alignment_errors",
+		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+	},
+	{
+		.str = "a_octets_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+	},
+	{
+		.str = "a_octets_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+	},
+	{
+		.str = "a_in_range_length_errors",
+		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+	},
+	{
+		.str = "a_out_of_range_length_field",
+		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+	},
+	{
+		.str = "a_frame_too_long_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+	},
+	{
+		.str = "a_symbol_error_during_carrier",
+		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+	},
+	{
+		.str = "a_mac_control_frames_transmitted",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+	},
+	{
+		.str = "a_mac_control_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+	},
+	{
+		.str = "a_unsupported_opcodes_received",
+		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_xmitted",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+	},
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+				      u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+	int i;
+	int err;
+
+	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
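+	/* On query failure report zeroed counters rather than stale data. */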
+	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return MLXSW_SX_PORT_HW_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+struct mlxsw_sx_port_link_mode {
+	u32 mask;
+	u32 supported;
+	u32 advertised;
+	u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+		.supported	= SUPPORTED_100baseT_Full,
+		.advertised	= ADVERTISED_100baseT_Full,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+		.supported	= SUPPORTED_1000baseKX_Full,
+		.advertised	= ADVERTISED_1000baseKX_Full,
+		.speed		= 1000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+		.supported	= SUPPORTED_10000baseT_Full,
+		.advertised	= ADVERTISED_10000baseT_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+		.supported	= SUPPORTED_10000baseKX4_Full,
+		.advertised	= ADVERTISED_10000baseKX4_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+		.supported	= SUPPORTED_10000baseKR_Full,
+		.advertised	= ADVERTISED_10000baseKR_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+		.supported	= SUPPORTED_20000baseKR2_Full,
+		.advertised	= ADVERTISED_20000baseKR2_Full,
+		.speed		= 20000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+		.supported	= SUPPORTED_40000baseCR4_Full,
+		.advertised	= ADVERTISED_40000baseCR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+		.supported	= SUPPORTED_40000baseKR4_Full,
+		.advertised	= ADVERTISED_40000baseKR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+		.supported	= SUPPORTED_40000baseSR4_Full,
+		.advertised	= ADVERTISED_40000baseSR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+		.supported	= SUPPORTED_40000baseLR4_Full,
+		.advertised	= ADVERTISED_40000baseLR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+		.speed		= 25000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+		.speed		= 50000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+		.supported	= SUPPORTED_56000baseKR4_Full,
+		.advertised	= ADVERTISED_56000baseKR4_Full,
+		.speed		= 56000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+		.speed		= 100000,
+	},
+};
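+
+/* Entries without supported/advertised masks have no matching ethtool link
+ * mode bits at this time (e.g. the 25G/50G/100G modes); they only map PTYS
+ * bits to a reported speed.
+ */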
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return SUPPORTED_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+		return SUPPORTED_Backplane;
+	return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].supported;
+	}
+	return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].advertised;
+	}
+	return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+					    struct ethtool_cmd *cmd)
+{
+	u32 speed = SPEED_UNKNOWN;
+	u8 duplex = DUPLEX_UNKNOWN;
+	int i;
+
+	if (!carrier_ok)
+		goto out;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+			speed = mlxsw_sx_port_link_mode[i].speed;
+			duplex = DUPLEX_FULL;
+			break;
+		}
+	}
+out:
+	ethtool_cmd_speed_set(cmd, speed);
+	cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return PORT_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+		return PORT_DA;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+		return PORT_NONE;
+
+	return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	u32 eth_proto_oper;
+	int err;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto\n");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+			      &eth_proto_admin, &eth_proto_oper);
+
+	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+					eth_proto_oper, cmd);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (speed == mlxsw_sx_port_link_mode[i].speed)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 speed;
+	u32 eth_proto_new;
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	bool is_up;
+	int err;
+
+	speed = ethtool_cmd_speed(cmd);
+
+	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+		mlxsw_sx_to_ptys_speed(speed);
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto\n");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+	eth_proto_new = eth_proto_new & eth_proto_cap;
+	if (!eth_proto_new) {
+		netdev_err(dev, "Unsupported proto admin requested\n");
+		return -EINVAL;
+	}
+	if (eth_proto_new == eth_proto_admin)
+		return 0;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to set proto admin\n");
+		return err;
+	}
+
+	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+	if (err) {
+		netdev_err(dev, "Failed to get oper status\n");
+		return err;
+	}
+	if (!is_up)
+		return 0;
+
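+	/* The port is operationally up: flap it administratively so the
+	 * new proto admin configuration takes effect on the wire.
+	 */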
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status\n");
+		return err;
+	}
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mlxsw_sx_port_get_strings,
+	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
+	.get_sset_count		= mlxsw_sx_port_get_sset_count,
+	.get_settings		= mlxsw_sx_port_get_settings,
+	.set_settings		= mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+				  struct switchdev_attr *attr)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+	char spad_pl[MLXSW_REG_SPAD_LEN];
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+	return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	struct net_device *dev = mlxsw_sx_port->dev;
+	char ppad_pl[MLXSW_REG_PPAD_LEN];
+	int err;
+
+	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+	/* The last byte of the base MAC address is guaranteed to be
+	 * such that it does not overflow when the local_port value
+	 * is added to it.
+	 */
+	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+	return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				       u16 vid, enum mlxsw_reg_spms_state state)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char *spms_pl;
+	int err;
+
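+	/* SPMS is presumably too large for an on-stack buffer; heap-allocate
+	 * it, as is done for the SMID and SFTR payloads elsewhere.
+	 */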
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				   u32 speed)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				    enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port;
+	struct net_device *dev;
+	bool usable;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+	if (!dev)
+		return -ENOMEM;
+	mlxsw_sx_port = netdev_priv(dev);
+	mlxsw_sx_port->dev = dev;
+	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+	mlxsw_sx_port->local_port = local_port;
+
+	mlxsw_sx_port->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+	if (!mlxsw_sx_port->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+			mlxsw_sx_port->local_port);
+		goto err_dev_addr_get;
+	}
+
+	netif_carrier_off(dev);
+
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+			 NETIF_F_VLAN_CHALLENGED;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all other
+	 * headers.
+	 */
+	dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+	err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_module_check;
+	}
+
+	if (!usable) {
+		dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+			mlxsw_sx_port->local_port);
+		goto port_not_usable;
+	}
+
+	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_swid_set;
+	}
+
+	err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+				      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_speed_set;
+	}
+
+	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_mtu_set;
+	}
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+	if (err)
+		goto err_port_admin_status_set;
+
+	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+					  MLXSW_PORT_DEFAULT_VID,
+					  MLXSW_REG_SPMS_STATE_FORWARDING);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_stp_state_set;
+	}
+
+	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_mac_learning_mode_set;
+	}
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+			mlxsw_sx_port->local_port);
+		goto err_register_netdev;
+	}
+
+	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+	return 0;
+
+err_register_netdev:
+err_port_admin_status_set:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+	free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+	free_netdev(dev);
+	return err;
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+	if (!mlxsw_sx_port)
+		return;
+	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+	free_percpu(mlxsw_sx_port->pcpu_stats);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+	int i;
+
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+		mlxsw_sx_port_remove(mlxsw_sx, i);
+	kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+	size_t alloc_size;
+	int i;
+	int err;
+
+	alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+	if (!mlxsw_sx->ports)
+		return -ENOMEM;
+
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+		err = mlxsw_sx_port_create(mlxsw_sx, i);
+		if (err)
+			goto err_port_create;
+	}
+	return 0;
+
+err_port_create:
+	for (i--; i >= 1; i--)
+		mlxsw_sx_port_remove(mlxsw_sx, i);
+	kfree(mlxsw_sx->ports);
+	return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+				     char *pude_pl, void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx_port *mlxsw_sx_port;
+	enum mlxsw_reg_pude_oper_status status;
+	u8 local_port;
+
+	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+	mlxsw_sx_port = mlxsw_sx->ports[local_port];
+	if (!mlxsw_sx_port) {
+		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+			 local_port);
+		return;
+	}
+
+	status = mlxsw_reg_pude_oper_status_get(pude_pl);
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
+		netdev_info(mlxsw_sx_port->dev, "link up\n");
+		netif_carrier_on(mlxsw_sx_port->dev);
+	} else {
+		netdev_info(mlxsw_sx_port->dev, "link down\n");
+		netif_carrier_off(mlxsw_sx_port->dev);
+	}
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+	.func = mlxsw_sx_pude_event_func,
+	.trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+				   enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int err;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	}
+	err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+	if (err)
+		return err;
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+	if (err)
+		goto err_event_trap_set;
+
+	return 0;
+
+err_event_trap_set:
+	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+	return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+				      enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	}
+	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+				      void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+	if (unlikely(!mlxsw_sx_port)) {
+		if (net_ratelimit())
+			dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+				 local_port);
+		return;
+	}
+
+	skb->dev = mlxsw_sx_port->dev;
+
+	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+	u64_stats_update_begin(&pcpu_stats->syncp);
+	pcpu_stats->rx_packets++;
+	pcpu_stats->rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_stats->syncp);
+
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_FDB_MC,
+	},
+	/* Traps for specific L2 packet types, not trapped as FDB MC */
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_STP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LACP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_EAPOL,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LLDP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MMRP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MVRP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_RPVST,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_DHCP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+	},
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+	char htgt_pl[MLXSW_REG_HTGT_LEN];
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+	int err;
+
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+		err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+						      &mlxsw_sx_rx_listener[i],
+						      mlxsw_sx);
+		if (err)
+			goto err_rx_listener_register;
+
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+		if (err)
+			goto err_rx_trap_set;
+	}
+	return 0;
+
+err_rx_trap_set:
+	mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+					  &mlxsw_sx_rx_listener[i],
+					  mlxsw_sx);
+err_rx_listener_register:
+	for (i--; i >= 0; i--) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+						  &mlxsw_sx_rx_listener[i],
+						  mlxsw_sx);
+	}
+	return err;
+}
+
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+						  &mlxsw_sx_rx_listener[i],
+						  mlxsw_sx);
+	}
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+	char sfgc_pl[MLXSW_REG_SFGC_LEN];
+	char sgcr_pl[MLXSW_REG_SGCR_LEN];
+	char *smid_pl;
+	char *sftr_pl;
+	int err;
+
+	/* Due to a FW bug, we must configure SMID. */
+	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+	if (!smid_pl)
+		return -ENOMEM;
+	mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+	kfree(smid_pl);
+	if (err)
+		return err;
+
+	/* Configure a flooding table, which includes only the CPU port. */
+	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+	if (!sftr_pl)
+		return -ENOMEM;
+	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+	kfree(sftr_pl);
+	if (err)
+		return err;
+
+	/* Flood different packet types using the flooding table. */
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_BROADCAST,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sgcr_pack(sgcr_pl, true);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+			 const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	int err;
+
+	mlxsw_sx->core = mlxsw_core;
+	mlxsw_sx->bus_info = mlxsw_bus_info;
+
+	err = mlxsw_sx_hw_id_get(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+		return err;
+	}
+
+	err = mlxsw_sx_ports_create(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+		return err;
+	}
+
+	err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+		goto err_event_register;
+	}
+
+	err = mlxsw_sx_traps_init(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+		goto err_rx_listener_register;
+	}
+
+	err = mlxsw_sx_flood_init(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+		goto err_flood_init;
+	}
+
+	return 0;
+
+err_flood_init:
+	mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+	mlxsw_sx_ports_remove(mlxsw_sx);
+	return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+
+	mlxsw_sx_traps_fini(mlxsw_sx);
+	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+	mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+	.used_max_vepa_channels		= 1,
+	.max_vepa_channels		= 0,
+	.used_max_lag			= 1,
+	.max_lag			= 64,
+	.used_max_port_per_lag		= 1,
+	.max_port_per_lag		= 16,
+	.used_max_mid			= 1,
+	.max_mid			= 7000,
+	.used_max_pgt			= 1,
+	.max_pgt			= 0,
+	.used_max_system_port		= 1,
+	.max_system_port		= 48000,
+	.used_max_vlan_groups		= 1,
+	.max_vlan_groups		= 127,
+	.used_max_regions		= 1,
+	.max_regions			= 400,
+	.used_flood_tables		= 1,
+	.max_flood_tables		= 2,
+	.max_vid_flood_tables		= 1,
+	.used_flood_mode		= 1,
+	.flood_mode			= 3,
+	.used_max_ib_mc			= 1,
+	.max_ib_mc			= 0,
+	.used_max_pkey			= 1,
+	.max_pkey			= 0,
+	.swid_config			= {
+		{
+			.used_type	= 1,
+			.type		= MLXSW_PORT_SWID_TYPE_ETH,
+		}
+	},
+};
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+	.kind			= MLXSW_DEVICE_KIND_SWITCHX2,
+	.owner			= THIS_MODULE,
+	.priv_size		= sizeof(struct mlxsw_sx),
+	.init			= mlxsw_sx_init,
+	.fini			= mlxsw_sx_fini,
+	.txhdr_construct	= mlxsw_sx_txhdr_construct,
+	.txhdr_len		= MLXSW_TXHDR_LEN,
+	.profile		= &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+	return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 0000000..53a9550
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+	/* Ethernet EMAD and FDB miss */
+	MLXSW_TRAP_ID_FDB_MC = 0x01,
+	MLXSW_TRAP_ID_ETHEMAD = 0x05,
+	/* L2 traps for specific packet types */
+	MLXSW_TRAP_ID_STP = 0x10,
+	MLXSW_TRAP_ID_LACP = 0x11,
+	MLXSW_TRAP_ID_EAPOL = 0x12,
+	MLXSW_TRAP_ID_LLDP = 0x13,
+	MLXSW_TRAP_ID_MMRP = 0x14,
+	MLXSW_TRAP_ID_MVRP = 0x15,
+	MLXSW_TRAP_ID_RPVST = 0x16,
+	MLXSW_TRAP_ID_DHCP = 0x19,
+	MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+	MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+	MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+	MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+	MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
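+	/* Matches the 9-bit trap_id field of the HPKT register */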
+	MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+	/* Port Up/Down event generated by hardware */
+	MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 0000000..06fc46c7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+	MLXSW_TXHDR_ETH_CTL,
+	MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+	MLXSW_TXHDR_ETCLASS_0,
+	MLXSW_TXHDR_ETCLASS_1,
+	MLXSW_TXHDR_ETCLASS_2,
+	MLXSW_TXHDR_ETCLASS_3,
+	MLXSW_TXHDR_ETCLASS_4,
+	MLXSW_TXHDR_ETCLASS_5,
+	MLXSW_TXHDR_ETCLASS_6,
+	MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+	MLXSW_TXHDR_RDQ_OTHER,
+	MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+	MLXSW_TXHDR_NOT_EMAD,
+	MLXSW_TXHDR_EMAD,
+};
+
+enum {
+	MLXSW_TXHDR_TYPE_DATA,
+	MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif
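
[Editor's note] To see how these constants fit together: every frame handed to the device is prefixed with a 16-byte (MLXSW_TXHDR_LEN) header carrying the fields above. The sketch below is conceptual only; the actual bit offsets are defined by the driver's item helpers elsewhere, and the tx_hdr_set_*() setters are hypothetical stand-ins:

	static void example_build_tx_hdr(struct sk_buff *skb)
	{
		u8 *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);	/* reserve 16 bytes */

		memset(txhdr, 0, MLXSW_TXHDR_LEN);
		tx_hdr_set_version(txhdr, MLXSW_TXHDR_VERSION_0);
		tx_hdr_set_ctl(txhdr, MLXSW_TXHDR_ETH_CTL);	/* control frame */
		tx_hdr_set_proto(txhdr, MLXSW_TXHDR_PROTO_ETH);
		tx_hdr_set_etclass(txhdr, MLXSW_TXHDR_ETCLASS_5);
		tx_hdr_set_rdq(txhdr, MLXSW_TXHDR_RDQ_EMAD);	/* response queue */
		tx_hdr_set_signature(txhdr, MLXSW_TXHDR_SIG);	/* 0xE0E0 */
		tx_hdr_set_type(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
	}
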
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 8aa50ac..a157aaa 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -658,6 +658,8 @@
 	__le32 dptr;	/* Descriptor pointer */
 };
 
+#define DPTR_ALIGN	4	/* Required descriptor pointer alignment */
+
 enum DIE_DT {
 	/* Frame data */
 	DT_FMID		= 0x40,
@@ -739,6 +741,7 @@
 #define RX_QUEUE_OFFSET	4
 #define NUM_RX_QUEUE	2
 #define NUM_TX_QUEUE	2
+#define NUM_TX_DESC	2	/* TX descriptors per packet */
 
 struct ravb_tstamp_skb {
 	struct list_head list;
@@ -777,9 +780,9 @@
 	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
 	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+	void *tx_align[NUM_TX_QUEUE];
 	struct sk_buff **rx_skb[NUM_RX_QUEUE];
 	struct sk_buff **tx_skb[NUM_TX_QUEUE];
-	void **tx_buffers[NUM_TX_QUEUE];
 	u32 rx_over_errors;
 	u32 rx_fifo_errors;
 	struct net_device_stats stats[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 779bb58..3d972d8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -195,12 +195,8 @@
 	priv->tx_skb[q] = NULL;
 
 	/* Free aligned TX buffers */
-	if (priv->tx_buffers[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			kfree(priv->tx_buffers[q][i]);
-	}
-	kfree(priv->tx_buffers[q]);
-	priv->tx_buffers[q] = NULL;
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@
 
 	if (priv->tx_ring[q]) {
 		ring_size = sizeof(struct ravb_tx_desc) *
-			    (priv->num_tx_ring[q] + 1);
+			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
@@ -227,7 +223,8 @@
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+			   NUM_TX_DESC;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -260,11 +257,12 @@
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		tx_desc = &priv->tx_ring[q][i];
+	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+	     i++, tx_desc++) {
+		tx_desc->die_dt = DT_EEMPTY;
+		tx_desc++;
 		tx_desc->die_dt = DT_EEMPTY;
 	}
-	tx_desc = &priv->tx_ring[q][i];
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
 
@@ -285,7 +283,6 @@
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct sk_buff *skb;
 	int ring_size;
-	void *buffer;
 	int i;
 
 	/* Allocate RX and TX skb rings */
@@ -305,19 +302,11 @@
 	}
 
 	/* Allocate rings for the aligned buffers */
-	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
-				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
-	if (!priv->tx_buffers[q])
+	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+				    DPTR_ALIGN - 1, GFP_KERNEL);
+	if (!priv->tx_align[q])
 		goto error;
 
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			goto error;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
-	}
-
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -329,7 +318,8 @@
 	priv->dirty_rx[q] = 0;
 
 	/* Allocate all TX descriptors. */
-	ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+	ring_size = sizeof(struct ravb_tx_desc) *
+		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -443,7 +433,8 @@
 	u32 size;
 
 	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
 		desc = &priv->tx_ring[q][entry];
 		if (desc->die_dt != DT_FEMPTY)
 			break;
@@ -451,14 +442,18 @@
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry]) {
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
 			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
-			dev_kfree_skb_any(priv->tx_skb[q][entry]);
-			priv->tx_skb[q][entry] = NULL;
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				stats->tx_packets++;
+			}
 			free_num++;
 		}
-		stats->tx_packets++;
 		stats->tx_bytes += size;
 		desc->die_dt = DT_EEMPTY;
 	}
@@ -1284,37 +1279,53 @@
 	u32 dma_addr;
 	void *buffer;
 	u32 entry;
+	u32 len;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+	    NUM_TX_DESC) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	entry = priv->cur_tx[q] % priv->num_tx_ring[q];
-	priv->tx_skb[q][entry] = skb;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto drop;
 
-	buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
-	memcpy(buffer, skb->data, skb->len);
-	desc = &priv->tx_ring[q][entry];
-	desc->ds_tagl = cpu_to_le16(skb->len);
-	dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+		 entry / NUM_TX_DESC * DPTR_ALIGN;
+	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+	memcpy(buffer, skb->data, len);
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&ndev->dev, dma_addr))
 		goto drop;
+
+	desc = &priv->tx_ring[q][entry];
+	desc->ds_tagl = cpu_to_le16(len);
+	desc->dptr = cpu_to_le32(dma_addr);
+
+	buffer = skb->data + len;
+	len = skb->len - len;
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&ndev->dev, dma_addr))
+		goto unmap;
+
+	desc++;
+	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
 
 	/* TX timestamp required */
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+			desc--;
+			dma_unmap_single(&ndev->dev, dma_addr, len,
 					 DMA_TO_DEVICE);
-			goto drop;
+			goto unmap;
 		}
 		ts_skb->skb = skb;
 		ts_skb->tag = priv->ts_skb_tag++;
@@ -1330,13 +1341,15 @@
 
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FSINGLE;
+	desc->die_dt = DT_FEND;
+	desc--;
+	desc->die_dt = DT_FSTART;
 
 	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
-	priv->cur_tx[q]++;
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
-	    !ravb_tx_free(ndev, q))
+	priv->cur_tx[q] += NUM_TX_DESC;
+	if (priv->cur_tx[q] - priv->dirty_tx[q] >
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
 		netif_stop_subqueue(ndev, q);
 
 exit:
@@ -1344,9 +1357,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
+unmap:
+	dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry] = NULL;
+	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
 	goto exit;
 }
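
[Editor's note] The rewrite above replaces the per-packet aligned bounce buffers with a two-descriptor scheme: each packet now consumes NUM_TX_DESC (2) descriptors, where the first (DT_FSTART) carries only the unaligned head of skb->data, copied into a small slot of the shared tx_align[q] area, and the second (DT_FEND) maps the rest of the skb in place, now DPTR_ALIGN-aligned. A worked example of the arithmetic, with illustrative values:

	static void example_split_lengths(struct sk_buff *skb)
	{
		/* e.g. skb->data == 0x...1002, skb->len == 60, DPTR_ALIGN == 4 */
		u32 head = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; /* 2 bytes */
		u32 rest = skb->len - head;                               /* 58 bytes */

		/* head -> DT_FSTART descriptor, bounced through tx_align[q];
		 * rest -> DT_FEND descriptor, mapped in place at skb->data + head,
		 * which is now 4-byte aligned, so no copy is needed.
		 */
	}
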
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index e817a1a..b1e5f24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -16,6 +16,46 @@
 #include "stmmac.h"
 #include "stmmac_platform.h"
 
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	if (pdev->dev.of_node) {
+		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+		if (IS_ERR(plat_dat)) {
+			dev_err(&pdev->dev, "dt configuration failed\n");
+			return PTR_ERR(plat_dat);
+		}
+	} else {
+		plat_dat = dev_get_platdata(&pdev->dev);
+		if (!plat_dat) {
+			dev_err(&pdev->dev, "no platform data provided\n");
+			return  -EINVAL;
+		}
+
+		/* Set default value for multicast hash bins */
+		plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+		/* Set default value for unicast filter entries */
+		plat_dat->unicast_filter_entries = 1;
+	}
+
+	/* Custom initialisation (if needed) */
+	if (plat_dat->init) {
+		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+		if (ret)
+			return ret;
+	}
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
 static const struct of_device_id dwmac_generic_match[] = {
 	{ .compatible = "st,spear600-gmac"},
 	{ .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@
 MODULE_DEVICE_TABLE(of, dwmac_generic_match);
 
 static struct platform_driver dwmac_generic_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = dwmac_generic_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = STMMAC_RESOURCE_NAME,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e..333489f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -248,23 +248,40 @@
 	return NULL;
 }
 
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
 {
+	struct ipq806x_gmac *gmac = priv;
+
+	ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
 	struct device *dev = &pdev->dev;
 	struct ipq806x_gmac *gmac;
 	int val;
 	void *err;
 
+	val = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (val)
+		return val;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
 	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
 	if (!gmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	gmac->pdev = pdev;
 
 	err = ipq806x_gmac_of_parse(gmac);
-	if (err) {
+	if (IS_ERR(err)) {
 		dev_err(dev, "device tree parsing error\n");
-		return err;
+		return PTR_ERR(err);
 	}
 
 	regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,7 +302,7 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return NULL;
+		return -EINVAL;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
 
@@ -304,7 +321,7 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return NULL;
+		return -EINVAL;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
 
@@ -327,30 +344,21 @@
 			     0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
 	}
 
-	return gmac;
+	plat_dat->has_gmac = true;
+	plat_dat->bsp_priv = gmac;
+	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
-	struct ipq806x_gmac *gmac = priv;
-
-	ipq806x_gmac_set_speed(gmac, speed);
-}
-
-static const struct stmmac_of_data ipq806x_gmac_data = {
-	.has_gmac	= 1,
-	.setup		= ipq806x_gmac_setup,
-	.fix_mac_speed	= ipq806x_gmac_fix_mac_speed,
-};
-
 static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
-	{ .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+	{ .compatible = "qcom,ipq806x-gmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
 
 static struct platform_driver ipq806x_gmac_dwmac_driver = {
-	.probe = stmmac_pltfr_probe,
+	.probe = ipq806x_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name		= "ipq806x-gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 00a1e1e..11baa4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,7 +46,7 @@
 	struct platform_device *pdev;
 	int phy_iface;
 	struct regulator *regulator;
-	struct rk_gmac_ops *ops;
+	const struct rk_gmac_ops *ops;
 
 	bool clk_enabled;
 	bool clock_input;
@@ -177,7 +177,7 @@
 	}
 }
 
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
 	.set_to_rgmii = rk3288_set_to_rgmii,
 	.set_to_rmii = rk3288_set_to_rmii,
 	.set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@
 	}
 }
 
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
 	.set_to_rgmii = rk3368_set_to_rgmii,
 	.set_to_rmii = rk3368_set_to_rmii,
 	.set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@
 }
 
 static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
-					  struct rk_gmac_ops *ops)
+					  const struct rk_gmac_ops *ops)
 {
 	struct rk_priv_data *bsp_priv;
 	struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@
 	return bsp_priv;
 }
 
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
-	return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
-	return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
 static int rk_gmac_init(struct platform_device *pdev, void *priv)
 {
 	struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@
 		dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
 }
 
-static const struct stmmac_of_data rk3288_gmac_data = {
-	.has_gmac = 1,
-	.fix_mac_speed = rk_fix_speed,
-	.setup = rk3288_gmac_setup,
-	.init = rk_gmac_init,
-	.exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	const struct rk_gmac_ops *data;
+	int ret;
 
-static const struct stmmac_of_data rk3368_gmac_data = {
-	.has_gmac = 1,
-	.fix_mac_speed = rk_fix_speed,
-	.setup = rk3368_gmac_setup,
-	.init = rk_gmac_init,
-	.exit = rk_gmac_exit,
-};
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data) {
+		dev_err(&pdev->dev, "no of match data provided\n");
+		return -EINVAL;
+	}
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	plat_dat->has_gmac = true;
+	plat_dat->init = rk_gmac_init;
+	plat_dat->exit = rk_gmac_exit;
+	plat_dat->fix_mac_speed = rk_fix_speed;
+
+	plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+	if (IS_ERR(plat_dat->bsp_priv))
+		return PTR_ERR(plat_dat->bsp_priv);
+
+	ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
-	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
-	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
 
 static struct platform_driver rk_gmac_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = rk_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "rk_gmac-dwmac",
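
[Editor's note] The per-SoC callbacks now travel through the .data pointer of the OF match table, and rk_gmac_probe() recovers them with of_device_get_match_data(). A minimal sketch of that round trip (my_ops, my_match and my_probe are illustrative names):

	static const struct rk_gmac_ops my_ops = {
		/* SoC-specific set_to_rgmii/set_to_rmii/... callbacks */
	};

	static const struct of_device_id my_match[] = {
		{ .compatible = "vendor,my-gmac", .data = &my_ops },
		{ }
	};

	static int my_probe(struct platform_device *pdev)
	{
		/* Returns &my_ops when the node matched "vendor,my-gmac" */
		const struct rk_gmac_ops *ops = of_device_get_match_data(&pdev->dev);

		if (!ops)
			return -EINVAL;

		/* continue with the common probe sequence */
		return 0;
	}
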
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 8141c5b..401383b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -175,31 +175,6 @@
 	return 0;
 }
 
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
-	struct device		*dev = &pdev->dev;
-	int			ret;
-	struct socfpga_dwmac	*dwmac;
-
-	dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
-
-	ret = socfpga_dwmac_parse_data(dwmac, dev);
-	if (ret) {
-		dev_err(dev, "Unable to parse OF data\n");
-		return ERR_PTR(ret);
-	}
-
-	ret = socfpga_dwmac_setup(dwmac);
-	if (ret) {
-		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	return dwmac;
-}
-
 static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
 {
 	struct socfpga_dwmac	*dwmac = priv;
@@ -257,21 +232,58 @@
 	return ret;
 }
 
-static const struct stmmac_of_data socfpga_gmac_data = {
-	.setup = socfpga_dwmac_probe,
-	.init = socfpga_dwmac_init,
-	.exit = socfpga_dwmac_exit,
-	.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct device		*dev = &pdev->dev;
+	int			ret;
+	struct socfpga_dwmac	*dwmac;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+	if (!dwmac)
+		return -ENOMEM;
+
+	ret = socfpga_dwmac_parse_data(dwmac, dev);
+	if (ret) {
+		dev_err(dev, "Unable to parse OF data\n");
+		return ret;
+	}
+
+	ret = socfpga_dwmac_setup(dwmac);
+	if (ret) {
+		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+		return ret;
+	}
+
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = socfpga_dwmac_init;
+	plat_dat->exit = socfpga_dwmac_exit;
+	plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+	ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id socfpga_dwmac_match[] = {
-	{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+	{ .compatible = "altr,socfpga-stmmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
 
 static struct platform_driver socfpga_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = socfpga_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "socfpga-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a2e8111..7f6f4a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,6 +21,7 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_net.h>
 
 #include "stmmac_platform.h"
@@ -128,6 +129,11 @@
 	struct device *dev;
 	struct regmap *regmap;
 	u32 speed;
+	void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+	void (*fix_retime_src)(void *priv, unsigned int speed);
 };
 
 static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@
 	regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
 }
 
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
 {
+	struct sti_dwmac *dwmac = priv;
 	struct regmap *regmap = dwmac->regmap;
 	int iface = dwmac->interface;
 	struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@
 
 	val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
 	regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
 
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
-	struct sti_dwmac *dwmac = priv;
-	u32 spd = dwmac->speed;
-
-	sti_dwmac_ctrl_init(dwmac);
-
-	stih4xx_fix_retime_src(priv, spd);
-
-	return 0;
-}
-
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
-	struct sti_dwmac *dwmac = priv;
-	u32 spd = dwmac->speed;
-
-	sti_dwmac_ctrl_init(dwmac);
-
-	stid127_fix_retime_src(priv, spd);
+	dwmac->fix_retime_src(priv, dwmac->speed);
 
 	return 0;
 }
@@ -334,36 +321,58 @@
 	return 0;
 }
 
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
 {
+	struct plat_stmmacenet_data *plat_dat;
+	const struct sti_dwmac_of_data *data;
+	struct stmmac_resources stmmac_res;
 	struct sti_dwmac *dwmac;
 	int ret;
 
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data) {
+		dev_err(&pdev->dev, "No OF match data provided\n");
+		return -EINVAL;
+	}
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
 	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	ret = sti_dwmac_parse_data(dwmac, pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to parse OF data\n");
-		return ERR_PTR(ret);
+		return ret;
 	}
 
-	return dwmac;
+	dwmac->fix_retime_src = data->fix_retime_src;
+
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = sti_dwmac_init;
+	plat_dat->exit = sti_dwmac_exit;
+	plat_dat->fix_mac_speed = data->fix_retime_src;
+
+	ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data stih4xx_dwmac_data = {
-	.fix_mac_speed = stih4xx_fix_retime_src,
-	.setup = sti_dwmac_setup,
-	.init = stix4xx_init,
-	.exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+	.fix_retime_src = stih4xx_fix_retime_src,
 };
 
-static const struct stmmac_of_data stid127_dwmac_data = {
-	.fix_mac_speed = stid127_fix_retime_src,
-	.setup = sti_dwmac_setup,
-	.init = stid127_init,
-	.exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+	.fix_retime_src = stid127_fix_retime_src,
 };
 
 static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@
 MODULE_DEVICE_TABLE(of, sti_dwmac_match);
 
 static struct platform_driver sti_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = sti_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "sti-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 15048ca..52b8ed9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -33,35 +33,6 @@
 	struct regulator *regulator;
 };
 
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
-	struct sunxi_priv_data *gmac;
-	struct device *dev = &pdev->dev;
-
-	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
-	if (!gmac)
-		return ERR_PTR(-ENOMEM);
-
-	gmac->interface = of_get_phy_mode(dev->of_node);
-
-	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
-	if (IS_ERR(gmac->tx_clk)) {
-		dev_err(dev, "could not get tx clock\n");
-		return gmac->tx_clk;
-	}
-
-	/* Optional regulator for PHY */
-	gmac->regulator = devm_regulator_get_optional(dev, "phy");
-	if (IS_ERR(gmac->regulator)) {
-		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
-			return ERR_PTR(-EPROBE_DEFER);
-		dev_info(dev, "no regulator found\n");
-		gmac->regulator = NULL;
-	}
-
-	return gmac;
-}
-
 #define SUN7I_GMAC_GMII_RGMII_RATE	125000000
 #define SUN7I_GMAC_MII_RATE		25000000
 
@@ -132,25 +103,67 @@
 	}
 }
 
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
-	.has_gmac = 1,
-	.tx_coe = 1,
-	.fix_mac_speed = sun7i_fix_speed,
-	.setup = sun7i_gmac_setup,
-	.init = sun7i_gmac_init,
-	.exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct sunxi_priv_data *gmac;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+	if (!gmac)
+		return -ENOMEM;
+
+	gmac->interface = of_get_phy_mode(dev->of_node);
+
+	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+	if (IS_ERR(gmac->tx_clk)) {
+		dev_err(dev, "could not get tx clock\n");
+		return PTR_ERR(gmac->tx_clk);
+	}
+
+	/* Optional regulator for PHY */
+	gmac->regulator = devm_regulator_get_optional(dev, "phy");
+	if (IS_ERR(gmac->regulator)) {
+		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "no regulator found\n");
+		gmac->regulator = NULL;
+	}
+
+	/* Platform data specifying hardware features and callbacks.
+	 * Hardware features were copied from Allwinner drivers.
+	 */
+	plat_dat->tx_coe = 1;
+	plat_dat->has_gmac = true;
+	plat_dat->bsp_priv = gmac;
+	plat_dat->init = sun7i_gmac_init;
+	plat_dat->exit = sun7i_gmac_exit;
+	plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+	ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id sun7i_dwmac_match[] = {
-	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+	{ .compatible = "allwinner,sun7i-a20-gmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
 
 static struct platform_driver sun7i_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = sun7i_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "sun7i-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index eca0eb8..1cb6604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -109,30 +109,12 @@
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct plat_stmmacenet_data *plat;
-	const struct stmmac_of_data *data;
 	struct stmmac_dma_cfg *dma_cfg;
 
 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
 	if (!plat)
 		return ERR_PTR(-ENOMEM);
 
-	data = of_device_get_match_data(&pdev->dev);
-	if (data) {
-		plat->has_gmac = data->has_gmac;
-		plat->enh_desc = data->enh_desc;
-		plat->tx_coe = data->tx_coe;
-		plat->rx_coe = data->rx_coe;
-		plat->bugged_jumbo = data->bugged_jumbo;
-		plat->pmt = data->pmt;
-		plat->riwt_off = data->riwt_off;
-		plat->fix_mac_speed = data->fix_mac_speed;
-		plat->bus_setup = data->bus_setup;
-		plat->setup = data->setup;
-		plat->free = data->free;
-		plat->init = data->init;
-		plat->exit = data->exit;
-	}
-
 	*mac = of_get_mac_address(np);
 	plat->interface = of_get_phy_mode(np);
 
@@ -298,69 +280,12 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(stmmac_res->addr))
-		return PTR_ERR(stmmac_res->addr);
 
-	return 0;
+	return PTR_ERR_OR_ZERO(stmmac_res->addr);
 }
 EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
 
 /**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
-{
-	struct plat_stmmacenet_data *plat_dat;
-	struct stmmac_resources stmmac_res;
-	int ret;
-
-	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
-	if (ret)
-		return ret;
-
-	if (pdev->dev.of_node) {
-		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
-		if (IS_ERR(plat_dat)) {
-			dev_err(&pdev->dev, "dt configuration failed\n");
-			return PTR_ERR(plat_dat);
-		}
-	} else {
-		plat_dat = dev_get_platdata(&pdev->dev);
-		if (!plat_dat) {
-			dev_err(&pdev->dev, "no platform data provided\n");
-			return  -EINVAL;
-		}
-
-		/* Set default value for multicast hash bins */
-		plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-		/* Set default value for unicast filter entries */
-		plat_dat->unicast_filter_entries = 1;
-	}
-
-	/* Custom setup (if needed) */
-	if (plat_dat->setup) {
-		plat_dat->bsp_priv = plat_dat->setup(pdev);
-		if (IS_ERR(plat_dat->bsp_priv))
-			return PTR_ERR(plat_dat->bsp_priv);
-	}
-
-	/* Custom initialisation (if needed)*/
-	if (plat_dat->init) {
-		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
-
-/**
  * stmmac_pltfr_remove
  * @pdev: platform device pointer
  * Description: this function calls the main to free the net resources
@@ -375,9 +300,6 @@
 	if (priv->plat->exit)
 		priv->plat->exit(pdev, priv->plat->bsp_priv);
 
-	if (priv->plat->free)
-		priv->plat->free(pdev, priv->plat->bsp_priv);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
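
[Editor's note] With the generic stmmac_pltfr_probe() removed, every glue driver now open-codes the same three steps, as the conversions above show. A minimal skeleton of a new-style glue probe (my_glue_probe and the bsp_priv handling are illustrative):

	static int my_glue_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources stmmac_res;
		int ret;

		/* 1. Map registers and fetch the IRQs */
		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
		if (ret)
			return ret;

		/* 2. Parse the common stmmac DT bindings */
		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* 3. Apply SoC-specific glue (bsp_priv, init/exit, fix_mac_speed)
		 *    and hand over to the core driver.
		 */
		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	}
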
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 84ceb53..ffeb8d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -27,7 +27,6 @@
 int stmmac_get_platform_resources(struct platform_device *pdev,
 				  struct stmmac_resources *stmmac_res);
 
-int stmmac_pltfr_probe(struct platform_device *pdev);
 int stmmac_pltfr_remove(struct platform_device *pdev);
 extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
 
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 0000000..a8f3151
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+	bool "Synopsys devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) device belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Synopsys devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+	tristate "Synopsys DWC Ethernet QoS v4.10a support"
+	select PHYLIB
+	select CRC32
+	select MII
+	depends on OF
+	---help---
+	  This driver supports the DWC Ethernet QoS from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 0000000..7a37572
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644
index 0000000..85b3326
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -0,0 +1,3019 @@
+/*  Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver
+ *
+ *  This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ *  This version introduced many changes that break backward compatibility
+ *  with the non-QoS IP from Synopsys (used in the ST Micro drivers).
+ *  Some fields differ between version 4.00a and 4.10a, mainly the interrupt
+ *  bit fields. The driver could be made compatible with 4.00a if all relevant
+ *  HW errata are handled.
+ *
+ *  The GMAC is highly configurable at synthesis time. This driver has been
+ *  developed for a subset of the total available feature set. Currently
+ *  it supports:
+ *  - TSO
+ *  - Checksum offload for RX and TX.
+ *  - Energy Efficient Ethernet (EEE).
+ *  - GMII PHY interface.
+ *  - The statistics module.
+ *  - Single RX and TX queue.
+ *
+ *  Copyright (C) 2015 Axis Communications AB.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME			"dwceqos"
+#define DRIVER_DESCRIPTION		"Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION			"0.9"
+
+#define DWCEQOS_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+	NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN      8
+#define DWCEQOS_LPI_TIMER_MAX      ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET    0x0000
+#define MAX_MAC_REG_OFFSET      0x0bd0
+#define START_MTL_REG_OFFSET    0x0c00
+#define MAX_MTL_REG_OFFSET      0x0d7c
+#define START_DMA_REG_OFFSET    0x1000
+#define MAX_DMA_REG_OFFSET      0x117C
+
+#define REG_SPACE_SIZE          0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE             0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE      0x1004
+#define REG_DWCEQOS_DMA_IS               0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0        0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL         0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL      0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL      0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST  0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST  0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL  0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL  0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN   0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN   0x1130
+#define REG_DWCEQOS_DMA_CH0_IE           0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC   0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC   0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF    0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG    0x115c
+#define REG_DWCEQOS_DMA_CH0_STA          0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR            BIT(11)
+#define DWCEQOS_DMA_MODE_DA              BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI   BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB       BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL      BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+	(((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK       GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+	(((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK       GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+	(((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT   GENMASK(3, 1)
+
+#define DWCEQOS_DMA_CH_CTRL_PBLX8       BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x)      ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x)       ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START         BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x)   ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP            BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE            BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE           BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE           BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE           BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE           BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE          BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE          BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS             BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS             BIT(16)
+#define DWCEQOS_DMA_IS_MACIS             BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI            BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI            BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU           BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE           BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE           BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS           BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB           GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ   BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR  BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB           GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ   BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR  BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE    BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V   BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V   BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE    BIT(7)
+#define DWCEQOS_DMA_RDES3_ES      BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT    BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x)   ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT      0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP  BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP  BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC     BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT    BIT(30)
+#define DWCEQOS_DMA_TDES3_FD      BIT(29)
+#define DWCEQOS_DMA_TDES3_LD      BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH    BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP    BIT(17)
+#define DWCEQOS_DMA_TDES3_CA      0x00030000
+#define DWCEQOS_DMA_TDES3_TSE     BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x)   ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x)   ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV    BIT(26)
+
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED   0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) (((status0) & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER             0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST         0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST    0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST    0x0d38
+
+#define REG_DWCEQOS_MTL_IS               0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER        0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER        0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT     0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL         0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL         0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT        0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN            BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF              BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ              BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512           0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC             BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF       BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP              BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP              BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF              BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32            BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG              0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG          0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT         0x0008
+#define REG_DWCEQOS_MAC_WD_TO            0x000c
+#define REG_DWCEQOS_HASTABLE_LO          0x0010
+#define REG_DWCEQOS_HASTABLE_HI          0x0014
+#define REG_DWCEQOS_MAC_IS               0x00b0
+#define REG_DWCEQOS_MAC_IE               0x00b4
+#define REG_DWCEQOS_MAC_STAT             0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR        0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA        0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI     0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO     0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0       0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0      0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1      0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2      0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO     0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI     0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS  0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL  0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER  0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER  0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL     0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW	 0x0070
+
+#define DWCEQOS_MAC_CFG_ACS              BIT(20)
+#define DWCEQOS_MAC_CFG_JD               BIT(17)
+#define DWCEQOS_MAC_CFG_JE               BIT(16)
+#define DWCEQOS_MAC_CFG_PS               BIT(15)
+#define DWCEQOS_MAC_CFG_FES              BIT(14)
+#define DWCEQOS_MAC_CFG_DM               BIT(13)
+#define DWCEQOS_MAC_CFG_DO               BIT(10)
+#define DWCEQOS_MAC_CFG_TE               BIT(1)
+#define DWCEQOS_MAC_CFG_IPC              BIT(27)
+#define DWCEQOS_MAC_CFG_RE               BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg)           (0x00000300 + ((reg) * 8))
+#define DWCEQOS_ADDR_LOW(reg)            (0x00000304 + ((reg) * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT           BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT           BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN               BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN       BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA          BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF         BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF         BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF        BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF         BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM          BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF        BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC         BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC         BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR          BIT(0)
+
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x)      (((x) & 15) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20      2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35      3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60      0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100     1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150     4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250     5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ   0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE  BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB         BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN  BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX  BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN  BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX  BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST  BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST  BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN   BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS     BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN   BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA  BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE   BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x)  ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE      (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+					  DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+					  DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE   BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time)	((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL    BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL   BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL  BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL   BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN    BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x)  (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+	(1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x)     (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x)      (((x) & 0x1f) << 16)
+
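
[Editor's note] Taken together, the MDIO address fields above form a single command word. A hedged sketch of how a read command would be assembled (the function name is illustrative; the driver presumably writes the result to REG_DWCEQOS_MAC_MDIO_ADDR and polls GB until it clears):

	static u32 example_mdio_read_cmd(u32 phy_id, u32 phy_reg, u32 csr_val)
	{
		return DWCEQOS_MDIO_PHYADDR(phy_id) |		/* bits 25:21 */
		       DWCEQOS_MDIO_PHYREG(phy_reg) |		/* bits 20:16 */
		       DWCEQOS_MAC_MDIO_ADDR_CR(csr_val) |	/* CSR clock range */
		       DWCEQOS_MAC_MDIO_ADDR_GOC_READ |		/* read command */
		       DWCEQOS_MAC_MDIO_ADDR_GB;		/* busy bit */
	}
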
+#define DWCEQOS_DMA_MODE_SWR            BIT(0)
+
+#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL             0x0700
+#define REG_DWCEQOS_MMC_RXIRQ            0x0704
+#define REG_DWCEQOS_MMC_TXIRQ            0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK        0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK        0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST          BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD         BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR           0x07F0
+#define DWC_MMC_TXLPIUSCNTR              0x07EC
+#define DWC_MMC_TXOVERSIZE_G             0x0778
+#define DWC_MMC_TXVLANPACKETS_G          0x0774
+#define DWC_MMC_TXPAUSEPACKETS           0x0770
+#define DWC_MMC_TXEXCESSDEF              0x076C
+#define DWC_MMC_TXPACKETCOUNT_G          0x0768
+#define DWC_MMC_TXOCTETCOUNT_G           0x0764
+#define DWC_MMC_TXCARRIERERROR           0x0760
+#define DWC_MMC_TXEXCESSCOL              0x075C
+#define DWC_MMC_TXLATECOL                0x0758
+#define DWC_MMC_TXDEFERRED               0x0754
+#define DWC_MMC_TXMULTICOL_G             0x0750
+#define DWC_MMC_TXSINGLECOL_G            0x074C
+#define DWC_MMC_TXUNDERFLOWERROR         0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB    0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB    0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB      0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB     0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB     0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB      0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB      0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB       0x0728
+#define DWC_MMC_TX64OCTETS_GB            0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G     0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G     0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB         0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB          0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR           0x07F8
+#define DWC_MMC_RXLPIUSCNTR              0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G          0x07E4
+#define DWC_MMC_RXRCVERROR               0x07E0
+#define DWC_MMC_RXWATCHDOG               0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB         0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW           0x07D4
+#define DWC_MMC_RXPAUSEPACKETS           0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE         0x07CC
+#define DWC_MMC_RXLENGTHERROR            0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G       0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB     0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB     0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB      0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB      0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB       0x07B0
+#define DWC_MMC_RX64OCTETS_GB            0x07AC
+#define DWC_MMC_RXOVERSIZE_G             0x07A8
+#define DWC_MMC_RXUNDERSIZE_G            0x07A4
+#define DWC_MMC_RXJABBERERROR            0x07A0
+#define DWC_MMC_RXRUNTERROR              0x079C
+#define DWC_MMC_RXALIGNMENTERROR         0x0798
+#define DWC_MMC_RXCRCERROR               0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G     0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G     0x078C
+#define DWC_MMC_RXOCTETCOUNT_G           0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB          0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB         0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* DMA ring descriptor. These are used as support descriptors for the HW DMA. */
+struct ring_desc {
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+	u32	des0;
+	u32	des1;
+	u32	des2;
+	u32	des3;
+} ____cacheline_aligned;
+
+struct dwceqos_mmc_counters {
+	__u64 txlpitranscntr;
+	__u64 txpiuscntr;
+	__u64 txoversize_g;
+	__u64 txvlanpackets_g;
+	__u64 txpausepackets;
+	__u64 txexcessdef;
+	__u64 txpacketcount_g;
+	__u64 txoctetcount_g;
+	__u64 txcarriererror;
+	__u64 txexcesscol;
+	__u64 txlatecol;
+	__u64 txdeferred;
+	__u64 txmulticol_g;
+	__u64 txsinglecol_g;
+	__u64 txunderflowerror;
+	__u64 txbroadcastpackets_gb;
+	__u64 txmulticastpackets_gb;
+	__u64 txunicastpackets_gb;
+	__u64 tx1024tomaxoctets_gb;
+	__u64 tx512to1023octets_gb;
+	__u64 tx256to511octets_gb;
+	__u64 tx128to255octets_gb;
+	__u64 tx65to127octets_gb;
+	__u64 tx64octets_gb;
+	__u64 txmulticastpackets_g;
+	__u64 txbroadcastpackets_g;
+	__u64 txpacketcount_gb;
+	__u64 txoctetcount_gb;
+
+	__u64 rxlpitranscntr;
+	__u64 rxlpiuscntr;
+	__u64 rxctrlpackets_g;
+	__u64 rxrcverror;
+	__u64 rxwatchdog;
+	__u64 rxvlanpackets_gb;
+	__u64 rxfifooverflow;
+	__u64 rxpausepackets;
+	__u64 rxoutofrangetype;
+	__u64 rxlengtherror;
+	__u64 rxunicastpackets_g;
+	__u64 rx1024tomaxoctets_gb;
+	__u64 rx512to1023octets_gb;
+	__u64 rx256to511octets_gb;
+	__u64 rx128to255octets_gb;
+	__u64 rx65to127octets_gb;
+	__u64 rx64octets_gb;
+	__u64 rxoversize_g;
+	__u64 rxundersize_g;
+	__u64 rxjabbererror;
+	__u64 rxrunterror;
+	__u64 rxalignmenterror;
+	__u64 rxcrcerror;
+	__u64 rxmulticastpackets_g;
+	__u64 rxbroadcastpackets_g;
+	__u64 rxoctetcount_g;
+	__u64 rxoctetcount_gb;
+	__u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+	const char stat_name[ETH_GSTRING_LEN];
+	int   offset;
+};
+
+#define STAT_ITEM(name, var) \
+	{\
+		name,\
+		offsetof(struct dwceqos_mmc_counters, var),\
+	}
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+	STAT_ITEM("tx_bytes", txoctetcount_gb),
+	STAT_ITEM("tx_packets", txpacketcount_gb),
+	STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+	STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+	STAT_ITEM("tx_multicast_packets",  txmulticastpackets_gb),
+	STAT_ITEM("tx_pause_packets", txpausepackets),
+	STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+	STAT_ITEM("tx_65_to_127_byte_packets",  tx65to127octets_gb),
+	STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+	STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+	STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+	STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+	STAT_ITEM("tx_underflow_errors", txunderflowerror),
+	STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+	STAT_ITEM("rx_bytes", rxoctetcount_gb),
+	STAT_ITEM("rx_packets", rxpacketcount_gb),
+	STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+	STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+	STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+	STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+	STAT_ITEM("rx_pause_packets", rxpausepackets),
+	STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+	STAT_ITEM("rx_65_to_127_byte_packets",  rx65to127octets_gb),
+	STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+	STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+	STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+	STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+	STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+	STAT_ITEM("rx_oversize_packets", rxoversize_g),
+	STAT_ITEM("rx_undersize_packets", rxundersize_g),
+	STAT_ITEM("rx_jabbers", rxjabbererror),
+	STAT_ITEM("rx_align_errors", rxalignmenterror),
+	STAT_ITEM("rx_crc_errors", rxcrcerror),
+	STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
+
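
[Editor's note] The offsetof()-based table keeps the ethtool callbacks table-driven. A sketch of how the counter values would be copied out in table order (example_read_stats is an illustrative name, and the counters are assumed to have been refreshed from the MMC registers beforehand):

	static void example_read_stats(const struct dwceqos_mmc_counters *c,
				       u64 *data)
	{
		const u8 *base = (const u8 *)c;
		size_t i;

		for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i)
			data[i] = *(const u64 *)(base +
						 dwceqos_ethtool_stats[i].offset);
	}
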
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+	/* Enable AXI low-power interface. */
+	bool en_lpi;
+	/* Limit on number of outstanding AXI write requests. */
+	u32 write_requests;
+	/* Limit on number of outstanding AXI read requests. */
+	u32 read_requests;
+	/* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+	u32 burst_map;
+	/* DMA programmable burst lengths for TX and RX. */
+	u32 tx_pbl;
+	u32 rx_pbl;
+};
+
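
[Editor's note] These fields map one-to-one onto the optional snps,* properties in the new binding document at the top of this patch. A hedged sketch of the parse step using the standard OF helpers (the function name is illustrative; properties left unset keep whatever defaults the caller filled in):

	static void example_parse_bus_cfg(struct device_node *np,
					  struct dwceqos_bus_cfg *cfg)
	{
		cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
		of_property_read_u32(np, "snps,write-requests",
				     &cfg->write_requests);
		of_property_read_u32(np, "snps,read-requests",
				     &cfg->read_requests);
		of_property_read_u32(np, "snps,burst-map", &cfg->burst_map);
		of_property_read_u32(np, "snps,txpbl", &cfg->tx_pbl);
		of_property_read_u32(np, "snps,rxpbl", &cfg->rx_pbl);
	}
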
+struct dwceqos_flowcontrol {
+	int autoneg;
+	int rx;
+	int rx_current;
+	int tx;
+	int tx_current;
+};
+
+struct net_local {
+	void __iomem *baseaddr;
+	struct clk *phy_ref_clk;
+	struct clk *apb_pclk;
+
+	struct device_node *phy_node;
+	struct net_device *ndev;
+	struct platform_device *pdev;
+
+	u32 msg_enable;
+
+	struct tasklet_struct tx_bdreclaim_tasklet;
+	struct workqueue_struct *txtimeout_handler_wq;
+	struct work_struct txtimeout_reinit;
+
+	phy_interface_t phy_interface;
+	struct phy_device *phy_dev;
+	struct mii_bus *mii_bus;
+
+	unsigned int link;
+	unsigned int speed;
+	unsigned int duplex;
+
+	struct napi_struct napi;
+
+	/* DMA Descriptor Areas */
+	struct ring_desc *rx_skb;
+	struct ring_desc *tx_skb;
+
+	struct dwceqos_dma_desc *tx_descs;
+	struct dwceqos_dma_desc *rx_descs;
+
+	/* DMA mapped descriptor areas. */
+	dma_addr_t tx_descs_addr;
+	dma_addr_t rx_descs_addr;
+	dma_addr_t tx_descs_tail_addr;
+	dma_addr_t rx_descs_tail_addr;
+
+	size_t tx_free;
+	size_t tx_next;
+	size_t rx_cur;
+	size_t tx_cur;
+
+	/* Spinlocks for accessing DMA Descriptors */
+	spinlock_t tx_lock;
+
+	/* Spinlock for register read-modify-writes. */
+	spinlock_t hw_lock;
+
+	u32 feature0;
+	u32 feature1;
+	u32 feature2;
+
+	struct dwceqos_bus_cfg bus_cfg;
+	bool en_tx_lpi_clockgating;
+
+	int eee_enabled;
+	int eee_active;
+	int csr_val;
+	u32 gso_size;
+
+	struct dwceqos_mmc_counters mmc_counters;
+	/* Protect the mmc_counter updates. */
+	spinlock_t stats_lock;
+	u32 mmc_rx_counters_mask;
+	u32 mmc_tx_counters_mask;
+
+	struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+				      u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+				  unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
+#define dwceqos_read(lp, reg)						\
+	readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val)					\
+	writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+	lp->link    = 0;
+	lp->speed   = 0;
+	lp->duplex  = DUPLEX_UNKNOWN;
+	lp->flowcontrol.rx_current = 0;
+	lp->flowcontrol.tx_current = 0;
+	lp->eee_active = 0;
+	lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+	struct dwceqos_dma_desc *dd;
+
+	if (tx)
+		dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
+	else
+		dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
+
+	pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+		index, dd);
+	pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+		dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+	size_t desci, i;
+
+	pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+		lp->tx_cur, lp->tx_next);
+
+	print_descriptor(lp, lp->rx_cur, 0);
+
+	/* Start 10 descriptors back; bias by DWCEQOS_TX_DCNT first to
+	 * avoid unsigned wrap-around when tx_cur < 10.
+	 */
+	for (desci = (lp->tx_cur + DWCEQOS_TX_DCNT - 10) % DWCEQOS_TX_DCNT,
+	     i = 0;
+		 i < DWCEQOS_TX_DCNT;
+		 ++i) {
+		print_descriptor(lp, desci, 1);
+		desci = (desci + 1) % DWCEQOS_TX_DCNT;
+	}
+
+	pr_info("DMA_Debug_Status0:          0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+	pr_info("DMA_CH0_Status:             0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+	pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+		dwceqos_read(lp, 0x1144));
+	pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+		dwceqos_read(lp, 0x1154));
+	pr_info("MTL_Debug_Status:      0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+	pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+	pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+	pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+		dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
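+/* Pick the MDIO CSR clock-range setting from the APB clock rate so the
+ * resulting MDC frequency stays within the 2.5 MHz limit of IEEE 802.3
+ * clause 22.
+ */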
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+	unsigned long rate = clk_get_rate(lp->apb_pclk);
+
+	if (rate <= 20000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+	else if (rate <= 35000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+	else if (rate <= 60000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+	else if (rate <= 100000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+	else if (rate <= 150000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+	else if (rate <= 250000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+	struct net_local *lp = bus->priv;
+	u32 regval;
+	int i;
+	int data;
+
+	regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+		DWCEQOS_MDIO_PHYREG(phyreg) |
+		DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+		DWCEQOS_MAC_MDIO_ADDR_GB |
+		DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
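+	/* Poll the GB (busy) bit; a transaction normally completes well
+	 * within the 5 * ~128 us polled here.
+	 */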
+	for (i = 0; i < 5; ++i) {
+		usleep_range(64, 128);
+		if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+		      DWCEQOS_MAC_MDIO_ADDR_GB))
+			break;
+	}
+
+	data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+	if (i == 5) {
+		netdev_warn(lp->ndev, "MDIO read timed out\n");
+		data = 0xffff;
+	}
+
+	return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+			      u16 value)
+{
+	struct net_local *lp = bus->priv;
+	u32 regval;
+	int i;
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+	regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+		DWCEQOS_MDIO_PHYREG(phyreg) |
+		DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+		DWCEQOS_MAC_MDIO_ADDR_GB |
+		DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+	for (i = 0; i < 5; ++i) {
+		usleep_range(64, 128);
+		if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+		      DWCEQOS_MAC_MDIO_ADDR_GB))
+			break;
+	}
+	if (i == 5)
+		netdev_warn(lp->ndev, "MDIO write timed out\n");
+	return 0;
+}
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return phy_mii_ioctl(phydev, rq, cmd);
+	default:
+		dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+		return -EOPNOTSUPP;
+	}
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Indicate link down to the LPI state machine */
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Indicate link up to the LPI state machine */
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+	lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+	/* Check for changed EEE capability */
+	if (!lp->eee_active && lp->eee_enabled) {
+		lp->eee_enabled = 0;
+
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	}
+}
+
+static void dwceqos_set_speed(struct net_local *lp)
+{
+	struct phy_device *phydev = lp->phy_dev;
+	u32 regval;
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+		    DWCEQOS_MAC_CFG_DM);
+
+	if (phydev->duplex)
+		regval |= DWCEQOS_MAC_CFG_DM;
+	if (phydev->speed == SPEED_10) {
+		regval |= DWCEQOS_MAC_CFG_PS;
+	} else if (phydev->speed == SPEED_100) {
+		regval |= DWCEQOS_MAC_CFG_PS |
+			DWCEQOS_MAC_CFG_FES;
+	} else if (phydev->speed != SPEED_1000) {
+		netdev_err(lp->ndev,
+			   "unknown PHY speed %d\n",
+			   phydev->speed);
+		return;
+	}
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+	int status_change = 0;
+
+	if (phydev->link) {
+		if ((lp->speed != phydev->speed) ||
+		    (lp->duplex != phydev->duplex)) {
+			dwceqos_set_speed(lp);
+
+			lp->speed = phydev->speed;
+			lp->duplex = phydev->duplex;
+			status_change = 1;
+		}
+
+		if (lp->flowcontrol.autoneg) {
+			lp->flowcontrol.rx = phydev->pause ||
+					     phydev->asym_pause;
+			lp->flowcontrol.tx = phydev->pause ||
+					     phydev->asym_pause;
+		}
+
+		if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+			if (netif_msg_link(lp))
+				netdev_dbg(ndev, "set rx flow to %d\n",
+					   lp->flowcontrol.rx);
+			dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+			lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+		}
+		if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+			if (netif_msg_link(lp))
+				netdev_dbg(ndev, "set tx flow to %d\n",
+					   lp->flowcontrol.tx);
+			dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+			lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+		}
+	}
+
+	if (phydev->link != lp->link) {
+		lp->link = phydev->link;
+		status_change = 1;
+	}
+
+	if (status_change) {
+		if (phydev->link) {
+			lp->ndev->trans_start = jiffies;
+			dwceqos_link_up(lp);
+		} else {
+			dwceqos_link_down(lp);
+		}
+		phy_print_status(phydev);
+	}
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = NULL;
+
+	if (lp->phy_node) {
+		phydev = of_phy_connect(lp->ndev,
+					lp->phy_node,
+					&dwceqos_adjust_link,
+					0,
+					lp->phy_interface);
+
+		if (!phydev) {
+			netdev_err(ndev, "no PHY found\n");
+			return -ENODEV;
+		}
+	} else {
+		netdev_err(ndev, "no PHY configured\n");
+		return -ENODEV;
+	}
+
+	if (netif_msg_probe(lp))
+		netdev_dbg(lp->ndev,
+			   "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n",
+			   phydev, phydev->phy_id, phydev->addr);
+
+	phydev->supported &= PHY_GBIT_FEATURES;
+
+	lp->link    = 0;
+	lp->speed   = 0;
+	lp->duplex  = DUPLEX_UNKNOWN;
+	lp->phy_dev = phydev;
+
+	if (netif_msg_probe(lp)) {
+		netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+			   lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+		netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+			   lp->phy_dev->drv->name);
+	}
+
+	return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+	struct sk_buff *new_skb;
+	dma_addr_t new_skb_baddr = 0;
+
+	new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+	if (!new_skb) {
+		netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+		goto err_out;
+	}
+
+	new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+				       new_skb->data, DWCEQOS_RX_BUF_SIZE,
+				       DMA_FROM_DEVICE);
+	if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+		netdev_err(lp->ndev, "DMA map error\n");
+		dev_kfree_skb(new_skb);
+		new_skb = NULL;
+		goto err_out;
+	}
+
+	lp->rx_descs[index].des0 = new_skb_baddr;
+	lp->rx_descs[index].des1 = 0;
+	lp->rx_descs[index].des2 = 0;
+	lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+				   DWCEQOS_DMA_RDES3_BUF1V |
+				   DWCEQOS_DMA_RDES3_OWN;
+
+	lp->rx_skb[index].mapping = new_skb_baddr;
+	lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+	lp->rx_skb[index].skb = new_skb;
+}
+
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+	int i;
+
+	if (lp->rx_skb) {
+		for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+			if (lp->rx_skb[i].skb) {
+				dma_unmap_single(lp->ndev->dev.parent,
+						 lp->rx_skb[i].mapping,
+						 lp->rx_skb[i].len,
+						 DMA_FROM_DEVICE);
+
+				dev_kfree_skb(lp->rx_skb[i].skb);
+				lp->rx_skb[i].skb = NULL;
+				lp->rx_skb[i].mapping = 0;
+			}
+		}
+	}
+
+	if (lp->tx_skb) {
+		for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+			if (lp->tx_skb[i].skb) {
+				dev_kfree_skb(lp->tx_skb[i].skb);
+				lp->tx_skb[i].skb = NULL;
+			}
+			if (lp->tx_skb[i].mapping) {
+				dma_unmap_single(lp->ndev->dev.parent,
+						 lp->tx_skb[i].mapping,
+						 lp->tx_skb[i].len,
+						 DMA_TO_DEVICE);
+				lp->tx_skb[i].mapping = 0;
+			}
+		}
+	}
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+	int size;
+
+	dwceqos_clean_rings(lp);
+
+	kfree(lp->tx_skb);
+	lp->tx_skb = NULL;
+	kfree(lp->rx_skb);
+	lp->rx_skb = NULL;
+
+	size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+	if (lp->rx_descs) {
+		dma_free_coherent(lp->ndev->dev.parent, size,
+				  (void *)(lp->rx_descs), lp->rx_descs_addr);
+		lp->rx_descs = NULL;
+	}
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+	if (lp->tx_descs) {
+		dma_free_coherent(lp->ndev->dev.parent, size,
+				  (void *)(lp->tx_descs), lp->tx_descs_addr);
+		lp->tx_descs = NULL;
+	}
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+	int size;
+	u32 i;
+
+	lp->gso_size = 0;
+
+	lp->tx_skb = NULL;
+	lp->rx_skb = NULL;
+	lp->rx_descs = NULL;
+	lp->tx_descs = NULL;
+
+	/* Reset the DMA indexes */
+	lp->rx_cur = 0;
+	lp->tx_cur = 0;
+	lp->tx_next = 0;
+	lp->tx_free = DWCEQOS_TX_DCNT;
+
+	/* Allocate Ring descriptors */
+	size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+	lp->rx_skb = kzalloc(size, GFP_KERNEL);
+	if (!lp->rx_skb)
+		goto err_out;
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+	lp->tx_skb = kzalloc(size, GFP_KERNEL);
+	if (!lp->tx_skb)
+		goto err_out;
+
+	/* Allocate DMA descriptors */
+	size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+	lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+			&lp->rx_descs_addr, GFP_KERNEL);
+	if (!lp->rx_descs)
+		goto err_out;
+	lp->rx_descs_tail_addr = lp->rx_descs_addr +
+		sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+	lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+			&lp->tx_descs_addr, GFP_KERNEL);
+	if (!lp->tx_descs)
+		goto err_out;
+	lp->tx_descs_tail_addr = lp->tx_descs_addr +
+		sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
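+	/* Each tail pointer is one descriptor past the end of its ring;
+	 * writing it tells the DMA how far it may advance.
+	 */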
+
+	/* Initialize RX Ring Descriptors and buffers */
+	for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+		dwceqos_alloc_rxring_desc(lp, i);
+		if (!lp->rx_skb[i].skb)
+			goto err_out;
+	}
+
+	/* Initialize TX Descriptors */
+	for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+		lp->tx_descs[i].des0 = 0;
+		lp->tx_descs[i].des1 = 0;
+		lp->tx_descs[i].des2 = 0;
+		lp->tx_descs[i].des3 = 0;
+	}
+
+	/* Make descriptor writes visible to the DMA. */
+	wmb();
+
+	return 0;
+
+err_out:
+	dwceqos_descriptor_free(lp);
+	return -ENOMEM;
+}
+
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+	return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+	lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+	lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+	lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval |= DWCEQOS_DMA_CH0_IE_TIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval |= DWCEQOS_DMA_CH0_IE_RIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+	dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+	dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+	int ret = -ENXIO, i;
+	struct resource res;
+	struct device_node *mdionode;
+
+	mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+	if (!mdionode)
+		return 0;
+
+	lp->mii_bus = mdiobus_alloc();
+	if (!lp->mii_bus) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
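+	/* The high register carries the top two address bytes plus the
+	 * address-enable bit; the low register carries the bottom four.
+	 */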
+	lp->mii_bus->name  = "DWCEQOS MII bus";
+	lp->mii_bus->read  = &dwceqos_mdio_read;
+	lp->mii_bus->write = &dwceqos_mdio_write;
+	lp->mii_bus->priv = lp;
+	lp->mii_bus->parent = &lp->ndev->dev;
+
+	lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
+					 GFP_KERNEL);
+	if (!lp->mii_bus->irq) {
+		ret = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		lp->mii_bus->irq[i] = PHY_POLL;
+	of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+		 (unsigned long long)res.start);
+	if (of_mdiobus_register(lp->mii_bus, mdionode))
+		goto err_out_free_mdio_irq;
+
+	return 0;
+
+err_out_free_mdio_irq:
+	kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(lp->mii_bus);
+err_out:
+	of_node_put(mdionode);
+	return ret;
+}
+
+/* DMA reset. When issued also resets all MTL and MAC registers as well */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+	/* Wait (at most) 0.5 seconds for the DMA reset to complete. */
+	int i = 5000;
+	u32 reg;
+
+	/* Force gigabit to guarantee a TX clock for GMII. */
+	reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+	reg |= DWCEQOS_MAC_CFG_DM;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+	do {
+		udelay(100);
+		i--;
+		reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+	} while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+	/* We might experience a timeout if the chip clock mux is broken */
+	if (!i)
+		netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+	if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+		netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+			   dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+				"read" : "write",
+			   dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+				"descr" : "data",
+			   dma_status);
+
+		print_status(lp);
+	}
+	if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+		netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+			   dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+				"read" : "write",
+			   dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+				"descr" : "data",
+			   dma_status);
+
+		print_status(lp);
+	}
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+
+	/* A latched MMC interrupt cannot be masked; we must read all
+	 * counters that have an interrupt pending.
+	 */
+	dwceqos_read_mmc_counters(lp,
+				  dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+				  dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+	u32 cause;
+
+	cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+	if (cause & DWCEQOS_MAC_IS_MMC_INT)
+		dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	struct net_local *lp = netdev_priv(ndev);
+
+	u32 cause;
+	u32 dma_status;
+	irqreturn_t ret = IRQ_NONE;
+
+	cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+	/* DMA Channel 0 Interrupt */
+	if (cause & DWCEQOS_DMA_IS_DC0IS) {
+		dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+		/* Transmit Interrupt */
+		if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+			tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+			dwceqos_dma_disable_txirq(lp);
+		}
+
+		/* Receive Interrupt */
+		if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+			/* Disable RX IRQs */
+			dwceqos_dma_disable_rxirq(lp);
+			napi_schedule(&lp->napi);
+		}
+
+		/* Fatal Bus Error interrupt */
+		if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+			dwceqos_fatal_bus_error(lp, dma_status);
+
+			/* errata 9000831707: always fold the TX/RX
+			 * bus-error bits into the acknowledgment write
+			 * below.
+			 */
+			dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+				      DWCEQOS_DMA_CH0_IS_REB;
+		}
+
+		/* Ack all DMA Channel 0 IRQs */
+		dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+		ret = IRQ_HANDLED;
+	}
+
+	if (cause & DWCEQOS_DMA_IS_MTLIS) {
+		u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+		dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+		ret = IRQ_HANDLED;
+	}
+
+	if (cause & DWCEQOS_DMA_IS_MACIS) {
+		dwceqos_mac_interrupt(lp);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+	if (enable)
+		regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+	else
+		regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	/* MTL flow control */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+	if (enable)
+		regval |= DWCEQOS_MTL_RXQ_EHFC;
+	else
+		regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+	dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+	/* MAC flow control */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+	if (enable)
+		regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+	else
+		regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+	int RQS, RFD, RFA;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+	/* The queue size is in units of 256 bytes. We want 512 bytes units for
+	 * the threshold fields.
+	 */
+	RQS = ((regval >> 20) & 0x3FF) + 1;
+	RQS /= 2;
+
+	/* The thresholds are relative to a full queue, with a bias
+	 * of 1 KiByte below full.
+	 */
+	RFD = RQS / 2 - 2;
+	RFA = RQS / 8 - 2;
+
+	regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+	if (RFD >= 0 && RFA >= 0) {
+		dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+	} else {
+		netdev_warn(lp->ndev,
+			    "FIFO too small for flow control.\n");
+	}
+
+	regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+		 DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+	unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
+	BUG_ON(!rate_mhz);
+
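+	/* Program the number of clock cycles per microsecond; the MAC uses
+	 * this as its 1 us time base (e.g. for the LPI timers).
+	 */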
+	dwceqos_write(lp,
+		      REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+		      DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+	u32 sysbus_reg;
+
+	/* N.B. We do not support the Fixed Burst mode because it
+	 * opens a race window by making HW access to DMA descriptors
+	 * non-atomic.
+	 */
+
+	sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+	if (lp->bus_cfg.en_lpi)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+	if (lp->bus_cfg.burst_map)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+			lp->bus_cfg.burst_map);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+			DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
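+	/* The OSR limit fields encode the maximum number of outstanding
+	 * requests minus one.
+	 */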
+	if (lp->bus_cfg.read_requests)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+			lp->bus_cfg.read_requests - 1);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+			DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+	if (lp->bus_cfg.write_requests)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+			lp->bus_cfg.write_requests - 1);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+			DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+	if (netif_msg_hw(lp))
+		netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
+static void dwceqos_init_hw(struct net_local *lp)
+{
+	u32 regval;
+	u32 buswidth;
+	u32 dma_skip;
+
+	/* Software reset */
+	dwceqos_reset_hw(lp);
+
+	dwceqos_configure_bus(lp);
+
+	/* Probe data bus width, 32/64/128 bits: the tail-pointer register
+	 * ignores address bits below the bus width, so the bits that read
+	 * back as zero reveal the width in bytes.
+	 */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+	buswidth = (regval ^ 0xF) + 1;
+
+	/* Cache-align DMA descriptors: the descriptor struct is padded
+	 * beyond the 16 bytes the hardware uses, and DSL tells the DMA
+	 * how many bus-words to skip between consecutive descriptors.
+	 */
+	dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+		      DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+		      DWCEQOS_DMA_CH_CTRL_PBLX8);
+
+	/* Initialize DMA Channel 0 */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+		      (u32)lp->tx_descs_addr);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+		      (u32)lp->rx_descs_addr);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+		      lp->tx_descs_tail_addr);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+		      lp->rx_descs_tail_addr);
+
+	if (lp->bus_cfg.tx_pbl)
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+	else
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+	/* Enable TSO if the HW support it */
+	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+		regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+	if (lp->bus_cfg.rx_pbl)
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+	else
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+	regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+	regval |= DWCEQOS_DMA_CH_CTRL_START;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+	/* Initialize MTL Queues */
+	regval = DWCEQOS_MTL_SCHALG_STRICT;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+	regval = DWCEQOS_MTL_TXQ_SIZE(
+			DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+		DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+		DWCEQOS_MTL_TXQ_TTC512;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+	regval = DWCEQOS_MTL_RXQ_SIZE(
+			DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+		DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+	dwceqos_configure_flow_control(lp);
+
+	/* Initialize MAC */
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+	lp->eee_enabled = 0;
+
+	dwceqos_configure_clock(lp);
+
+	/* MMC counters */
+
+	/* probe implemented counters */
+	dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+	dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+	lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+	lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+	dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+		DWCEQOS_MMC_CTRL_RSTONRD);
+	dwceqos_enable_mmc_interrupt(lp);
+
+	/* Enable Interrupts */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+		      DWCEQOS_DMA_CH0_IE_NIE |
+		      DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+		      DWCEQOS_DMA_CH0_IE_AIE |
+		      DWCEQOS_DMA_CH0_IE_FBEE);
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+		DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+	/* Start TX DMA */
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+		      regval | DWCEQOS_DMA_CH_CTRL_START);
+
+	/* Enable MAC TX/RX */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+		      regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+	struct net_device *ndev = (struct net_device *)data;
+	struct net_local *lp = netdev_priv(ndev);
+	unsigned int tx_bytes = 0;
+	unsigned int tx_packets = 0;
+
+	spin_lock(&lp->tx_lock);
+
+	while (lp->tx_free < DWCEQOS_TX_DCNT) {
+		struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+		struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+		/* Descriptor still being held by DMA ? */
+		if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+			break;
+
+		if (rd->mapping)
+			dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+					 DMA_TO_DEVICE);
+
+		if (rd->skb) {
+			++tx_packets;
+			tx_bytes += rd->skb->len;
+			dev_consume_skb_any(rd->skb);
+		}
+
+		rd->skb = NULL;
+		rd->mapping = 0;
+		lp->tx_free++;
+		lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
+		if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+		    (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+			if (netif_msg_tx_err(lp))
+				netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+					   dd->des3);
+			if (netif_msg_hw(lp))
+				print_status(lp);
+		}
+	}
+	spin_unlock(&lp->tx_lock);
+
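+	/* Report completions to BQL so the stack can size the queue. */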
+	netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+	dwceqos_dma_enable_txirq(lp);
+	netif_wake_queue(ndev);
+}
+
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+	struct sk_buff *skb;
+	u32 tot_size = 0;
+	unsigned int n_packets = 0;
+	unsigned int n_descs = 0;
+	u32 len;
+
+	struct dwceqos_dma_desc *dd;
+	struct sk_buff *new_skb;
+	dma_addr_t new_skb_baddr = 0;
+
+	while (n_descs < budget) {
+		if (!dwceqos_packet_avail(lp))
+			break;
+
+		new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+		if (!new_skb) {
+			netdev_err(lp->ndev, "no memory for new sk_buff\n");
+			break;
+		}
+
+		/* Get dma handle of skb->data */
+		new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+					new_skb->data,
+					DWCEQOS_RX_BUF_SIZE,
+					DMA_FROM_DEVICE);
+		if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+			netdev_err(lp->ndev, "DMA map error\n");
+			dev_kfree_skb(new_skb);
+			break;
+		}
+
+		/* Read descriptor data after reading owner bit. */
+		dma_rmb();
+
+		dd = &lp->rx_descs[lp->rx_cur];
+		len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+		skb = lp->rx_skb[lp->rx_cur].skb;
+
+		/* Unmap old buffer */
+		dma_unmap_single(lp->ndev->dev.parent,
+				 lp->rx_skb[lp->rx_cur].mapping,
+				 lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+		/* Discard packet on reception error or bad checksum */
+		if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+		    (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+			dev_kfree_skb(skb);
+			skb = NULL;
+		} else {
+			skb_put(skb, len);
+			skb->protocol = eth_type_trans(skb, lp->ndev);
+			switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+			case DWCEQOS_DMA_RDES1_PT_UDP:
+			case DWCEQOS_DMA_RDES1_PT_TCP:
+			case DWCEQOS_DMA_RDES1_PT_ICMP:
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				break;
+			default:
+				skb->ip_summed = CHECKSUM_NONE;
+				break;
+			}
+		}
+
+		if (unlikely(!skb)) {
+			if (netif_msg_rx_err(lp))
+				netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+					   lp->rx_descs[lp->rx_cur].des3);
+		} else {
+			tot_size += skb->len;
+			n_packets++;
+
+			netif_receive_skb(skb);
+		}
+
+		lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+		lp->rx_descs[lp->rx_cur].des1 = 0;
+		lp->rx_descs[lp->rx_cur].des2 = 0;
+		/* The DMA must observe des0/1/2 written before des3. */
+		wmb();
+		lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+						DWCEQOS_DMA_RDES3_OWN  |
+						DWCEQOS_DMA_RDES3_BUF1V;
+
+		lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+		lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+		lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+		n_descs++;
+		lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+	}
+
+	/* Make sure any ownership update is written to the descriptors before
+	 * DMA wakeup.
+	 */
+	wmb();
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+	/* Wake up RX by writing tail pointer */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+		      lp->rx_descs_tail_addr);
+
+	return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct net_local *lp = container_of(napi, struct net_local, napi);
+	int work_done = 0;
+
+	work_done = dwceqos_rx(lp, budget - work_done);
+
+	if (!dwceqos_packet_avail(lp) && work_done < budget) {
+		napi_complete(napi);
+		dwceqos_dma_enable_rxirq(lp);
+	} else {
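+		/* Either the budget was exhausted or more packets are
+		 * pending; claim the full budget so NAPI polls again.
+		 */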
+		work_done = budget;
+	}
+
+	return work_done;
+}
+
+/* Reinitialize function if a TX timed out */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+	struct net_local *lp = container_of(data, struct net_local,
+		txtimeout_reinit);
+
+	netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+		   DWCEQOS_TX_TIMEOUT);
+
+	if (netif_msg_hw(lp))
+		print_status(lp);
+
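+	/* ndo_stop/ndo_open must be called under the RTNL lock. */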
+	rtnl_lock();
+	dwceqos_stop(lp->ndev);
+	dwceqos_open(lp->ndev);
+	rtnl_unlock();
+}
+
+/* DT Probing function called by main probe */
+static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+	struct net_device *ndev;
+	struct net_local *lp;
+	const void *mac_address;
+	struct dwceqos_bus_cfg *bus_cfg;
+	struct device_node *np = pdev->dev.of_node;
+
+	ndev = platform_get_drvdata(pdev);
+	lp = netdev_priv(ndev);
+	bus_cfg = &lp->bus_cfg;
+
+	/* Set the MAC address. */
+	mac_address = of_get_mac_address(pdev->dev.of_node);
+	if (mac_address)
+		ether_addr_copy(ndev->dev_addr, mac_address);
+
+	/* These are all optional parameters */
+	lp->en_tx_lpi_clockgating = of_property_read_bool(np,
+		"snps,en-tx-lpi-clockgating");
+	bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+	of_property_read_u32(np, "snps,write-requests",
+			     &bus_cfg->write_requests);
+	of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+	of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+	of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+	of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+	netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
+		   bus_cfg->en_lpi,
+		   bus_cfg->write_requests,
+		   bus_cfg->read_requests,
+		   bus_cfg->burst_map,
+		   bus_cfg->rx_pbl,
+		   bus_cfg->tx_pbl);
+
+	return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	int res;
+
+	dwceqos_reset_state(lp);
+	res = dwceqos_descriptor_init(lp);
+	if (res) {
+		netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+		return res;
+	}
+	netdev_reset_queue(ndev);
+
+	napi_enable(&lp->napi);
+	phy_start(lp->phy_dev);
+	dwceqos_init_hw(lp);
+
+	netif_start_queue(ndev);
+	tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+	return 0;
+}
+
+static bool dweqos_is_tx_dma_suspended(struct net_local *lp)
+{
+	u32 reg;
+
+	reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+	reg = DMA_GET_TX_STATE_CH0(reg);
+
+	return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+	/* Wait for all pending TX buffers to be sent. Upper limit based
+	 * on max frame size on a 10 Mbit link: a maximum-size frame takes
+	 * about 1.2 ms to serialize, i.e. roughly 12.5 of the 100 us poll
+	 * intervals below per descriptor.
+	 */
+	size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
+
+	while (!dweqos_is_tx_dma_suspended(lp) && limit--)
+		usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	phy_stop(lp->phy_dev);
+
+	tasklet_disable(&lp->tx_bdreclaim_tasklet);
+	netif_stop_queue(ndev);
+	napi_disable(&lp->napi);
+
+	dwceqos_drain_dma(lp);
+
+	netif_tx_lock(lp->ndev);
+	dwceqos_reset_hw(lp);
+	dwceqos_descriptor_free(lp);
+	netif_tx_unlock(lp->ndev);
+
+	return 0;
+}
+
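+/* Queue a TX context descriptor so the DMA picks up a new MSS value;
+ * only needed when gso_size differs from the one currently programmed.
+ */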
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+				    unsigned short gso_size)
+{
+	struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+	dd->des0 = 0;
+	dd->des1 = 0;
+	dd->des2 = gso_size;
+	dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+		      lp->tx_descs_tail_addr);
+}
+
+struct dwceqos_tx {
+	size_t nr_descriptors;
+	size_t initial_descriptor;
+	size_t last_descriptor;
+	size_t prev_gso_size;
+	size_t network_header_len;
+};
+
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+			       struct dwceqos_tx *tx)
+{
+	size_t n = 1;
+	size_t i;
+
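+	/* Worst-case descriptor count: one for the linear part, one extra
+	 * context descriptor if the MSS changed, and one per
+	 * BYTES_PER_DMA_DESC chunk of every fragment.
+	 */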
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+		++n;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+		     BYTES_PER_DMA_DESC;
+	}
+
+	tx->nr_descriptors = n;
+	tx->initial_descriptor = lp->tx_next;
+	tx->last_descriptor = lp->tx_next;
+	tx->prev_gso_size = lp->gso_size;
+
+	tx->network_header_len = skb_transport_offset(skb);
+	if (skb_is_gso(skb))
+		tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+			     struct dwceqos_tx *tx)
+{
+	struct ring_desc *rd;
+	struct dwceqos_dma_desc *dd;
+	size_t payload_len;
+	dma_addr_t dma_handle;
+
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+		dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+		lp->gso_size = skb_shinfo(skb)->gso_size;
+	}
+
+	dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+				    skb_headlen(skb), DMA_TO_DEVICE);
+
+	if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+		netdev_err(lp->ndev, "TX DMA Mapping error\n");
+		return -ENOMEM;
+	}
+
+	rd = &lp->tx_skb[lp->tx_next];
+	dd = &lp->tx_descs[lp->tx_next];
+
+	rd->skb = NULL;
+	rd->len = skb_headlen(skb);
+	rd->mapping = dma_handle;
+
+	/* Set up DMA Descriptor */
+	dd->des0 = dma_handle;
+
+	if (skb_is_gso(skb)) {
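+		/* For TSO, buffer 1 (des0) covers the headers, buffer 2
+		 * (des1) points at the payload, and des3 carries the total
+		 * TCP payload length.
+		 */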
+		payload_len = skb_headlen(skb) - tx->network_header_len;
+
+		if (payload_len)
+			dd->des1 = dma_handle + tx->network_header_len;
+		dd->des2 = tx->network_header_len |
+			DWCEQOS_DMA_DES2_B2L(payload_len);
+		dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+			DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+			(skb->len - tx->network_header_len);
+	} else {
+		dd->des1 = 0;
+		dd->des2 = skb_headlen(skb);
+		dd->des3 = skb->len;
+
+		switch (skb->ip_summed) {
+		case CHECKSUM_PARTIAL:
+			dd->des3 |= DWCEQOS_DMA_TDES3_CA;
+			/* fall through */
+		case CHECKSUM_NONE:
+		case CHECKSUM_UNNECESSARY:
+		case CHECKSUM_COMPLETE:
+		default:
+			break;
+		}
+	}
+
+	dd->des3 |= DWCEQOS_DMA_TDES3_FD;
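+	/* Defer setting OWN on the first descriptor until tx_finalize so
+	 * the DMA cannot start on a partially built chain.
+	 */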
+	if (lp->tx_next != tx->initial_descriptor)
+		dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+	tx->last_descriptor = lp->tx_next;
+	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+	return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+			    struct dwceqos_tx *tx)
+{
+	struct ring_desc *rd = NULL;
+	struct dwceqos_dma_desc *dd;
+	dma_addr_t dma_handle;
+	size_t i;
+
+	/* Setup more ring and DMA descriptor if the packet is fragmented */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		size_t frag_size;
+		size_t consumed_size;
+
+		/* Map DMA Area */
+		dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+					      skb_frag_size(frag),
+					      DMA_TO_DEVICE);
+		if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+			netdev_err(lp->ndev, "DMA Mapping error\n");
+			return -ENOMEM;
+		}
+
+		/* order-3 fragments span more than one descriptor. */
+		frag_size = skb_frag_size(frag);
+		consumed_size = 0;
+		while (consumed_size < frag_size) {
+			size_t dma_size = min_t(size_t, 16376,
+						frag_size - consumed_size);
+
+			rd = &lp->tx_skb[lp->tx_next];
+			memset(rd, 0, sizeof(*rd));
+
+			dd = &lp->tx_descs[lp->tx_next];
+
+			/* Set DMA Descriptor fields */
+			dd->des0 = dma_handle;
+			dd->des1 = 0;
+			dd->des2 = dma_size;
+
+			if (skb_is_gso(skb))
+				dd->des3 = (skb->len - tx->network_header_len);
+			else
+				dd->des3 = skb->len;
+
+			dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+			tx->last_descriptor = lp->tx_next;
+			lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+			consumed_size += dma_size;
+		}
+
+		rd->len = skb_frag_size(frag);
+		rd->mapping = dma_handle;
+	}
+
+	return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+				struct dwceqos_tx *tx)
+{
+	lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+	lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+	lp->tx_skb[tx->last_descriptor].skb = skb;
+
+	/* Make all descriptor updates visible to the DMA before setting the
+	 * owner bit.
+	 */
+	wmb();
+
+	lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+	/* Make the owner bit visible before TX wakeup. */
+	wmb();
+
+	dwceqos_tx_poll_demand(lp);
+}
+
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+	size_t i = tx->initial_descriptor;
+
+	while (i != lp->tx_next) {
+		if (lp->tx_skb[i].mapping)
+			dma_unmap_single(lp->ndev->dev.parent,
+					 lp->tx_skb[i].mapping,
+					 lp->tx_skb[i].len,
+					 DMA_TO_DEVICE);
+
+		lp->tx_skb[i].mapping = 0;
+		lp->tx_skb[i].skb = NULL;
+
+		memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+		i = (i + 1) % DWCEQOS_TX_DCNT;
+	}
+
+	lp->tx_next = tx->initial_descriptor;
+	lp->gso_size = tx->prev_gso_size;
+}
+
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct dwceqos_tx trans;
+	int err;
+
+	dwceqos_tx_prepare(skb, lp, &trans);
+	if (lp->tx_free < trans.nr_descriptors) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	err = dwceqos_tx_linear(skb, lp, &trans);
+	if (err)
+		goto tx_error;
+
+	err = dwceqos_tx_frags(skb, lp, &trans);
+	if (err)
+		goto tx_error;
+
+	WARN_ON(lp->tx_next !=
+		((trans.initial_descriptor + trans.nr_descriptors) %
+		 DWCEQOS_TX_DCNT));
+
+	dwceqos_tx_finalize(skb, lp, &trans);
+
+	netdev_sent_queue(ndev, skb->len);
+
+	spin_lock_bh(&lp->tx_lock);
+	lp->tx_free -= trans.nr_descriptors;
+	spin_unlock_bh(&lp->tx_lock);
+
+	ndev->trans_start = jiffies;
+	return NETDEV_TX_OK;
+
+tx_error:
+	dwceqos_tx_rollback(lp, &trans);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(hwaddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+	return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+				  unsigned int reg_n)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+		      data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+	/* Do not disable MAC address 0 */
+	if (reg_n != 0)
+		dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 regval = 0;
+	u32 mc_filter[2];
+	int reg = 1;
+	struct netdev_hw_addr *ha;
+	unsigned int max_mac_addr;
+
+	max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+	if (ndev->flags & IFF_PROMISC) {
+		regval = DWCEQOS_MAC_PKT_FILT_PR;
+	} else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
+				(ndev->flags & IFF_ALLMULTI))) {
+		regval = DWCEQOS_MAC_PKT_FILT_PM;
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
+	} else if (!netdev_mc_empty(ndev)) {
+		regval = DWCEQOS_MAC_PKT_FILT_HMC;
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, ndev) {
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table.
+			 */
+			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+			/* The most significant bit determines the register
+			 * to use (H/L) while the other 5 bits determine
+			 * the bit within the register.
+			 */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
+	}
+	/* Entry 0 holds the device address, so only max_mac_addr - 1
+	 * perfect-filter slots are available for unicast addresses.
+	 */
+	if (netdev_uc_count(ndev) >= max_mac_addr) {
+		regval |= DWCEQOS_MAC_PKT_FILT_PR;
+	} else {
+		netdev_for_each_uc_addr(ha, ndev) {
+			dwceqos_set_umac_addr(lp, ha->addr, reg);
+			reg++;
+		}
+		for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
+			dwceqos_disable_umac_addr(lp, reg);
+	}
+	dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+	disable_irq(ndev->irq);
+	dwceqos_interrupt(ndev->irq, ndev);
+	enable_irq(ndev->irq);
+}
+#endif
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+				      u32 tx_mask)
+{
+	if (tx_mask & BIT(27))
+		lp->mmc_counters.txlpitranscntr +=
+			dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+	if (tx_mask & BIT(26))
+		lp->mmc_counters.txpiuscntr +=
+			dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+	if (tx_mask & BIT(25))
+		lp->mmc_counters.txoversize_g +=
+			dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+	if (tx_mask & BIT(24))
+		lp->mmc_counters.txvlanpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+	if (tx_mask & BIT(23))
+		lp->mmc_counters.txpausepackets +=
+			dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+	if (tx_mask & BIT(22))
+		lp->mmc_counters.txexcessdef +=
+			dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+	if (tx_mask & BIT(21))
+		lp->mmc_counters.txpacketcount_g +=
+			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+	if (tx_mask & BIT(20))
+		lp->mmc_counters.txoctetcount_g +=
+			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+	if (tx_mask & BIT(19))
+		lp->mmc_counters.txcarriererror +=
+			dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+	if (tx_mask & BIT(18))
+		lp->mmc_counters.txexcesscol +=
+			dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+	if (tx_mask & BIT(17))
+		lp->mmc_counters.txlatecol +=
+			dwceqos_read(lp, DWC_MMC_TXLATECOL);
+	if (tx_mask & BIT(16))
+		lp->mmc_counters.txdeferred +=
+			dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+	if (tx_mask & BIT(15))
+		lp->mmc_counters.txmulticol_g +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+	if (tx_mask & BIT(14))
+		lp->mmc_counters.txsinglecol_g +=
+			dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+	if (tx_mask & BIT(13))
+		lp->mmc_counters.txunderflowerror +=
+			dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+	if (tx_mask & BIT(12))
+		lp->mmc_counters.txbroadcastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+	if (tx_mask & BIT(11))
+		lp->mmc_counters.txmulticastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+	if (tx_mask & BIT(10))
+		lp->mmc_counters.txunicastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+	if (tx_mask & BIT(9))
+		lp->mmc_counters.tx1024tomaxoctets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+	if (tx_mask & BIT(8))
+		lp->mmc_counters.tx512to1023octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+	if (tx_mask & BIT(7))
+		lp->mmc_counters.tx256to511octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+	if (tx_mask & BIT(6))
+		lp->mmc_counters.tx128to255octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+	if (tx_mask & BIT(5))
+		lp->mmc_counters.tx65to127octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+	if (tx_mask & BIT(4))
+		lp->mmc_counters.tx64octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+	if (tx_mask & BIT(3))
+		lp->mmc_counters.txmulticastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+	if (tx_mask & BIT(2))
+		lp->mmc_counters.txbroadcastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+	if (tx_mask & BIT(1))
+		lp->mmc_counters.txpacketcount_gb +=
+			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+	if (tx_mask & BIT(0))
+		lp->mmc_counters.txoctetcount_gb +=
+			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+	if (rx_mask & BIT(27))
+		lp->mmc_counters.rxlpitranscntr +=
+			dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+	if (rx_mask & BIT(26))
+		lp->mmc_counters.rxlpiuscntr +=
+			dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+	if (rx_mask & BIT(25))
+		lp->mmc_counters.rxctrlpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+	if (rx_mask & BIT(24))
+		lp->mmc_counters.rxrcverror +=
+			dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+	if (rx_mask & BIT(23))
+		lp->mmc_counters.rxwatchdog +=
+			dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+	if (rx_mask & BIT(22))
+		lp->mmc_counters.rxvlanpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+	if (rx_mask & BIT(21))
+		lp->mmc_counters.rxfifooverflow +=
+			dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+	if (rx_mask & BIT(20))
+		lp->mmc_counters.rxpausepackets +=
+			dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+	if (rx_mask & BIT(19))
+		lp->mmc_counters.rxoutofrangetype +=
+			dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+	if (rx_mask & BIT(18))
+		lp->mmc_counters.rxlengtherror +=
+			dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+	if (rx_mask & BIT(17))
+		lp->mmc_counters.rxunicastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+	if (rx_mask & BIT(16))
+		lp->mmc_counters.rx1024tomaxoctets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+	if (rx_mask & BIT(15))
+		lp->mmc_counters.rx512to1023octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+	if (rx_mask & BIT(14))
+		lp->mmc_counters.rx256to511octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+	if (rx_mask & BIT(13))
+		lp->mmc_counters.rx128to255octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+	if (rx_mask & BIT(12))
+		lp->mmc_counters.rx65to127octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+	if (rx_mask & BIT(11))
+		lp->mmc_counters.rx64octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+	if (rx_mask & BIT(10))
+		lp->mmc_counters.rxoversize_g +=
+			dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+	if (rx_mask & BIT(9))
+		lp->mmc_counters.rxundersize_g +=
+			dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+	if (rx_mask & BIT(8))
+		lp->mmc_counters.rxjabbererror +=
+			dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+	if (rx_mask & BIT(7))
+		lp->mmc_counters.rxrunterror +=
+			dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+	if (rx_mask & BIT(6))
+		lp->mmc_counters.rxalignmenterror +=
+			dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+	if (rx_mask & BIT(5))
+		lp->mmc_counters.rxcrcerror +=
+			dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+	if (rx_mask & BIT(4))
+		lp->mmc_counters.rxmulticastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+	if (rx_mask & BIT(3))
+		lp->mmc_counters.rxbroadcastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+	if (rx_mask & BIT(2))
+		lp->mmc_counters.rxoctetcount_g +=
+			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+	if (rx_mask & BIT(1))
+		lp->mmc_counters.rxoctetcount_gb +=
+			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+	if (rx_mask & BIT(0))
+		lp->mmc_counters.rxpacketcount_gb +=
+			dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
+
+static struct rtnl_link_stats64 *
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+	unsigned long flags;
+	struct net_local *lp = netdev_priv(ndev);
+	struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+				  lp->mmc_tx_counters_mask);
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+	s->rx_packets = hwstats->rxpacketcount_gb;
+	s->rx_bytes = hwstats->rxoctetcount_gb;
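+	/* Good packets are counted per type; anything else in the
+	 * good + bad total is accounted as an RX error.
+	 */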
+	s->rx_errors = hwstats->rxpacketcount_gb -
+		hwstats->rxbroadcastpackets_g -
+		hwstats->rxmulticastpackets_g -
+		hwstats->rxunicastpackets_g;
+	s->multicast = hwstats->rxmulticastpackets_g;
+	s->rx_length_errors = hwstats->rxlengtherror;
+	s->rx_crc_errors = hwstats->rxcrcerror;
+	s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+	s->tx_packets = hwstats->txpacketcount_gb;
+	s->tx_bytes = hwstats->txoctetcount_gb;
+
+	if (lp->mmc_tx_counters_mask & BIT(21))
+		s->tx_errors = hwstats->txpacketcount_gb -
+			hwstats->txpacketcount_g;
+	else
+		s->tx_errors = hwstats->txunderflowerror +
+			hwstats->txcarriererror;
+
+	return s;
+}
+
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	strlcpy(ed->driver, lp->pdev->dev.driver->name, sizeof(ed->driver));
+	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+				   struct ethtool_pauseparam *pp)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	pp->autoneg = lp->flowcontrol.autoneg;
+	pp->tx_pause = lp->flowcontrol.tx;
+	pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+				  struct ethtool_pauseparam *pp)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	int ret = 0;
+
+	lp->flowcontrol.autoneg = pp->autoneg;
+	if (pp->autoneg) {
+		lp->phy_dev->advertising |= ADVERTISED_Pause;
+		lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+	} else {
+		lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+		lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+		lp->flowcontrol.rx = pp->rx_pause;
+		lp->flowcontrol.tx = pp->tx_pause;
+	}
+
+	if (netif_running(ndev))
+		ret = phy_start_aneg(lp->phy_dev);
+
+	return ret;
+}
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+				u8 *data)
+{
+	size_t i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+		memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+		       ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	unsigned long flags;
+	size_t i;
+	u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+				  lp->mmc_tx_counters_mask);
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+		memcpy(data,
+		       mmcstat + dwceqos_ethtool_stats[i].offset,
+		       sizeof(u64));
+		data++;
+	}
+}
+
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+	return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			     void *space)
+{
+	const struct net_local *lp = netdev_priv(dev);
+	u32 *reg_space = (u32 *)space;
+	int reg_offset;
+	int reg_ix = 0;
+
+	/* MAC registers */
+	for (reg_offset = START_MAC_REG_OFFSET;
+		reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+	/* MTL registers */
+	for (reg_offset = START_MTL_REG_OFFSET;
+		reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+
+	/* DMA registers */
+	for (reg_offset = START_DMA_REG_OFFSET;
+		reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+
+	BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+	return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 lpi_status;
+	u32 lpi_enabled;
+
+	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+		return -EOPNOTSUPP;
+
+	edata->eee_active  = lp->eee_active;
+	edata->eee_enabled = lp->eee_enabled;
+	edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+	lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+	edata->tx_lpi_enabled = lpi_enabled;
+
+	if (netif_msg_hw(lp)) {
+		u32 regval;
+
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+		netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+			    dwceqos_get_rx_lpi_state(regval),
+			    dwceqos_get_tx_lpi_state(regval));
+	}
+
+	return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 regval;
+	unsigned long flags;
+
+	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+		return -EOPNOTSUPP;
+
+	if (edata->eee_enabled && !lp->eee_active)
+		return -EOPNOTSUPP;
+
+	if (edata->tx_lpi_enabled) {
+		if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+		    edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+			return -EINVAL;
+	}
+
+	lp->eee_enabled = edata->eee_enabled;
+
+	if (edata->eee_enabled && edata->tx_lpi_enabled) {
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+			      edata->tx_lpi_timer);
+
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		if (lp->en_tx_lpi_clockgating)
+			regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	} else {
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	}
+
+	return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	lp->msg_enable = msglevel;
+}
+
+static const struct ethtool_ops dwceqos_ethtool_ops = {
+	.get_settings   = dwceqos_get_settings,
+	.set_settings   = dwceqos_set_settings,
+	.get_drvinfo    = dwceqos_get_drvinfo,
+	.get_link       = ethtool_op_get_link,
+	.get_pauseparam = dwceqos_get_pauseparam,
+	.set_pauseparam = dwceqos_set_pauseparam,
+	.get_strings    = dwceqos_get_strings,
+	.get_ethtool_stats = dwceqos_get_ethtool_stats,
+	.get_sset_count = dwceqos_get_sset_count,
+	.get_regs       = dwceqos_get_regs,
+	.get_regs_len   = dwceqos_get_regs_len,
+	.get_eee        = dwceqos_get_eee,
+	.set_eee        = dwceqos_set_eee,
+	.get_msglevel   = dwceqos_get_msglevel,
+	.set_msglevel   = dwceqos_set_msglevel,
+};
+
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= dwceqos_open,
+	.ndo_stop		= dwceqos_stop,
+	.ndo_start_xmit		= dwceqos_start_xmit,
+	.ndo_set_rx_mode	= dwceqos_set_rx_mode,
+	.ndo_set_mac_address	= dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= dwceqos_poll_controller,
+#endif
+	.ndo_do_ioctl		= dwceqos_ioctl,
+	.ndo_tx_timeout		= dwceqos_tx_timeout,
+	.ndo_get_stats64	= dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+	{ .compatible = "snps,dwc-qos-ethernet-4.10", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+	struct resource *r_mem = NULL;
+	struct net_device *ndev;
+	struct net_local *lp;
+	int ret = -ENXIO;
+
+	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r_mem) {
+		dev_err(&pdev->dev, "no IO resource defined.\n");
+		return -ENXIO;
+	}
+
+	ndev = alloc_etherdev(sizeof(*lp));
+	if (!ndev) {
+		dev_err(&pdev->dev, "etherdev allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	lp = netdev_priv(ndev);
+	lp->ndev = ndev;
+	lp->pdev = pdev;
+	lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+	spin_lock_init(&lp->tx_lock);
+	spin_lock_init(&lp->hw_lock);
+	spin_lock_init(&lp->stats_lock);
+
+	lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+	if (IS_ERR(lp->apb_pclk)) {
+		dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+		ret = PTR_ERR(lp->apb_pclk);
+		goto err_out_free_netdev;
+	}
+
+	ret = clk_prepare_enable(lp->apb_pclk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+		goto err_out_free_netdev;
+	}
+
+	lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+	if (IS_ERR(lp->baseaddr)) {
+		dev_err(&pdev->dev, "failed to map baseaddress.\n");
+		ret = PTR_ERR(lp->baseaddr);
+		goto err_out_clk_dis_aper;
+	}
+
+	ndev->irq = platform_get_irq(pdev, 0);
+	ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+	ndev->netdev_ops = &netdev_ops;
+	ndev->ethtool_ops = &dwceqos_ethtool_ops;
+	ndev->base_addr = r_mem->start;
+
+	dwceqos_get_hwfeatures(lp);
+	dwceqos_mdio_set_csr(lp);
+
+	ndev->hw_features = NETIF_F_SG;
+
+	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+		ndev->hw_features |= NETIF_F_RXCSUM;
+
+	ndev->features = ndev->hw_features;
+
+	netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+		goto err_out_clk_dis_aper;
+	}
+
+	lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+	if (IS_ERR(lp->phy_ref_clk)) {
+		dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+		ret = PTR_ERR(lp->phy_ref_clk);
+		goto err_out_unregister_netdev;
+	}
+
+	ret = clk_prepare_enable(lp->phy_ref_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable device clock.\n");
+		goto err_out_unregister_netdev;
+	}
+
+	lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+						"phy-handle", 0);
+	if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+		ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "invalid fixed-link\n");
+			goto err_out_unregister_netdev;
+		}
+
+		lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+	}
+
+	ret = of_get_phy_mode(lp->pdev->dev.of_node);
+	if (ret < 0) {
+		dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+		goto err_out_unregister_clk_notifier;
+	}
+
+	lp->phy_interface = ret;
+
+	ret = dwceqos_mii_init(lp);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+		goto err_out_unregister_clk_notifier;
+	}
+
+	ret = dwceqos_mii_probe(ndev);
+	if (ret != 0) {
+		netdev_err(ndev, "mii_probe fail.\n");
+		ret = -ENXIO;
+		goto err_out_unregister_clk_notifier;
+	}
+
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+	tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+		     (unsigned long)ndev);
+	tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+	lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+	INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+	platform_set_drvdata(pdev, ndev);
+	ret = dwceqos_probe_config_dt(pdev);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+			ret);
+		goto err_out_unregister_clk_notifier;
+	}
+	dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+		 pdev->id, ndev->base_addr, ndev->irq);
+
+	ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+			       ndev->name, ndev);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+			ndev->irq, ret);
+		goto err_out_unregister_clk_notifier;
+	}
+
+	if (netif_msg_probe(lp))
+		netdev_dbg(ndev, "net_local@%p\n", lp);
+
+	return 0;
+
+err_out_unregister_clk_notifier:
+	clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+	unregister_netdev(ndev);
+err_out_clk_dis_aper:
+	clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+	if (lp->phy_node)
+		of_node_put(lp->phy_node);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct net_local *lp;
+
+	if (ndev) {
+		lp = netdev_priv(ndev);
+
+		if (lp->phy_dev)
+			phy_disconnect(lp->phy_dev);
+		mdiobus_unregister(lp->mii_bus);
+		kfree(lp->mii_bus->irq);
+		mdiobus_free(lp->mii_bus);
+
+		unregister_netdev(ndev);
+
+		clk_disable_unprepare(lp->phy_ref_clk);
+		clk_disable_unprepare(lp->apb_pclk);
+
+		free_netdev(ndev);
+	}
+
+	return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+	.probe   = dwceqos_probe,
+	.remove  = dwceqos_remove,
+	.driver  = {
+		.name  = DRIVER_NAME,
+		.of_match_table = dwceq_of_match,
+	},
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 3ca87f2..6f2e151 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -51,6 +51,8 @@
 		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
 		    NETIF_MSG_RX_STATUS)
 
+#define NETCP_EFUSE_ADDR_SWAP	2
+
 #define knav_queue_get_id(q)	knav_queue_device_control(q, \
 				KNAV_QUEUE_GET_ID, (unsigned long)NULL)
 
@@ -172,13 +174,22 @@
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
 {
 	unsigned int addr0, addr1;
 
 	addr1 = readl(efuse_mac + 4);
 	addr0 = readl(efuse_mac);
 
+	switch (swap) {
+	case NETCP_EFUSE_ADDR_SWAP:
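+		/* The two DWORDs of the MAC address are stored swapped in
+		 * e-fuse; read them back in the opposite order.
+		 */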
+		addr0 = addr1;
+		addr1 = readl(efuse_mac);
+		break;
+	default:
+		break;
+	}
+
 	x[0] = (addr1 & 0x0000ff00) >> 8;
 	x[1] = addr1 & 0x000000ff;
 	x[2] = (addr0 & 0xff000000) >> 24;
@@ -1902,7 +1913,7 @@
 			goto quit;
 		}
 
-		emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
 		if (is_valid_ether_addr(efuse_mac_addr))
 			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
 		else
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a3..01a955c 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -295,8 +295,6 @@
 	u32	rx_dma_overruns;
 };
 
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
 struct gbenu_ss_regs {
 	u32	id_ver;
 	u32	synce_count;		/* NU */
@@ -480,7 +478,6 @@
 	u32	tx_pri7_drop_bcnt;
 };
 
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
 
 struct gbe_ss_regs {
@@ -615,7 +612,6 @@
 	u32	rx_dma_overruns;
 };
 
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
 #define GBE_MAX_HW_STAT_MODS			9
 #define GBE_HW_STATS_REG_MAP_SZ			0x100
 
@@ -646,6 +642,7 @@
 	bool				enable_ale;
 	u8				max_num_slaves;
 	u8				max_num_ports; /* max_num_slaves + 1 */
+	u8				num_stats_mods;
 	struct netcp_tx_pipe		tx_pipe;
 
 	int				host_port;
@@ -675,6 +672,7 @@
 	struct net_device		*dummy_ndev;
 
 	u64				*hw_stats;
+	u32				*hw_stats_prev;
 	const struct netcp_ethtool_stat *et_stats;
 	int				num_et_stats;
 	/*  Lock for updating the hwstats */
@@ -874,7 +872,7 @@
 };
 
 /* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE	33
+#define GBENU_ET_STATS_HOST_SIZE	52
 
 #define GBENU_STATS_HOST(field)					\
 {								\
@@ -883,8 +881,8 @@
 	offsetof(struct gbenu_hw_stats, field)			\
 }
 
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE	46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE	65
 
 #define GBENU_STATS_P1(field)					\
 {								\
@@ -976,7 +974,26 @@
 	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
 	GBENU_STATS_HOST(ale_unknown_bcast),
 	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+	GBENU_STATS_HOST(ale_pol_match),
+	GBENU_STATS_HOST(ale_pol_match_red),
+	GBENU_STATS_HOST(ale_pol_match_yellow),
 	GBENU_STATS_HOST(tx_mem_protect_err),
+	GBENU_STATS_HOST(tx_pri0_drop),
+	GBENU_STATS_HOST(tx_pri1_drop),
+	GBENU_STATS_HOST(tx_pri2_drop),
+	GBENU_STATS_HOST(tx_pri3_drop),
+	GBENU_STATS_HOST(tx_pri4_drop),
+	GBENU_STATS_HOST(tx_pri5_drop),
+	GBENU_STATS_HOST(tx_pri6_drop),
+	GBENU_STATS_HOST(tx_pri7_drop),
+	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
 	/* GBENU Module 1 */
 	GBENU_STATS_P1(rx_good_frames),
 	GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1040,26 @@
 	GBENU_STATS_P1(ale_unknown_mcast_bytes),
 	GBENU_STATS_P1(ale_unknown_bcast),
 	GBENU_STATS_P1(ale_unknown_bcast_bytes),
+	GBENU_STATS_P1(ale_pol_match),
+	GBENU_STATS_P1(ale_pol_match_red),
+	GBENU_STATS_P1(ale_pol_match_yellow),
 	GBENU_STATS_P1(tx_mem_protect_err),
+	GBENU_STATS_P1(tx_pri0_drop),
+	GBENU_STATS_P1(tx_pri1_drop),
+	GBENU_STATS_P1(tx_pri2_drop),
+	GBENU_STATS_P1(tx_pri3_drop),
+	GBENU_STATS_P1(tx_pri4_drop),
+	GBENU_STATS_P1(tx_pri5_drop),
+	GBENU_STATS_P1(tx_pri6_drop),
+	GBENU_STATS_P1(tx_pri7_drop),
+	GBENU_STATS_P1(tx_pri0_drop_bcnt),
+	GBENU_STATS_P1(tx_pri1_drop_bcnt),
+	GBENU_STATS_P1(tx_pri2_drop_bcnt),
+	GBENU_STATS_P1(tx_pri3_drop_bcnt),
+	GBENU_STATS_P1(tx_pri4_drop_bcnt),
+	GBENU_STATS_P1(tx_pri5_drop_bcnt),
+	GBENU_STATS_P1(tx_pri6_drop_bcnt),
+	GBENU_STATS_P1(tx_pri7_drop_bcnt),
 	/* GBENU Module 2 */
 	GBENU_STATS_P2(rx_good_frames),
 	GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1106,26 @@
 	GBENU_STATS_P2(ale_unknown_mcast_bytes),
 	GBENU_STATS_P2(ale_unknown_bcast),
 	GBENU_STATS_P2(ale_unknown_bcast_bytes),
+	GBENU_STATS_P2(ale_pol_match),
+	GBENU_STATS_P2(ale_pol_match_red),
+	GBENU_STATS_P2(ale_pol_match_yellow),
 	GBENU_STATS_P2(tx_mem_protect_err),
+	GBENU_STATS_P2(tx_pri0_drop),
+	GBENU_STATS_P2(tx_pri1_drop),
+	GBENU_STATS_P2(tx_pri2_drop),
+	GBENU_STATS_P2(tx_pri3_drop),
+	GBENU_STATS_P2(tx_pri4_drop),
+	GBENU_STATS_P2(tx_pri5_drop),
+	GBENU_STATS_P2(tx_pri6_drop),
+	GBENU_STATS_P2(tx_pri7_drop),
+	GBENU_STATS_P2(tx_pri0_drop_bcnt),
+	GBENU_STATS_P2(tx_pri1_drop_bcnt),
+	GBENU_STATS_P2(tx_pri2_drop_bcnt),
+	GBENU_STATS_P2(tx_pri3_drop_bcnt),
+	GBENU_STATS_P2(tx_pri4_drop_bcnt),
+	GBENU_STATS_P2(tx_pri5_drop_bcnt),
+	GBENU_STATS_P2(tx_pri6_drop_bcnt),
+	GBENU_STATS_P2(tx_pri7_drop_bcnt),
 	/* GBENU Module 3 */
 	GBENU_STATS_P3(rx_good_frames),
 	GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1172,26 @@
 	GBENU_STATS_P3(ale_unknown_mcast_bytes),
 	GBENU_STATS_P3(ale_unknown_bcast),
 	GBENU_STATS_P3(ale_unknown_bcast_bytes),
+	GBENU_STATS_P3(ale_pol_match),
+	GBENU_STATS_P3(ale_pol_match_red),
+	GBENU_STATS_P3(ale_pol_match_yellow),
 	GBENU_STATS_P3(tx_mem_protect_err),
+	GBENU_STATS_P3(tx_pri0_drop),
+	GBENU_STATS_P3(tx_pri1_drop),
+	GBENU_STATS_P3(tx_pri2_drop),
+	GBENU_STATS_P3(tx_pri3_drop),
+	GBENU_STATS_P3(tx_pri4_drop),
+	GBENU_STATS_P3(tx_pri5_drop),
+	GBENU_STATS_P3(tx_pri6_drop),
+	GBENU_STATS_P3(tx_pri7_drop),
+	GBENU_STATS_P3(tx_pri0_drop_bcnt),
+	GBENU_STATS_P3(tx_pri1_drop_bcnt),
+	GBENU_STATS_P3(tx_pri2_drop_bcnt),
+	GBENU_STATS_P3(tx_pri3_drop_bcnt),
+	GBENU_STATS_P3(tx_pri4_drop_bcnt),
+	GBENU_STATS_P3(tx_pri5_drop_bcnt),
+	GBENU_STATS_P3(tx_pri6_drop_bcnt),
+	GBENU_STATS_P3(tx_pri7_drop_bcnt),
 	/* GBENU Module 4 */
 	GBENU_STATS_P4(rx_good_frames),
 	GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1238,26 @@
 	GBENU_STATS_P4(ale_unknown_mcast_bytes),
 	GBENU_STATS_P4(ale_unknown_bcast),
 	GBENU_STATS_P4(ale_unknown_bcast_bytes),
+	GBENU_STATS_P4(ale_pol_match),
+	GBENU_STATS_P4(ale_pol_match_red),
+	GBENU_STATS_P4(ale_pol_match_yellow),
 	GBENU_STATS_P4(tx_mem_protect_err),
+	GBENU_STATS_P4(tx_pri0_drop),
+	GBENU_STATS_P4(tx_pri1_drop),
+	GBENU_STATS_P4(tx_pri2_drop),
+	GBENU_STATS_P4(tx_pri3_drop),
+	GBENU_STATS_P4(tx_pri4_drop),
+	GBENU_STATS_P4(tx_pri5_drop),
+	GBENU_STATS_P4(tx_pri6_drop),
+	GBENU_STATS_P4(tx_pri7_drop),
+	GBENU_STATS_P4(tx_pri0_drop_bcnt),
+	GBENU_STATS_P4(tx_pri1_drop_bcnt),
+	GBENU_STATS_P4(tx_pri2_drop_bcnt),
+	GBENU_STATS_P4(tx_pri3_drop_bcnt),
+	GBENU_STATS_P4(tx_pri4_drop_bcnt),
+	GBENU_STATS_P4(tx_pri5_drop_bcnt),
+	GBENU_STATS_P4(tx_pri6_drop_bcnt),
+	GBENU_STATS_P4(tx_pri7_drop_bcnt),
 	/* GBENU Module 5 */
 	GBENU_STATS_P5(rx_good_frames),
 	GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1304,26 @@
 	GBENU_STATS_P5(ale_unknown_mcast_bytes),
 	GBENU_STATS_P5(ale_unknown_bcast),
 	GBENU_STATS_P5(ale_unknown_bcast_bytes),
+	GBENU_STATS_P5(ale_pol_match),
+	GBENU_STATS_P5(ale_pol_match_red),
+	GBENU_STATS_P5(ale_pol_match_yellow),
 	GBENU_STATS_P5(tx_mem_protect_err),
+	GBENU_STATS_P5(tx_pri0_drop),
+	GBENU_STATS_P5(tx_pri1_drop),
+	GBENU_STATS_P5(tx_pri2_drop),
+	GBENU_STATS_P5(tx_pri3_drop),
+	GBENU_STATS_P5(tx_pri4_drop),
+	GBENU_STATS_P5(tx_pri5_drop),
+	GBENU_STATS_P5(tx_pri6_drop),
+	GBENU_STATS_P5(tx_pri7_drop),
+	GBENU_STATS_P5(tx_pri0_drop_bcnt),
+	GBENU_STATS_P5(tx_pri1_drop_bcnt),
+	GBENU_STATS_P5(tx_pri2_drop_bcnt),
+	GBENU_STATS_P5(tx_pri3_drop_bcnt),
+	GBENU_STATS_P5(tx_pri4_drop_bcnt),
+	GBENU_STATS_P5(tx_pri5_drop_bcnt),
+	GBENU_STATS_P5(tx_pri6_drop_bcnt),
+	GBENU_STATS_P5(tx_pri7_drop_bcnt),
 	/* GBENU Module 6 */
 	GBENU_STATS_P6(rx_good_frames),
 	GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1370,26 @@
 	GBENU_STATS_P6(ale_unknown_mcast_bytes),
 	GBENU_STATS_P6(ale_unknown_bcast),
 	GBENU_STATS_P6(ale_unknown_bcast_bytes),
+	GBENU_STATS_P6(ale_pol_match),
+	GBENU_STATS_P6(ale_pol_match_red),
+	GBENU_STATS_P6(ale_pol_match_yellow),
 	GBENU_STATS_P6(tx_mem_protect_err),
+	GBENU_STATS_P6(tx_pri0_drop),
+	GBENU_STATS_P6(tx_pri1_drop),
+	GBENU_STATS_P6(tx_pri2_drop),
+	GBENU_STATS_P6(tx_pri3_drop),
+	GBENU_STATS_P6(tx_pri4_drop),
+	GBENU_STATS_P6(tx_pri5_drop),
+	GBENU_STATS_P6(tx_pri6_drop),
+	GBENU_STATS_P6(tx_pri7_drop),
+	GBENU_STATS_P6(tx_pri0_drop_bcnt),
+	GBENU_STATS_P6(tx_pri1_drop_bcnt),
+	GBENU_STATS_P6(tx_pri2_drop_bcnt),
+	GBENU_STATS_P6(tx_pri3_drop_bcnt),
+	GBENU_STATS_P6(tx_pri4_drop_bcnt),
+	GBENU_STATS_P6(tx_pri5_drop_bcnt),
+	GBENU_STATS_P6(tx_pri6_drop_bcnt),
+	GBENU_STATS_P6(tx_pri7_drop_bcnt),
 	/* GBENU Module 7 */
 	GBENU_STATS_P7(rx_good_frames),
 	GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1436,26 @@
 	GBENU_STATS_P7(ale_unknown_mcast_bytes),
 	GBENU_STATS_P7(ale_unknown_bcast),
 	GBENU_STATS_P7(ale_unknown_bcast_bytes),
+	GBENU_STATS_P7(ale_pol_match),
+	GBENU_STATS_P7(ale_pol_match_red),
+	GBENU_STATS_P7(ale_pol_match_yellow),
 	GBENU_STATS_P7(tx_mem_protect_err),
+	GBENU_STATS_P7(tx_pri0_drop),
+	GBENU_STATS_P7(tx_pri1_drop),
+	GBENU_STATS_P7(tx_pri2_drop),
+	GBENU_STATS_P7(tx_pri3_drop),
+	GBENU_STATS_P7(tx_pri4_drop),
+	GBENU_STATS_P7(tx_pri5_drop),
+	GBENU_STATS_P7(tx_pri6_drop),
+	GBENU_STATS_P7(tx_pri7_drop),
+	GBENU_STATS_P7(tx_pri0_drop_bcnt),
+	GBENU_STATS_P7(tx_pri1_drop_bcnt),
+	GBENU_STATS_P7(tx_pri2_drop_bcnt),
+	GBENU_STATS_P7(tx_pri3_drop_bcnt),
+	GBENU_STATS_P7(tx_pri4_drop_bcnt),
+	GBENU_STATS_P7(tx_pri5_drop_bcnt),
+	GBENU_STATS_P7(tx_pri6_drop_bcnt),
+	GBENU_STATS_P7(tx_pri7_drop_bcnt),
 	/* GBENU Module 8 */
 	GBENU_STATS_P8(rx_good_frames),
 	GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1502,26 @@
 	GBENU_STATS_P8(ale_unknown_mcast_bytes),
 	GBENU_STATS_P8(ale_unknown_bcast),
 	GBENU_STATS_P8(ale_unknown_bcast_bytes),
+	GBENU_STATS_P8(ale_pol_match),
+	GBENU_STATS_P8(ale_pol_match_red),
+	GBENU_STATS_P8(ale_pol_match_yellow),
 	GBENU_STATS_P8(tx_mem_protect_err),
+	GBENU_STATS_P8(tx_pri0_drop),
+	GBENU_STATS_P8(tx_pri1_drop),
+	GBENU_STATS_P8(tx_pri2_drop),
+	GBENU_STATS_P8(tx_pri3_drop),
+	GBENU_STATS_P8(tx_pri4_drop),
+	GBENU_STATS_P8(tx_pri5_drop),
+	GBENU_STATS_P8(tx_pri6_drop),
+	GBENU_STATS_P8(tx_pri7_drop),
+	GBENU_STATS_P8(tx_pri0_drop_bcnt),
+	GBENU_STATS_P8(tx_pri1_drop_bcnt),
+	GBENU_STATS_P8(tx_pri2_drop_bcnt),
+	GBENU_STATS_P8(tx_pri3_drop_bcnt),
+	GBENU_STATS_P8(tx_pri4_drop_bcnt),
+	GBENU_STATS_P8(tx_pri5_drop_bcnt),
+	GBENU_STATS_P8(tx_pri6_drop_bcnt),
+	GBENU_STATS_P8(tx_pri7_drop_bcnt),
 };
 
 #define XGBE_STATS0_INFO(field)				\
@@ -1554,70 +1723,97 @@
 	}
 }
 
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
 {
-	void __iomem *base = NULL;
-	u32  __iomem *p;
-	u32 tmp = 0;
+	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+	u32  __iomem *p_stats_entry;
 	int i;
 
 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
-		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
-		p = base + gbe_dev->et_stats[i].offset;
-		tmp = readl(p);
-		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+		if (gbe_dev->et_stats[i].type == stats_mod) {
+			p_stats_entry = base + gbe_dev->et_stats[i].offset;
+			gbe_dev->hw_stats[i] = 0;
+			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+		}
+	}
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+					     int et_stats_entry)
+{
+	void __iomem *base = NULL;
+	u32  __iomem *p_stats_entry;
+	u32 curr, delta;
+
+	/* The hw_stats_regs pointers are already
+	 * properly set to point to the right base:
+	 */
+	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
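+	/* Accumulate the delta since the previous read; unsigned 32-bit
+	 * subtraction yields the correct delta even if the hardware
+	 * counter wrapped between reads.
+	 */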
+	curr = readl(p_stats_entry);
+	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+	gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+	int i;
+
+	for (i = 0; i < gbe_dev->num_et_stats; i++) {
+		gbe_update_hw_stats_entry(gbe_dev, i);
+
 		if (data)
 			data[i] = gbe_dev->hw_stats[i];
-		/* write-to-decrement:
-		 * new register value = old register value - write value
-		 */
-		writel(tmp, p);
 	}
 }
 
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+					       int stats_mod)
+{
+	u32 val;
+
+	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+	switch (stats_mod) {
+	case GBE_STATSA_MODULE:
+	case GBE_STATSB_MODULE:
+		val &= ~GBE_STATS_CD_SEL;
+		break;
+	case GBE_STATSC_MODULE:
+	case GBE_STATSD_MODULE:
+		val |= GBE_STATS_CD_SEL;
+		break;
+	default:
+		return;
+	}
+
+	/* make the stat module visible */
+	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
+
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+	gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
 {
-	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
-	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
-	u64 *hw_stats = &gbe_dev->hw_stats[0];
-	void __iomem *base = NULL;
-	u32  __iomem *p;
-	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
-	int i, j, pair;
+	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+	int et_entry, j, pair;
 
 	for (pair = 0; pair < 2; pair++) {
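+		/* Only two stats modules are visible at a time; map in
+		 * each bank in turn and update the half of the stats
+		 * table it backs.
+		 */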
-		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+						      GBE_STATSC_MODULE :
+						      GBE_STATSA_MODULE));
 
-		if (pair == 0)
-			val &= ~GBE_STATS_CD_SEL;
-		else
-			val |= GBE_STATS_CD_SEL;
+		for (j = 0; j < half_num_et_stats; j++) {
+			et_entry = pair * half_num_et_stats + j;
+			gbe_update_hw_stats_entry(gbe_dev, et_entry);
 
-		/* make the stat modules visible */
-		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
-
-		for (i = 0; i < pair_size; i++) {
-			j = pair * pair_size + i;
-			switch (gbe_dev->et_stats[j].type) {
-			case GBE_STATSA_MODULE:
-			case GBE_STATSC_MODULE:
-				base = gbe_statsa;
-			break;
-			case GBE_STATSB_MODULE:
-			case GBE_STATSD_MODULE:
-				base  = gbe_statsb;
-			break;
-			}
-
-			p = base + gbe_dev->et_stats[j].offset;
-			tmp = readl(p);
-			hw_stats[j] += tmp;
 			if (data)
-				data[j] = hw_stats[j];
-			/* write-to-decrement:
-			 * new register value = old register value - write value
-			 */
-			writel(tmp, p);
+				data[et_entry] = gbe_dev->hw_stats[et_entry];
 		}
 	}
 }
@@ -2189,14 +2385,15 @@
 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
 	}
 
-	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	/* Timer callbacks run in BH context, so BHs need not be blocked here */
+	spin_lock(&gbe_dev->hw_stats_lock);
 
 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
 		gbe_update_stats_ver14(gbe_dev, NULL);
 	else
 		gbe_update_stats(gbe_dev, NULL);
 
-	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+	spin_unlock(&gbe_dev->hw_stats_lock);
 
 	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
 	add_timer(&gbe_dev->timer);
@@ -2554,15 +2751,28 @@
 	}
 	gbe_dev->xgbe_serdes_regs = regs;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = xgbe10_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-				  XGBE10_NUM_STAT_ENTRIES *
-				  (gbe_dev->max_num_ports) * sizeof(u64),
-				  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	gbe_dev->ss_version = XGBE_SS_VERSION_10;
 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
 					XGBE10_SGMII_MODULE_OFFSET;
@@ -2576,8 +2786,6 @@
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
 	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = xgbe10_et_stats;
-	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
 	/* Subsystem registers */
@@ -2662,30 +2870,45 @@
 	}
 	gbe_dev->switch_regs = regs;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+	gbe_dev->et_stats = gbe13_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-					  GBE13_NUM_HW_STAT_ENTRIES *
-					  gbe_dev->max_num_slaves * sizeof(u64),
-					  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
 
+	/* K2HK has only 2 hw stats modules visible at a time, so
+	 * modules 0 & 2 point to one base and
+	 * modules 1 & 3 point to the other base
+	 */
 	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
 		gbe_dev->hw_stats_regs[i] =
 			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
-			(GBE_HW_STATS_REG_MAP_SZ * i);
+			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
 	}
 
 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = gbe13_et_stats;
-	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
 	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
 
 	/* Subsystem registers */
@@ -2712,15 +2935,34 @@
 	void __iomem *regs;
 	int i, ret;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = gbenu_et_stats;
+
+	if (IS_SS_ID_NU(gbe_dev))
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+	else
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+					GBENU_ET_STATS_PORT_SIZE;
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-				  GBENU_NUM_HW_STAT_ENTRIES *
-				  (gbe_dev->max_num_ports) * sizeof(u64),
-				  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
 	if (ret) {
 		dev_err(gbe_dev->dev,
@@ -2748,16 +2990,8 @@
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = gbenu_et_stats;
 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
-	if (IS_SS_ID_NU(gbe_dev))
-		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
-	else
-		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-					GBENU_ET_STATS_PORT_SIZE;
-
 	/* Subsystem registers */
 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
 
@@ -2787,7 +3021,7 @@
 	struct cpsw_ale_params ale_params;
 	struct gbe_priv *gbe_dev;
 	u32 slave_num;
-	int ret = 0;
+	int i, ret = 0;
 
 	if (!node) {
 		dev_err(dev, "device tree info unavailable\n");
@@ -2935,6 +3169,15 @@
 	/* initialize host port */
 	gbe_init_host_port(gbe_dev);
 
+	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+			gbe_reset_mod_stats_ver14(gbe_dev, i);
+		else
+			gbe_reset_mod_stats(gbe_dev, i);
+	}
+	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
 	init_timer(&gbe_dev->timer);
 	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
 	gbe_dev->timer.function = netcp_ethss_timer;
@@ -2946,6 +3189,8 @@
 quit:
 	if (gbe_dev->hw_stats)
 		devm_kfree(dev, gbe_dev->hw_stats);
+	if (gbe_dev->hw_stats_prev)
+		devm_kfree(dev, gbe_dev->hw_stats_prev);
 	cpsw_ale_destroy(gbe_dev->ale);
 	if (gbe_dev->ss_regs)
 		devm_iounmap(dev, gbe_dev->ss_regs);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 26cd14c..5ce7020 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -541,6 +541,29 @@
 	struct nvsp_2_free_rxbuf free_rxbuf;
 } __packed;
 
+struct nvsp_4_send_vf_association {
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 allocated;
+
+	/* Serial number of the VF to team with */
+	u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+	NVSP_DATAPATH_SYNTHETIC = 0,
+	NVSP_DATAPATH_VF,
+	NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+	u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+	struct nvsp_4_send_vf_association vf_assoc;
+	struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
 enum nvsp_subchannel_operation {
 	NVSP_SUBCHANNEL_NONE = 0,
 	NVSP_SUBCHANNEL_ALLOCATE,
@@ -578,6 +601,7 @@
 	union nvsp_message_init_uber init_msg;
 	union nvsp_1_message_uber v1_msg;
 	union nvsp_2_message_uber v2_msg;
+	union nvsp_4_message_uber v4_msg;
 	union nvsp_5_message_uber v5_msg;
 } __packed;
 
@@ -671,6 +695,8 @@
 	u32 send_table[VRSS_SEND_TAB_SIZE];
 	u32 max_chn;
 	u32 num_chn;
+	spinlock_t sc_lock; /* Protects num_sc_offered variable */
+	u32 num_sc_offered;
 	atomic_t queue_sends[NR_CPUS];
 
 	/* Holds rndis device info */
@@ -689,6 +715,11 @@
 
 	/* The net device context */
 	struct net_device_context *nd_ctx;
+
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 vf_alloc;
+	/* Serial number of the VF to team with */
+	u32 vf_serial;
 };
 
 /* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 23126a7..51e4c0f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -453,13 +453,16 @@
 	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
 		return 0;
 
-	/* NVSPv2 only: Send NDIS config */
+	/* NVSPv2 or later: Send NDIS config */
 	memset(init_packet, 0, sizeof(struct nvsp_message));
 	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
 	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
 						       ETH_HLEN;
 	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 
+	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
 	ret = vmbus_sendpacket(device->channel, init_packet,
 				sizeof(struct nvsp_message),
 				(unsigned long)init_packet,
@@ -1064,11 +1067,10 @@
 
 
 static void netvsc_send_table(struct hv_device *hdev,
-			      struct vmpacket_descriptor *vmpkt)
+			      struct nvsp_message *nvmsg)
 {
 	struct netvsc_device *nvscdev;
 	struct net_device *ndev;
-	struct nvsp_message *nvmsg;
 	int i;
 	u32 count, *tab;
 
@@ -1077,12 +1079,6 @@
 		return;
 	ndev = nvscdev->ndev;
 
-	nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
-					(vmpkt->offset8 << 3));
-
-	if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
-		return;
-
 	count = nvmsg->msg.v5_msg.send_table.count;
 	if (count != VRSS_SEND_TAB_SIZE) {
 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1096,6 +1092,28 @@
 		nvscdev->send_table[i] = tab[i];
 }
 
+static void netvsc_send_vf(struct netvsc_device *nvdev,
+			   struct nvsp_message *nvmsg)
+{
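+	/* Cache the VF association so the driver knows whether an SR-IOV
+	 * VF is allocated and which serial number to team with.
+	 */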
+	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+}
+
+static inline void netvsc_receive_inband(struct hv_device *hdev,
+					 struct netvsc_device *nvdev,
+					 struct nvsp_message *nvmsg)
+{
+	switch (nvmsg->hdr.msg_type) {
+	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+		netvsc_send_table(hdev, nvmsg);
+		break;
+
+	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+		netvsc_send_vf(nvdev, nvmsg);
+		break;
+	}
+}
+
 void netvsc_channel_cb(void *context)
 {
 	int ret;
@@ -1108,6 +1126,7 @@
 	unsigned char *buffer;
 	int bufferlen = NETVSC_PACKET_SIZE;
 	struct net_device *ndev;
+	struct nvsp_message *nvmsg;
 
 	if (channel->primary_channel != NULL)
 		device = channel->primary_channel->device_obj;
@@ -1126,6 +1145,8 @@
 		if (ret == 0) {
 			if (bytes_recvd > 0) {
 				desc = (struct vmpacket_descriptor *)buffer;
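+				/* offset8 is in 8-byte units; the NVSP
+				 * message follows the descriptor header.
+				 */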
+				nvmsg = (struct nvsp_message *)((unsigned long)
+					 desc + (desc->offset8 << 3));
 				switch (desc->type) {
 				case VM_PKT_COMP:
 					netvsc_send_completion(net_device,
@@ -1138,7 +1159,9 @@
 					break;
 
 				case VM_PKT_DATA_INBAND:
-					netvsc_send_table(device, desc);
+					netvsc_receive_inband(device,
+							      net_device,
+							      nvmsg);
 					break;
 
 				default:
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2e40417..9b8263d 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -984,9 +984,16 @@
 	struct netvsc_device *nvscdev;
 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
 	int ret;
+	unsigned long flags;
 
 	nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
 
+	spin_lock_irqsave(&nvscdev->sc_lock, flags);
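+	/* Count down the outstanding sub-channel offers; the handler for
+	 * the last offer wakes up the waiter blocked on channel_init_wait.
+	 */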
+	nvscdev->num_sc_offered--;
+	spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+	if (nvscdev->num_sc_offered == 0)
+		complete(&nvscdev->channel_init_wait);
+
 	if (chn_index >= nvscdev->num_chn)
 		return;
 
@@ -1015,8 +1022,10 @@
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	u32 mtu, size;
 	u32 num_rss_qs;
+	u32 sc_delta;
 	const struct cpumask *node_cpu_mask;
 	u32 num_possible_rss_qs;
+	unsigned long flags;
 
 	rndis_device = get_rndis_device();
 	if (!rndis_device)
@@ -1039,6 +1048,8 @@
 	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
+	spin_lock_init(&net_device->sc_lock);
+
 	net_device->extension = rndis_device;
 	rndis_device->net_dev = net_device;
 
@@ -1116,6 +1127,9 @@
 	num_possible_rss_qs = cpumask_weight(node_cpu_mask);
 	net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
 
+	num_rss_qs = net_device->num_chn - 1;
+	net_device->num_sc_offered = num_rss_qs;
+
 	if (net_device->num_chn == 1)
 		goto out;
 
@@ -1157,11 +1171,25 @@
 
 	ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
 
+	/* Wait for the host to send us the sub-channel offers. */
+	spin_lock_irqsave(&net_device->sc_lock, flags);
+	sc_delta = num_rss_qs - (net_device->num_chn - 1);
+	net_device->num_sc_offered -= sc_delta;
+	spin_unlock_irqrestore(&net_device->sc_lock, flags);
+
+	while (net_device->num_sc_offered != 0) {
+		t = wait_for_completion_timeout(&net_device->channel_init_wait,
+						10 * HZ);
+		if (t == 0)
+			WARN(1, "Netvsc: Waiting for sub-channel processing");
+	}
 out:
 	if (ret) {
 		net_device->max_chn = 1;
 		net_device->num_chn = 1;
 	}
+
 	return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d6aff87..8ef8191 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -150,13 +150,13 @@
 	  will be called mdio-gpio.
 
 config MDIO_OCTEON
-	tristate "Support for MDIO buses on Octeon SOCs"
-	depends on CAVIUM_OCTEON_SOC
-	default y
+	tristate "Support for MDIO buses on Octeon and ThunderX SoCs"
+	depends on 64BIT
 	help
 
-	  This module provides a driver for the Octeon MDIO busses.
-	  It is required by the Octeon Ethernet device drivers.
+	  This module provides a driver for the Octeon and ThunderX MDIO
+	  buses. It is required by the Octeon and ThunderX Ethernet device
+	  drivers.
 
 	  If in doubt, say Y.
 
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8a3bf54..32f1066 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -123,12 +123,8 @@
 	if (ret)
 		return ret;
 
-	ret = of_property_read_u32(of_node, "ti,fifo-depth",
+	return of_property_read_u32(of_node, "ti,fifo-depth",
 				   &dp83867->fifo_depth);
-	if (ret)
-		return ret;
-
-	return 0;
 }
 #else
 static int dp83867_of_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index c838ad6..fcf4e4d 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -14,11 +15,12 @@
 #include <linux/phy.h>
 #include <linux/io.h>
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#endif
 
-#define DRV_VERSION "1.0"
-#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define DRV_VERSION "1.1"
+#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
 
 #define SMI_CMD		0x0
 #define SMI_WR_DAT	0x8
@@ -26,6 +28,79 @@
 #define SMI_CLK		0x18
 #define SMI_EN		0x20
 
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
+	field;					\
+	more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
+	more					\
+	field;
+
+#endif
+
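+/* The unions below describe the 64-bit SMI registers with MSB-first
+ * fields on big-endian and LSB-first fields on little-endian, matching
+ * both Octeon (big-endian MIPS) and ThunderX (little-endian arm64).
+ */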
+union cvmx_smix_clk {
+	u64 u64;
+	struct cvmx_smix_clk_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+	  OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+	  ;))))))))))
+	} s;
+};
+
+union cvmx_smix_cmd {
+	u64 u64;
+	struct cvmx_smix_cmd_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+	  ;))))))
+	} s;
+};
+
+union cvmx_smix_en {
+	u64 u64;
+	struct cvmx_smix_en_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+	  OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+	  ;))
+	} s;
+};
+
+union cvmx_smix_rd_dat {
+	u64 u64;
+	struct cvmx_smix_rd_dat_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+	  ;))))
+	} s;
+};
+
+union cvmx_smix_wr_dat {
+	u64 u64;
+	struct cvmx_smix_wr_dat_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+	  ;))))
+	} s;
+};
+
 enum octeon_mdiobus_mode {
 	UNINIT = 0,
 	C22,
@@ -41,6 +116,21 @@
 	int phy_irq[PHY_MAX_ADDR];
 };
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
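+/* On Octeon the SMI block is reached through CSR accessors; the
+ * ThunderX variant below maps the same registers as relaxed MMIO.
+ */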
+static void oct_mdio_writeq(u64 val, u64 addr)
+{
+	cvmx_write_csr(addr, val);
+}
+
+static u64 oct_mdio_readq(u64 addr)
+{
+	return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr)	writeq_relaxed(val, (void *)addr)
+#define oct_mdio_readq(addr)		readq_relaxed((void *)addr)
+#endif
+
 static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
 				    enum octeon_mdiobus_mode m)
 {
@@ -49,10 +139,10 @@
 	if (m == p->mode)
 		return;
 
-	smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
+	smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
 	smi_clk.s.mode = (m == C45) ? 1 : 0;
 	smi_clk.s.preamble = 1;
-	cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
+	oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
 	p->mode = m;
 }
 
@@ -67,7 +157,7 @@
 
 	smi_wr.u64 = 0;
 	smi_wr.s.dat = regnum & 0xffff;
-	cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
 	regnum = (regnum >> 16) & 0x1f;
 
@@ -75,14 +165,14 @@
 	smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
 	} while (smi_wr.s.pending && --timeout);
 
 	if (timeout <= 0)
@@ -114,14 +204,14 @@
 	smi_cmd.s.phy_op = op;
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
+		smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
 	} while (smi_rd.s.pending && --timeout);
 
 	if (smi_rd.s.val)
@@ -153,20 +243,20 @@
 
 	smi_wr.u64 = 0;
 	smi_wr.s.dat = val;
-	cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
 	smi_cmd.u64 = 0;
 	smi_cmd.s.phy_op = op;
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
 	} while (smi_wr.s.pending && --timeout);
 
 	if (timeout <= 0)
@@ -187,30 +277,34 @@
 		return -ENOMEM;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
 	if (res_mem == NULL) {
 		dev_err(&pdev->dev, "found no memory resource\n");
-		err = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
+
 	bus->mdio_phys = res_mem->start;
 	bus->regsize = resource_size(res_mem);
+
 	if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
 				     res_mem->name)) {
 		dev_err(&pdev->dev, "request_mem_region failed\n");
-		goto fail;
+		return -ENXIO;
 	}
+
 	bus->register_base =
 		(u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+	if (!bus->register_base) {
+		dev_err(&pdev->dev, "devm_ioremap failed\n");
+		return -ENOMEM;
+	}
 
 	bus->mii_bus = mdiobus_alloc();
-
 	if (!bus->mii_bus)
 		goto fail;
 
 	smi_en.u64 = 0;
 	smi_en.s.en = 1;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 
 	bus->mii_bus->priv = bus;
 	bus->mii_bus->irq = bus->phy_irq;
@@ -234,7 +328,7 @@
 	mdiobus_free(bus->mii_bus);
 fail:
 	smi_en.u64 = 0;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 	return err;
 }
 
@@ -248,7 +342,7 @@
 	mdiobus_unregister(bus->mii_bus);
 	mdiobus_free(bus->mii_bus);
 	smi_en.u64 = 0;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 	return 0;
 }
 
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4653015..f091d69 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -209,8 +209,6 @@
 	return ks8995_start(ks);
 }
 
-/* ------------------------------------------------------------------------ */
-
 static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
 	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -220,19 +218,9 @@
 	dev = container_of(kobj, struct device, kobj);
 	ks8995 = dev_get_drvdata(dev);
 
-	if (unlikely(off > ks8995->regs_attr.size))
-		return 0;
-
-	if ((off + count) > ks8995->regs_attr.size)
-		count = ks8995->regs_attr.size - off;
-
-	if (unlikely(!count))
-		return count;
-
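+	/* The sysfs bin_attribute layer already clamps off/count against
+	 * the attribute size, so no bounds checking is needed here.
+	 */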
 	return ks8995_read(ks8995, buf, off, count);
 }
 
-
 static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
 	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -242,19 +230,9 @@
 	dev = container_of(kobj, struct device, kobj);
 	ks8995 = dev_get_drvdata(dev);
 
-	if (unlikely(off >= ks8995->regs_attr.size))
-		return -EFBIG;
-
-	if ((off + count) > ks8995->regs_attr.size)
-		count = ks8995->regs_attr.size - off;
-
-	if (unlikely(!count))
-		return count;
-
 	return ks8995_write(ks8995, buf, off, count);
 }
 
-
 static const struct bin_attribute ks8995_registers_attr = {
 	.attr = {
 		.name   = "registers",
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 7dcb5aa..91e1bec 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -51,8 +51,15 @@
 {
 	int reg;
 
-	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
-	return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+	/* The autonegotiation state can only be checked on the copper
+	 * port; when the fiber port is in use, just report aneg as done.
+	 */
+	if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+		reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+		return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+	}
+
+	return 1;
 }
 
 static int teranetics_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e9feefb..81f0f24 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1686,7 +1686,7 @@
 			   struct sk_buff *skb,
 			   struct net_device *dev, struct in6_addr *saddr,
 			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
-			   __be16 src_port, __be16 dst_port, __u32 vni,
+			   __be16 src_port, __be16 dst_port, __be32 vni,
 			   struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
 	struct vxlanhdr *vxh;
@@ -1771,7 +1771,7 @@
 
 static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
 			  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-			  __be16 src_port, __be16 dst_port, __u32 vni,
+			  __be16 src_port, __be16 dst_port, __be32 vni,
 			  struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
 	struct vxlanhdr *vxh;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 69d0055..fa2cab9 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/printk.h>
 #include <linux/workqueue.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
@@ -354,6 +355,16 @@
 		   offsetof(struct bpf_prog, insns[proglen]));
 }
 
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+	/* When classic BPF programs have been loaded and the arch
+	 * does not have a classic BPF JIT (anymore), they have been
+	 * converted via bpf_migrate_filter() to eBPF and thus always
+	 * have an unspec program type.
+	 */
+	return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -428,8 +439,9 @@
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 				u32 pass, void *image)
 {
-	pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
-	       flen, proglen, pass, image);
+	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+	       proglen, pass, image, current->comm, task_pid_nr(current));
+
 	if (image)
 		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
 			       16, 1, image, proglen, false);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 06ed637..cb9dcad 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,6 +29,7 @@
 	__s32		max_desync_factor;
 	__s32		max_addresses;
 	__s32		accept_ra_defrtr;
+	__s32		accept_ra_min_hop_limit;
 	__s32		accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	__s32		accept_ra_rtr_pref;
diff --git a/include/linux/lwtunnel.h b/include/linux/lwtunnel.h
deleted file mode 100644
index 97f32f8..0000000
--- a/include/linux/lwtunnel.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _LINUX_LWTUNNEL_H_
-#define _LINUX_LWTUNNEL_H_
-
-#include <uapi/linux/lwtunnel.h>
-
-#endif /* _LINUX_LWTUNNEL_H_ */
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12..09cebe5 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@
 
 enum {
 	MLX4_CQE_L2_TUNNEL_IPOK		= 1 << 31,
-	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_CVLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_SVLAN_PRESENT_MASK	= 1 << 30,
 	MLX4_CQE_L2_TUNNEL		= 1 << 27,
 	MLX4_CQE_L2_TUNNEL_CSUM		= 1 << 26,
 	MLX4_CQE_L2_TUNNEL_IPV4		= 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1c..bcbf8c7 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -211,6 +211,8 @@
 	MLX4_DEV_CAP_FLAG2_ETS_CFG		= 1LL <<  26,
 	MLX4_DEV_CAP_FLAG2_PORT_BEACON		= 1LL <<  27,
 	MLX4_DEV_CAP_FLAG2_IGNORE_FCS		= 1LL <<  28,
+	MLX4_DEV_CAP_FLAG2_PHV_EN		= 1LL <<  29,
+	MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN	= 1LL <<  30,
 };
 
 enum {
@@ -581,6 +583,7 @@
 	u64			phys_port_id[MLX4_MAX_PORTS + 1];
 	int			tunnel_offload_mode;
 	u8			rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+	u8			phv_bit[MLX4_MAX_PORTS + 1];
 	u8			alloc_res_qp_mask;
 	u32			dmfs_high_rate_qpn_base;
 	u32			dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@
 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
 			    u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539..de45a51 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@
 	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
 	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
 	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
-	MLX4_WQE_CTRL_INS_VLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_CVLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_SVLAN		= 1 << 7,
 	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
 	MLX4_WQE_CTRL_FORCE_LOOPBACK	= 1 << 0,
 };
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88..5fe0cae 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -380,7 +380,7 @@
 	u32			index;
 	struct list_head	bf_list;
 	unsigned		free_bf_bmap;
-	void __iomem	       *wc_map;
+	void __iomem	       *bf_map;
 	void __iomem	       *map;
 };
 
@@ -435,6 +435,8 @@
 	struct mlx5_uuar_info	uuari;
 	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
+	struct io_mapping	*bf_mapping;
+
 	/* pages stuff */
 	struct workqueue_struct *pg_wq;
 	struct rb_root		page_root;
@@ -463,6 +465,10 @@
 	/* end: mr staff */
 
 	/* start: alloc staff */
+	/* protect buffer alocation according to numa node */
+	struct mutex            alloc_mutex;
+	int                     numa_node;
+
 	struct mutex            pgdir_mutex;
 	struct list_head        pgdir_list;
 	/* end: alloc staff */
@@ -672,6 +678,8 @@
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -773,6 +781,8 @@
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+		       int node);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
 const char *mlx5_command_str(int command);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fe..c60a62b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@
 };
 
 enum {
-	MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
-	MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
-	MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+	MLX5_RX_HASH_FN_NONE           = 0x0,
+	MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
+	MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
 };
 
 enum {
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c86a200..eead8ab9 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,28 +119,8 @@
 	int rx_fifo_size;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
 	void *bsp_priv;
 };
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
-	int has_gmac;
-	int enh_desc;
-	int tx_coe;
-	int rx_coe;
-	int bugged_jumbo;
-	int pmt;
-	int riwt_off;
-	void (*fix_mac_speed)(void *priv, unsigned int speed);
-	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
-	int (*init)(struct platform_device *pdev, void *priv);
-	void (*exit)(struct platform_device *pdev, void *priv);
-};
 #endif
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index c28aca2..1797235 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -66,6 +66,7 @@
 	BOND_OPT_AD_ACTOR_SYS_PRIO,
 	BOND_OPT_AD_ACTOR_SYSTEM,
 	BOND_OPT_AD_USER_PORT_KEY,
+	BOND_OPT_NUM_PEER_NOTIF_ALIAS,
 	BOND_OPT_LAST
 };
 
diff --git a/include/net/ip.h b/include/net/ip.h
index d5fe9f2..bee5f35 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -370,22 +370,6 @@
 	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 }
 
-static inline void inet_set_txhash(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct flow_keys keys;
-
-	memset(&keys, 0, sizeof(keys));
-
-	keys.addrs.v4addrs.src = inet->inet_saddr;
-	keys.addrs.v4addrs.dst = inet->inet_daddr;
-	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
-	keys.ports.src = inet->inet_sport;
-	keys.ports.dst = inet->inet_dport;
-
-	sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
 static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
 {
 	const struct iphdr *iph = skb_gro_network_header(skb);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 82dbdb0..7c79798 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -707,25 +707,6 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct flow_keys keys;
-
-	memset(&keys, 0, sizeof(keys));
-
-	memcpy(&keys.addrs.v6addrs.src, &np->saddr,
-	       sizeof(keys.addrs.v6addrs.src));
-	memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
-	       sizeof(keys.addrs.v6addrs.dst));
-	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
-	keys.ports.src = inet->inet_sport;
-	keys.ports.dst = inet->inet_dport;
-
-	sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 					__be32 flowlabel, bool autolabel)
 {
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 918e03c..33bd309 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -31,16 +31,17 @@
 	int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
 };
 
-extern const struct lwtunnel_encap_ops __rcu *
-		lwtun_encaps[LWTUNNEL_ENCAP_MAX+1];
-
 #ifdef CONFIG_LWTUNNEL
-static inline void lwtunnel_state_get(struct lwtunnel_state *lws)
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
 {
-	atomic_inc(&lws->refcnt);
+	if (lws)
+		atomic_inc(&lws->refcnt);
+
+	return lws;
 }
 
-static inline void lwtunnel_state_put(struct lwtunnel_state *lws)
+static inline void lwtstate_put(struct lwtunnel_state *lws)
 {
 	if (!lws)
 		return;
@@ -74,11 +75,13 @@
 
 #else
 
-static inline void lwtunnel_state_get(struct lwtunnel_state *lws)
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
 {
+	return lws;
 }
 
-static inline void lwtunnel_state_put(struct lwtunnel_state *lws)
+static inline void lwtstate_put(struct lwtunnel_state *lws)
 {
 }
 
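
Renaming lwtunnel_state_get() to lwtstate_get() while making it NULL-safe and pointer-returning collapses the old test-take-assign pattern into one expression; the fib_semantics.c and route.c hunks below switch to exactly this idiom. Side-by-side sketch, with variable names taken from the route.c hunk:

	/* before: caller tested for NULL and assigned separately */
	if (nh->nh_lwtstate) {
		lwtunnel_state_get(nh->nh_lwtstate);
		rt->rt_lwtstate = nh->nh_lwtstate;
	} else {
		rt->rt_lwtstate = NULL;
	}

	/* after: NULL check and refcount both live inside the helper */
	rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
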
diff --git a/include/net/sock.h b/include/net/sock.h
index 4353ef7..24aa75c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1687,6 +1687,20 @@
 kuid_t sock_i_uid(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
+static inline void sk_set_txhash(struct sock *sk)
+{
+	sk->sk_txhash = prandom_u32();
+
+	if (unlikely(!sk->sk_txhash))
+		sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+	if (sk->sk_txhash)
+		sk_set_txhash(sk);
+}
+
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
@@ -1711,6 +1725,8 @@
 {
 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
+	sk_rethink_txhash(sk);
+
 	if (dst && dst->ops->negative_advice) {
 		ndst = dst->ops->negative_advice(dst);
 
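
sk_set_txhash() maps the rare random value 0 to 1, so sk_txhash == 0 always means "never set"; sk_rethink_txhash() keys on that and re-rolls the hash only for sockets that actually use one. Wiring it into dst_negative_advice() means that when, say, a TCP retransmit timeout disqualifies the current route, the socket also picks a new flow hash and may land on a different ECMP path. A condensed lifecycle sketch (the wrapper function is illustrative; the two helpers are the ones added above):

static void example_txhash_lifecycle(struct sock *sk)
{
	sk_set_txhash(sk);	/* at connect(): random and never zero */

	/*
	 * Later, a retransmission timeout triggers dst_negative_advice(),
	 * which now also does:
	 */
	sk_rethink_txhash(sk);	/* re-roll, hoping for a healthier ECMP path */
}
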
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 1ff9942..aafb993 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -243,6 +243,7 @@
 header-y += llc.h
 header-y += loop.h
 header-y += lp.h
+header-y += lwtunnel.h
 header-y += magic.h
 header-y += major.h
 header-y += map_to_7segment.h
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 641a146..80f3b74 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -172,6 +172,7 @@
 	DEVCONF_ACCEPT_RA_MTU,
 	DEVCONF_STABLE_SECRET,
 	DEVCONF_USE_OIF_ADDRS_ONLY,
+	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
 	DEVCONF_MAX
 };
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 039d866..cd307df 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -648,6 +648,9 @@
 	struct verifier_state *state = &env->cur_state;
 	int size, err = 0;
 
+	if (state->regs[regno].type == PTR_TO_STACK)
+		off += state->regs[regno].imm;
+
 	size = bpf_size_to_bytes(bpf_size);
 	if (size < 0)
 		return size;
@@ -667,7 +670,8 @@
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
 
-	} else if (state->regs[regno].type == FRAME_PTR) {
+	} else if (state->regs[regno].type == FRAME_PTR ||
+		   state->regs[regno].type == PTR_TO_STACK) {
 		if (off >= 0 || off < -MAX_BPF_STACK) {
 			verbose("invalid stack off=%d size=%d\n", off, size);
 			return -EACCES;
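
With this change a register holding fp plus a constant (type PTR_TO_STACK, the constant tracked in reg->imm) becomes dereferenceable: the verifier folds the constant into the access offset and then runs the usual stack bounds and alignment checks. Worked example, mirroring the first selftest added at the end of this series:

struct bpf_insn example_ptr_to_stack[] = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = fp, imm = 0 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),	/* r1 += -10, imm = -10 */
	/* checked as stack off = -10 + 2 = -8: in bounds, 8-byte aligned */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
	BPF_EXIT_INSN(),
};

The same store at offset 8 would be checked as -10 + 8 = -2: misaligned for a BPF_DW access, which is exactly the "misaligned access" rejection exercised by the new tests below.
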
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 8b5e66f..3afddf2 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4613,6 +4613,8 @@
 		}
 
 		fp->len = flen;
+		/* Type doesn't really matter here as long as it's not unspec. */
+		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
 		bpf_prog_select_runtime(fp);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6a591e6..5e9d1c5 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -247,6 +247,73 @@
 	__br_mdb_notify(dev, &entry, type);
 }
 
+static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+				   struct net_device *dev,
+				   int ifindex, u32 pid,
+				   u32 seq, int type, unsigned int flags)
+{
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	struct nlattr *nest;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = dev->ifindex;
+	nest = nla_nest_start(skb, MDBA_ROUTER);
+	if (!nest)
+		goto cancel;
+
+	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
+		goto end;
+
+	nla_nest_end(skb, nest);
+	nlmsg_end(skb, nlh);
+	return 0;
+
+end:
+	nla_nest_end(skb, nest);
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t rtnl_rtr_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct br_port_msg))
+		+ nla_total_size(sizeof(__u32));
+}
+
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+		   int type)
+{
+	struct net *net = dev_net(dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+	int ifindex;
+
+	ifindex = port ? port->dev->ifindex : 0;
+	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
+	if (!skb)
+		goto errout;
+
+	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+	return;
+
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
 static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
 {
 	if (entry->ifindex == 0)
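
For reference, the shape of the notification br_rtr_notify() emits, reconstructed from the code above rather than quoted from separate documentation; a userspace listener subscribed to RTNLGRP_MDB sees:

/*
 * RTM_NEWMDB / RTM_DELMDB, flags NLM_F_MULTI:
 *   struct br_port_msg    family = AF_BRIDGE, ifindex = bridge device
 *     MDBA_ROUTER         nested attribute
 *       MDBA_ROUTER_PORT  u32 ifindex of the router port (0 if no port)
 *
 * rtnl_rtr_nlmsg_size() sizes this as NLMSG_ALIGN(sizeof(struct
 * br_port_msg)) + nla_total_size(sizeof(__u32)).
 */
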
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ed5dc68..fd23858 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -766,6 +766,7 @@
 		goto out;
 
 	hlist_del_init_rcu(&port->rlist);
+	br_rtr_notify(br->dev, port, RTM_DELMDB);
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -977,8 +978,10 @@
 		if (pg->state == MDB_TEMPORARY)
 			br_multicast_del_pg(br, pg);
 
-	if (!hlist_unhashed(&port->rlist))
+	if (!hlist_unhashed(&port->rlist)) {
 		hlist_del_init_rcu(&port->rlist);
+		br_rtr_notify(br->dev, port, RTM_DELMDB);
+	}
 	del_timer(&port->multicast_router_timer);
 	del_timer(&port->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1216,6 +1219,7 @@
 		hlist_add_behind_rcu(&port->rlist, slot);
 	else
 		hlist_add_head_rcu(&port->rlist, &br->router_list);
+	br_rtr_notify(br->dev, port, RTM_NEWMDB);
 }
 
 static void br_multicast_mark_router(struct net_bridge *br,
@@ -1848,8 +1852,10 @@
 		p->multicast_router = val;
 		err = 0;
 
-		if (val < 2 && !hlist_unhashed(&p->rlist))
+		if (val < 2 && !hlist_unhashed(&p->rlist)) {
 			hlist_del_init_rcu(&p->rlist);
+			br_rtr_notify(br->dev, p, RTM_DELMDB);
+		}
 
 		if (val == 1)
 			break;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 364bdc9..793d247 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -164,8 +164,6 @@
 			    sizeof(vinfo), &vinfo))
 			goto nla_put_failure;
 
-		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
-
 		vinfo.vid = vid_end;
 		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3ad1290..e2cb359 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -490,6 +490,8 @@
 void br_mdb_uninit(void);
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 		   struct br_ip *group, int type, u8 state);
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+		   int type);
 
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
diff --git a/net/core/dev.c b/net/core/dev.c
index cb52cba..4870c35 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4995,7 +4995,7 @@
  * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
  * RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next_private(struct net_device *dev,
 				    struct list_head **iter)
@@ -5050,7 +5050,7 @@
  * Gets the next netdev_adjacent from the dev's lower neighbour
  * list, starting from iter position. The caller must hold RTNL lock or
  * its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 {
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index bb58826..c240c89 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -37,7 +37,7 @@
 }
 EXPORT_SYMBOL(lwtunnel_state_alloc);
 
-const struct lwtunnel_encap_ops __rcu *
+static const struct lwtunnel_encap_ops __rcu *
 		lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
 
 int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
@@ -205,7 +205,7 @@
 	return ret;
 
 drop:
-	kfree(skb);
+	kfree_skb(skb);
 
 	return ret;
 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1ebdf1c0..0e0fb30 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -273,7 +273,6 @@
 
 	/* runtime counters relating to clone_skb */
 
-	__u64 allocated_skbs;
 	__u32 clone_count;
 	int last_ok;		/* Was last skb sent?
 				 * Or a failed transmit of some sort?
@@ -2279,7 +2278,7 @@
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-	pkt_dev->pkt_overhead = 0;
+	pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2788,6 +2787,8 @@
 	} else {
 		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
 	}
+	if (likely(skb))
+		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
 	return skb;
 }
@@ -3397,7 +3397,6 @@
 			return;
 		}
 		pkt_dev->last_pkt_size = pkt_dev->skb->len;
-		pkt_dev->allocated_skbs++;
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
 
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 574fad9..f915abf 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -74,7 +74,7 @@
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
 	sk->sk_state = TCP_ESTABLISHED;
-	inet_set_txhash(sk);
+	sk_set_txhash(sk);
 	inet->inet_id = jiffies;
 
 	sk_dst_set(sk, &rt->dst);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 6754c64..65e0039 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -209,7 +209,7 @@
 	change_nexthops(fi) {
 		if (nexthop_nh->nh_dev)
 			dev_put(nexthop_nh->nh_dev);
-		lwtunnel_state_put(nexthop_nh->nh_lwtstate);
+		lwtstate_put(nexthop_nh->nh_lwtstate);
 		free_nh_exceptions(nexthop_nh);
 		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
 		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
@@ -438,13 +438,15 @@
 	if (n) {
 		state = n->nud_state;
 		neigh_release(n);
+	} else {
+		return 0;
 	}
 	if (state == NUD_REACHABLE)
 		return 0;
 	if ((state & NUD_VALID) && order != dflt)
 		return 0;
 	if ((state & NUD_VALID) ||
-	    (*last_idx < 0 && order > dflt)) {
+	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
 		*last_resort = fi;
 		*last_idx = order;
 	}
@@ -512,8 +514,8 @@
 							   nla, &lwtstate);
 				if (ret)
 					goto errout;
-				lwtunnel_state_get(lwtstate);
-				nexthop_nh->nh_lwtstate = lwtstate;
+				nexthop_nh->nh_lwtstate =
+					lwtstate_get(lwtstate);
 			}
 		}
 
@@ -969,8 +971,7 @@
 			if (err)
 				goto failure;
 
-			lwtunnel_state_get(lwtstate);
-			nh->nh_lwtstate = lwtstate;
+			nh->nh_lwtstate = lwtstate_get(lwtstate);
 		}
 		nh->nh_oif = cfg->fc_oif;
 		nh->nh_gw = cfg->fc_gw;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 519ec23..1109639 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1358,7 +1358,7 @@
 		list_del(&rt->rt_uncached);
 		spin_unlock_bh(&ul->lock);
 	}
-	lwtunnel_state_put(rt->rt_lwtstate);
+	lwtstate_put(rt->rt_lwtstate);
 }
 
 void rt_flush_dev(struct net_device *dev)
@@ -1407,12 +1407,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		rt->dst.tclassid = nh->nh_tclassid;
 #endif
-		if (nh->nh_lwtstate) {
-			lwtunnel_state_get(nh->nh_lwtstate);
-			rt->rt_lwtstate = nh->nh_lwtstate;
-		} else {
-			rt->rt_lwtstate = NULL;
-		}
+		rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
 		if (unlikely(fnhe))
 			cached = rt_bind_exception(rt, fnhe, daddr);
 		else if (!(rt->dst.flags & DST_NOCACHE))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 486ba96..d27eb54 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -222,7 +222,7 @@
 	if (err)
 		goto failure;
 
-	inet_set_txhash(sk);
+	sk_set_txhash(sk);
 
 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 			       inet->inet_sport, inet->inet_dport, sk);
@@ -1277,7 +1277,7 @@
 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
-	inet_set_txhash(newsk);
+	sk_set_txhash(newsk);
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 	newinet->inet_id = newtp->write_seq ^ jiffies;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 71057849..7d1efa7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1774,7 +1774,7 @@
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
 
-	if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
+	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
 
 	/* Avoid bursty behavior by allowing defer
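
The rewritten test relies on the ordering of tcp_ca_state (Open < Disorder < CWR < Recovery < Loss): deferral stays possible in Open, Disorder and CWR, while the old bitmask allowed only Open and CWR, so CA_Disorder may now defer as well. The predicate, spelled out (helper name illustrative):

static inline bool example_tso_defer_allowed(const struct inet_connection_sock *icsk)
{
	/* false only in TCP_CA_Recovery and TCP_CA_Loss */
	return icsk->icsk_ca_state < TCP_CA_Recovery;
}
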
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index eb0c6a3..53e3a9d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_from_local	= 0,
+	.accept_ra_min_hop_limit = 1,
 	.accept_ra_pinfo	= 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	.accept_ra_rtr_pref	= 1,
@@ -237,6 +238,7 @@
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_from_local	= 0,
+	.accept_ra_min_hop_limit = 1,
 	.accept_ra_pinfo	= 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	.accept_ra_rtr_pref	= 1,
@@ -4588,6 +4590,7 @@
 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
+	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
@@ -5485,6 +5488,13 @@
 			.proc_handler	= proc_dointvec,
 		},
 		{
+			.procname	= "accept_ra_min_hop_limit",
+			.data		= &ipv6_devconf.accept_ra_min_hop_limit,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec,
+		},
+		{
 			.procname	= "accept_ra_pinfo",
 			.data		= &ipv6_devconf.accept_ra_pinfo,
 			.maxlen		= sizeof(int),
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2572a32..9aadd57 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -199,7 +199,7 @@
 		      NULL);
 
 	sk->sk_state = TCP_ESTABLISHED;
-	ip6_set_txhash(sk);
+	sk_set_txhash(sk);
 out:
 	fl6_sock_release(flowlabel);
 	return err;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d715f2e..5693b5e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -178,7 +178,7 @@
 static void rt6_release(struct rt6_info *rt)
 {
 	if (atomic_dec_and_test(&rt->rt6i_ref)) {
-		lwtunnel_state_put(rt->rt6i_lwtstate);
+		lwtstate_put(rt->rt6i_lwtstate);
 		rt6_free_pcpu(rt);
 		dst_free(&rt->dst);
 	}
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 57990c9..adba03a 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -45,6 +45,7 @@
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/inet_ecn.h>
+#include <net/dst_metadata.h>
 
 int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
 {
@@ -55,7 +56,7 @@
 		if (ipprot && ipprot->early_demux)
 			ipprot->early_demux(skb);
 	}
-	if (!skb_dst(skb))
+	if (!skb_valid_dst(skb))
 		ip6_route_input(skb);
 
 	return dst_input(skb);
@@ -98,7 +99,7 @@
 	 * arrived via the sending interface (ethX), because of the
 	 * nature of scoping architecture. --yoshfuji
 	 */
-	IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
 
 	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
 		goto err;
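
skb_valid_dst() comes from the newly included net/dst_metadata.h and differs from a plain skb_dst() test in that a metadata-only dst, as attached by collect-metadata tunnels, does not count as a route, so such packets still pass through ip6_route_input(). A sketch of the predicate's intent, under the assumption that metadata dsts are marked with DST_METADATA:

static inline bool example_skb_valid_dst(const struct sk_buff *skb)
{
	const struct dst_entry *dst = skb_dst(skb);

	/* valid for input only if it is a real route, not a metadata carrier */
	return dst && !(dst->flags & DST_METADATA);
}
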
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35..6e184e0 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1225,18 +1225,16 @@
 
 	if (rt)
 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
-	if (ra_msg->icmph.icmp6_hop_limit) {
-		/* Only set hop_limit on the interface if it is higher than
-		 * the current hop_limit.
-		 */
-		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+	if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+	    ra_msg->icmph.icmp6_hop_limit) {
+		if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
 			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+			if (rt)
+				dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+					       ra_msg->icmph.icmp6_hop_limit);
 		} else {
-			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
 		}
-		if (rt)
-			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
-				       ra_msg->icmph.icmp6_hop_limit);
 	}
 
 skip_defrtr:
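
The new sysctl turns RA hop-limit handling from "only ever raise the interface value" into a threshold rule: a nonzero advertised hop limit is applied iff it is at least accept_ra_min_hop_limit, and since hop limits are 8-bit, setting the sysctl to 256 or more disables RA hop-limit processing entirely. The default of 1 accepts any nonzero advertisement, including ones that lower the current value. Distilled as a predicate (helper name illustrative):

static bool example_apply_ra_hop_limit(int min_hop_limit, u8 ra_hop_limit)
{
	/* >= 256 disables processing; 0 in the RA means "unspecified" */
	return min_hop_limit < 256 && ra_hop_limit &&
	       min_hop_limit <= ra_hop_limit;
}
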
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7f2214f..54fccf0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -545,6 +545,7 @@
 
 static void rt6_probe(struct rt6_info *rt)
 {
+	struct __rt6_probe_work *work;
 	struct neighbour *neigh;
 	/*
 	 * Okay, this does not seem to be appropriate
@@ -559,34 +560,33 @@
 	rcu_read_lock_bh();
 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
 	if (neigh) {
-		write_lock(&neigh->lock);
 		if (neigh->nud_state & NUD_VALID)
 			goto out;
-	}
 
-	if (!neigh ||
-	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-		struct __rt6_probe_work *work;
-
-		work = kmalloc(sizeof(*work), GFP_ATOMIC);
-
-		if (neigh && work)
-			__neigh_set_probe_once(neigh);
-
-		if (neigh)
-			write_unlock(&neigh->lock);
-
-		if (work) {
-			INIT_WORK(&work->work, rt6_probe_deferred);
-			work->target = rt->rt6i_gateway;
-			dev_hold(rt->dst.dev);
-			work->dev = rt->dst.dev;
-			schedule_work(&work->work);
+		work = NULL;
+		write_lock(&neigh->lock);
+		if (!(neigh->nud_state & NUD_VALID) &&
+		    time_after(jiffies,
+			       neigh->updated +
+			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
+			work = kmalloc(sizeof(*work), GFP_ATOMIC);
+			if (work)
+				__neigh_set_probe_once(neigh);
 		}
-	} else {
-out:
 		write_unlock(&neigh->lock);
+	} else {
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
 	}
+
+	if (work) {
+		INIT_WORK(&work->work, rt6_probe_deferred);
+		work->target = rt->rt6i_gateway;
+		dev_hold(rt->dst.dev);
+		work->dev = rt->dst.dev;
+		schedule_work(&work->work);
+	}
+
+out:
 	rcu_read_unlock_bh();
 }
 #else
@@ -1778,9 +1778,9 @@
 					   cfg->fc_encap, &lwtstate);
 		if (err)
 			goto out;
-		lwtunnel_state_get(lwtstate);
-		rt->rt6i_lwtstate = lwtstate;
-		rt->dst.output = lwtunnel_output6;
+		rt->rt6i_lwtstate = lwtstate_get(lwtstate);
+		if (lwtunnel_output_redirect(rt->rt6i_lwtstate))
+			rt->dst.output = lwtunnel_output6;
 	}
 
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
@@ -2160,6 +2160,7 @@
 #endif
 	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
 	rt->rt6i_table = ort->rt6i_table;
+	rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d540846..52dd0d9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -276,7 +276,7 @@
 	if (err)
 		goto late_failure;
 
-	ip6_set_txhash(sk);
+	sk_set_txhash(sk);
 
 	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
@@ -1090,7 +1090,7 @@
 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
 	newsk->sk_bound_dev_if = ireq->ir_iif;
 
-	ip6_set_txhash(newsk);
+	sk_set_txhash(newsk);
 
 	/* Now IPv6 options...
 
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 1119f46..1584040 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -44,6 +44,18 @@
 
 	  If unsure, say Y.
 
+config OPENVSWITCH_VXLAN
+	tristate "Open vSwitch VXLAN tunneling support"
+	depends on OPENVSWITCH
+	depends on VXLAN
+	default OPENVSWITCH
+	---help---
+	  If you say Y here, then Open vSwitch will be able to create VXLAN vports.
+
+	  Say N to exclude this support and reduce the binary size.
+
+	  If unsure, say Y.
+
 config OPENVSWITCH_GENEVE
 	tristate "Open vSwitch Geneve tunneling support"
 	depends on OPENVSWITCH
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 38e0e14..6e1701d 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,5 +15,6 @@
 	vport-internal_dev.o \
 	vport-netdev.o
 
+obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
 obj-$(CONFIG_OPENVSWITCH_GRE)	+= vport-gre.o
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 68d0582f..cddb706 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -26,11 +26,10 @@
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/openvswitch.h>
+#include <linux/export.h>
 
-#include <net/udp.h>
 #include <net/ip_tunnels.h>
 #include <net/rtnetlink.h>
-#include <net/vxlan.h>
 
 #include "datapath.h"
 #include "vport.h"
@@ -58,7 +57,7 @@
 	skb_push(skb, ETH_HLEN);
 	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
 
-	ovs_vport_receive(vport, skb, NULL);
+	ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
 	return;
 
 error:
@@ -90,7 +89,7 @@
 	return local->dev;
 }
 
-static struct vport *netdev_link(struct vport *vport, const char *name)
+struct vport *ovs_netdev_link(struct vport *vport, const char *name)
 {
 	int err;
 
@@ -135,6 +134,7 @@
 	ovs_vport_free(vport);
 	return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_link);
 
 static struct vport *netdev_create(const struct vport_parms *parms)
 {
@@ -144,10 +144,10 @@
 	if (IS_ERR(vport))
 		return vport;
 
-	return netdev_link(vport, parms->name);
+	return ovs_netdev_link(vport, parms->name);
 }
 
-static void free_port_rcu(struct rcu_head *rcu)
+void ovs_vport_free_rcu(struct rcu_head *rcu)
 {
 	struct vport *vport = container_of(rcu, struct vport, rcu);
 
@@ -155,6 +155,7 @@
 		dev_put(vport->dev);
 	ovs_vport_free(vport);
 }
+EXPORT_SYMBOL_GPL(ovs_vport_free_rcu);
 
 void ovs_netdev_detach_dev(struct vport *vport)
 {
@@ -165,6 +166,7 @@
 				netdev_master_upper_dev_get(vport->dev));
 	dev_set_promiscuity(vport->dev, -1);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
 
 static void netdev_destroy(struct vport *vport)
 {
@@ -173,7 +175,7 @@
 		ovs_netdev_detach_dev(vport);
 	rtnl_unlock();
 
-	call_rcu(&vport->rcu, free_port_rcu);
+	call_rcu(&vport->rcu, ovs_vport_free_rcu);
 }
 
 static unsigned int packet_length(const struct sk_buff *skb)
@@ -186,7 +188,7 @@
 	return length;
 }
 
-static int netdev_send(struct vport *vport, struct sk_buff *skb)
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
 {
 	int mtu = vport->dev->mtu;
 	int len;
@@ -208,6 +210,7 @@
 	kfree_skb(skb);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_send);
 
 /* Returns null if this device is not attached to a datapath. */
 struct vport *ovs_netdev_get_vport(struct net_device *dev)
@@ -223,205 +226,15 @@
 	.type		= OVS_VPORT_TYPE_NETDEV,
 	.create		= netdev_create,
 	.destroy	= netdev_destroy,
-	.send		= netdev_send,
+	.send		= ovs_netdev_send,
 };
 
-/* Compat code for old userspace. */
-#if IS_ENABLED(CONFIG_VXLAN)
-static struct vport_ops ovs_vxlan_netdev_vport_ops;
-
-static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
-{
-	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
-	__be16 dst_port = vxlan->cfg.dst_port;
-
-	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
-		return -EMSGSIZE;
-
-	if (vxlan->flags & VXLAN_F_GBP) {
-		struct nlattr *exts;
-
-		exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
-		if (!exts)
-			return -EMSGSIZE;
-
-		if (vxlan->flags & VXLAN_F_GBP &&
-		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
-			return -EMSGSIZE;
-
-		nla_nest_end(skb, exts);
-	}
-
-	return 0;
-}
-
-static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
-	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_FLAG, },
-};
-
-static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
-				struct vxlan_config *conf)
-{
-	struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
-	int err;
-
-	if (nla_len(attr) < sizeof(struct nlattr))
-		return -EINVAL;
-
-	err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy);
-	if (err < 0)
-		return err;
-
-	if (exts[OVS_VXLAN_EXT_GBP])
-		conf->flags |= VXLAN_F_GBP;
-
-	return 0;
-}
-
-static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
-{
-	struct net *net = ovs_dp_get_net(parms->dp);
-	struct nlattr *options = parms->options;
-	struct net_device *dev;
-	struct vport *vport;
-	struct nlattr *a;
-	int err;
-	struct vxlan_config conf = {
-		.no_share = true,
-		.flags = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA,
-	};
-
-	if (!options) {
-		err = -EINVAL;
-		goto error;
-	}
-
-	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
-	if (a && nla_len(a) == sizeof(u16)) {
-		conf.dst_port = htons(nla_get_u16(a));
-	} else {
-		/* Require destination port from userspace. */
-		err = -EINVAL;
-		goto error;
-	}
-
-	vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
-	if (IS_ERR(vport))
-		return vport;
-
-	a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
-	if (a) {
-		err = vxlan_configure_exts(vport, a, &conf);
-		if (err) {
-			ovs_vport_free(vport);
-			goto error;
-		}
-	}
-
-	rtnl_lock();
-	dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
-	if (IS_ERR(dev)) {
-		rtnl_unlock();
-		ovs_vport_free(vport);
-		return ERR_CAST(dev);
-	}
-
-	dev_change_flags(dev, dev->flags | IFF_UP);
-	rtnl_unlock();
-	return vport;
-error:
-	return ERR_PTR(err);
-}
-
-static struct vport *vxlan_create(const struct vport_parms *parms)
-{
-	struct vport *vport;
-
-	vport = vxlan_tnl_create(parms);
-	if (IS_ERR(vport))
-		return vport;
-
-	return netdev_link(vport, parms->name);
-}
-
-static void vxlan_destroy(struct vport *vport)
-{
-	rtnl_lock();
-	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
-		ovs_netdev_detach_dev(vport);
-
-	/* Early release so we can unregister the device */
-	dev_put(vport->dev);
-	rtnl_delete_link(vport->dev);
-	vport->dev = NULL;
-	rtnl_unlock();
-
-	call_rcu(&vport->rcu, free_port_rcu);
-}
-
-static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				     struct ip_tunnel_info *egress_tun_info)
-{
-	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
-	struct net *net = ovs_dp_get_net(vport->dp);
-	__be16 dst_port = vxlan_dev_dst_port(vxlan);
-	__be16 src_port;
-	int port_min;
-	int port_max;
-
-	inet_get_local_port_range(net, &port_min, &port_max);
-	src_port = udp_flow_src_port(net, skb, 0, 0, true);
-
-	return ovs_tunnel_get_egress_info(egress_tun_info, net,
-					  OVS_CB(skb)->egress_tun_info,
-					  IPPROTO_UDP, skb->mark,
-					  src_port, dst_port);
-}
-
-static struct vport_ops ovs_vxlan_netdev_vport_ops = {
-	.type		= OVS_VPORT_TYPE_VXLAN,
-	.create		= vxlan_create,
-	.destroy	= vxlan_destroy,
-	.get_options	= vxlan_get_options,
-	.send		= netdev_send,
-	.get_egress_tun_info	= vxlan_get_egress_tun_info,
-};
-
-static int vxlan_compat_init(void)
-{
-	return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
-}
-
-static void vxlan_compat_exit(void)
-{
-	ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
-}
-#else
-static int vxlan_compat_init(void)
-{
-	return 0;
-}
-
-static void vxlan_compat_exit(void)
-{
-}
-#endif
-
 int __init ovs_netdev_init(void)
 {
-	int err;
-
-	err = ovs_vport_ops_register(&ovs_netdev_vport_ops);
-	if (err)
-		return err;
-	err = vxlan_compat_init();
-	if (err)
-		vxlan_compat_exit();
-	return err;
+	return ovs_vport_ops_register(&ovs_netdev_vport_ops);
 }
 
 void ovs_netdev_exit(void)
 {
 	ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
-	vxlan_compat_exit();
 }
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index 684fb88..8044126 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -26,7 +26,10 @@
 
 struct vport *ovs_netdev_get_vport(struct net_device *dev);
 
+struct vport *ovs_netdev_link(struct vport *vport, const char *name);
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
+void ovs_vport_free_rcu(struct rcu_head *);
 
 int __init ovs_netdev_init(void);
 void ovs_netdev_exit(void);
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
new file mode 100644
index 0000000..54717333
--- /dev/null
+++ b/net/openvswitch/vport-vxlan.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ * Copyright (c) 2013 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
+#include <linux/module.h>
+#include <net/udp.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
+#include <net/vxlan.h>
+
+#include "datapath.h"
+#include "vport.h"
+#include "vport-netdev.h"
+
+static struct vport_ops ovs_vxlan_netdev_vport_ops;
+
+static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+	__be16 dst_port = vxlan->cfg.dst_port;
+
+	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
+		return -EMSGSIZE;
+
+	if (vxlan->flags & VXLAN_F_GBP) {
+		struct nlattr *exts;
+
+		exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
+		if (!exts)
+			return -EMSGSIZE;
+
+		if (vxlan->flags & VXLAN_F_GBP &&
+		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
+			return -EMSGSIZE;
+
+		nla_nest_end(skb, exts);
+	}
+
+	return 0;
+}
+
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
+	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_FLAG, },
+};
+
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
+				struct vxlan_config *conf)
+{
+	struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
+	int err;
+
+	if (nla_len(attr) < sizeof(struct nlattr))
+		return -EINVAL;
+
+	err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy);
+	if (err < 0)
+		return err;
+
+	if (exts[OVS_VXLAN_EXT_GBP])
+		conf->flags |= VXLAN_F_GBP;
+
+	return 0;
+}
+
+static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+{
+	struct net *net = ovs_dp_get_net(parms->dp);
+	struct nlattr *options = parms->options;
+	struct net_device *dev;
+	struct vport *vport;
+	struct nlattr *a;
+	int err;
+	struct vxlan_config conf = {
+		.no_share = true,
+		.flags = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA,
+	};
+
+	if (!options) {
+		err = -EINVAL;
+		goto error;
+	}
+
+	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
+	if (a && nla_len(a) == sizeof(u16)) {
+		conf.dst_port = htons(nla_get_u16(a));
+	} else {
+		/* Require destination port from userspace. */
+		err = -EINVAL;
+		goto error;
+	}
+
+	vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
+
+	a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
+	if (a) {
+		err = vxlan_configure_exts(vport, a, &conf);
+		if (err) {
+			ovs_vport_free(vport);
+			goto error;
+		}
+	}
+
+	rtnl_lock();
+	dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
+	if (IS_ERR(dev)) {
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		return ERR_CAST(dev);
+	}
+
+	dev_change_flags(dev, dev->flags | IFF_UP);
+	rtnl_unlock();
+	return vport;
+error:
+	return ERR_PTR(err);
+}
+
+static struct vport *vxlan_create(const struct vport_parms *parms)
+{
+	struct vport *vport;
+
+	vport = vxlan_tnl_create(parms);
+	if (IS_ERR(vport))
+		return vport;
+
+	return ovs_netdev_link(vport, parms->name);
+}
+
+static void vxlan_destroy(struct vport *vport)
+{
+	rtnl_lock();
+	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+		ovs_netdev_detach_dev(vport);
+
+	/* Early release so we can unregister the device */
+	dev_put(vport->dev);
+	rtnl_delete_link(vport->dev);
+	vport->dev = NULL;
+	rtnl_unlock();
+
+	call_rcu(&vport->rcu, ovs_vport_free_rcu);
+}
+
+static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+				     struct ip_tunnel_info *egress_tun_info)
+{
+	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+	struct net *net = ovs_dp_get_net(vport->dp);
+	__be16 dst_port = vxlan_dev_dst_port(vxlan);
+	__be16 src_port;
+	int port_min;
+	int port_max;
+
+	inet_get_local_port_range(net, &port_min, &port_max);
+	src_port = udp_flow_src_port(net, skb, 0, 0, true);
+
+	return ovs_tunnel_get_egress_info(egress_tun_info, net,
+					  OVS_CB(skb)->egress_tun_info,
+					  IPPROTO_UDP, skb->mark,
+					  src_port, dst_port);
+}
+
+static struct vport_ops ovs_vxlan_netdev_vport_ops = {
+	.type			= OVS_VPORT_TYPE_VXLAN,
+	.create			= vxlan_create,
+	.destroy		= vxlan_destroy,
+	.get_options		= vxlan_get_options,
+	.send			= ovs_netdev_send,
+	.get_egress_tun_info	= vxlan_get_egress_tun_info,
+};
+
+static int __init ovs_vxlan_tnl_init(void)
+{
+	return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
+}
+
+static void __exit ovs_vxlan_tnl_exit(void)
+{
+	ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
+}
+
+module_init(ovs_vxlan_tnl_init);
+module_exit(ovs_vxlan_tnl_exit);
+
+MODULE_DESCRIPTION("OVS: VXLAN switching port");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vport-type-4");
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741..2af8590 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -518,13 +518,11 @@
 }
 
 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
-		int tx_ring,
 		struct sk_buff_head *rb_queue)
 {
 	struct tpacket_kbdq_core *pkc;
 
-	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
-			GET_PBDQC_FROM_RB(&po->rx_ring);
+	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 
 	spin_lock_bh(&rb_queue->lock);
 	pkc->delete_blk_timer = 1;
@@ -4044,7 +4042,7 @@
 	if (closing && (po->tp_version > TPACKET_V2)) {
 		/* Because we don't support block-based V3 on tx-ring */
 		if (!tx_ring)
-			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+			prb_shutdown_retire_blk_timer(po, rb_queue);
 	}
 	release_sock(sk);
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b63d573..cc40aa6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -294,11 +294,14 @@
 {
 	struct sk_buff *skb;
 	struct sk_buff_head list;
+	u16 last_sent;
 
 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
 			      0, l->addr, link_own_addr(l), 0, 0, 0);
 	if (!skb)
 		return;
+	last_sent = tipc_bclink_get_last_sent(l->owner->net);
+	msg_set_last_bcast(buf_msg(skb), last_sent);
 	__skb_queue_head_init(&list);
 	__skb_queue_tail(&list, skb);
 	tipc_link_xmit(l, &list, xmitq);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 08b4cc7..562c926 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -463,60 +463,72 @@
 
 /**
  * tipc_msg_reverse(): swap source and destination addresses and add error code
- * @buf:  buffer containing message to be reversed
- * @dnode: return value: node where to send message after reversal
- * @err:  error code to be set in message
- * Consumes buffer if failure
+ * @own_node: originating node id for reversed message
+ * @skb:  buffer containing message to be reversed; may be replaced.
+ * @err:  error code to be set in message, if any
+ * Consumes buffer on failure
  * Returns true if success, otherwise false
  */
-bool tipc_msg_reverse(u32 own_addr,  struct sk_buff *buf, u32 *dnode,
-		      int err)
+bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 {
-	struct tipc_msg *msg = buf_msg(buf);
+	struct sk_buff *_skb = *skb;
+	struct tipc_msg *hdr = buf_msg(_skb);
 	struct tipc_msg ohdr;
-	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+	int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
 
-	if (skb_linearize(buf))
+	if (skb_linearize(_skb))
 		goto exit;
-	msg = buf_msg(buf);
-	if (msg_dest_droppable(msg))
+	hdr = buf_msg(_skb);
+	if (msg_dest_droppable(hdr))
 		goto exit;
-	if (msg_errcode(msg))
+	if (msg_errcode(hdr))
 		goto exit;
-	memcpy(&ohdr, msg, msg_hdr_sz(msg));
-	msg_set_errcode(msg, err);
-	msg_set_origport(msg, msg_destport(&ohdr));
-	msg_set_destport(msg, msg_origport(&ohdr));
-	msg_set_prevnode(msg, own_addr);
-	if (!msg_short(msg)) {
-		msg_set_orignode(msg, msg_destnode(&ohdr));
-		msg_set_destnode(msg, msg_orignode(&ohdr));
+
+	/* Take a copy of original header before altering message */
+	memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
+
+	/* Never return SHORT header; expand by replacing buffer if necessary */
+	if (msg_short(hdr)) {
+		*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+		if (!*skb)
+			goto exit;
+		memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
+		kfree_skb(_skb);
+		_skb = *skb;
+		hdr = buf_msg(_skb);
+		memcpy(hdr, &ohdr, BASIC_H_SIZE);
+		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
 	}
-	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
-	skb_trim(buf, msg_size(msg));
-	skb_orphan(buf);
-	*dnode = msg_orignode(&ohdr);
+
+	/* Now reverse the concerned fields */
+	msg_set_errcode(hdr, err);
+	msg_set_origport(hdr, msg_destport(&ohdr));
+	msg_set_destport(hdr, msg_origport(&ohdr));
+	msg_set_destnode(hdr, msg_prevnode(&ohdr));
+	msg_set_prevnode(hdr, own_node);
+	msg_set_orignode(hdr, own_node);
+	msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
+	skb_trim(_skb, msg_size(hdr));
+	skb_orphan(_skb);
 	return true;
 exit:
-	kfree_skb(buf);
-	*dnode = 0;
+	kfree_skb(_skb);
+	*skb = NULL;
 	return false;
 }
 
 /**
  * tipc_msg_lookup_dest(): try to find new destination for named message
  * @skb: the buffer containing the message.
- * @dnode: return value: next-hop node, if destination found
- * @err: return value: error code to use, if message to be rejected
+ * @err: error code to be used by caller if lookup fails
  * Does not consume buffer
  * Returns true if a destination is found, false otherwise
  */
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
-			  u32 *dnode, int *err)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 {
 	struct tipc_msg *msg = buf_msg(skb);
-	u32 dport;
-	u32 own_addr = tipc_own_addr(net);
+	u32 dport, dnode;
+	u32 onode = tipc_own_addr(net);
 
 	if (!msg_isdata(msg))
 		return false;
@@ -529,15 +541,15 @@
 		return false;
 	if (msg_reroute_cnt(msg))
 		return false;
-	*dnode = addr_domain(net, msg_lookup_scope(msg));
+	dnode = addr_domain(net, msg_lookup_scope(msg));
 	dport = tipc_nametbl_translate(net, msg_nametype(msg),
-				       msg_nameinst(msg), dnode);
+				       msg_nameinst(msg), &dnode);
 	if (!dport)
 		return false;
 	msg_incr_reroute_cnt(msg);
-	if (*dnode != own_addr)
-		msg_set_prevnode(msg, own_addr);
-	msg_set_destnode(msg, *dnode);
+	if (dnode != onode)
+		msg_set_prevnode(msg, onode);
+	msg_set_destnode(msg, dnode);
 	msg_set_destport(msg, dport);
 	*err = TIPC_OK;
 	return true;
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 2f1563b..234fb05 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -785,8 +785,7 @@
 
 struct sk_buff *tipc_buf_acquire(u32 size);
 bool tipc_msg_validate(struct sk_buff *skb);
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
-		      int err);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
 		   u32 hsize, u32 destnode);
 struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
@@ -799,8 +798,7 @@
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		   int offset, int dsz, int mtu, struct sk_buff_head *list);
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
-			  int *err);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 5b0b08d..1060d52 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -248,6 +248,22 @@
 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 }
 
+/* tipc_sk_respond() : send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+	u32 selector;
+	u32 dnode;
+	u32 onode = tipc_own_addr(sock_net(sk));
+
+	if (!tipc_msg_reverse(onode, &skb, err))
+		return;
+
+	dnode = msg_destnode(buf_msg(skb));
+	selector = msg_origport(buf_msg(skb));
+	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
 /**
  * tsk_rej_rx_queue - reject all buffers in socket receive queue
  *
@@ -256,13 +272,9 @@
 static void tsk_rej_rx_queue(struct sock *sk)
 {
 	struct sk_buff *skb;
-	u32 dnode;
-	u32 own_node = tsk_own_node(tipc_sk(sk));
 
-	while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
-		if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
-			tipc_node_xmit_skb(sock_net(sk), skb, dnode, 0);
-	}
+	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 }
 
 /* tsk_peer_msg - verify if message was sent by connected port's peer
@@ -441,9 +453,7 @@
 				tsk->connected = 0;
 				tipc_node_remove_conn(net, dnode, tsk->portid);
 			}
-			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-					     TIPC_ERR_NO_PORT))
-				tipc_node_xmit_skb(net, skb, dnode, 0);
+			tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 		}
 	}
 
@@ -764,35 +774,35 @@
 /**
  * tipc_sk_proto_rcv - receive a connection mng protocol message
  * @tsk: receiving socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
+ * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 {
-	struct tipc_msg *msg = buf_msg(*skb);
+	struct sock *sk = &tsk->sk;
+	struct tipc_msg *hdr = buf_msg(skb);
+	int mtyp = msg_type(hdr);
 	int conn_cong;
-	u32 dnode;
-	u32 own_node = tsk_own_node(tsk);
+
 	/* Ignore if connection cannot be validated: */
-	if (!tsk_peer_msg(tsk, msg))
+	if (!tsk_peer_msg(tsk, hdr))
 		goto exit;
 
 	tsk->probing_state = TIPC_CONN_OK;
 
-	if (msg_type(msg) == CONN_ACK) {
+	if (mtyp == CONN_PROBE) {
+		msg_set_type(hdr, CONN_PROBE_REPLY);
+		tipc_sk_respond(sk, skb, TIPC_OK);
+		return;
+	} else if (mtyp == CONN_ACK) {
 		conn_cong = tsk_conn_cong(tsk);
-		tsk->sent_unacked -= msg_msgcnt(msg);
+		tsk->sent_unacked -= msg_msgcnt(hdr);
 		if (conn_cong)
-			tsk->sk.sk_write_space(&tsk->sk);
-	} else if (msg_type(msg) == CONN_PROBE) {
-		if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
-			msg_set_type(msg, CONN_PROBE_REPLY);
-			return;
-		}
+			sk->sk_write_space(sk);
+	} else if (mtyp != CONN_PROBE_REPLY) {
+		pr_warn("Received unknown CONN_PROTO msg\n");
 	}
-	/* Do nothing if msg_type() == CONN_PROBE_REPLY */
 exit:
-	kfree_skb(*skb);
-	*skb = NULL;
+	kfree_skb(skb);
 }
 
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -1510,82 +1520,81 @@
  * @tsk: TIPC socket
  * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
- * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns true if everything ok, false otherwise
  */
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
+static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 {
 	struct sock *sk = &tsk->sk;
 	struct net *net = sock_net(sk);
 	struct socket *sock = sk->sk_socket;
-	struct tipc_msg *msg = buf_msg(*skb);
-	int retval = -TIPC_ERR_NO_PORT;
+	struct tipc_msg *hdr = buf_msg(skb);
 
-	if (msg_mcast(msg))
-		return retval;
+	if (unlikely(msg_mcast(hdr)))
+		return false;
 
 	switch ((int)sock->state) {
 	case SS_CONNECTED:
+
 		/* Accept only connection-based messages sent by peer */
-		if (tsk_peer_msg(tsk, msg)) {
-			if (unlikely(msg_errcode(msg))) {
-				sock->state = SS_DISCONNECTING;
-				tsk->connected = 0;
-				/* let timer expire on it's own */
-				tipc_node_remove_conn(net, tsk_peer_node(tsk),
-						      tsk->portid);
-			}
-			retval = TIPC_OK;
+		if (unlikely(!tsk_peer_msg(tsk, hdr)))
+			return false;
+
+		if (unlikely(msg_errcode(hdr))) {
+			sock->state = SS_DISCONNECTING;
+			tsk->connected = 0;
+			/* Let timer expire on its own */
+			tipc_node_remove_conn(net, tsk_peer_node(tsk),
+					      tsk->portid);
 		}
-		break;
+		return true;
+
 	case SS_CONNECTING:
+
 		/* Accept only ACK or NACK message */
+		if (unlikely(!msg_connected(hdr)))
+			return false;
 
-		if (unlikely(!msg_connected(msg)))
-			break;
-
-		if (unlikely(msg_errcode(msg))) {
+		if (unlikely(msg_errcode(hdr))) {
 			sock->state = SS_DISCONNECTING;
 			sk->sk_err = ECONNREFUSED;
-			retval = TIPC_OK;
-			break;
+			return true;
 		}
 
-		if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
+		if (unlikely(!msg_isdata(hdr))) {
 			sock->state = SS_DISCONNECTING;
 			sk->sk_err = EINVAL;
-			retval = TIPC_OK;
-			break;
+			return true;
 		}
 
-		tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
-		msg_set_importance(&tsk->phdr, msg_importance(msg));
+		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
+		msg_set_importance(&tsk->phdr, msg_importance(hdr));
 		sock->state = SS_CONNECTED;
 
-		/* If an incoming message is an 'ACK-', it should be
-		 * discarded here because it doesn't contain useful
-		 * data. In addition, we should try to wake up
-		 * connect() routine if sleeping.
-		 */
-		if (msg_data_sz(msg) == 0) {
-			kfree_skb(*skb);
-			*skb = NULL;
-			if (waitqueue_active(sk_sleep(sk)))
-				wake_up_interruptible(sk_sleep(sk));
-		}
-		retval = TIPC_OK;
-		break;
+		/* If 'ACK+' message, add to socket receive queue */
+		if (msg_data_sz(hdr))
+			return true;
+
+		/* If empty 'ACK-' message, wake up sleeping connect() */
+		if (waitqueue_active(sk_sleep(sk)))
+			wake_up_interruptible(sk_sleep(sk));
+
+		/* 'ACK-' message is neither accepted nor rejected: */
+		msg_set_dest_droppable(hdr, 1);
+		return false;
+
 	case SS_LISTENING:
 	case SS_UNCONNECTED:
+
 		/* Accept only SYN message */
-		if (!msg_connected(msg) && !(msg_errcode(msg)))
-			retval = TIPC_OK;
+		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+			return true;
 		break;
 	case SS_DISCONNECTING:
 		break;
 	default:
 		pr_err("Unknown socket state %u\n", sock->state);
 	}
-	return retval;
+	return false;
 }
 
 /**
@@ -1620,61 +1629,70 @@
 /**
  * filter_rcv - validate incoming message
  * @sk: socket
- * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @skb: pointer to message.
  *
  * Enqueues message on receive queue if acceptable; optionally handles
  * disconnect indication for a connected socket.
  *
  * Called with socket lock already taken
  *
- * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
+ * Returns true if message was added to socket receive queue, otherwise false
  */
-static int filter_rcv(struct sock *sk, struct sk_buff **skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct socket *sock = sk->sk_socket;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_msg *msg = buf_msg(*skb);
-	unsigned int limit = rcvbuf_limit(sk, *skb);
-	int rc = TIPC_OK;
+	struct tipc_msg *hdr = buf_msg(skb);
+	unsigned int limit = rcvbuf_limit(sk, skb);
+	int err = TIPC_OK;
+	int usr = msg_user(hdr);
 
-	if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
 		tipc_sk_proto_rcv(tsk, skb);
-		return TIPC_OK;
+		return false;
 	}
 
-	if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
-		kfree_skb(*skb);
+	if (unlikely(usr == SOCK_WAKEUP)) {
+		kfree_skb(skb);
 		tsk->link_cong = 0;
 		sk->sk_write_space(sk);
-		*skb = NULL;
-		return TIPC_OK;
+		return false;
 	}
 
-	/* Reject message if it is wrong sort of message for socket */
-	if (msg_type(msg) > TIPC_DIRECT_MSG)
-		return -TIPC_ERR_NO_PORT;
+	/* Drop if illegal message type */
+	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
+		kfree_skb(skb);
+		return false;
+	}
 
-	if (sock->state == SS_READY) {
-		if (msg_connected(msg))
-			return -TIPC_ERR_NO_PORT;
-	} else {
-		rc = filter_connect(tsk, skb);
-		if (rc != TIPC_OK || !*skb)
-			return rc;
+	/* Reject if wrong message type for current socket state */
+	if (unlikely(sock->state == SS_READY)) {
+		if (msg_connected(hdr)) {
+			err = TIPC_ERR_NO_PORT;
+			goto reject;
+		}
+	} else if (unlikely(!filter_connect(tsk, skb))) {
+		err = TIPC_ERR_NO_PORT;
+		goto reject;
 	}
 
 	/* Reject message if there isn't room to queue it */
-	if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
-		return -TIPC_ERR_OVERLOAD;
+	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
+		err = TIPC_ERR_OVERLOAD;
+		goto reject;
+	}
 
 	/* Enqueue message */
-	TIPC_SKB_CB(*skb)->handle = NULL;
-	__skb_queue_tail(&sk->sk_receive_queue, *skb);
-	skb_set_owner_r(*skb, sk);
+	TIPC_SKB_CB(skb)->handle = NULL;
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	skb_set_owner_r(skb, sk);
 
 	sk->sk_data_ready(sk);
-	*skb = NULL;
-	return TIPC_OK;
+	return true;
+
+reject:
+	tipc_sk_respond(sk, skb, err);
+	return false;
 }
 
 /**
@@ -1688,22 +1706,10 @@
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-	int err;
-	atomic_t *dcnt;
-	u32 dnode;
-	struct tipc_sock *tsk = tipc_sk(sk);
-	struct net *net = sock_net(sk);
-	uint truesize = skb->truesize;
+	unsigned int truesize = skb->truesize;
 
-	err = filter_rcv(sk, &skb);
-	if (likely(!skb)) {
-		dcnt = &tsk->dupl_rcvcnt;
-		if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-			atomic_add(truesize, dcnt);
-		return 0;
-	}
-	if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
-		tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+	if (likely(filter_rcv(sk, skb)))
+		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
 	return 0;
 }
 
@@ -1713,45 +1719,43 @@
  * @inputq: list of incoming buffers with potentially different destinations
  * @sk: socket where the buffers should be enqueued
  * @dport: port number for the socket
- * @_skb: returned buffer to be forwarded or rejected, if applicable
  *
  * Caller must hold socket lock
- *
- * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
- * or -TIPC_ERR_NO_PORT
  */
-static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-			   u32 dport, struct sk_buff **_skb)
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+			    u32 dport)
 {
 	unsigned int lim;
 	atomic_t *dcnt;
-	int err;
 	struct sk_buff *skb;
 	unsigned long time_limit = jiffies + 2;
 
 	while (skb_queue_len(inputq)) {
 		if (unlikely(time_after_eq(jiffies, time_limit)))
-			return TIPC_OK;
+			return;
+
 		skb = tipc_skb_dequeue(inputq, dport);
 		if (unlikely(!skb))
-			return TIPC_OK;
+			return;
+
+		/* Add message directly to receive queue if possible */
 		if (!sock_owned_by_user(sk)) {
-			err = filter_rcv(sk, &skb);
-			if (likely(!skb))
-				continue;
-			*_skb = skb;
-			return err;
+			filter_rcv(sk, skb);
+			continue;
 		}
+
+		/* Try backlog, compensating for double-counted bytes */
 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
 		if (sk->sk_backlog.len)
 			atomic_set(dcnt, 0);
 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
 		if (likely(!sk_add_backlog(sk, skb, lim)))
 			continue;
-		*_skb = skb;
-		return -TIPC_ERR_OVERLOAD;
+
+		/* Overload => reject message back to sender */
+		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+		break;
 	}
-	return TIPC_OK;
 }
 
 /**
@@ -1759,49 +1763,46 @@
  * @inputq: buffer list containing the buffers
  * Consumes all buffers in list until inputq is empty
  * Note: may be called in multiple threads referring to the same queue
- * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
- * Only node local calls check the return value, sending single-buffer queues
  */
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
 	u32 dnode, dport = 0;
 	int err;
-	struct sk_buff *skb;
 	struct tipc_sock *tsk;
-	struct tipc_net *tn;
 	struct sock *sk;
+	struct sk_buff *skb;
 
 	while (skb_queue_len(inputq)) {
-		err = -TIPC_ERR_NO_PORT;
-		skb = NULL;
 		dport = tipc_skb_peek_port(inputq, dport);
 		tsk = tipc_sk_lookup(net, dport);
+
 		if (likely(tsk)) {
 			sk = &tsk->sk;
 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-				err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+				tipc_sk_enqueue(inputq, sk, dport);
 				spin_unlock_bh(&sk->sk_lock.slock);
-				dport = 0;
 			}
 			sock_put(sk);
-		} else {
-			skb = tipc_skb_dequeue(inputq, dport);
-		}
-		if (likely(!skb))
 			continue;
-		if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
-			goto xmit;
-		if (!err) {
-			dnode = msg_destnode(buf_msg(skb));
-			goto xmit;
 		}
-		tn = net_generic(net, tipc_net_id);
-		if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+
+		/* No destination socket => dequeue skb if still there */
+		skb = tipc_skb_dequeue(inputq, dport);
+		if (!skb)
+			return;
+
+		/* Try secondary lookup if unresolved named message */
+		err = TIPC_ERR_NO_PORT;
+		if (tipc_msg_lookup_dest(net, skb, &err))
+			goto xmit;
+
+		/* Prepare for message rejection */
+		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
 			continue;
 xmit:
+		dnode = msg_destnode(buf_msg(skb));
 		tipc_node_xmit_skb(net, skb, dnode, dport);
 	}
-	return err ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2070,7 +2071,10 @@
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct sk_buff *skb;
-	u32 dnode;
+	u32 dnode = tsk_peer_node(tsk);
+	u32 dport = tsk_peer_port(tsk);
+	u32 onode = tipc_own_addr(net);
+	u32 oport = tsk->portid;
 	int res;
 
 	if (how != SHUT_RDWR)
@@ -2083,6 +2087,8 @@
 	case SS_CONNECTED:
 
 restart:
+		dnode = tsk_peer_node(tsk);
+
 		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 		skb = __skb_dequeue(&sk->sk_receive_queue);
 		if (skb) {
@@ -2090,18 +2096,12 @@
 				kfree_skb(skb);
 				goto restart;
 			}
-			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-					     TIPC_CONN_SHUTDOWN))
-				tipc_node_xmit_skb(net, skb, dnode,
-						   tsk->portid);
+			tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
 		} else {
-			dnode = tsk_peer_node(tsk);
-
 			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
 					      TIPC_CONN_MSG, SHORT_H_SIZE,
-					      0, dnode, tsk_own_node(tsk),
-					      tsk_peer_port(tsk),
-					      tsk->portid, TIPC_CONN_SHUTDOWN);
+					      0, dnode, onode, dport, oport,
+					      TIPC_CONN_SHUTDOWN);
 			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 		}
 		tsk->connected = 0;
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index bf65513..4241f22 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,7 +44,7 @@
 				  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 		       struct sk_buff_head *inputq);
 void tipc_sk_reinit(struct net *net);
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 6936059..ee0f110 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -822,6 +822,65 @@
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"PTR_TO_STACK store/load",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"PTR_TO_STACK store/load - bad alignment on off",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned access off -6 size 8",
+	},
+	{
+		"PTR_TO_STACK store/load - bad alignment on reg",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned access off -2 size 8",
+	},
+	{
+		"PTR_TO_STACK store/load - out of bounds low",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off=-79992 size=8",
+	},
+	{
+		"PTR_TO_STACK store/load - out of bounds high",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off=0 size=8",
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)