Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml
new file mode 100644
index 0000000..9fbeb62
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt7622-pcie-mirror.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek PCIE Mirror Controller for MT7622
+
+maintainers:
+  - Lorenzo Bianconi <lorenzo@kernel.org>
+  - Felix Fietkau <nbd@nbd.name>
+
+description:
+  The MediaTek PCIE mirror provides a configuration interface for the PCIE
+  controller on the MT7622 SoC.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - mediatek,mt7622-pcie-mirror
+      - const: syscon
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    soc {
+      #address-cells = <2>;
+      #size-cells = <2>;
+      pcie_mirror: pcie-mirror@10000400 {
+        compatible = "mediatek,mt7622-pcie-mirror", "syscon";
+        reg = <0 0x10000400 0 0x10>;
+      };
+    };
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
new file mode 100644
index 0000000..787d667
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt7622-wed.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek Wireless Ethernet Dispatch Controller for MT7622
+
+maintainers:
+  - Lorenzo Bianconi <lorenzo@kernel.org>
+  - Felix Fietkau <nbd@nbd.name>
+
+description:
+  The MediaTek Wireless Ethernet Dispatch controller can be configured to
+  intercept and handle access to the WLAN DMA queues and PCIe interrupts
+  and implement hardware flow offloading from Ethernet to WLAN.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - mediatek,mt7622-wed
+      - const: syscon
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    soc {
+      #address-cells = <2>;
+      #size-cells = <2>;
+      wed0: wed@1020a000 {
+        compatible = "mediatek,mt7622-wed","syscon";
+        reg = <0 0x1020a000 0 0x1000>;
+        interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+      };
+    };
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 72d03e0..f18d701 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -41,6 +41,16 @@
 - mediatek,pctl: phandle to the syscon node that handles the ports slew rate
 	and driver current: only for MT2701 and MT7623 SoC
 
+Optional properties:
+- dma-coherent: present if DMA operations are coherent
+- mediatek,cci-control: phandle to the cache coherent interconnect node
+- mediatek,hifsys: phandle to the MediaTek hifsys controller used to provide
+	various clocks and resets to the system.
+- mediatek,wed: a list of phandles to the wireless ethernet dispatch nodes of
+	the MT7622 SoC.
+- mediatek,pcie-mirror: phandle to the MediaTek pcie-mirror controller of the
+	MT7622 SoC.
+
 * Ethernet MAC node
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/net/mscc,miim.yaml b/Documentation/devicetree/bindings/net/mscc,miim.yaml
new file mode 100644
index 0000000..2c451cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mscc,miim.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/mscc,miim.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microsemi MII Management Controller (MIIM)
+
+maintainers:
+  - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+  - $ref: "mdio.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - mscc,ocelot-miim
+      - microchip,lan966x-miim
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  reg:
+    items:
+      - description: base address
+      - description: associated reset register for internal PHYs
+    minItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-frequency: true
+
+required:
+  - compatible
+  - reg
+  - "#address-cells"
+  - "#size-cells"
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    mdio@107009c {
+      compatible = "mscc,ocelot-miim";
+      reg = <0x107009c 0x36>, <0x10700f0 0x8>;
+      interrupts = <14>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      phy0: ethernet-phy@0 {
+        reg = <0>;
+      };
+    };
diff --git a/Documentation/devicetree/bindings/net/mscc-miim.txt b/Documentation/devicetree/bindings/net/mscc-miim.txt
deleted file mode 100644
index 70e0cb1..0000000
--- a/Documentation/devicetree/bindings/net/mscc-miim.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Microsemi MII Management Controller (MIIM) / MDIO
-=================================================
-
-Properties:
-- compatible: must be "mscc,ocelot-miim" or "microchip,lan966x-miim"
-- reg: The base address of the MDIO bus controller register bank. Optionally, a
-  second register bank can be defined if there is an associated reset register
-  for internal PHYs
-- #address-cells: Must be <1>.
-- #size-cells: Must be <0>.  MDIO addresses have no size component.
-- interrupts: interrupt specifier (refer to the interrupt binding)
-
-Typically an MDIO bus might have several children.
-
-Example:
-	mdio@107009c {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		compatible = "mscc,ocelot-miim";
-		reg = <0x107009c 0x36>, <0x10700f0 0x8>;
-		interrupts = <14>;
-
-		phy0: ethernet-phy@0 {
-			reg = <0>;
-		};
-	};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 6f8cb3a..47d223e 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -357,7 +357,7 @@ cci_control1: slave-if@4000 {
 		};
 
 		cci_control2: slave-if@5000 {
-			compatible = "arm,cci-400-ctrl-if";
+			compatible = "arm,cci-400-ctrl-if", "syscon";
 			interface-type = "ace";
 			reg = <0x5000 0x1000>;
 		};
@@ -901,6 +901,11 @@ sata_port: sata-phy@1a243000 {
 		};
 	};
 
+	hifsys: syscon@1af00000 {
+		compatible = "mediatek,mt7622-hifsys", "syscon";
+		reg = <0 0x1af00000 0 0x70>;
+	};
+
 	ethsys: syscon@1b000000 {
 		compatible = "mediatek,mt7622-ethsys",
 			     "syscon";
@@ -919,6 +924,26 @@ hsdma: dma-controller@1b007000 {
 		#dma-cells = <1>;
 	};
 
+	pcie_mirror: pcie-mirror@10000400 {
+		compatible = "mediatek,mt7622-pcie-mirror",
+			     "syscon";
+		reg = <0 0x10000400 0 0x10>;
+	};
+
+	wed0: wed@1020a000 {
+		compatible = "mediatek,mt7622-wed",
+			     "syscon";
+		reg = <0 0x1020a000 0 0x1000>;
+		interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	wed1: wed@1020b000 {
+		compatible = "mediatek,mt7622-wed",
+			     "syscon";
+		reg = <0 0x1020b000 0 0x1000>;
+		interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
+	};
+
 	eth: ethernet@1b100000 {
 		compatible = "mediatek,mt7622-eth",
 			     "mediatek,mt2701-eth",
@@ -945,6 +970,11 @@ eth: ethernet@1b100000 {
 		power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
 		mediatek,ethsys = <&ethsys>;
 		mediatek,sgmiisys = <&sgmiisys>;
+		mediatek,cci-control = <&cci_control2>;
+		mediatek,wed = <&wed0>, <&wed1>;
+		mediatek,pcie-mirror = <&pcie_mirror>;
+		mediatek,hifsys = <&hifsys>;
+		dma-coherent;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 5cb91509..f86838b 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -214,7 +214,6 @@
 CONFIG_ATH5K=y
 CONFIG_ATH5K_DEBUG=y
 CONFIG_WAN=y
-CONFIG_LANMEDIA=m
 CONFIG_HDLC=m
 CONFIG_HDLC_RAW=m
 CONFIG_HDLC_RAW_ETH=m
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 205d3b34..eb0b313 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -363,7 +363,6 @@
 CONFIG_USB_EPSON2888=y
 CONFIG_USB_SIERRA_NET=m
 CONFIG_WAN=y
-CONFIG_LANMEDIA=m
 CONFIG_HDLC=m
 CONFIG_HDLC_RAW=m
 CONFIG_HDLC_RAW_ETH=m
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index a660673..2776ca5 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -121,7 +121,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	if (sk->sk_state == MISDN_CLOSED)
 		return 0;
 
-	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		return err;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5caa75b..881ac33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6218,7 +6218,7 @@
 #define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0			 (0x1<<2)
 #define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR			 (0x1<<12)
 #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY		 (0x1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY		 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY		 (0x1U<<31)
 #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY		 (0x1<<29)
 #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY		 (0x1<<30)
 #define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT			 (0x1<<15)
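
Background on the bnx2x change above: 0x1 is a signed int, so shifting it left
by 31 overflows into the sign bit, which is undefined behaviour in C; the added
U suffix keeps the shift in unsigned arithmetic. A minimal standalone sketch
(hypothetical names, not taken from the driver):

	#include <stdint.h>

	#define PARITY_BIT_UB	(0x1 << 31)	/* shifts into the sign bit of int: UB */
	#define PARITY_BIT_OK	(0x1U << 31)	/* well-defined, equals 0x80000000 */

	static inline uint32_t parity_mask(void)
	{
		return PARITY_BIT_OK;
	}
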
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 47c899c..e562778 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -421,13 +421,6 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
 	rule->re_arg.vtcam_id = ruleset->vtcam_id;
 	rule->re_key.prio = rule->priority;
 
-	/* setup counter */
-	rule->re_arg.count.valid = true;
-	err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index,
-					   &rule->re_arg.count.client);
-	if (err)
-		goto err_rule_add;
-
 	rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
 	err = WARN_ON(rule->re) ? -EEXIST : 0;
 	if (err)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 921959a..c12b09a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -70,6 +70,24 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
 	if (!flow_action_has_entries(flow_action))
 		return 0;
 
+	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+		return -EOPNOTSUPP;
+
+	act = flow_action_first_entry_get(flow_action);
+	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+		/* Nothing to do */
+	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
+		/* setup counter first */
+		rule->re_arg.count.valid = true;
+		err = prestera_acl_chain_to_client(chain_index,
+						   &rule->re_arg.count.client);
+		if (err)
+			return err;
+	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+		return -EOPNOTSUPP;
+	}
+
 	flow_action_for_each(i, act, flow_action) {
 		switch (act->id) {
 		case FLOW_ACTION_ACCEPT:
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 86d356b..da4ec23 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,6 +7,10 @@
 
 if NET_VENDOR_MEDIATEK
 
+config NET_MEDIATEK_SOC_WED
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	def_bool NET_MEDIATEK_SOC != n
+
 config NET_MEDIATEK_SOC
 	tristate "MediaTek SoC Gigabit Ethernet support"
 	depends on NET_DSA || !NET_DSA
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 79d4cdb..45ba097 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,4 +5,9 @@
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
 obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f02d07e..209d00f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -20,9 +21,11 @@
 #include <linux/pinctrl/devinfo.h>
 #include <linux/phylink.h>
 #include <linux/jhash.h>
+#include <linux/bitfield.h>
 #include <net/dsa.h>
 
 #include "mtk_eth_soc.h"
+#include "mtk_wed.h"
 
 static int mtk_msg_level = -1;
 module_param_named(msg_level, mtk_msg_level, int, 0);
@@ -786,7 +789,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 	dma_addr_t dma_addr;
 	int i;
 
-	eth->scratch_ring = dma_alloc_coherent(eth->dev,
+	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
 					       cnt * sizeof(struct mtk_tx_dma),
 					       &eth->phy_scratch_ring,
 					       GFP_ATOMIC);
@@ -798,10 +801,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 	if (unlikely(!eth->scratch_head))
 		return -ENOMEM;
 
-	dma_addr = dma_map_single(eth->dev,
+	dma_addr = dma_map_single(eth->dma_dev,
 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 		return -ENOMEM;
 
 	phy_ring_tail = eth->phy_scratch_ring +
@@ -855,26 +858,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
 {
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					 dma_unmap_addr(tx_buf, dma_addr0),
 					 dma_unmap_len(tx_buf, dma_len0),
 					 DMA_TO_DEVICE);
 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 	} else {
 		if (dma_unmap_len(tx_buf, dma_len0)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 
 		if (dma_unmap_len(tx_buf, dma_len1)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr1),
 				       dma_unmap_len(tx_buf, dma_len1),
 				       DMA_TO_DEVICE);
@@ -952,9 +955,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(eth->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dma_dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -993,10 +996,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 
 			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+			mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
 						       frag_map_size,
 						       DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+			if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
 				goto err_dma;
 
 			if (i == nr_frags - 1 &&
@@ -1237,7 +1240,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		struct net_device *netdev;
 		unsigned int pktlen;
 		dma_addr_t dma_addr;
-		u32 hash;
+		u32 hash, reason;
 		int mac;
 
 		ring = mtk_get_rx_ring(eth);
@@ -1274,18 +1277,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(eth->dev,
+		dma_addr = dma_map_single(eth->dma_dev,
 					  new_data + NET_SKB_PAD +
 					  eth->ip_align,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
 
-		dma_unmap_single(eth->dev, trxd.rxd1,
+		dma_unmap_single(eth->dma_dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 
 		/* receive data */
@@ -1313,6 +1316,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
 		}
 
+		reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+			mtk_ppe_check_skb(eth->ppe, skb,
+					  trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
 		    (trxd.rxd2 & RX_DMA_VTAG))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1558,7 +1566,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 	if (!ring->buf)
 		goto no_tx_mem;
 
-	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+	ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 				       &ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		goto no_tx_mem;
@@ -1576,7 +1584,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 	 * descriptors in ring->dma_pdma.
 	 */
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 						    &ring->phys_pdma,
 						    GFP_ATOMIC);
 		if (!ring->dma_pdma)
@@ -1635,7 +1643,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 	}
 
 	if (ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * sizeof(*ring->dma),
 				  ring->dma,
 				  ring->phys);
@@ -1643,7 +1651,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 	}
 
 	if (ring->dma_pdma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
 				  ring->dma_pdma,
 				  ring->phys_pdma);
@@ -1688,18 +1696,18 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 			return -ENOMEM;
 	}
 
-	ring->dma = dma_alloc_coherent(eth->dev,
+	ring->dma = dma_alloc_coherent(eth->dma_dev,
 				       rx_dma_size * sizeof(*ring->dma),
 				       &ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		return -ENOMEM;
 
 	for (i = 0; i < rx_dma_size; i++) {
-		dma_addr_t dma_addr = dma_map_single(eth->dev,
+		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 			return -ENOMEM;
 		ring->dma[i].rxd1 = (unsigned int)dma_addr;
 
@@ -1735,7 +1743,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 				continue;
 			if (!ring->dma[i].rxd1)
 				continue;
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					 ring->dma[i].rxd1,
 					 ring->buf_size,
 					 DMA_FROM_DEVICE);
@@ -1746,7 +1754,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 	}
 
 	if (ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  ring->dma_size * sizeof(*ring->dma),
 				  ring->dma,
 				  ring->phys);
@@ -2099,7 +2107,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
 	if (eth->scratch_ring) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
 				  eth->scratch_ring,
 				  eth->phy_scratch_ring);
@@ -2267,7 +2275,7 @@ static int mtk_open(struct net_device *dev)
 		if (err)
 			return err;
 
-		if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
+		if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
 			gdm_config = MTK_GDMA_TO_PPE;
 
 		mtk_gdm_config(eth, gdm_config);
@@ -2341,7 +2349,7 @@ static int mtk_stop(struct net_device *dev)
 	mtk_dma_free(eth);
 
 	if (eth->soc->offload_version)
-		mtk_ppe_stop(&eth->ppe);
+		mtk_ppe_stop(eth->ppe);
 
 	return 0;
 }
@@ -2448,6 +2456,8 @@ static void mtk_dim_tx(struct work_struct *work)
 
 static int mtk_hw_init(struct mtk_eth *eth)
 {
+	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+		       ETHSYS_DMA_AG_MAP_PPE;
 	int i, val, ret;
 
 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2460,6 +2470,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
 	if (ret)
 		goto err_disable_pm;
 
+	if (eth->ethsys)
+		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 		ret = device_reset(eth->dev);
 		if (ret) {
@@ -3040,6 +3054,35 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	return err;
 }
 
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(dev_list);
+	int i;
+
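+	/* Temporarily close every running netdev, switch the device used for
+	 * DMA mapping/allocation, then bring the interfaces back up so their
+	 * rings are re-created against the new DMA device.
+	 */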
+	rtnl_lock();
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		dev = eth->netdev[i];
+
+		if (!dev || !(dev->flags & IFF_UP))
+			continue;
+
+		list_add_tail(&dev->close_list, &dev_list);
+	}
+
+	dev_close_many(&dev_list, false);
+
+	eth->dma_dev = dma_dev;
+
+	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+		list_del_init(&dev->close_list);
+		dev_open(dev, NULL);
+	}
+
+	rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 	struct device_node *mac_np;
@@ -3053,6 +3096,7 @@ static int mtk_probe(struct platform_device *pdev)
 	eth->soc = of_device_get_match_data(&pdev->dev);
 
 	eth->dev = &pdev->dev;
+	eth->dma_dev = &pdev->dev;
 	eth->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(eth->base))
 		return PTR_ERR(eth->base);
@@ -3101,6 +3145,16 @@ static int mtk_probe(struct platform_device *pdev)
 		}
 	}
 
+	if (of_dma_is_coherent(pdev->dev.of_node)) {
+		struct regmap *cci;
+
+		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						      "mediatek,cci-control");
+		/* enable CPU/bus coherency */
+		if (!IS_ERR(cci))
+			regmap_write(cci, 0, 3);
+	}
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
 					  GFP_KERNEL);
@@ -3123,6 +3177,22 @@ static int mtk_probe(struct platform_device *pdev)
 		}
 	}
 
+	for (i = 0;; i++) {
+		struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+							  "mediatek,wed", i);
+		static const u32 wdma_regs[] = {
+			MTK_WDMA0_BASE,
+			MTK_WDMA1_BASE
+		};
+		void __iomem *wdma;
+
+		if (!np || i >= ARRAY_SIZE(wdma_regs))
+			break;
+
+		wdma = eth->base + wdma_regs[i];
+		mtk_wed_add_hw(np, eth, wdma, i);
+	}
+
 	for (i = 0; i < 3; i++) {
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
 			eth->irq[i] = eth->irq[0];
@@ -3198,10 +3268,11 @@ static int mtk_probe(struct platform_device *pdev)
 	}
 
 	if (eth->soc->offload_version) {
-		err = mtk_ppe_init(&eth->ppe, eth->dev,
-				   eth->base + MTK_ETH_PPE_BASE, 2);
-		if (err)
+		eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
+		if (!eth->ppe) {
+			err = -ENOMEM;
 			goto err_free_dev;
+		}
 
 		err = mtk_eth_offload_init(eth);
 		if (err)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index c9d42be..c98c7ee 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -295,6 +295,9 @@
 #define MTK_GDM1_TX_GPCNT	0x2438
 #define MTK_STAT_OFFSET		0x40
 
+#define MTK_WDMA0_BASE		0x2800
+#define MTK_WDMA1_BASE		0x2c00
+
 /* QDMA descriptor txd4 */
 #define TX_DMA_CHKSUM		(0x7 << 29)
 #define TX_DMA_TSO		BIT(28)
@@ -465,6 +468,12 @@
 #define RSTCTRL_FE		BIT(6)
 #define RSTCTRL_PPE		BIT(31)
 
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP	0x408
+#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)
+
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
 #define SGMSYS_PCS_CONTROL_1	0x0
@@ -882,6 +891,7 @@ struct mtk_sgmii {
 /* struct mtk_eth -	This is the main data structure for holding the state
  *			of the driver
  * @dev:		The device pointer
+ * @dma_dev:	The device pointer used for DMA mapping/allocation
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
  * @tx_irq__lock:	Make sure that IRQ register operations are atomic
@@ -925,6 +935,7 @@ struct mtk_sgmii {
 
 struct mtk_eth {
 	struct device			*dev;
+	struct device			*dma_dev;
 	void __iomem			*base;
 	spinlock_t			page_lock;
 	spinlock_t			tx_irq_lock;
@@ -974,7 +985,7 @@ struct mtk_eth {
 	u32				rx_dma_l4_valid;
 	int				ip_align;
 
-	struct mtk_ppe			ppe;
+	struct mtk_ppe			*ppe;
 	struct rhashtable		flow_table;
 };
 
@@ -1023,6 +1034,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		     void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 
 
 #endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 3ad10c7..282a1f3 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
 #include <linux/iopoll.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"
 
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
+	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+	.key_len = offsetof(struct mtk_foe_bridge, key_end),
+	.automatic_shrinking = true,
+};
+
 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
 {
 	writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
 	return ppe_m32(ppe, reg, val, 0);
 }
 
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
 {
 	int ret;
@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
 	u32 hash;
 
 	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
-		case MTK_PPE_PKT_TYPE_BRIDGE:
-			hv1 = e->bridge.src_mac_lo;
-			hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
-			hv2 = e->bridge.src_mac_hi >> 16;
-			hv2 ^= e->bridge.dest_mac_lo;
-			hv3 = e->bridge.dest_mac_hi;
-			break;
 		case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
 		case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
 			hv1 = e->ipv4.orig.ports;
@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
 {
 	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return &entry->bridge.l2;
+
 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 		return &entry->ipv6.l2;
 
@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
 {
 	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return &entry->bridge.ib2;
+
 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 		return &entry->ipv6.ib2;
 
@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
 	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
 		entry->ipv6.ports = ports_pad;
 
-	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+		ether_addr_copy(entry->bridge.src_mac, src_mac);
+		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+		entry->bridge.ib2 = val;
+		l2 = &entry->bridge.l2;
+	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
 		entry->ipv6.ib2 = val;
 		l2 = &entry->ipv6.l2;
 	} else {
@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
 	return 0;
 }
 
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+			   int bss, int wcid)
+{
+	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+	u32 *ib2 = mtk_foe_entry_ib2(entry);
+
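+	/* Route the flow to WDMA: flag the entry as carrying WDMA info and
+	 * encode the target WDMA device index, BSS, WCID and TX ring.
+	 */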
+	*ib2 &= ~MTK_FOE_IB2_PORT_MG;
+	*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+	if (wdma_idx)
+		*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+	l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+	return 0;
+}
+
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
 	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }
 
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-			 u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+	int type, len;
+
+	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+		return false;
+
+	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+	else
+		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+	return !memcmp(&entry->data.data, &data->data, len - 4);
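+	/* the data[] union starts right after the 4 byte ib1 field, hence len - 4 */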
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct hlist_head *head;
+	struct hlist_node *tmp;
+
+	if (entry->type == MTK_FLOW_TYPE_L2) {
+		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+				       mtk_flow_l2_ht_params);
+
+		head = &entry->l2_flows;
+		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+			__mtk_foe_entry_clear(ppe, entry);
+		return;
+	}
+
+	hlist_del_init(&entry->list);
+	if (entry->hash != 0xffff) {
+		ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+		ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+							      MTK_FOE_STATE_BIND);
+		dma_wmb();
+	}
+	entry->hash = 0xffff;
+
+	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+		return;
+
+	hlist_del_init(&entry->l2_data.list);
+	kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+	u16 timestamp;
+	u16 now;
+
+	now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+	timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
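+	/* the hardware bind timestamp wraps around, so handle timestamp > now */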
+	if (timestamp > now)
+		return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+	else
+		return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct mtk_flow_entry *cur;
+	struct mtk_foe_entry *hwe;
+	struct hlist_node *tmp;
+	int idle;
+
+	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+		int cur_idle;
+		u32 ib1;
+
+		hwe = &ppe->foe_table[cur->hash];
+		ib1 = READ_ONCE(hwe->ib1);
+
+		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+			cur->hash = 0xffff;
+			__mtk_foe_entry_clear(ppe, cur);
+			continue;
+		}
+
+		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+		if (cur_idle >= idle)
+			continue;
+
+		idle = cur_idle;
+		entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+		entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+	}
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
 	struct mtk_foe_entry *hwe;
-	u32 hash;
+	struct mtk_foe_entry foe;
 
+	spin_lock_bh(&ppe_lock);
+
+	if (entry->type == MTK_FLOW_TYPE_L2) {
+		mtk_flow_entry_update_l2(ppe, entry);
+		goto out;
+	}
+
+	if (entry->hash == 0xffff)
+		goto out;
+
+	hwe = &ppe->foe_table[entry->hash];
+	memcpy(&foe, hwe, sizeof(foe));
+	if (!mtk_flow_entry_match(entry, &foe)) {
+		entry->hash = 0xffff;
+		goto out;
+	}
+
+	entry->data.ib1 = foe.ib1;
+
+out:
+	spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+		       u16 hash)
+{
+	struct mtk_foe_entry *hwe;
+	u16 timestamp;
+
+	timestamp = mtk_eth_timestamp(ppe->eth);
 	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
 	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
 	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
 
-	hash = mtk_ppe_hash_entry(entry);
 	hwe = &ppe->foe_table[hash];
-	if (!mtk_foe_entry_usable(hwe)) {
-		hwe++;
-		hash++;
-
-		if (!mtk_foe_entry_usable(hwe))
-			return -ENOSPC;
-	}
-
 	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
 	wmb();
 	hwe->ib1 = entry->ib1;
@@ -362,32 +519,194 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 	dma_wmb();
 
 	mtk_ppe_cache_clear(ppe);
-
-	return hash;
 }
 
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	spin_lock_bh(&ppe_lock);
+	__mtk_foe_entry_clear(ppe, entry);
+	spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	entry->type = MTK_FLOW_TYPE_L2;
+
+	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+				      mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+	u32 hash;
+
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return mtk_foe_entry_commit_l2(ppe, entry);
+
+	hash = mtk_ppe_hash_entry(&entry->data);
+	entry->hash = 0xffff;
+	spin_lock_bh(&ppe_lock);
+	hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+	spin_unlock_bh(&ppe_lock);
+
+	return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+			     u16 hash)
+{
+	struct mtk_flow_entry *flow_info;
+	struct mtk_foe_entry foe, *hwe;
+	struct mtk_foe_mac_info *l2;
+	u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+	int type;
+
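+	/* track this hardware hash with its own sub-entry linked to the L2 flow */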
+	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+			    GFP_ATOMIC);
+	if (!flow_info)
+		return;
+
+	flow_info->l2_data.base_flow = entry;
+	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+	flow_info->hash = hash;
+	hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+	hwe = &ppe->foe_table[hash];
+	memcpy(&foe, hwe, sizeof(foe));
+	foe.ib1 &= ib1_mask;
+	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+	l2 = mtk_foe_entry_l2(&foe);
+	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+		l2->etype = ETH_P_IPV6;
+
+	*mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+	__mtk_foe_entry_commit(ppe, &foe, hash);
+}
+
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+	struct hlist_head *head = &ppe->foe_flow[hash / 2];
+	struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+	struct mtk_flow_entry *entry;
+	struct mtk_foe_bridge key = {};
+	struct ethhdr *eh;
+	bool found = false;
+	u8 *tag;
+
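+	/* Try to bind a pending software flow to this (still unbound) hardware
+	 * entry; if none of the hashed flows match, fall back to an L2 lookup
+	 * keyed on the packet's MAC addresses and VLAN.
+	 */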
+	spin_lock_bh(&ppe_lock);
+
+	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+		goto out;
+
+	hlist_for_each_entry(entry, head, list) {
+		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+				     MTK_FOE_STATE_BIND))
+				continue;
+
+			entry->hash = 0xffff;
+			__mtk_foe_entry_clear(ppe, entry);
+			continue;
+		}
+
+		if (found || !mtk_flow_entry_match(entry, hwe)) {
+			if (entry->hash != 0xffff)
+				entry->hash = 0xffff;
+			continue;
+		}
+
+		entry->hash = hash;
+		__mtk_foe_entry_commit(ppe, &entry->data, hash);
+		found = true;
+	}
+
+	if (found)
+		goto out;
+
+	eh = eth_hdr(skb);
+	ether_addr_copy(key.dest_mac, eh->h_dest);
+	ether_addr_copy(key.src_mac, eh->h_source);
+	tag = skb->data - 2;
+	key.vlan = 0;
+	switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+	case htons(ETH_P_XDSA):
+		if (!netdev_uses_dsa(skb->dev) ||
+		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+			goto out;
+
+		tag += 4;
+		if (get_unaligned_be16(tag) != ETH_P_8021Q)
+			break;
+
+		fallthrough;
+#endif
+	case htons(ETH_P_8021Q):
+		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+		break;
+	default:
+		break;
+	}
+
+	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+	if (!entry)
+		goto out;
+
+	mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+	spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	mtk_flow_entry_update(ppe, entry);
+
+	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
 		 int version)
 {
+	struct device *dev = eth->dev;
 	struct mtk_foe_entry *foe;
+	struct mtk_ppe *ppe;
+
+	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+	if (!ppe)
+		return NULL;
+
+	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
 
 	/* need to allocate a separate device, since the PPE DMA access is
 	 * not coherent.
 	 */
 	ppe->base = base;
+	ppe->eth = eth;
 	ppe->dev = dev;
 	ppe->version = version;
 
 	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
 				  &ppe->foe_phys, GFP_KERNEL);
 	if (!foe)
-		return -ENOMEM;
+		return NULL;
 
 	ppe->foe_table = foe;
 
 	mtk_ppe_debugfs_init(ppe);
 
-	return 0;
+	return ppe;
 }
 
 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -443,7 +762,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 	      MTK_PPE_FLOW_CFG_IP4_NAT |
 	      MTK_PPE_FLOW_CFG_IP4_NAPT |
 	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
-	      MTK_PPE_FLOW_CFG_L2_BRIDGE |
 	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
 	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f..1f5cf1c 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,6 +6,7 @@
 
 #include <linux/kernel.h>
 #include <linux/bitfield.h>
+#include <linux/rhashtable.h>
 
 #define MTK_ETH_PPE_BASE		0xc00
 
@@ -48,9 +49,9 @@ enum {
 #define MTK_FOE_IB2_DEST_PORT		GENMASK(7, 5)
 #define MTK_FOE_IB2_MULTICAST		BIT(8)
 
-#define MTK_FOE_IB2_WHNAT_QID2		GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX	BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT		BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2		GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX		BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO		BIT(17)
 
 #define MTK_FOE_IB2_PORT_MG		GENMASK(17, 12)
 
@@ -58,9 +59,9 @@ enum {
 
 #define MTK_FOE_IB2_DSCP		GENMASK(31, 24)
 
-#define MTK_FOE_VLAN2_WHNAT_BSS		GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID	GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING	GENMASK(15, 14)
+#define MTK_FOE_VLAN2_WINFO_BSS		GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID	GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING	GENMASK(15, 14)
 
 enum {
 	MTK_FOE_STATE_INVALID,
@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
 	u16 src_mac_lo;
 };
 
+/* software-only entry type */
 struct mtk_foe_bridge {
-	u32 dest_mac_hi;
+	u8 dest_mac[ETH_ALEN];
+	u8 src_mac[ETH_ALEN];
+	u16 vlan;
 
-	u16 src_mac_lo;
-	u16 dest_mac_lo;
-
-	u32 src_mac_hi;
+	struct {} key_end;
 
 	u32 ib2;
 
-	u32 _rsv[5];
-
-	u32 udf_tsid;
 	struct mtk_foe_mac_info l2;
 };
 
@@ -235,7 +233,37 @@ enum {
 	MTK_PPE_CPU_REASON_INVALID			= 0x1f,
 };
 
+enum {
+	MTK_FLOW_TYPE_L4,
+	MTK_FLOW_TYPE_L2,
+	MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+	union {
+		struct hlist_node list;
+		struct {
+			struct rhash_head l2_node;
+			struct hlist_head l2_flows;
+		};
+	};
+	u8 type;
+	s8 wed_index;
+	u16 hash;
+	union {
+		struct mtk_foe_entry data;
+		struct {
+			struct mtk_flow_entry *base_flow;
+			struct hlist_node list;
+			struct {} end;
+		} l2_data;
+	};
+	struct rhash_head node;
+	unsigned long cookie;
+};
+
 struct mtk_ppe {
+	struct mtk_eth *eth;
 	struct device *dev;
 	void __iomem *base;
 	int version;
@@ -243,19 +271,35 @@ struct mtk_ppe {
 	struct mtk_foe_entry *foe_table;
 	dma_addr_t foe_phys;
 
+	u16 foe_check_time[MTK_PPE_ENTRIES];
+	struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
+	struct rhashtable l2_flows;
+
 	void *acct_table;
 };
 
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
-		 int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
 int mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
 static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
-	ppe->foe_table[hash].ib1 = 0;
-	dma_wmb();
+	u16 now, diff;
+
+	if (!ppe)
+		return;
+
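+	/* rate-limit checks to at most once every HZ / 10 per hash entry */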
+	now = (u16)jiffies;
+	diff = now - ppe->foe_check_time[hash];
+	if (diff < HZ / 10)
+		return;
+
+	ppe->foe_check_time[hash] = now;
+	__mtk_ppe_check_skb(ppe, skb, hash);
 }
 
 static inline int
@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-			 u16 timestamp);
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+			   int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
 
 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b4823..eb0b598 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
 	static const char * const type_str[] = {
 		[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
 		[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
-		[MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
 		[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
 		[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
 		[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 7bb1f20..1fe3105 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -6,10 +6,12 @@
 #include <linux/if_ether.h>
 #include <linux/rhashtable.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <net/flow_offload.h>
 #include <net/pkt_cls.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
+#include "mtk_wed.h"
 
 struct mtk_flow_data {
 	struct ethhdr eth;
@@ -19,11 +21,18 @@ struct mtk_flow_data {
 			__be32 src_addr;
 			__be32 dst_addr;
 		} v4;
+
+		struct {
+			struct in6_addr src_addr;
+			struct in6_addr dst_addr;
+		} v6;
 	};
 
 	__be16 src_port;
 	__be16 dst_port;
 
+	u16 vlan_in;
+
 	struct {
 		u16 id;
 		__be16 proto;
@@ -35,12 +44,6 @@ struct mtk_flow_data {
 	} pppoe;
 };
 
-struct mtk_flow_entry {
-	struct rhash_head node;
-	unsigned long cookie;
-	u16 hash;
-};
-
 static const struct rhashtable_params mtk_flow_ht_params = {
 	.head_offset = offsetof(struct mtk_flow_entry, node),
 	.key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -48,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
 	.automatic_shrinking = true,
 };
 
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
-	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
 static int
 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
 		       bool egress)
@@ -63,6 +60,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
 					    data->v4.dst_addr, data->dst_port);
 }
 
+static int
+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
+{
+	return mtk_foe_entry_set_ipv6_tuple(foe,
+					    data->v6.src_addr.s6_addr32, data->src_port,
+					    data->v6.dst_addr.s6_addr32, data->dst_port);
+}
+
 static void
 mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
 {
@@ -80,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
 	memcpy(dest, src, act->mangle.mask ? 2 : 4);
 }
 
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+	struct net_device_path_ctx ctx = {
+		.dev = dev,
+		.daddr = addr,
+	};
+	struct net_device_path path = {};
+
+	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+		return -1;
+
+	if (!dev->netdev_ops->ndo_fill_forward_path)
+		return -1;
+
+	if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+		return -1;
+
+	if (path.type != DEV_PATH_MTK_WDMA)
+		return -1;
+
+	info->wdma_idx = path.mtk_wdma.wdma_idx;
+	info->queue = path.mtk_wdma.queue;
+	info->bss = path.mtk_wdma.bss;
+	info->wcid = path.mtk_wdma.wcid;
+
+	return 0;
+}
+
 
 static int
 mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -149,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
 
 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
-			   struct net_device *dev)
+			   struct net_device *dev, const u8 *dest_mac,
+			   int *wed_index)
 {
+	struct mtk_wdma_info info = {};
 	int pse_port, dsa_port;
 
+	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+		mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+				       info.wcid);
+		pse_port = 3;
+		*wed_index = info.wdma_idx;
+		goto out;
+	}
+
 	dsa_port = mtk_flow_get_dsa_port(&dev);
 	if (dsa_port >= 0)
 		mtk_foe_entry_set_dsa(foe, dsa_port);
@@ -164,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 	else
 		return -EOPNOTSUPP;
 
+out:
 	mtk_foe_entry_set_pse_port(foe, pse_port);
 
 	return 0;
@@ -179,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	struct net_device *odev = NULL;
 	struct mtk_flow_entry *entry;
 	int offload_type = 0;
+	int wed_index = -1;
 	u16 addr_type = 0;
-	u32 timestamp;
 	u8 l4proto = 0;
 	int err = 0;
-	int hash;
 	int i;
 
 	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -215,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		return -EOPNOTSUPP;
 	}
 
+	switch (addr_type) {
+	case 0:
+		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+			struct flow_match_eth_addrs match;
+
+			flow_rule_match_eth_addrs(rule, &match);
+			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+		} else {
+			return -EOPNOTSUPP;
+		}
+
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+			struct flow_match_vlan match;
+
+			flow_rule_match_vlan(rule, &match);
+
+			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+				return -EOPNOTSUPP;
+
+			data.vlan_in = match.key->vlan_id;
+		}
+		break;
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+		break;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	flow_action_for_each(i, act, &rule->action) {
 		switch (act->id) {
 		case FLOW_ACTION_MANGLE:
+			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+				return -EOPNOTSUPP;
 			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
 				mtk_flow_offload_mangle_eth(act, &data.eth);
 			break;
@@ -249,14 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		}
 	}
 
-	switch (addr_type) {
-	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
-		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
 	if (!is_valid_ether_addr(data.eth.h_source) ||
 	    !is_valid_ether_addr(data.eth.h_dest))
 		return -EINVAL;
@@ -270,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
 		struct flow_match_ports ports;
 
+		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
 		flow_rule_match_ports(rule, &ports);
 		data.src_port = ports.key->src;
 		data.dst_port = ports.key->dst;
-	} else {
+	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
 		return -EOPNOTSUPP;
 	}
 
@@ -288,10 +363,24 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		mtk_flow_set_ipv4_addr(&foe, &data, false);
 	}
 
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_match_ipv6_addrs addrs;
+
+		flow_rule_match_ipv6_addrs(rule, &addrs);
+
+		data.v6.src_addr = addrs.key->src;
+		data.v6.dst_addr = addrs.key->dst;
+
+		mtk_flow_set_ipv6_addr(&foe, &data);
+	}
+
 	flow_action_for_each(i, act, &rule->action) {
 		if (act->id != FLOW_ACTION_MANGLE)
 			continue;
 
+		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
 		switch (act->mangle.htype) {
 		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
 		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -317,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 			return err;
 	}
 
+	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+		foe.bridge.vlan = data.vlan_in;
+
 	if (data.vlan.num == 1) {
 		if (data.vlan.proto != htons(ETH_P_8021Q))
 			return -EOPNOTSUPP;
@@ -326,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (data.pppoe.num == 1)
 		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
 
-	err = mtk_flow_set_output_device(eth, &foe, odev);
+	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+					 &wed_index);
 	if (err)
 		return err;
 
+	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+		return err;
+
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cookie = f->cookie;
-	timestamp = mtk_eth_timestamp(eth);
-	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
-	if (hash < 0) {
-		err = hash;
-		goto free;
-	}
+	memcpy(&entry->data, &foe, sizeof(entry->data));
+	entry->wed_index = wed_index;
 
-	entry->hash = hash;
+	if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
+		goto free;
+
 	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
 				     mtk_flow_ht_params);
 	if (err < 0)
-		goto clear_flow;
+		goto clear;
 
 	return 0;
-clear_flow:
-	mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+	mtk_foe_entry_clear(eth->ppe, entry);
 free:
 	kfree(entry);
+	if (wed_index >= 0)
+		mtk_wed_flow_remove(wed_index);
 	return err;
 }
 
@@ -366,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (!entry)
 		return -ENOENT;
 
-	mtk_foe_entry_clear(&eth->ppe, entry->hash);
+	mtk_foe_entry_clear(eth->ppe, entry);
 	rhashtable_remove_fast(&eth->flow_table, &entry->node,
 			       mtk_flow_ht_params);
+	if (entry->wed_index >= 0)
+		mtk_wed_flow_remove(entry->wed_index);
 	kfree(entry);
 
 	return 0;
@@ -378,7 +477,6 @@ static int
 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 {
 	struct mtk_flow_entry *entry;
-	int timestamp;
 	u32 idle;
 
 	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -386,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (!entry)
 		return -ENOENT;
 
-	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
-	if (timestamp < 0)
-		return -ETIMEDOUT;
-
-	idle = mtk_eth_timestamp(eth) - timestamp;
+	idle = mtk_foe_entry_idle_time(eth->ppe, entry);
 	f->stats.lastused = jiffies - idle * HZ;
 
 	return 0;
@@ -442,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 	struct flow_block_cb *block_cb;
 	flow_setup_cb_t *cb;
 
-	if (!eth->ppe.foe_table)
+	if (!eth->ppe || !eth->ppe->foe_table)
 		return -EOPNOTSUPP;
 
 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -483,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		     void *type_data)
 {
-	if (type == TC_SETUP_FT)
+	switch (type) {
+	case TC_SETUP_BLOCK:
+	case TC_SETUP_FT:
 		return mtk_eth_setup_tc_block(dev, type_data);
-
-	return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
+	}
 }
 
 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
-	if (!eth->ppe.foe_table)
+	if (!eth->ppe || !eth->ppe->foe_table)
 		return 0;
 
 	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
new file mode 100644
index 0000000..f0eacf8
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/debugfs.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed.h"
+#include "mtk_ppe.h"
+
+#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)
+
+#define MTK_WED_PKT_SIZE		1900
+#define MTK_WED_BUF_SIZE		2048
+#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)
+
+#define MTK_WED_TX_RING_SIZE		2048
+#define MTK_WED_WDMA_RING_SIZE		1024
+
+static struct mtk_wed_hw *hw_list[2];
+static DEFINE_MUTEX(hw_lock);
+
+static void
+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
+}
+
+static void
+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	return wed_m32(dev, reg, 0, mask);
+}
+
+static void
+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	return wed_m32(dev, reg, mask, 0);
+}
+
+static void
+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
+}
+
+static void
+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	wdma_m32(dev, reg, 0, mask);
+}
+
+static u32
+mtk_wed_read_reset(struct mtk_wed_device *dev)
+{
+	return wed_r32(dev, MTK_WED_RESET);
+}
+
+static void
+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
+{
+	u32 status;
+
+	wed_w32(dev, MTK_WED_RESET, mask);
+	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
+			       !(status & mask), 0, 1000))
+		WARN_ON_ONCE(1);
+}
+
+static struct mtk_wed_hw *
+mtk_wed_assign(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_hw *hw;
+
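+	/* the WLAN card's PCI domain selects which WED instance services it */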
+	hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+	if (!hw || hw->wed_dev)
+		return NULL;
+
+	hw->wed_dev = dev;
+	return hw;
+}
+
+static int
+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+{
+	struct mtk_wdma_desc *desc;
+	dma_addr_t desc_phys;
+	void **page_list;
+	int token = dev->wlan.token_start;
+	int ring_size;
+	int n_pages;
+	int i, page_idx;
+
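+	/* round the buffer count down to a whole number of pages */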
+	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+
+	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+	if (!page_list)
+		return -ENOMEM;
+
+	dev->buf_ring.size = ring_size;
+	dev->buf_ring.pages = page_list;
+
+	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+				  &desc_phys, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	dev->buf_ring.desc = desc;
+	dev->buf_ring.desc_phys = desc_phys;
+
+	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+		dma_addr_t page_phys, buf_phys;
+		struct page *page;
+		void *buf;
+		int s;
+
+		page = __dev_alloc_pages(GFP_KERNEL, 0);
+		if (!page)
+			return -ENOMEM;
+
+		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+					 DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev->hw->dev, page_phys)) {
+			__free_page(page);
+			return -ENOMEM;
+		}
+
+		page_list[page_idx++] = page;
+		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+					DMA_BIDIRECTIONAL);
+
+		buf = page_to_virt(page);
+		buf_phys = page_phys;
+
+		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+			u32 txd_size;
+
+			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+
+			desc->buf0 = buf_phys;
+			desc->buf1 = buf_phys + txd_size;
+			desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
+						txd_size) |
+				     FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+						MTK_WED_BUF_SIZE - txd_size) |
+				     MTK_WDMA_DESC_CTRL_LAST_SEG1;
+			desc->info = 0;
+			desc++;
+
+			buf += MTK_WED_BUF_SIZE;
+			buf_phys += MTK_WED_BUF_SIZE;
+		}
+
+		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+					   DMA_BIDIRECTIONAL);
+	}
+
+	return 0;
+}
+
+static void
+mtk_wed_free_buffer(struct mtk_wed_device *dev)
+{
+	struct mtk_wdma_desc *desc = dev->buf_ring.desc;
+	void **page_list = dev->buf_ring.pages;
+	int page_idx;
+	int i;
+
+	if (!page_list)
+		return;
+
+	if (!desc)
+		goto free_pagelist;
+
+	for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
+		void *page = page_list[page_idx++];
+
+		if (!page)
+			break;
+
+		dma_unmap_page(dev->hw->dev, desc[i].buf0,
+			       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		__free_page(page);
+	}
+
+	dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
+			  desc, dev->buf_ring.desc_phys);
+
+free_pagelist:
+	kfree(page_list);
+}
+
+static void
+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
+{
+	if (!ring->desc)
+		return;
+
+	dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
+			  ring->desc, ring->desc_phys);
+}
+
+static void
+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
+		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
+	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+		mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
+}
+
+static void
+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+{
+	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+	if (!dev->hw->num_flows)
+		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+
+	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
+	wed_r32(dev, MTK_WED_EXT_INT_MASK);
+}
+
+static void
+mtk_wed_stop(struct mtk_wed_device *dev)
+{
+	regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+	mtk_wed_set_ext_int(dev, false);
+
+	wed_clr(dev, MTK_WED_CTRL,
+		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+		MTK_WED_CTRL_WED_TX_BM_EN |
+		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+	wed_clr(dev, MTK_WED_GLO_CFG,
+		MTK_WED_GLO_CFG_TX_DMA_EN |
+		MTK_WED_GLO_CFG_RX_DMA_EN);
+	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+	wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+}
+
+static void
+mtk_wed_detach(struct mtk_wed_device *dev)
+{
+	struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
+	struct mtk_wed_hw *hw = dev->hw;
+
+	mutex_lock(&hw_lock);
+
+	mtk_wed_stop(dev);
+
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+	mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+	mtk_wed_free_buffer(dev);
+	mtk_wed_free_tx_rings(dev);
+
+	if (of_dma_is_coherent(wlan_node))
+		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+				   BIT(hw->index), BIT(hw->index));
+
+	if (!hw_list[!hw->index]->wed_dev &&
+	    hw->eth->dma_dev != hw->eth->dev)
+		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
+
+	memset(dev, 0, sizeof(*dev));
+	module_put(THIS_MODULE);
+
+	hw->wed_dev = NULL;
+	mutex_unlock(&hw_lock);
+}
+
+static void
+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+{
+	u32 mask, set;
+	u32 offset;
+
+	mtk_wed_stop(dev);
+	mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+	wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
+
+	offset = dev->hw->index ? 0x04000400 : 0;
+	wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+	wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+
+	wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
+	wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+}
+
+static void
+mtk_wed_hw_init(struct mtk_wed_device *dev)
+{
+	if (dev->init_done)
+		return;
+
+	dev->init_done = true;
+	mtk_wed_set_ext_int(dev, false);
+	wed_w32(dev, MTK_WED_TX_BM_CTRL,
+		MTK_WED_TX_BM_CTRL_PAUSE |
+		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+			   dev->buf_ring.size / 128) |
+		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+			   MTK_WED_TX_RING_SIZE / 256));
+
+	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
+
+	wed_w32(dev, MTK_WED_TX_BM_TKID,
+		FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+			   dev->wlan.token_start) |
+		FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+			   dev->wlan.token_start + dev->wlan.nbuf - 1));
+
+	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+	wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+		FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+		MTK_WED_TX_BM_DYN_THR_HI);
+
+	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+	wed_set(dev, MTK_WED_CTRL,
+		MTK_WED_CTRL_WED_TX_BM_EN |
+		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+}
+
+static void
+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		desc[i].buf0 = 0;
+		desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+		desc[i].buf1 = 0;
+		desc[i].info = 0;
+	}
+}
+
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev)
+{
+	if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
+		return true;
+
+	if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
+	    MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
+		return true;
+
+	if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
+		return true;
+
+	if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
+	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+		return true;
+
+	if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
+	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+		return true;
+
+	if (wed_r32(dev, MTK_WED_CTRL) &
+	    (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
+		return true;
+
+	return false;
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev)
+{
+	int sleep = 15000;
+	int timeout = 100 * sleep;
+	u32 val;
+
+	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+				 timeout, false, dev);
+}
+
+static void
+mtk_wed_reset_dma(struct mtk_wed_device *dev)
+{
+	bool busy = false;
+	u32 val;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
+		struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
+
+		if (!desc)
+			continue;
+
+		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
+	}
+
+	if (mtk_wed_poll_busy(dev))
+		busy = mtk_wed_check_busy(dev);
+
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+	} else {
+		wed_w32(dev, MTK_WED_RESET_IDX,
+			MTK_WED_RESET_IDX_TX |
+			MTK_WED_RESET_IDX_RX);
+		wed_w32(dev, MTK_WED_RESET_IDX, 0);
+	}
+
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+	} else {
+		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+
+		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+
+		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+	}
+
+	for (i = 0; i < 100; i++) {
+		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+			break;
+	}
+
+	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+	if (busy) {
+		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+	} else {
+		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+			MTK_WED_WPDMA_RESET_IDX_TX |
+			MTK_WED_WPDMA_RESET_IDX_RX);
+		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+	}
+}
+
+static int
+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+		   int size)
+{
+	ring->desc = dma_alloc_coherent(dev->hw->dev,
+					size * sizeof(*ring->desc),
+					&ring->desc_phys, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	ring->size = size;
+	mtk_wed_ring_reset(ring->desc, size);
+
+	return 0;
+}
+
+static int
+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+{
+	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+
+	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
+		return -ENOMEM;
+
+	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+		 wdma->desc_phys);
+	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+		 size);
+	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+		wdma->desc_phys);
+	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+		size);
+
+	return 0;
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+	u32 wdma_mask;
+	u32 val;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+		if (!dev->tx_wdma[i].desc)
+			mtk_wed_wdma_ring_setup(dev, i, 16);
+
+	wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+
+	mtk_wed_hw_init(dev);
+
+	wed_set(dev, MTK_WED_CTRL,
+		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+		MTK_WED_CTRL_WED_TX_BM_EN |
+		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+	wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+		MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+		MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+	wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+		MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+	wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+
+	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+
+	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+	wed_set(dev, MTK_WED_GLO_CFG,
+		MTK_WED_GLO_CFG_TX_DMA_EN |
+		MTK_WED_GLO_CFG_RX_DMA_EN);
+	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+	mtk_wed_set_ext_int(dev, true);
+	val = dev->wlan.wpdma_phys |
+	      MTK_PCIE_MIRROR_MAP_EN |
+	      FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
+
+	if (dev->hw->index)
+		val |= BIT(1);
+	val |= BIT(0);
+	regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+
+	dev->running = true;
+}
+
+static int
+mtk_wed_attach(struct mtk_wed_device *dev)
+	__releases(RCU)
+{
+	struct mtk_wed_hw *hw;
+	int ret = 0;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+			 "mtk_wed_attach without holding the RCU read lock");
+
+	if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
+	    !try_module_get(THIS_MODULE))
+		ret = -ENODEV;
+
+	rcu_read_unlock();
+
+	if (ret)
+		return ret;
+
+	mutex_lock(&hw_lock);
+
+	hw = mtk_wed_assign(dev);
+	if (!hw) {
+		module_put(THIS_MODULE);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
+
+	dev->hw = hw;
+	dev->dev = hw->dev;
+	dev->irq = hw->irq;
+	dev->wdma_idx = hw->index;
+
+	if (hw->eth->dma_dev == hw->eth->dev &&
+	    of_dma_is_coherent(hw->eth->dev->of_node))
+		mtk_eth_set_dma_device(hw->eth, hw->dev);
+
+	ret = mtk_wed_buffer_alloc(dev);
+	if (ret) {
+		mtk_wed_detach(dev);
+		goto out;
+	}
+
+	mtk_wed_hw_init_early(dev);
+	regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
+
+out:
+	mutex_unlock(&hw_lock);
+
+	return ret;
+}
+
+static int
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+	struct mtk_wed_ring *ring = &dev->tx_ring[idx];
+
+	/*
+	 * Tx ring redirection:
+	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA ring
+	 * allocated by the WLAN driver gets configured into the WED
+	 * MTK_WED_RING_TX(n) registers.
+	 *
+	 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and
+	 * configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
+	 * This ring gets filled with packets picked up from the WED TX ring
+	 * and from WDMA RX.
+	 */
+
+	BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
+
+	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
+		return -ENOMEM;
+
+	if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+		return -ENOMEM;
+
+	ring->reg_base = MTK_WED_RING_TX(idx);
+	ring->wpdma = regs;
+
+	/* WED -> WPDMA */
+	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
+
+	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+		ring->desc_phys);
+	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+		MTK_WED_TX_RING_SIZE);
+	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+	return 0;
+}
+
+static int
+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+	struct mtk_wed_ring *ring = &dev->txfree_ring;
+	int i;
+
+	/*
+	 * For txfree event handling, the same DMA ring is shared between WED
+	 * and WLAN. The WLAN driver accesses the ring index registers through
+	 * WED
+	 * WED.
+	ring->reg_base = MTK_WED_RING_RX(1);
+	ring->wpdma = regs;
+
+	for (i = 0; i < 12; i += 4) {
+		u32 val = readl(regs + i);
+
+		wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
+		wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
+	}
+
+	return 0;
+}
+
+static u32
+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+{
+	u32 val;
+
+	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+	val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+	if (!dev->hw->num_flows)
+		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+	if (val && net_ratelimit())
+		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
+
+	val = wed_r32(dev, MTK_WED_INT_STATUS);
+	val &= mask;
+	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
+
+	return val;
+}
+
+static void
+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+{
+	if (!dev->running)
+		return;
+
+	mtk_wed_set_ext_int(dev, !!mask);
+	wed_w32(dev, MTK_WED_INT_MASK, mask);
+}
+
+int mtk_wed_flow_add(int index)
+{
+	struct mtk_wed_hw *hw = hw_list[index];
+	int ret;
+
+	if (!hw || !hw->wed_dev)
+		return -ENODEV;
+
+	if (hw->num_flows) {
+		hw->num_flows++;
+		return 0;
+	}
+
+	mutex_lock(&hw_lock);
+	if (!hw->wed_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
+	if (!ret)
+		hw->num_flows++;
+	mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+	mutex_unlock(&hw_lock);
+
+	return ret;
+}
+
+void mtk_wed_flow_remove(int index)
+{
+	struct mtk_wed_hw *hw = hw_list[index];
+
+	if (!hw)
+		return;
+
+	if (--hw->num_flows)
+		return;
+
+	mutex_lock(&hw_lock);
+	if (!hw->wed_dev)
+		goto out;
+
+	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
+	mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+	mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+		    void __iomem *wdma, int index)
+{
+	static const struct mtk_wed_ops wed_ops = {
+		.attach = mtk_wed_attach,
+		.tx_ring_setup = mtk_wed_tx_ring_setup,
+		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
+		.start = mtk_wed_start,
+		.stop = mtk_wed_stop,
+		.reset_dma = mtk_wed_reset_dma,
+		.reg_read = wed_r32,
+		.reg_write = wed_w32,
+		.irq_get = mtk_wed_irq_get,
+		.irq_set_mask = mtk_wed_irq_set_mask,
+		.detach = mtk_wed_detach,
+	};
+	struct device_node *eth_np = eth->dev->of_node;
+	struct platform_device *pdev;
+	struct mtk_wed_hw *hw;
+	struct regmap *regs;
+	int irq;
+
+	if (!np)
+		return;
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev)
+		return;
+
+	get_device(&pdev->dev);
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return;
+
+	regs = syscon_regmap_lookup_by_phandle(np, NULL);
+	if (!regs)
+		return;
+
+	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
+
+	mutex_lock(&hw_lock);
+
+	if (WARN_ON(hw_list[index]))
+		goto unlock;
+
+	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+	hw->node = np;
+	hw->regs = regs;
+	hw->eth = eth;
+	hw->dev = &pdev->dev;
+	hw->wdma = wdma;
+	hw->index = index;
+	hw->irq = irq;
+	hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+						     "mediatek,pcie-mirror");
+	hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+						     "mediatek,hifsys");
+	if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+		kfree(hw);
+		goto unlock;
+	}
+
+	if (!index) {
+		regmap_write(hw->mirror, 0, 0);
+		regmap_write(hw->mirror, 4, 0);
+	}
+	mtk_wed_hw_add_debugfs(hw);
+
+	hw_list[index] = hw;
+
+unlock:
+	mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_exit(void)
+{
+	int i;
+
+	rcu_assign_pointer(mtk_soc_wed_ops, NULL);
+
+	synchronize_rcu();
+
+	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+		struct mtk_wed_hw *hw;
+
+		hw = hw_list[i];
+		if (!hw)
+			continue;
+
+		hw_list[i] = NULL;
+		debugfs_remove(hw->debugfs_dir);
+		put_device(hw->dev);
+		kfree(hw);
+	}
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
new file mode 100644
index 0000000..981ec61
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_PRIV_H
+#define __MTK_WED_PRIV_H
+
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <linux/debugfs.h>
+#include <linux/regmap.h>
+#include <linux/netdevice.h>
+
+struct mtk_eth;
+
+struct mtk_wed_hw {
+	struct device_node *node;
+	struct mtk_eth *eth;
+	struct regmap *regs;
+	struct regmap *hifsys;
+	struct device *dev;
+	void __iomem *wdma;
+	struct regmap *mirror;
+	struct dentry *debugfs_dir;
+	struct mtk_wed_device *wed_dev;
+	u32 debugfs_reg;
+	u32 num_flows;
+	char dirname[5];
+	int irq;
+	int index;
+};
+
+struct mtk_wdma_info {
+	u8 wdma_idx;
+	u8 queue;
+	u16 wcid;
+	u8 bss;
+};
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static inline void
+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+	regmap_write(dev->hw->regs, reg, val);
+}
+
+static inline u32
+wed_r32(struct mtk_wed_device *dev, u32 reg)
+{
+	unsigned int val;
+
+	regmap_read(dev->hw->regs, reg, &val);
+
+	return val;
+}
+
+static inline void
+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+	writel(val, dev->hw->wdma + reg);
+}
+
+static inline u32
+wdma_r32(struct mtk_wed_device *dev, u32 reg)
+{
+	return readl(dev->hw->wdma + reg);
+}
+
+static inline u32
+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+	if (!dev->tx_ring[ring].wpdma)
+		return 0;
+
+	return readl(dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+	if (!dev->tx_ring[ring].wpdma)
+		return;
+
+	writel(val, dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
+{
+	if (!dev->txfree_ring.wpdma)
+		return 0;
+
+	return readl(dev->txfree_ring.wpdma + reg);
+}
+
+static inline void
+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+	if (!dev->txfree_ring.wpdma)
+		return;
+
+	writel(val, dev->txfree_ring.wpdma + reg);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+		    void __iomem *wdma, int index);
+void mtk_wed_exit(void);
+int mtk_wed_flow_add(int index);
+void mtk_wed_flow_remove(int index);
+#else
+static inline void
+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+	       void __iomem *wdma, int index)
+{
+}
+static inline void
+mtk_wed_exit(void)
+{
+}
+static inline int mtk_wed_flow_add(int index)
+{
+	return -EINVAL;
+}
+static inline void mtk_wed_flow_remove(int index)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
+#else
+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
new file mode 100644
index 0000000..a81d3fd
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/seq_file.h>
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+
+struct reg_dump {
+	const char *name;
+	u16 offset;
+	u8 type;
+	u8 base;
+};
+
+enum {
+	DUMP_TYPE_STRING,
+	DUMP_TYPE_WED,
+	DUMP_TYPE_WDMA,
+	DUMP_TYPE_WPDMA_TX,
+	DUMP_TYPE_WPDMA_TXFREE,
+};
+
+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_RING(_prefix, _base, ...)				\
+	{ _prefix " BASE", _base, __VA_ARGS__ },		\
+	{ _prefix " CNT",  _base + 0x4, __VA_ARGS__ },	\
+	{ _prefix " CIDX", _base + 0x8, __VA_ARGS__ },	\
+	{ _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
+
+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+
+static void
+print_reg_val(struct seq_file *s, const char *name, u32 val)
+{
+	seq_printf(s, "%-32s %08x\n", name, val);
+}
+
+static void
+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+	      const struct reg_dump *regs, int n_regs)
+{
+	const struct reg_dump *cur;
+	u32 val;
+
+	for (cur = regs; cur < &regs[n_regs]; cur++) {
+		switch (cur->type) {
+		case DUMP_TYPE_STRING:
+			seq_printf(s, "%s======== %s:\n",
+				   cur > regs ? "\n" : "",
+				   cur->name);
+			continue;
+		case DUMP_TYPE_WED:
+			val = wed_r32(dev, cur->offset);
+			break;
+		case DUMP_TYPE_WDMA:
+			val = wdma_r32(dev, cur->offset);
+			break;
+		case DUMP_TYPE_WPDMA_TX:
+			val = wpdma_tx_r32(dev, cur->base, cur->offset);
+			break;
+		case DUMP_TYPE_WPDMA_TXFREE:
+			val = wpdma_txfree_r32(dev, cur->offset);
+			break;
+		}
+		print_reg_val(s, cur->name, val);
+	}
+}
+
+static int
+wed_txinfo_show(struct seq_file *s, void *data)
+{
+	static const struct reg_dump regs[] = {
+		DUMP_STR("WED TX"),
+		DUMP_WED(WED_TX_MIB(0)),
+		DUMP_WED_RING(WED_RING_TX(0)),
+
+		DUMP_WED(WED_TX_MIB(1)),
+		DUMP_WED_RING(WED_RING_TX(1)),
+
+		DUMP_STR("WPDMA TX"),
+		DUMP_WED(WED_WPDMA_TX_MIB(0)),
+		DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
+		DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
+
+		DUMP_WED(WED_WPDMA_TX_MIB(1)),
+		DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
+		DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
+
+		DUMP_STR("WPDMA TX"),
+		DUMP_WPDMA_TX_RING(0),
+		DUMP_WPDMA_TX_RING(1),
+
+		DUMP_STR("WED WDMA RX"),
+		DUMP_WED(WED_WDMA_RX_MIB(0)),
+		DUMP_WED_RING(WED_WDMA_RING_RX(0)),
+		DUMP_WED(WED_WDMA_RX_THRES(0)),
+		DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
+		DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
+
+		DUMP_WED(WED_WDMA_RX_MIB(1)),
+		DUMP_WED_RING(WED_WDMA_RING_RX(1)),
+		DUMP_WED(WED_WDMA_RX_THRES(1)),
+		DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
+		DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
+
+		DUMP_STR("WDMA RX"),
+		DUMP_WDMA(WDMA_GLO_CFG),
+		DUMP_WDMA_RING(WDMA_RING_RX(0)),
+		DUMP_WDMA_RING(WDMA_RING_RX(1)),
+	};
+	struct mtk_wed_hw *hw = s->private;
+	struct mtk_wed_device *dev = hw->wed_dev;
+
+	if (!dev)
+		return 0;
+
+	dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+
+static int
+mtk_wed_reg_set(void *data, u64 val)
+{
+	struct mtk_wed_hw *hw = data;
+
+	regmap_write(hw->regs, hw->debugfs_reg, val);
+
+	return 0;
+}
+
+static int
+mtk_wed_reg_get(void *data, u64 *val)
+{
+	struct mtk_wed_hw *hw = data;
+	unsigned int regval;
+	int ret;
+
+	ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
+	if (ret)
+		return ret;
+
+	*val = regval;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
+			 "0x%08llx\n");
+
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+	struct dentry *dir;
+
+	snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
+	dir = debugfs_create_dir(hw->dirname, NULL);
+	if (!dir)
+		return;
+
+	hw->debugfs_dir = dir;
+	debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+	debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+	debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
new file mode 100644
index 0000000..a5d9d8a
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+
+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
new file mode 100644
index 0000000..0a0465e
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_REGS_H
+#define __MTK_WED_REGS_H
+
+#define MTK_WDMA_DESC_CTRL_LEN1			GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG1		BIT(15)
+#define MTK_WDMA_DESC_CTRL_BURST		BIT(16)
+#define MTK_WDMA_DESC_CTRL_LEN0			GENMASK(29, 16)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG0		BIT(30)
+#define MTK_WDMA_DESC_CTRL_DMA_DONE		BIT(31)
+
+struct mtk_wdma_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+} __packed __aligned(4);
+
+#define MTK_WED_RESET					0x008
+#define MTK_WED_RESET_TX_BM				BIT(0)
+#define MTK_WED_RESET_TX_FREE_AGENT			BIT(4)
+#define MTK_WED_RESET_WPDMA_TX_DRV			BIT(8)
+#define MTK_WED_RESET_WPDMA_RX_DRV			BIT(9)
+#define MTK_WED_RESET_WPDMA_INT_AGENT			BIT(11)
+#define MTK_WED_RESET_WED_TX_DMA			BIT(12)
+#define MTK_WED_RESET_WDMA_RX_DRV			BIT(17)
+#define MTK_WED_RESET_WDMA_INT_AGENT			BIT(19)
+#define MTK_WED_RESET_WED				BIT(31)
+
+#define MTK_WED_CTRL					0x00c
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN			BIT(0)
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY		BIT(1)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN			BIT(2)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY		BIT(3)
+#define MTK_WED_CTRL_WED_TX_BM_EN			BIT(8)
+#define MTK_WED_CTRL_WED_TX_BM_BUSY			BIT(9)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN		BIT(10)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY		BIT(11)
+#define MTK_WED_CTRL_RESERVE_EN				BIT(12)
+#define MTK_WED_CTRL_RESERVE_BUSY			BIT(13)
+#define MTK_WED_CTRL_FINAL_DIDX_READ			BIT(24)
+#define MTK_WED_CTRL_MIB_READ_CLEAR			BIT(28)
+
+#define MTK_WED_EXT_INT_STATUS				0x020
+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR		BIT(0)
+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD		BIT(1)
+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID	BIT(4)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH		BIT(8)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH		BIT(9)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH		BIT(12)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH		BIT(13)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR	BIT(16)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR	BIT(17)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT		BIT(18)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN	BIT(19)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT	BIT(20)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR	BIT(21)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR	BIT(22)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE	BIT(24)
+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK		(MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+							 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+							 MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
+							 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+							 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+							 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+							 MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
+							 MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
+
+#define MTK_WED_EXT_INT_MASK				0x028
+
+#define MTK_WED_STATUS					0x060
+#define MTK_WED_STATUS_TX				GENMASK(15, 8)
+
+#define MTK_WED_TX_BM_CTRL				0x080
+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM			GENMASK(6, 0)
+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM			GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_PAUSE			BIT(28)
+
+#define MTK_WED_TX_BM_BASE				0x084
+
+#define MTK_WED_TX_BM_TKID				0x088
+#define MTK_WED_TX_BM_TKID_START			GENMASK(15, 0)
+#define MTK_WED_TX_BM_TKID_END				GENMASK(31, 16)
+
+#define MTK_WED_TX_BM_BUF_LEN				0x08c
+
+#define MTK_WED_TX_BM_INTF				0x09c
+#define MTK_WED_TX_BM_INTF_TKID				GENMASK(15, 0)
+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP			GENMASK(23, 16)
+#define MTK_WED_TX_BM_INTF_TKID_VALID			BIT(28)
+#define MTK_WED_TX_BM_INTF_TKID_READ			BIT(29)
+
+#define MTK_WED_TX_BM_DYN_THR				0x0a0
+#define MTK_WED_TX_BM_DYN_THR_LO			GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_HI			GENMASK(22, 16)
+
+#define MTK_WED_INT_STATUS				0x200
+#define MTK_WED_INT_MASK				0x204
+
+#define MTK_WED_GLO_CFG					0x208
+#define MTK_WED_GLO_CFG_TX_DMA_EN			BIT(0)
+#define MTK_WED_GLO_CFG_TX_DMA_BUSY			BIT(1)
+#define MTK_WED_GLO_CFG_RX_DMA_EN			BIT(2)
+#define MTK_WED_GLO_CFG_RX_DMA_BUSY			BIT(3)
+#define MTK_WED_GLO_CFG_RX_BT_SIZE			GENMASK(5, 4)
+#define MTK_WED_GLO_CFG_TX_WB_DDONE			BIT(6)
+#define MTK_WED_GLO_CFG_BIG_ENDIAN			BIT(7)
+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN		BIT(8)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO			BIT(9)
+#define MTK_WED_GLO_CFG_MULTI_DMA_EN			GENMASK(11, 10)
+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN		BIT(12)
+#define MTK_WED_GLO_CFG_MI_DEPTH_RD			GENMASK(21, 13)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI			GENMASK(23, 22)
+#define MTK_WED_GLO_CFG_SW_RESET			BIT(24)
+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY		BIT(26)
+#define MTK_WED_GLO_CFG_OMIT_RX_INFO			BIT(27)
+#define MTK_WED_GLO_CFG_OMIT_TX_INFO			BIT(28)
+#define MTK_WED_GLO_CFG_BYTE_SWAP			BIT(29)
+#define MTK_WED_GLO_CFG_RX_2B_OFFSET			BIT(31)
+
+#define MTK_WED_RESET_IDX				0x20c
+#define MTK_WED_RESET_IDX_TX				GENMASK(3, 0)
+#define MTK_WED_RESET_IDX_RX				GENMASK(17, 16)
+
+#define MTK_WED_TX_MIB(_n)				(0x2a0 + (_n) * 4)
+
+#define MTK_WED_RING_TX(_n)				(0x300 + (_n) * 0x10)
+
+#define MTK_WED_RING_RX(_n)				(0x400 + (_n) * 0x10)
+
+#define MTK_WED_WPDMA_INT_TRIGGER			0x504
+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE		BIT(1)
+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE		GENMASK(5, 4)
+
+#define MTK_WED_WPDMA_GLO_CFG				0x508
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN			BIT(0)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY		BIT(1)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN			BIT(2)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY		BIT(3)
+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE		GENMASK(5, 4)
+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE		BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN		BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN		BIT(8)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO		BIT(9)
+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN		GENMASK(11, 10)
+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN	BIT(12)
+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD		GENMASK(21, 13)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI		GENMASK(23, 22)
+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET			BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY		BIT(26)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO		BIT(27)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO		BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP			BIT(29)
+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET		BIT(31)
+
+#define MTK_WED_WPDMA_RESET_IDX				0x50c
+#define MTK_WED_WPDMA_RESET_IDX_TX			GENMASK(3, 0)
+#define MTK_WED_WPDMA_RESET_IDX_RX			GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_INT_CTRL				0x520
+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV		BIT(21)
+
+#define MTK_WED_WPDMA_INT_MASK				0x524
+
+#define MTK_WED_PCIE_CFG_BASE				0x560
+
+#define MTK_WED_PCIE_INT_TRIGGER			0x570
+#define MTK_WED_PCIE_INT_TRIGGER_STATUS			BIT(16)
+
+#define MTK_WED_WPDMA_CFG_BASE				0x580
+
+#define MTK_WED_WPDMA_TX_MIB(_n)			(0x5a0 + (_n) * 4)
+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n)		(0x5d0 + (_n) * 4)
+
+#define MTK_WED_WPDMA_RING_TX(_n)			(0x600 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX(_n)			(0x700 + (_n) * 0x10)
+#define MTK_WED_WDMA_RING_RX(_n)			(0x900 + (_n) * 0x10)
+#define MTK_WED_WDMA_RX_THRES(_n)			(0x940 + (_n) * 0x4)
+
+#define MTK_WED_WDMA_GLO_CFG				0xa04
+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN			BIT(0)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN			BIT(2)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY		BIT(3)
+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE			GENMASK(5, 4)
+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE		BIT(6)
+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE	BIT(13)
+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL		BIT(16)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS	BIT(17)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS		BIT(18)
+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE		BIT(19)
+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT		BIT(20)
+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW		BIT(21)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W	BIT(22)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY		BIT(23)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP	BIT(24)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE	BIT(25)
+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE		BIT(26)
+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS	BIT(30)
+
+#define MTK_WED_WDMA_RESET_IDX				0xa08
+#define MTK_WED_WDMA_RESET_IDX_RX			GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_DRV			GENMASK(25, 24)
+
+#define MTK_WED_WDMA_INT_TRIGGER			0xa28
+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE		GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_CTRL				0xa2c
+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL		GENMASK(17, 16)
+
+#define MTK_WED_WDMA_OFFSET0				0xaa4
+#define MTK_WED_WDMA_OFFSET1				0xaa8
+
+#define MTK_WED_WDMA_RX_MIB(_n)				(0xae0 + (_n) * 4)
+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n)			(0xae8 + (_n) * 4)
+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n)		(0xaf0 + (_n) * 4)
+
+#define MTK_WED_RING_OFS_BASE				0x00
+#define MTK_WED_RING_OFS_COUNT				0x04
+#define MTK_WED_RING_OFS_CPU_IDX			0x08
+#define MTK_WED_RING_OFS_DMA_IDX			0x0c
+
+#define MTK_WDMA_RING_RX(_n)				(0x100 + (_n) * 0x10)
+
+#define MTK_WDMA_GLO_CFG				0x204
+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES			GENMASK(28, 26)
+
+#define MTK_WDMA_RESET_IDX				0x208
+#define MTK_WDMA_RESET_IDX_TX				GENMASK(3, 0)
+#define MTK_WDMA_RESET_IDX_RX				GENMASK(17, 16)
+
+#define MTK_WDMA_INT_MASK				0x228
+#define MTK_WDMA_INT_MASK_TX_DONE			GENMASK(3, 0)
+#define MTK_WDMA_INT_MASK_RX_DONE			GENMASK(17, 16)
+#define MTK_WDMA_INT_MASK_TX_DELAY			BIT(28)
+#define MTK_WDMA_INT_MASK_TX_COHERENT			BIT(29)
+#define MTK_WDMA_INT_MASK_RX_DELAY			BIT(30)
+#define MTK_WDMA_INT_MASK_RX_COHERENT			BIT(31)
+
+#define MTK_WDMA_INT_GRP1				0x250
+#define MTK_WDMA_INT_GRP2				0x254
+
+#define MTK_PCIE_MIRROR_MAP(n)				((n) ? 0x4 : 0x0)
+#define MTK_PCIE_MIRROR_MAP_EN				BIT(0)
+#define MTK_PCIE_MIRROR_MAP_WED_ID			BIT(1)
+
+/* DMA channel mapping */
+#define HIFSYS_DMA_AG_MAP				0x008
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 79deb19..79fd486 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -7010,7 +7010,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
 		if (IS_ERR(mlxsw_sp_rt6)) {
 			err = PTR_ERR(mlxsw_sp_rt6);
-			goto err_rt6_create;
+			goto err_rt6_unwind;
 		}
 
 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
@@ -7019,14 +7019,12 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
 
 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
 	if (err)
-		goto err_nexthop6_group_update;
+		goto err_rt6_unwind;
 
 	return 0;
 
-err_nexthop6_group_update:
-	i = nrt6;
-err_rt6_create:
-	for (i--; i >= 0; i--) {
+err_rt6_unwind:
+	for (; i > 0; i--) {
 		fib6_entry->nrt6--;
 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
 					       struct mlxsw_sp_rt6, list);
@@ -7154,7 +7152,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
 		if (IS_ERR(mlxsw_sp_rt6)) {
 			err = PTR_ERR(mlxsw_sp_rt6);
-			goto err_rt6_create;
+			goto err_rt6_unwind;
 		}
 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
 		fib6_entry->nrt6++;
@@ -7162,7 +7160,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
 
 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
 	if (err)
-		goto err_nexthop6_group_get;
+		goto err_rt6_unwind;
 
 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
 					     fib_node->fib);
@@ -7181,10 +7179,8 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
 err_nexthop_group_vr_link:
 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
-err_nexthop6_group_get:
-	i = nrt6;
-err_rt6_create:
-	for (i--; i >= 0; i--) {
+err_rt6_unwind:
+	for (; i > 0; i--) {
 		fib6_entry->nrt6--;
 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
 					       struct mlxsw_sp_rt6, list);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
index 96a2077f..7e286cd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
@@ -161,11 +161,11 @@ EXPORT_SYMBOL(qed_vlan_get_ndev);
 
 struct pci_dev *qed_validate_ndev(struct net_device *ndev)
 {
-	struct pci_dev *pdev = NULL;
 	struct net_device *upper;
+	struct pci_dev *pdev;
 
 	for_each_pci_dev(pdev) {
-		if (pdev && pdev->driver &&
+		if (pdev->driver &&
 		    !strcmp(pdev->driver->name, "qede")) {
 			upper = pci_get_drvdata(pdev);
 			if (upper->ifindex == ndev->ifindex)
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 0c6cc21..6bbdb5d 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -718,12 +718,14 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
 				     struct ef4_rx_queue *rx_queue)
 {
 	unsigned int bufs_in_recycle_ring, page_ring_size;
+	struct iommu_domain __maybe_unused *domain;
 
 	/* Set the RX recycle ring size */
 #ifdef CONFIG_PPC64
 	bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
 #else
-	if (iommu_present(&pci_bus_type))
+	domain = iommu_get_domain_for_dev(&efx->pci_dev->dev);
+	if (domain && domain->type != IOMMU_DOMAIN_IDENTITY)
 		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
 	else
 		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 869e362..3f6b9df 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1515,7 +1515,7 @@ static int temac_probe(struct platform_device *pdev)
 				of_node_put(dma_np);
 				return PTR_ERR(lp->sdma_regs);
 			}
-			if (of_get_property(dma_np, "little-endian", NULL)) {
+			if (of_property_read_bool(dma_np, "little-endian")) {
 				lp->dma_in = temac_dma_in32_le;
 				lp->dma_out = temac_dma_out32_le;
 			} else {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9442f75..4061af5 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -792,9 +792,9 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
 	int queue_sends;
 	u64 cmd_rqst;
 
-	cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
+	cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
 	if (cmd_rqst == VMBUS_RQST_ERROR) {
-		netdev_err(ndev, "Incorrect transaction id\n");
+		netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
 		return;
 	}
 
@@ -854,9 +854,9 @@ static void netvsc_send_completion(struct net_device *ndev,
 	/* First check if this is a VMBUS completion without data payload */
 	if (!msglen) {
 		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
-								   (u64)desc->trans_id);
+								   desc->trans_id);
 		if (cmd_rqst == VMBUS_RQST_ERROR) {
-			netdev_err(ndev, "Invalid transaction id\n");
+			netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
 			return;
 		}
 
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 7856905..232c4a0 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -137,7 +137,6 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 {
 	struct netdev_bpf xdp;
-	bpf_op_t ndo_bpf;
 	int ret;
 
 	ASSERT_RTNL();
@@ -145,8 +144,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 	if (!vf_netdev)
 		return 0;
 
-	ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
-	if (!ndo_bpf)
+	if (!vf_netdev->netdev_ops->ndo_bpf)
 		return 0;
 
 	memset(&xdp, 0, sizeof(xdp));
@@ -157,7 +155,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 	xdp.command = XDP_SETUP_PROG;
 	xdp.prog = prog;
 
-	ret = ndo_bpf(vf_netdev, &xdp);
+	ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
 
 	if (ret && prog)
 		bpf_prog_put(prog);
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 5829697..85b4b09 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
@@ -30,6 +31,8 @@
 #define		MSCC_MIIM_CMD_VLD		BIT(31)
 #define MSCC_MIIM_REG_DATA		0xC
 #define		MSCC_MIIM_DATA_ERROR		(BIT(16) | BIT(17))
+#define MSCC_MIIM_REG_CFG		0x10
+#define		MSCC_MIIM_CFG_PRESCALE_MASK	GENMASK(7, 0)
 
 #define MSCC_PHY_REG_PHY_CFG	0x0
 #define		PHY_CFG_PHY_ENA		(BIT(0) | BIT(1) | BIT(2) | BIT(3))
@@ -50,6 +53,8 @@ struct mscc_miim_dev {
 	int mii_status_offset;
 	struct regmap *phy_regs;
 	const struct mscc_miim_info *info;
+	struct clk *clk;
+	u32 bus_freq;
 };
 
 /* When high resolution timers aren't built-in: we can't use usleep_range() as
@@ -241,9 +246,32 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
 }
 EXPORT_SYMBOL(mscc_miim_setup);
 
+static int mscc_miim_clk_set(struct mii_bus *bus)
+{
+	struct mscc_miim_dev *miim = bus->priv;
+	unsigned long rate;
+	u32 div;
+
+	/* Keep the current settings */
+	if (!miim->bus_freq)
+		return 0;
+
+	rate = clk_get_rate(miim->clk);
+
+	div = DIV_ROUND_UP(rate, 2 * miim->bus_freq) - 1;
+	if (div == 0 || div & ~MSCC_MIIM_CFG_PRESCALE_MASK) {
+		dev_err(&bus->dev, "Incorrect MDIO clock frequency\n");
+		return -EINVAL;
+	}
+
+	return regmap_update_bits(miim->regs, MSCC_MIIM_REG_CFG,
+				  MSCC_MIIM_CFG_PRESCALE_MASK, div);
+}
+
 static int mscc_miim_probe(struct platform_device *pdev)
 {
 	struct regmap *mii_regmap, *phy_regmap = NULL;
+	struct device_node *np = pdev->dev.of_node;
 	void __iomem *regs, *phy_regs;
 	struct mscc_miim_dev *miim;
 	struct resource *res;
@@ -294,21 +322,47 @@ static int mscc_miim_probe(struct platform_device *pdev)
 	if (!miim->info)
 		return -EINVAL;
 
-	ret = of_mdiobus_register(bus, pdev->dev.of_node);
+	miim->clk = devm_clk_get_optional(&pdev->dev, NULL);
+	if (IS_ERR(miim->clk))
+		return PTR_ERR(miim->clk);
+
+	of_property_read_u32(np, "clock-frequency", &miim->bus_freq);
+
+	if (miim->bus_freq && !miim->clk) {
+		dev_err(&pdev->dev,
+			"cannot use clock-frequency without a clock\n");
+		return -EINVAL;
+	}
+
+	ret = clk_prepare_enable(miim->clk);
+	if (ret)
+		return ret;
+
+	ret = mscc_miim_clk_set(bus);
+	if (ret)
+		goto out_disable_clk;
+
+	ret = of_mdiobus_register(bus, np);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
-		return ret;
+		goto out_disable_clk;
 	}
 
 	platform_set_drvdata(pdev, bus);
 
 	return 0;
+
+out_disable_clk:
+	clk_disable_unprepare(miim->clk);
+	return ret;
 }
 
 static int mscc_miim_remove(struct platform_device *pdev)
 {
 	struct mii_bus *bus = platform_get_drvdata(pdev);
+	struct mscc_miim_dev *miim = bus->priv;
 
+	clk_disable_unprepare(miim->clk);
 	mdiobus_unregister(bus);
 
 	return 0;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3619520..1b41cd9 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1011,8 +1011,7 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
 		goto end;
 	}
 
-	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
-				flags & MSG_DONTWAIT, &error);
+	skb = skb_recv_datagram(sk, flags, &error);
 	if (error < 0)
 		goto end;
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3353e76..17cf0d1 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -190,7 +190,6 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
 		if (!skbn)
 			return 0;
-		skbn->dev = net;
 
 		switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
 		case 0x40:
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 140780ac..588b233 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -57,34 +57,6 @@
 	  The driver will be compiled as a module: the
 	  module will be called cosa.
 
-#
-# Lan Media's board. Currently 1000, 1200, 5200, 5245
-#
-config LANMEDIA
-	tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
-	depends on PCI && VIRT_TO_BUS && HDLC
-	help
-	  Driver for the following Lan Media family of serial boards:
-
-	  - LMC 1000 board allows you to connect synchronous serial devices
-	  (for example base-band modems, or any other device with the X.21,
-	  V.24, V.35 or V.36 interface) to your Linux box.
-
-	  - LMC 1200 with on board DSU board allows you to connect your Linux
-	  box directly to a T1 or E1 circuit.
-
-	  - LMC 5200 board provides a HSSI interface capable of running up to
-	  52 Mbits per second.
-
-	  - LMC 5245 board connects directly to a T3 circuit saving the
-	  additional external hardware.
-
-	  To change setting such as clock source you will need lmcctl.
-	  It is available at <ftp://ftp.lanmedia.com/> (broken link).
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called lmc.
-
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
 	tristate "Sealevel Systems 4021 support"
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 480bcd1..1cd4214 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -19,8 +19,6 @@
 obj-$(CONFIG_COSA)		+= cosa.o
 obj-$(CONFIG_FARSYNC)		+= farsync.o
 
-obj-$(CONFIG_LANMEDIA)		+= lmc/
-
 obj-$(CONFIG_LAPBETHER)		+= lapbether.o
 obj-$(CONFIG_N2)		+= n2.o
 obj-$(CONFIG_C101)		+= c101.o
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
deleted file mode 100644
index f00fe44..0000000
--- a/drivers/net/wan/lmc/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Lan Media 21140 based WAN cards
-# Specifically the 1000,1200,5200,5245
-#
-
-obj-$(CONFIG_LANMEDIA) += lmc.o
-
-lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
-
-# Like above except every packet gets echoed to KERN_DEBUG
-# in hex
-#
-# DBDEF = \
-# -DDEBUG \
-# -DLMC_PACKET_LOG
-
-ccflags-y := $(DBGDEF)
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
deleted file mode 100644
index d7d59b4..0000000
--- a/drivers/net/wan/lmc/lmc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_H_
-#define _LMC_H_
-
-#include "lmc_var.h"
-
-/*
- * prototypes for everyone
- */
-int lmc_probe(struct net_device * dev);
-unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
-			  devaddr, unsigned regno);
-void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
-			       unsigned regno, unsigned data);
-void lmc_led_on(lmc_softc_t * const, u32);
-void lmc_led_off(lmc_softc_t * const, u32);
-unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
-void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
-
-int lmc_ioctl(struct net_device *dev, struct if_settings *ifs);
-
-extern lmc_media_t lmc_ds3_media;
-extern lmc_media_t lmc_ssi_media;
-extern lmc_media_t lmc_t1_media;
-extern lmc_media_t lmc_hssi_media;
-
-#ifdef _DBG_EVENTLOG
-static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-#endif
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
deleted file mode 100644
index 2b6051b..0000000
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-
-#include "lmc_debug.h"
-
-/*
- * Prints out len, max to 80 octets using printk, 20 per line
- */
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
-{
-  int iNewLine = 1;
-  char str[80], *pstr;
-  
-  sprintf(str, KERN_DEBUG "lmc: %s: ", type);
-  pstr = str+strlen(str);
-  
-  if(iLen > 240){
-      printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
-    iLen = 240;
-  }
-  else{
-      printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
-  }
-
-  while(iLen > 0) 
-    {
-      sprintf(pstr, "%02x ", *ucData);
-      pstr+=3;
-      ucData++;
-      if( !(iNewLine % 20))
-	{
-	  sprintf(pstr, "\n");
-	  printk(str);
-	  sprintf(str, KERN_DEBUG "lmc: %s: ", type);
-	  pstr=str+strlen(str);
-	}
-      iNewLine++;
-      iLen--;
-    }
-  sprintf(pstr, "\n");
-  printk(str);
-}
-#endif
-#endif
-
-#ifdef DEBUG
-u32 lmcEventLogIndex;
-u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
-{
-  lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
-  lmcEventLogBuf[lmcEventLogIndex++] = arg2;
-  lmcEventLogBuf[lmcEventLogIndex++] = arg3;
-  lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
-
-  lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
-}
-#endif  /*  DEBUG  */
-
-/* --------------------------- end if_lmc_linux.c ------------------------ */
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
deleted file mode 100644
index cfae9ed..0000000
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_DEBUG_H_
-#define _LMC_DEBUG_H_
-
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z))
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-
-
-
-/* Debug --- Event log definitions --- */
-/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */
-#define LMC_EVENTLOGSIZE 1024	/* number of events in eventlog */
-#define LMC_EVENTLOGARGS 4		/* number of args for each event */
-
-/* event indicators */
-#define LMC_EVENT_XMT           1
-#define LMC_EVENT_XMTEND        2
-#define LMC_EVENT_XMTINT        3
-#define LMC_EVENT_RCVINT        4
-#define LMC_EVENT_RCVEND        5
-#define LMC_EVENT_INT           6
-#define LMC_EVENT_XMTINTTMO     7
-#define LMC_EVENT_XMTPRCTMO     8
-#define LMC_EVENT_INTEND        9
-#define LMC_EVENT_RESET1       10
-#define LMC_EVENT_RESET2       11
-#define LMC_EVENT_FORCEDRESET  12
-#define LMC_EVENT_WATCHDOG     13
-#define LMC_EVENT_BADPKTSURGE  14
-#define LMC_EVENT_TBUSY0       15
-#define LMC_EVENT_TBUSY1       16
-
-
-#ifdef DEBUG
-extern u32 lmcEventLogIndex;
-extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
-#else
-#define LMC_EVENT_LOG(x,y,z)
-#endif /* end ifdef _DBG_EVENTLOG */
-
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
deleted file mode 100644
index 8c65e21..0000000
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_IOCTL_H_
-#define _LMC_IOCTL_H_
-/*	$Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $	*/
-
- /*
-  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
-  * All rights reserved.  www.lanmedia.com
-  *
-  * This code is written by:
-  * Andrew Stanley-Jones (asj@cban.com)
-  * Rob Braun (bbraun@vix.com),
-  * Michael Graff (explorer@vix.com) and
-  * Matt Thomas (matt@3am-software.com).
-  */
-
-#define LMCIOCGINFO             SIOCDEVPRIVATE+3 /* get current state */
-#define LMCIOCSINFO             SIOCDEVPRIVATE+4 /* set state to user values */
-#define LMCIOCGETLMCSTATS       SIOCDEVPRIVATE+5
-#define LMCIOCCLEARLMCSTATS     SIOCDEVPRIVATE+6
-#define LMCIOCDUMPEVENTLOG      SIOCDEVPRIVATE+7
-#define LMCIOCGETXINFO          SIOCDEVPRIVATE+8
-#define LMCIOCSETCIRCUIT        SIOCDEVPRIVATE+9
-#define LMCIOCUNUSEDATM         SIOCDEVPRIVATE+10
-#define LMCIOCRESET             SIOCDEVPRIVATE+11
-#define LMCIOCT1CONTROL         SIOCDEVPRIVATE+12
-#define LMCIOCIFTYPE            SIOCDEVPRIVATE+13
-#define LMCIOCXILINX            SIOCDEVPRIVATE+14
-
-#define LMC_CARDTYPE_UNKNOWN            -1
-#define LMC_CARDTYPE_HSSI               1       /* probed card is a HSSI card */
-#define LMC_CARDTYPE_DS3                2       /* probed card is a DS3 card */
-#define LMC_CARDTYPE_SSI                3       /* probed card is a SSI card */
-#define LMC_CARDTYPE_T1                 4       /* probed card is a T1 card */
-
-#define LMC_CTL_CARDTYPE_LMC5200	0	/* HSSI */
-#define LMC_CTL_CARDTYPE_LMC5245	1	/* DS3 */
-#define LMC_CTL_CARDTYPE_LMC1000	2	/* SSI, V.35 */
-#define LMC_CTL_CARDTYPE_LMC1200        3       /* DS1 */
-
-#define LMC_CTL_OFF			0	/* generic OFF value */
-#define LMC_CTL_ON			1	/* generic ON value */
-
-#define LMC_CTL_CLOCK_SOURCE_EXT	0	/* clock off line */
-#define LMC_CTL_CLOCK_SOURCE_INT	1	/* internal clock */
-
-#define LMC_CTL_CRC_LENGTH_16		16
-#define LMC_CTL_CRC_LENGTH_32		32
-#define LMC_CTL_CRC_BYTESIZE_2          2
-#define LMC_CTL_CRC_BYTESIZE_4          4
-
-
-#define LMC_CTL_CABLE_LENGTH_LT_100FT	0	/* DS3 cable < 100 feet */
-#define LMC_CTL_CABLE_LENGTH_GT_100FT	1	/* DS3 cable >= 100 feet */
-
-#define LMC_CTL_CIRCUIT_TYPE_E1 0
-#define LMC_CTL_CIRCUIT_TYPE_T1 1
-
-/*
- * IFTYPE defines
- */
-#define LMC_PPP         1               /* use generic HDLC interface */
-#define LMC_NET         2               /* use direct net interface */
-#define LMC_RAW         3               /* use direct net interface */
-
-/*
- * These are not in the least IOCTL related, but I want them common.
- */
-/*
- * assignments for the GPIO register on the DEC chip (common)
- */
-#define LMC_GEP_INIT		0x01 /* 0: */
-#define LMC_GEP_RESET		0x02 /* 1: */
-#define LMC_GEP_MODE		0x10 /* 4: */
-#define LMC_GEP_DP		0x20 /* 5: */
-#define LMC_GEP_DATA		0x40 /* 6: serial out */
-#define LMC_GEP_CLK	        0x80 /* 7: serial clock */
-
-/*
- * HSSI GPIO assignments
- */
-#define LMC_GEP_HSSI_ST		0x04 /* 2: receive timing sense (deprecated) */
-#define LMC_GEP_HSSI_CLOCK	0x08 /* 3: clock source */
-
-/*
- * T1 GPIO assignments
- */
-#define LMC_GEP_SSI_GENERATOR	0x04 /* 2: enable prog freq gen serial i/f */
-#define LMC_GEP_SSI_TXCLOCK	0x08 /* 3: provide clock on TXCLOCK output */
-
-/*
- * Common MII16 bits
- */
-#define LMC_MII16_LED0         0x0080
-#define LMC_MII16_LED1         0x0100
-#define LMC_MII16_LED2         0x0200
-#define LMC_MII16_LED3         0x0400  /* Error, and the red one */
-#define LMC_MII16_LED_ALL      0x0780  /* LED bit mask */
-#define LMC_MII16_FIFO_RESET   0x0800
-
-/*
- * definitions for HSSI
- */
-#define LMC_MII16_HSSI_TA      0x0001
-#define LMC_MII16_HSSI_CA      0x0002
-#define LMC_MII16_HSSI_LA      0x0004
-#define LMC_MII16_HSSI_LB      0x0008
-#define LMC_MII16_HSSI_LC      0x0010
-#define LMC_MII16_HSSI_TM      0x0020
-#define LMC_MII16_HSSI_CRC     0x0040
-
-/*
- * assignments for the MII register 16 (DS3)
- */
-#define LMC_MII16_DS3_ZERO	0x0001
-#define LMC_MII16_DS3_TRLBK	0x0002
-#define LMC_MII16_DS3_LNLBK	0x0004
-#define LMC_MII16_DS3_RAIS	0x0008
-#define LMC_MII16_DS3_TAIS	0x0010
-#define LMC_MII16_DS3_BIST	0x0020
-#define LMC_MII16_DS3_DLOS	0x0040
-#define LMC_MII16_DS3_CRC	0x1000
-#define LMC_MII16_DS3_SCRAM	0x2000
-#define LMC_MII16_DS3_SCRAM_LARS 0x4000
-
-/* Note: 2 pairs of LEDs where swapped by mistake
- * in Xilinx code for DS3 & DS1 adapters */
-#define LMC_DS3_LED0    0x0100          /* bit 08  yellow */
-#define LMC_DS3_LED1    0x0080          /* bit 07  blue   */
-#define LMC_DS3_LED2    0x0400          /* bit 10  green  */
-#define LMC_DS3_LED3    0x0200          /* bit 09  red    */
-
-/*
- * framer registers 0 and 7 (7 is latched and reset on read)
- */
-#define LMC_FRAMER_REG0_DLOS            0x80    /* digital loss of service */
-#define LMC_FRAMER_REG0_OOFS            0x40    /* out of frame sync */
-#define LMC_FRAMER_REG0_AIS             0x20    /* alarm indication signal */
-#define LMC_FRAMER_REG0_CIS             0x10    /* channel idle */
-#define LMC_FRAMER_REG0_LOC             0x08    /* loss of clock */
-
-/*
- * Framer register 9 contains the blue alarm signal
- */
-#define LMC_FRAMER_REG9_RBLUE          0x02     /* Blue alarm failure */
-
-/*
- * Framer register 0x10 contains xbit error
- */
-#define LMC_FRAMER_REG10_XBIT          0x01     /* X bit error alarm failure */
-
-/*
- * And SSI, LMC1000
- */
-#define LMC_MII16_SSI_DTR	0x0001	/* DTR output RW */
-#define LMC_MII16_SSI_DSR	0x0002	/* DSR input RO */
-#define LMC_MII16_SSI_RTS	0x0004	/* RTS output RW */
-#define LMC_MII16_SSI_CTS	0x0008	/* CTS input RO */
-#define LMC_MII16_SSI_DCD	0x0010	/* DCD input RO */
-#define LMC_MII16_SSI_RI		0x0020	/* RI input RO */
-#define LMC_MII16_SSI_CRC                0x1000  /* CRC select - RW */
-
-/*
- * bits 0x0080 through 0x0800 are generic, and described
- * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET
- */
-#define LMC_MII16_SSI_LL		0x1000	/* LL output RW */
-#define LMC_MII16_SSI_RL		0x2000	/* RL output RW */
-#define LMC_MII16_SSI_TM		0x4000	/* TM input RO */
-#define LMC_MII16_SSI_LOOP	0x8000	/* loopback enable RW */
-
-/*
- * Some of the MII16 bits are mirrored in the MII17 register as well,
- * but let's keep thing separate for now, and get only the cable from
- * the MII17.
- */
-#define LMC_MII17_SSI_CABLE_MASK	0x0038	/* mask to extract the cable type */
-#define LMC_MII17_SSI_CABLE_SHIFT 3	/* shift to extract the cable type */
-
-/*
- * And T1, LMC1200
- */
-#define LMC_MII16_T1_UNUSED1    0x0003
-#define LMC_MII16_T1_XOE                0x0004
-#define LMC_MII16_T1_RST                0x0008  /* T1 chip reset - RW */
-#define LMC_MII16_T1_Z                  0x0010  /* output impedance T1=1, E1=0 output - RW */
-#define LMC_MII16_T1_INTR               0x0020  /* interrupt from 8370 - RO */
-#define LMC_MII16_T1_ONESEC             0x0040  /* one second square wave - ro */
-
-#define LMC_MII16_T1_LED0               0x0100
-#define LMC_MII16_T1_LED1               0x0080
-#define LMC_MII16_T1_LED2               0x0400
-#define LMC_MII16_T1_LED3               0x0200
-#define LMC_MII16_T1_FIFO_RESET 0x0800
-
-#define LMC_MII16_T1_CRC                0x1000  /* CRC select - RW */
-#define LMC_MII16_T1_UNUSED2    0xe000
-
-
-/* 8370 framer registers  */
-
-#define T1FRAMER_ALARM1_STATUS  0x47
-#define T1FRAMER_ALARM2_STATUS  0x48
-#define T1FRAMER_FERR_LSB               0x50
-#define T1FRAMER_FERR_MSB               0x51    /* framing bit error counter */
-#define T1FRAMER_LCV_LSB                0x54
-#define T1FRAMER_LCV_MSB                0x55    /* line code violation counter */
-#define T1FRAMER_AERR                   0x5A
-
-/* mask for the above AERR register */
-#define T1FRAMER_LOF_MASK               (0x0f0) /* receive loss of frame */
-#define T1FRAMER_COFA_MASK              (0x0c0) /* change of frame alignment */
-#define T1FRAMER_SEF_MASK               (0x03)  /* severely errored frame  */
-
-/* 8370 framer register ALM1 (0x47) values
- * used to determine link status
- */
-
-#define T1F_SIGFRZ      0x01    /* signaling freeze */
-#define T1F_RLOF        0x02    /* receive loss of frame alignment */
-#define T1F_RLOS        0x04    /* receive loss of signal */
-#define T1F_RALOS       0x08    /* receive analog loss of signal or RCKI loss of clock */
-#define T1F_RAIS        0x10    /* receive alarm indication signal */
-#define T1F_UNUSED      0x20
-#define T1F_RYEL        0x40    /* receive yellow alarm */
-#define T1F_RMYEL       0x80    /* receive multiframe yellow alarm */
-
-#define LMC_T1F_WRITE       0
-#define LMC_T1F_READ        1
-
-typedef struct lmc_st1f_control {
-  int command;
-  int address;
-  int value;
-  char __user *data;
-} lmc_t1f_control;
-
-enum lmc_xilinx_c {
-    lmc_xilinx_reset = 1,
-    lmc_xilinx_load_prom = 2,
-    lmc_xilinx_load = 3
-};
-
-struct lmc_xilinx_control {
-    enum lmc_xilinx_c command;
-    int len;
-    char __user *data;
-};
-
-/* ------------------ end T1 defs ------------------- */
-
-#define LMC_MII_LedMask                 0x0780
-#define LMC_MII_LedBitPos               7
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
deleted file mode 100644
index 76c6b4f..0000000
--- a/drivers/net/wan/lmc/lmc_main.c
+++ /dev/null
@@ -1,2009 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
-  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
-  * All rights reserved.  www.lanmedia.com
-  * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
-  *
-  * This code is written by:
-  * Andrew Stanley-Jones (asj@cban.com)
-  * Rob Braun (bbraun@vix.com),
-  * Michael Graff (explorer@vix.com) and
-  * Matt Thomas (matt@3am-software.com).
-  *
-  * With Help By:
-  * David Boggs
-  * Ron Crane
-  * Alan Cox
-  *
-  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
-  *
-  * To control link specific options lmcctl is required.
-  * It can be obtained from ftp.lanmedia.com.
-  *
-  * Linux driver notes:
-  * Linux uses the device struct lmc_private to pass private information
-  * around.
-  *
-  * The initialization portion of this driver consists of the lmc_reset()
-  * and lmc_dec_reset() functions, as well as the LED controls and the
-  * lmc_initcsrs() function.
-  *
-  * The watchdog function runs every second and checks to see if
-  * we still have link, and that the timing source is what we expected
-  * it to be.  If link is lost, the interface is marked down, and
-  * we no longer can transmit.
-  */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-#include <asm/processor.h>             /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-#include <linux/jiffies.h>
-//#include <asm/spinlock.h>
-
-#define DRIVER_MAJOR_VERSION     1
-#define DRIVER_MINOR_VERSION    34
-#define DRIVER_SUB_VERSION       0
-
-#define DRIVER_VERSION  ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-#include "lmc_proto.h"
-
-static int LMC_PKT_BUF_SZ = 1542;
-
-static const struct pci_device_id lmc_pci_tbl[] = {
-	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
-	  PCI_VENDOR_ID_LMC, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
-	  PCI_ANY_ID, PCI_VENDOR_ID_LMC },
-	{ 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
-MODULE_LICENSE("GPL v2");
-
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
-					struct net_device *dev);
-static int lmc_rx (struct net_device *dev);
-static int lmc_open(struct net_device *dev);
-static int lmc_close(struct net_device *dev);
-static struct net_device_stats *lmc_get_stats(struct net_device *dev);
-static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
-static void lmc_softreset(lmc_softc_t * const);
-static void lmc_running_reset(struct net_device *dev);
-static int lmc_ifdown(struct net_device * const);
-static void lmc_watchdog(struct timer_list *t);
-static void lmc_reset(lmc_softc_t * const sc);
-static void lmc_dec_reset(lmc_softc_t * const sc);
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
-
-/*
- * linux reserves 16 device specific IOCTLs.  We call them
- * LMCIOC* to control various bits of our world.
- */
-static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
-			      void __user *data, int cmd) /*fold00*/
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-    lmc_ctl_t ctl;
-    int ret = -EOPNOTSUPP;
-    u16 regVal;
-    unsigned long flags;
-
-    /*
-     * Most functions mess with the structure
-     * Disable interrupts while we do the polling
-     */
-
-    switch (cmd) {
-        /*
-         * Return current driver state.  Since we keep this up
-         * to date internally, just copy this out to the user.
-         */
-    case LMCIOCGINFO: /*fold01*/
-	if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t)))
-		ret = -EFAULT;
-	else
-		ret = 0;
-        break;
-
-    case LMCIOCSINFO: /*fold01*/
-        if (!capable(CAP_NET_ADMIN)) {
-            ret = -EPERM;
-            break;
-        }
-
-        if(dev->flags & IFF_UP){
-            ret = -EBUSY;
-            break;
-        }
-
-	if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
-		ret = -EFAULT;
-		break;
-	}
-
-	spin_lock_irqsave(&sc->lmc_lock, flags);
-        sc->lmc_media->set_status (sc, &ctl);
-
-        if(ctl.crc_length != sc->ictl.crc_length) {
-            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
-	    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
-		sc->TxDescriptControlInit |=  LMC_TDES_ADD_CRC_DISABLE;
-	    else
-		sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
-        }
-	spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-        ret = 0;
-        break;
-
-    case LMCIOCIFTYPE: /*fold01*/
-        {
-	    u16 old_type = sc->if_type;
-	    u16	new_type;
-
-	    if (!capable(CAP_NET_ADMIN)) {
-		ret = -EPERM;
-		break;
-	    }
-
-	    if (copy_from_user(&new_type, data, sizeof(u16))) {
-		ret = -EFAULT;
-		break;
-	    }
-
-            
-	    if (new_type == old_type)
-	    {
-		ret = 0 ;
-		break;				/* no change */
-            }
-            
-	    spin_lock_irqsave(&sc->lmc_lock, flags);
-            lmc_proto_close(sc);
-
-            sc->if_type = new_type;
-            lmc_proto_attach(sc);
-	    ret = lmc_proto_open(sc);
-	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-	    break;
-	}
-
-    case LMCIOCGETXINFO: /*fold01*/
-	spin_lock_irqsave(&sc->lmc_lock, flags);
-        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
-
-        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
-        sc->lmc_xinfo.PciSlotNumber = 0;
-        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
-        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
-        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
-        sc->lmc_xinfo.XilinxRevisionNumber =
-            lmc_mii_readreg (sc, 0, 3) & 0xf;
-        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
-        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
-        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
-	spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
-
-	if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo)))
-		ret = -EFAULT;
-	else
-		ret = 0;
-
-        break;
-
-    case LMCIOCGETLMCSTATS:
-	    spin_lock_irqsave(&sc->lmc_lock, flags);
-	    if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
-		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
-		    sc->extra_stats.framingBitErrorCount +=
-			    lmc_mii_readreg(sc, 0, 18) & 0xff;
-		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
-		    sc->extra_stats.framingBitErrorCount +=
-			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
-		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
-		    sc->extra_stats.lineCodeViolationCount +=
-			    lmc_mii_readreg(sc, 0, 18) & 0xff;
-		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
-		    sc->extra_stats.lineCodeViolationCount +=
-			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
-		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
-		    regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
-
-		    sc->extra_stats.lossOfFrameCount +=
-			    (regVal & T1FRAMER_LOF_MASK) >> 4;
-		    sc->extra_stats.changeOfFrameAlignmentCount +=
-			    (regVal & T1FRAMER_COFA_MASK) >> 2;
-		    sc->extra_stats.severelyErroredFrameCount +=
-			    regVal & T1FRAMER_SEF_MASK;
-	    }
-	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-	    if (copy_to_user(data, &sc->lmc_device->stats,
-			     sizeof(sc->lmc_device->stats)) ||
-		copy_to_user(data + sizeof(sc->lmc_device->stats),
-			     &sc->extra_stats, sizeof(sc->extra_stats)))
-		    ret = -EFAULT;
-	    else
-		    ret = 0;
-	    break;
-
-    case LMCIOCCLEARLMCSTATS:
-	    if (!capable(CAP_NET_ADMIN)) {
-		    ret = -EPERM;
-		    break;
-	    }
-
-	    spin_lock_irqsave(&sc->lmc_lock, flags);
-	    memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
-	    memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
-	    sc->extra_stats.check = STATCHECK;
-	    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
-		    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
-	    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
-	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-	    ret = 0;
-	    break;
-
-    case LMCIOCSETCIRCUIT: /*fold01*/
-        if (!capable(CAP_NET_ADMIN)){
-            ret = -EPERM;
-            break;
-        }
-
-        if(dev->flags & IFF_UP){
-            ret = -EBUSY;
-            break;
-        }
-
-	if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
-		ret = -EFAULT;
-		break;
-	}
-	spin_lock_irqsave(&sc->lmc_lock, flags);
-        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
-        sc->ictl.circuit_type = ctl.circuit_type;
-	spin_unlock_irqrestore(&sc->lmc_lock, flags);
-        ret = 0;
-
-        break;
-
-    case LMCIOCRESET: /*fold01*/
-        if (!capable(CAP_NET_ADMIN)){
-            ret = -EPERM;
-            break;
-        }
-
-	spin_lock_irqsave(&sc->lmc_lock, flags);
-        /* Reset driver and bring back to current state */
-        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
-        lmc_running_reset (dev);
-        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
-
-        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
-	spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-        ret = 0;
-        break;
-
-#ifdef DEBUG
-    case LMCIOCDUMPEVENTLOG:
-	if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) {
-		ret = -EFAULT;
-		break;
-	}
-	if (copy_to_user(data + sizeof(u32), lmcEventLogBuf,
-			 sizeof(lmcEventLogBuf)))
-		ret = -EFAULT;
-	else
-		ret = 0;
-
-        break;
-#endif /* end ifdef DEBUG */
-    case LMCIOCT1CONTROL: /*fold01*/
-        if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
-            ret = -EOPNOTSUPP;
-            break;
-        }
-        break;
-    case LMCIOCXILINX: /*fold01*/
-        {
-            struct lmc_xilinx_control xc; /*fold02*/
-
-            if (!capable(CAP_NET_ADMIN)){
-                ret = -EPERM;
-                break;
-            }
-
-            /*
-             * Stop the transmitter while we restart the hardware
-             */
-            netif_stop_queue(dev);
-
-	    if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) {
-		ret = -EFAULT;
-		break;
-	    }
-            switch(xc.command){
-            case lmc_xilinx_reset: /*fold02*/
-                {
-		    spin_lock_irqsave(&sc->lmc_lock, flags);
-                    lmc_mii_readreg (sc, 0, 16);
-
-                    /*
-                     * Make all of them 0 and make input
-                     */
-                    lmc_gpio_mkinput(sc, 0xff);
-
-                    /*
-                     * make the reset output
-                     */
-                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
-                    /*
-                     * RESET low to force configuration.  This also forces
-                     * the transmitter clock to be internal, but we expect to reset
-                     * that later anyway.
-                     */
-
-                    sc->lmc_gpio &= ~LMC_GEP_RESET;
-                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
-                    /*
-                     * hold for more than 10 microseconds
-                     */
-                    udelay(50);
-
-                    sc->lmc_gpio |= LMC_GEP_RESET;
-                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
-                    /*
-                     * stop driving Xilinx-related signals
-                     */
-                    lmc_gpio_mkinput(sc, 0xff);
-
-                    /* Reset the framer hardware */
-                    sc->lmc_media->set_link_status (sc, 1);
-                    sc->lmc_media->set_status (sc, NULL);
-//                    lmc_softreset(sc);
-
-                    {
-                        int i;
-                        for(i = 0; i < 5; i++){
-                            lmc_led_on(sc, LMC_DS3_LED0);
-                            mdelay(100);
-                            lmc_led_off(sc, LMC_DS3_LED0);
-                            lmc_led_on(sc, LMC_DS3_LED1);
-                            mdelay(100);
-                            lmc_led_off(sc, LMC_DS3_LED1);
-                            lmc_led_on(sc, LMC_DS3_LED3);
-                            mdelay(100);
-                            lmc_led_off(sc, LMC_DS3_LED3);
-                            lmc_led_on(sc, LMC_DS3_LED2);
-                            mdelay(100);
-                            lmc_led_off(sc, LMC_DS3_LED2);
-                        }
-                    }
-		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-                    
-                    
-
-                    ret = 0x0;
-
-                }
-
-                break;
-            case lmc_xilinx_load_prom: /*fold02*/
-                {
-                    int timeout = 500000;
-		    spin_lock_irqsave(&sc->lmc_lock, flags);
-                    lmc_mii_readreg (sc, 0, 16);
-
-                    /*
-                     * Make all of them 0 and make input
-                     */
-                    lmc_gpio_mkinput(sc, 0xff);
-
-                    /*
-                     * make the reset output
-                     */
-                    lmc_gpio_mkoutput(sc,  LMC_GEP_DP | LMC_GEP_RESET);
-
-                    /*
-                     * RESET low to force configuration.  This also forces
-                     * the transmitter clock to be internal, but we expect to reset
-                     * that later anyway.
-                     */
-
-                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
-                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
-                    /*
-                     * hold for more than 10 microseconds
-                     */
-                    udelay(50);
-
-                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
-                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-                    /*
-                     * busy wait for the chip to reset
-                     */
-                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
-                           (timeout-- > 0))
-                        cpu_relax();
-
-
-                    /*
-                     * stop driving Xilinx-related signals
-                     */
-                    lmc_gpio_mkinput(sc, 0xff);
-		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-                    ret = 0x0;
-                    
-
-                    break;
-
-                }
-
-            case lmc_xilinx_load: /*fold02*/
-                {
-                    char *data;
-                    int pos;
-                    int timeout = 500000;
-
-                    if (!xc.data) {
-                            ret = -EINVAL;
-                            break;
-                    }
-
-                    data = memdup_user(xc.data, xc.len);
-                    if (IS_ERR(data)) {
-                            ret = PTR_ERR(data);
-                            break;
-                    }
-
-                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
-
-		    spin_lock_irqsave(&sc->lmc_lock, flags);
-                    lmc_gpio_mkinput(sc, 0xff);
-
-                    /*
-                     * Clear the Xilinx and start programming from the DEC
-                     */
-
-                    /*
-                     * Set output as:
-                     * Reset: 0 (active)
-                     * DP:    0 (active)
-                     * Mode:  1
-                     *
-                     */
-                    sc->lmc_gpio = 0x00;
-                    sc->lmc_gpio &= ~LMC_GEP_DP;
-                    sc->lmc_gpio &= ~LMC_GEP_RESET;
-                    sc->lmc_gpio |=  LMC_GEP_MODE;
-                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
-
-                    /*
-                     * Wait at least 10 us, 20 to be safe
-                     */
-                    udelay(50);
-
-                    /*
-                     * Clear reset and activate programming lines
-                     * Reset: Input
-                     * DP:    Input
-                     * Clock: Output
-                     * Data:  Output
-                     * Mode:  Output
-                     */
-                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
-
-                    /*
-                     * Set LOAD, DATA, Clock to 1
-                     */
-                    sc->lmc_gpio = 0x00;
-                    sc->lmc_gpio |= LMC_GEP_MODE;
-                    sc->lmc_gpio |= LMC_GEP_DATA;
-                    sc->lmc_gpio |= LMC_GEP_CLK;
-                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-                    
-                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
-
-                    /*
-                     * busy wait for the chip to reset
-                     */
-                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
-                           (timeout-- > 0))
-                        cpu_relax();
-
-                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
-
-                    for(pos = 0; pos < xc.len; pos++){
-                        switch(data[pos]){
-                        case 0:
-                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
-                            break;
-                        case 1:
-                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
-                            break;
-                        default:
-                            printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
-                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
-                        }
-                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
-                        sc->lmc_gpio |= LMC_GEP_MODE;
-                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-                        udelay(1);
-                        
-                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
-                        sc->lmc_gpio |= LMC_GEP_MODE;
-                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-                        udelay(1);
-                    }
-                    if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
-                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
-                    }
-                    else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
-                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
-                    }
-                    else {
-                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
-                    }
-
-                    lmc_gpio_mkinput(sc, 0xff);
-                    
-                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
-                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
-                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
-                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-                    kfree(data);
-                    
-                    ret = 0;
-                    
-                    break;
-                }
-            default: /*fold02*/
-                ret = -EBADE;
-                break;
-            }
-
-            netif_wake_queue(dev);
-            sc->lmc_txfull = 0;
-
-        }
-        break;
-    default:
-	break;
-    }
-
-    return ret;
-}
-
-
-/* the watchdog process that cruises around */
-static void lmc_watchdog(struct timer_list *t) /*fold00*/
-{
-    lmc_softc_t *sc = from_timer(sc, t, timer);
-    struct net_device *dev = sc->lmc_device;
-    int link_status;
-    u32 ticks;
-    unsigned long flags;
-
-    spin_lock_irqsave(&sc->lmc_lock, flags);
-
-    if(sc->check != 0xBEAFCAFE){
-        printk("LMC: Corrupt net_device struct, breaking out\n");
-	spin_unlock_irqrestore(&sc->lmc_lock, flags);
-        return;
-    }
-
-
-    /* Make sure the tx jabber and rx watchdog are off,
-     * and the transmit and receive processes are running.
-     */
-
-    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
-    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
-    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
-    if (sc->lmc_ok == 0)
-        goto kick_timer;
-
-    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
-
-    /* --- begin time out check -----------------------------------
-     * check for a transmit interrupt timeout
-     * Has the packet xmt vs xmt serviced threshold been exceeded */
-    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
-	sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
-	sc->tx_TimeoutInd == 0)
-    {
-
-        /* wait for the watchdog to come around again */
-        sc->tx_TimeoutInd = 1;
-    }
-    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
-	     sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
-	     sc->tx_TimeoutInd)
-    {
-
-        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
-
-        sc->tx_TimeoutDisplay = 1;
-	sc->extra_stats.tx_TimeoutCnt++;
-
-        /* DEC chip is stuck, hit it with a RESET!!!! */
-        lmc_running_reset (dev);
-
-
-        /* look at receive & transmit process state to make sure they are running */
-        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
-
-        /* look at: DSR - 02  for Reg 16
-         *                  CTS - 08
-         *                  DCD - 10
-         *                  RI  - 20
-         * for Reg 17
-         */
-        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
-
-        /* reset the transmit timeout detection flag */
-        sc->tx_TimeoutInd = 0;
-        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
-	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
-    } else {
-        sc->tx_TimeoutInd = 0;
-        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
-	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
-    }
-
-    /* --- end time out check ----------------------------------- */
-
-
-    link_status = sc->lmc_media->get_link_status (sc);
-
-    /*
-     * hardware level link lost, but the interface is marked as up.
-     * Mark it as down.
-     */
-    if ((link_status == 0) && (sc->last_link_status != 0)) {
-        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
-        sc->last_link_status = 0;
-        /* lmc_reset (sc); Why reset??? The link can go down ok */
-
-        /* Inform the world that link has been lost */
-	netif_carrier_off(dev);
-    }
-
-    /*
-     * hardware link is up, but the interface is marked as down.
-     * Bring it back up again.
-     */
-     if (link_status != 0 && sc->last_link_status == 0) {
-         printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
-         sc->last_link_status = 1;
-         /* lmc_reset (sc); Again why reset??? */
-
-	 netif_carrier_on(dev);
-     }
-
-    /* Call media specific watchdog functions */
-    sc->lmc_media->watchdog(sc);
-
-    /*
-     * Poke the transmitter to make sure it
-     * never stops, even if we run out of mem
-     */
-    LMC_CSR_WRITE(sc, csr_rxpoll, 0);
-
-    /*
-     * Check for code that failed
-     * and try and fix it as appropriate
-     */
-    if(sc->failed_ring == 1){
-        /*
-         * Failed to set up the recv/xmit rings
-         * Try again
-         */
-        sc->failed_ring = 0;
-        lmc_softreset(sc);
-    }
-    if(sc->failed_recv_alloc == 1){
-        /*
-         * We failed to alloc mem in the
-         * interrupt handler, go through the rings
-         * and rebuild them
-         */
-        sc->failed_recv_alloc = 0;
-        lmc_softreset(sc);
-    }
-
-
-    /*
-     * remember the timer value
-     */
-kick_timer:
-
-    ticks = LMC_CSR_READ (sc, csr_gp_timer);
-    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
-    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
-    /*
-     * restart this timer.
-     */
-    sc->timer.expires = jiffies + (HZ);
-    add_timer (&sc->timer);
-
-    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
-
-static int lmc_attach(struct net_device *dev, unsigned short encoding,
-		      unsigned short parity)
-{
-	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
-		return 0;
-	return -EINVAL;
-}
-
-static const struct net_device_ops lmc_ops = {
-	.ndo_open       = lmc_open,
-	.ndo_stop       = lmc_close,
-	.ndo_start_xmit = hdlc_start_xmit,
-	.ndo_siocwandev = hdlc_ioctl,
-	.ndo_siocdevprivate = lmc_siocdevprivate,
-	.ndo_tx_timeout = lmc_driver_timeout,
-	.ndo_get_stats  = lmc_get_stats,
-};
-
-static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	lmc_softc_t *sc;
-	struct net_device *dev;
-	u16 subdevice;
-	u16 AdapModelNum;
-	int err;
-	static int cards_found;
-
-	err = pcim_enable_device(pdev);
-	if (err) {
-		printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
-		return err;
-	}
-
-	err = pci_request_regions(pdev, "lmc");
-	if (err) {
-		printk(KERN_ERR "lmc: pci_request_region failed\n");
-		return err;
-	}
-
-	/*
-	 * Allocate our own device structure
-	 */
-	sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
-	if (!sc)
-		return -ENOMEM;
-
-	dev = alloc_hdlcdev(sc);
-	if (!dev) {
-		printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
-		return -ENOMEM;
-	}
-
-
-	dev->type = ARPHRD_HDLC;
-	dev_to_hdlc(dev)->xmit = lmc_start_xmit;
-	dev_to_hdlc(dev)->attach = lmc_attach;
-	dev->netdev_ops = &lmc_ops;
-	dev->watchdog_timeo = HZ; /* 1 second */
-	dev->tx_queue_len = 100;
-	sc->lmc_device = dev;
-	sc->name = dev->name;
-	sc->if_type = LMC_PPP;
-	sc->check = 0xBEAFCAFE;
-	dev->base_addr = pci_resource_start(pdev, 0);
-	dev->irq = pdev->irq;
-	pci_set_drvdata(pdev, dev);
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	/*
-	 * This will get the protocol layer ready and do any one-time inits.
-	 * Must have a valid sc and dev structure.
-	 */
-	lmc_proto_attach(sc);
-
-	/* Init the spin lock so we can call it later */
-
-	spin_lock_init(&sc->lmc_lock);
-	pci_set_master(pdev);
-
-	printk(KERN_INFO "hdlc: detected at %lx, irq %d\n",
-	       dev->base_addr, dev->irq);
-
-	err = register_hdlc_device(dev);
-	if (err) {
-		printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
-		free_netdev(dev);
-		return err;
-	}
-
-    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
-    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
-
-    /*
-     *
-     * Check either the subvendor or the subdevice; some systems reverse
-     * the setting in the BIOS, which seems to be version and arch dependent.
-     * Fix the error by exchanging the two values.
-     */
-    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
-	    subdevice = pdev->subsystem_vendor;
-
-    switch (subdevice) {
-    case PCI_DEVICE_ID_LMC_HSSI:
-	printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
-        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
-        sc->lmc_media = &lmc_hssi_media;
-        break;
-    case PCI_DEVICE_ID_LMC_DS3:
-	printk(KERN_INFO "%s: LMC DS3\n", dev->name);
-        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
-        sc->lmc_media = &lmc_ds3_media;
-        break;
-    case PCI_DEVICE_ID_LMC_SSI:
-	printk(KERN_INFO "%s: LMC SSI\n", dev->name);
-        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
-        sc->lmc_media = &lmc_ssi_media;
-        break;
-    case PCI_DEVICE_ID_LMC_T1:
-	printk(KERN_INFO "%s: LMC T1\n", dev->name);
-        sc->lmc_cardtype = LMC_CARDTYPE_T1;
-        sc->lmc_media = &lmc_t1_media;
-        break;
-    default:
-	printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
-	unregister_hdlc_device(dev);
-	return -EIO;
-        break;
-    }
-
-    lmc_initcsrs (sc, dev->base_addr, 8);
-
-    lmc_gpio_mkinput (sc, 0xff);
-    sc->lmc_gpio = 0;		/* drive no signals yet */
-
-    sc->lmc_media->defaults (sc);
-
-    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
-    /* verify that the PCI Sub System ID matches the Adapter Model number
-     * from the MII register
-     */
-    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
-
-    if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
-	 subdevice != PCI_DEVICE_ID_LMC_T1) &&
-	(AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
-	 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
-	(AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
-	 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
-	(AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
-	 subdevice != PCI_DEVICE_ID_LMC_HSSI))
-	    printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
-		   " Subsystem ID = 0x%04x\n",
-		   dev->name, AdapModelNum, subdevice);
-
-    /*
-     * reset clock
-     */
-    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
-
-    sc->board_idx = cards_found++;
-    sc->extra_stats.check = STATCHECK;
-    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
-	    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
-    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
-
-    sc->lmc_ok = 0;
-    sc->last_link_status = 0;
-
-    return 0;
-}
-
-/*
- * Called from pci when removing module.
- */
-static void lmc_remove_one(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-
-	if (dev) {
-		printk(KERN_DEBUG "%s: removing...\n", dev->name);
-		unregister_hdlc_device(dev);
-		free_netdev(dev);
-	}
-}
-
-/* After this is called, packets can be sent.
- * Does not initialize the addresses
- */
-static int lmc_open(struct net_device *dev)
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-    int err;
-
-    lmc_led_on(sc, LMC_DS3_LED0);
-
-    lmc_dec_reset(sc);
-    lmc_reset(sc);
-
-    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
-    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
-		  lmc_mii_readreg(sc, 0, 17));
-
-    if (sc->lmc_ok)
-        return 0;
-
-    lmc_softreset (sc);
-
-    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
-    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
-        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
-        return -EAGAIN;
-    }
-    sc->got_irq = 1;
-
-    /* Assert Terminal Active */
-    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
-    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
-    /*
-     * reset to last state.
-     */
-    sc->lmc_media->set_status (sc, NULL);
-
-    /* setup default bits to be used in tulip_desc_t transmit descriptor
-     * -baz */
-    sc->TxDescriptControlInit = (
-                                 LMC_TDES_INTERRUPT_ON_COMPLETION
-                                 | LMC_TDES_FIRST_SEGMENT
-                                 | LMC_TDES_LAST_SEGMENT
-                                 | LMC_TDES_SECOND_ADDR_CHAINED
-                                 | LMC_TDES_DISABLE_PADDING
-                                );
-
-    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
-        /* disable 32 bit CRC generated by ASIC */
-        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
-    }
-    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
-    /* Acknowledge the Terminal Active and light LEDs */
-
-    /* dev->flags |= IFF_UP; */
-
-    if ((err = lmc_proto_open(sc)) != 0)
-	    return err;
-
-    netif_start_queue(dev);
-    sc->extra_stats.tx_tbusy0++;
-
-    /*
-     * select what interrupts we want to get
-     */
-    sc->lmc_intrmask = 0;
-    /* Should be using the default interrupt mask defined in the .h file. */
-    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
-                         | TULIP_STS_RXINTR
-                         | TULIP_STS_TXINTR
-                         | TULIP_STS_ABNRMLINTR
-                         | TULIP_STS_SYSERROR
-                         | TULIP_STS_TXSTOPPED
-                         | TULIP_STS_TXUNDERFLOW
-                         | TULIP_STS_RXSTOPPED
-		         | TULIP_STS_RXNOBUF
-                        );
-    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
-    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
-    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
-    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
-    sc->lmc_ok = 1; /* Run watchdog */
-
-    /*
-     * Set the if up now - pfb
-     */
-
-    sc->last_link_status = 1;
-
-    /*
-     * Setup a timer for the watchdog on probe, and start it running.
-     * Since lmc_ok == 0, it will be a NOP for now.
-     */
-    timer_setup(&sc->timer, lmc_watchdog, 0);
-    sc->timer.expires = jiffies + HZ;
-    add_timer (&sc->timer);
-
-    return 0;
-}
-
-/* Total reset to compensate for the AdTran DSU doing bad things
- *  under heavy load
- */
-
-static void lmc_running_reset (struct net_device *dev) /*fold00*/
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-
-    /* stop interrupts */
-    /* Clear the interrupt mask */
-    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
-    lmc_dec_reset (sc);
-    lmc_reset (sc);
-    lmc_softreset (sc);
-    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
-    sc->lmc_media->set_link_status (sc, 1);
-    sc->lmc_media->set_status (sc, NULL);
-
-    netif_wake_queue(dev);
-
-    sc->lmc_txfull = 0;
-    sc->extra_stats.tx_tbusy0++;
-
-    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
-    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
-    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
-    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-}
-
-
-/* This is what is called when you ifconfig down a device.
- * This disables the timer for the watchdog and keepalives,
- * and disables the irq for dev.
- */
-static int lmc_close(struct net_device *dev)
-{
-    /* not calling release_region() as we should */
-    lmc_softc_t *sc = dev_to_sc(dev);
-
-    sc->lmc_ok = 0;
-    sc->lmc_media->set_link_status (sc, 0);
-    del_timer (&sc->timer);
-    lmc_proto_close(sc);
-    lmc_ifdown (dev);
-
-    return 0;
-}
-
-/* Ends the transfer of packets */
-/* When the interface goes down, this is called */
-static int lmc_ifdown (struct net_device *dev) /*fold00*/
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-    u32 csr6;
-    int i;
-
-    /* Don't let anything else go on right now */
-    //    dev->start = 0;
-    netif_stop_queue(dev);
-    sc->extra_stats.tx_tbusy1++;
-
-    /* stop interrupts */
-    /* Clear the interrupt mask */
-    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
-    /* Stop Tx and Rx on the chip */
-    csr6 = LMC_CSR_READ (sc, csr_command);
-    csr6 &= ~LMC_DEC_ST;		/* Turn off the Transmission bit */
-    csr6 &= ~LMC_DEC_SR;		/* Turn off the Receive bit */
-    LMC_CSR_WRITE (sc, csr_command, csr6);
-
-    sc->lmc_device->stats.rx_missed_errors +=
-	    LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
-    /* release the interrupt */
-    if(sc->got_irq == 1){
-        free_irq (dev->irq, dev);
-        sc->got_irq = 0;
-    }
-
-    /* free skbuffs in the Rx queue */
-    for (i = 0; i < LMC_RXDESCS; i++)
-    {
-        struct sk_buff *skb = sc->lmc_rxq[i];
-        sc->lmc_rxq[i] = NULL;
-        sc->lmc_rxring[i].status = 0;
-        sc->lmc_rxring[i].length = 0;
-        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
-        if (skb != NULL)
-            dev_kfree_skb(skb);
-        sc->lmc_rxq[i] = NULL;
-    }
-
-    for (i = 0; i < LMC_TXDESCS; i++)
-    {
-        if (sc->lmc_txq[i] != NULL)
-            dev_kfree_skb(sc->lmc_txq[i]);
-        sc->lmc_txq[i] = NULL;
-    }
-
-    lmc_led_off (sc, LMC_MII16_LED_ALL);
-
-    netif_wake_queue(dev);
-    sc->extra_stats.tx_tbusy0++;
-
-    return 0;
-}
-
-/* Interrupt handling routine.  This will take an incoming packet, or clean
- * up after a transmit.
- */
-static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
-{
-    struct net_device *dev = (struct net_device *) dev_instance;
-    lmc_softc_t *sc = dev_to_sc(dev);
-    u32 csr;
-    int i;
-    s32 stat;
-    unsigned int badtx;
-    int max_work = LMC_RXDESCS;
-    int handled = 0;
-
-    spin_lock(&sc->lmc_lock);
-
-    /*
-     * Read the csr to find what interrupts we have (if any)
-     */
-    csr = LMC_CSR_READ (sc, csr_status);
-
-    /*
-     * Make sure this is our interrupt
-     */
-    if ( ! (csr & sc->lmc_intrmask)) {
-        goto lmc_int_fail_out;
-    }
-
-    /* always go through this loop at least once */
-    while (csr & sc->lmc_intrmask) {
-	handled = 1;
-
-        /*
-         * Clear interrupt bits, we handle all case below
-         */
-        LMC_CSR_WRITE (sc, csr_status, csr);
-
-        /*
-         * One of
-         *  - Transmit process timed out CSR5<1>
-         *  - Transmit jabber timeout    CSR5<3>
-         *  - Transmit underflow         CSR5<5>
-         *  - Transmit Receiver buffer unavailable CSR5<7>
-         *  - Receive process stopped    CSR5<8>
-         *  - Receive watchdog timeout   CSR5<9>
-         *  - Early transmit interrupt   CSR5<10>
-         *
-         * Is this really right? Should we do a running reset for jabber?
-         * (being a WAN card and all)
-         */
-        if (csr & TULIP_STS_ABNRMLINTR){
-            lmc_running_reset (dev);
-            break;
-        }
-
-        if (csr & TULIP_STS_RXINTR)
-            lmc_rx (dev);
-
-        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
-
-	    int		n_compl = 0 ;
-            /* reset the transmit timeout detection flag -baz */
-	    sc->extra_stats.tx_NoCompleteCnt = 0;
-
-            badtx = sc->lmc_taint_tx;
-            i = badtx % LMC_TXDESCS;
-
-            while ((badtx < sc->lmc_next_tx)) {
-                stat = sc->lmc_txring[i].status;
-
-                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
-						 sc->lmc_txring[i].length);
-                /*
-                 * If bit 31 is 1 the Tulip owns it; break out of the loop
-                 */
-                if (stat & 0x80000000)
-                    break;
-
-		n_compl++ ;		/* i.e., have an empty slot in ring */
-                /*
-                 * If we have no skbuff or have already cleared it,
-                 * continue to the next buffer
-                 */
-                if (sc->lmc_txq[i] == NULL)
-                    continue;
-
-		/*
-		 * Check the total error summary to look for any errors
-		 */
-		if (stat & 0x8000) {
-			sc->lmc_device->stats.tx_errors++;
-			if (stat & 0x4104)
-				sc->lmc_device->stats.tx_aborted_errors++;
-			if (stat & 0x0C00)
-				sc->lmc_device->stats.tx_carrier_errors++;
-			if (stat & 0x0200)
-				sc->lmc_device->stats.tx_window_errors++;
-			if (stat & 0x0002)
-				sc->lmc_device->stats.tx_fifo_errors++;
-		} else {
-			sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
-
-			sc->lmc_device->stats.tx_packets++;
-                }
-
-		dev_consume_skb_irq(sc->lmc_txq[i]);
-                sc->lmc_txq[i] = NULL;
-
-                badtx++;
-                i = badtx % LMC_TXDESCS;
-            }
-
-            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
-            {
-                printk ("%s: out of sync pointer\n", dev->name);
-                badtx += LMC_TXDESCS;
-            }
-            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
-            sc->lmc_txfull = 0;
-            netif_wake_queue(dev);
-	    sc->extra_stats.tx_tbusy0++;
-
-
-#ifdef DEBUG
-	    sc->extra_stats.dirtyTx = badtx;
-	    sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
-	    sc->extra_stats.lmc_txfull = sc->lmc_txfull;
-#endif
-            sc->lmc_taint_tx = badtx;
-
-            /*
-             * Why was there a break here???
-             */
-        }			/* end handle transmit interrupt */
-
-        if (csr & TULIP_STS_SYSERROR) {
-            u32 error;
-            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
-            error = csr>>23 & 0x7;
-            switch(error){
-            case 0x000:
-                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
-                break;
-            case 0x001:
-                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
-                break;
-            case 0x002:
-                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
-                break;
-            default:
-                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
-            }
-            lmc_dec_reset (sc);
-            lmc_reset (sc);
-            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
-            LMC_EVENT_LOG(LMC_EVENT_RESET2,
-                          lmc_mii_readreg (sc, 0, 16),
-                          lmc_mii_readreg (sc, 0, 17));
-
-        }
-
-        
-        if(max_work-- <= 0)
-            break;
-        
-        /*
-         * Get current csr status to make sure
-         * we've cleared all interrupts
-         */
-        csr = LMC_CSR_READ (sc, csr_status);
-    }				/* end interrupt loop */
-    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
-
-lmc_int_fail_out:
-
-    spin_unlock(&sc->lmc_lock);
-
-    return IRQ_RETVAL(handled);
-}
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
-					struct net_device *dev)
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-    u32 flag;
-    int entry;
-    unsigned long flags;
-
-    spin_lock_irqsave(&sc->lmc_lock, flags);
-
-    /* normal path, tbusy known to be zero */
-
-    entry = sc->lmc_next_tx % LMC_TXDESCS;
-
-    sc->lmc_txq[entry] = skb;
-    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
-
-    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
-
-#ifndef GCOM
-    /* If the queue is less than half full, don't interrupt */
-    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
-    {
-        /* Do not interrupt on completion of this packet */
-        flag = 0x60000000;
-        netif_wake_queue(dev);
-    }
-    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
-    {
-        /* This generates an interrupt on completion of this packet */
-        flag = 0xe0000000;
-        netif_wake_queue(dev);
-    }
-    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
-    {
-        /* Do not interrupt on completion of this packet */
-        flag = 0x60000000;
-        netif_wake_queue(dev);
-    }
-    else
-    {
-        /* This generates an interrupt on completion of this packet */
-        flag = 0xe0000000;
-        sc->lmc_txfull = 1;
-        netif_stop_queue(dev);
-    }
-#else
-    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
-
-    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
-    {				/* ring full, go busy */
-        sc->lmc_txfull = 1;
-	netif_stop_queue(dev);
-	sc->extra_stats.tx_tbusy1++;
-        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
-    }
-#endif
-
-
-    if (entry == LMC_TXDESCS - 1)	/* last descriptor in ring */
-	flag |= LMC_TDES_END_OF_RING;	/* flag as such for Tulip */
-
-    /* don't pad small packets either */
-    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
-						sc->TxDescriptControlInit;
-
-    /* set the transmit timeout flag to be checked in
-     * the watchdog timer handler. -baz
-     */
-
-    sc->extra_stats.tx_NoCompleteCnt++;
-    sc->lmc_next_tx++;
-
-    /* give ownership to the chip */
-    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
-    sc->lmc_txring[entry].status = 0x80000000;
-
-    /* send now! */
-    LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
-    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-    return NETDEV_TX_OK;
-}
-
-
-static int lmc_rx(struct net_device *dev)
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-    int i;
-    int rx_work_limit = LMC_RXDESCS;
-    int rxIntLoopCnt;		/* debug -baz */
-    int localLengthErrCnt = 0;
-    long stat;
-    struct sk_buff *skb, *nsb;
-    u16 len;
-
-    lmc_led_on(sc, LMC_DS3_LED3);
-
-    rxIntLoopCnt = 0;		/* debug -baz */
-
-    i = sc->lmc_next_rx % LMC_RXDESCS;
-
-    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
-    {
-        rxIntLoopCnt++;		/* debug -baz */
-        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
-        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
-		if ((stat & 0x0000ffff) != 0x7fff) {
-			/* Oversized frame */
-			sc->lmc_device->stats.rx_length_errors++;
-			goto skip_packet;
-		}
-	}
-
-	if (stat & 0x00000008) { /* Catch a dribbling bit error */
-		sc->lmc_device->stats.rx_errors++;
-		sc->lmc_device->stats.rx_frame_errors++;
-		goto skip_packet;
-	}
-
-
-	if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
-		sc->lmc_device->stats.rx_errors++;
-		sc->lmc_device->stats.rx_crc_errors++;
-		goto skip_packet;
-	}
-
-	if (len > LMC_PKT_BUF_SZ) {
-		sc->lmc_device->stats.rx_length_errors++;
-		localLengthErrCnt++;
-		goto skip_packet;
-	}
-
-	if (len < sc->lmc_crcSize + 2) {
-		sc->lmc_device->stats.rx_length_errors++;
-		sc->extra_stats.rx_SmallPktCnt++;
-		localLengthErrCnt++;
-		goto skip_packet;
-	}
-
-        if(stat & 0x00004000){
-            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
-        }
-
-        len -= sc->lmc_crcSize;
-
-        skb = sc->lmc_rxq[i];
-
-        /*
-         * We ran out of memory at some point
-         * just allocate an skb buff and continue.
-         */
-        
-        if (!skb) {
-            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
-            if (nsb) {
-                sc->lmc_rxq[i] = nsb;
-                nsb->dev = dev;
-                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
-            }
-            sc->failed_recv_alloc = 1;
-            goto skip_packet;
-        }
-        
-	sc->lmc_device->stats.rx_packets++;
-	sc->lmc_device->stats.rx_bytes += len;
-
-        LMC_CONSOLE_LOG("recv", skb->data, len);
-
-        /*
-         * I'm not sure of the sanity of this
-         * Packets could be arriving at a constant
-         * 44.210mbits/sec and we're going to copy
-         * them into a new buffer??
-         */
-        
-        if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
-            /*
-             * If it's a large packet don't copy it just hand it up
-             */
-        give_it_anyways:
-
-            sc->lmc_rxq[i] = NULL;
-            sc->lmc_rxring[i].buffer1 = 0x0;
-
-            skb_put (skb, len);
-            skb->protocol = lmc_proto_type(sc, skb);
-            skb_reset_mac_header(skb);
-            /* skb_reset_network_header(skb); */
-            skb->dev = dev;
-            lmc_proto_netif(sc, skb);
-
-            /*
-             * This skb will be destroyed by the upper layers, make a new one
-             */
-            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
-            if (nsb) {
-                sc->lmc_rxq[i] = nsb;
-                nsb->dev = dev;
-                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
-                /* Transferred to 21140 below */
-            }
-            else {
-                /*
-                 * We've run out of memory, stop trying to allocate
-                 * memory and exit the interrupt handler
-                 *
-                 * The chip may run out of receivers and stop
-                 * in which case we'll try to allocate the buffer
-                 * again.  (once a second)
-                 */
-		sc->extra_stats.rx_BuffAllocErr++;
-                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
-                sc->failed_recv_alloc = 1;
-                goto skip_out_of_mem;
-            }
-        }
-        else {
-            nsb = dev_alloc_skb(len);
-            if(!nsb) {
-                goto give_it_anyways;
-            }
-            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
-            
-            nsb->protocol = lmc_proto_type(sc, nsb);
-            skb_reset_mac_header(nsb);
-            /* skb_reset_network_header(nsb); */
-            nsb->dev = dev;
-            lmc_proto_netif(sc, nsb);
-        }
-
-    skip_packet:
-        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
-        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
-
-        sc->lmc_next_rx++;
-        i = sc->lmc_next_rx % LMC_RXDESCS;
-        rx_work_limit--;
-        if (rx_work_limit < 0)
-            break;
-    }
-
-    /* detect condition for LMC1000 where DSU cable attaches and fills
-     * descriptors with bogus packets
-     *
-    if (localLengthErrCnt > LMC_RXDESCS - 3) {
-	sc->extra_stats.rx_BadPktSurgeCnt++;
-	LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
-		      sc->extra_stats.rx_BadPktSurgeCnt);
-    } */
-
-    /* save max count of receive descriptors serviced */
-    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
-	    sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
-
-#ifdef DEBUG
-    if (rxIntLoopCnt == 0)
-    {
-        for (i = 0; i < LMC_RXDESCS; i++)
-        {
-            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
-                != DESC_OWNED_BY_DC21X4)
-            {
-                rxIntLoopCnt++;
-            }
-        }
-        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
-    }
-#endif
-
-
-    lmc_led_off(sc, LMC_DS3_LED3);
-
-skip_out_of_mem:
-    return 0;
-}
-
-static struct net_device_stats *lmc_get_stats(struct net_device *dev)
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-    unsigned long flags;
-
-    spin_lock_irqsave(&sc->lmc_lock, flags);
-
-    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
-    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-    return &sc->lmc_device->stats;
-}
-
-static struct pci_driver lmc_driver = {
-	.name		= "lmc",
-	.id_table	= lmc_pci_tbl,
-	.probe		= lmc_init_one,
-	.remove		= lmc_remove_one,
-};
-
-module_pci_driver(lmc_driver);
-
-unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
-{
-    int i;
-    int command = (0xf6 << 10) | (devaddr << 5) | regno;
-    int retval = 0;
-
-    LMC_MII_SYNC (sc);
-
-    for (i = 15; i >= 0; i--)
-    {
-        int dataval = (command & (1 << i)) ? 0x20000 : 0;
-
-        LMC_CSR_WRITE (sc, csr_9, dataval);
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-    }
-
-    for (i = 19; i > 0; i--)
-    {
-        LMC_CSR_WRITE (sc, csr_9, 0x40000);
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
-        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-    }
-
-    return (retval >> 1) & 0xffff;
-}
-
-void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
-{
-    int i = 32;
-    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
-
-    LMC_MII_SYNC (sc);
-
-    i = 31;
-    while (i >= 0)
-    {
-        int datav;
-
-        if (command & (1 << i))
-            datav = 0x20000;
-        else
-            datav = 0x00000;
-
-        LMC_CSR_WRITE (sc, csr_9, datav);
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-        i--;
-    }
-
-    i = 2;
-    while (i > 0)
-    {
-        LMC_CSR_WRITE (sc, csr_9, 0x40000);
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-        LMC_CSR_WRITE (sc, csr_9, 0x50000);
-        lmc_delay ();
-        /* __SLOW_DOWN_IO; */
-        i--;
-    }
-}
-
-static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
-{
-    int i;
-
-    /* Initialize the receive rings and buffers. */
-    sc->lmc_txfull = 0;
-    sc->lmc_next_rx = 0;
-    sc->lmc_next_tx = 0;
-    sc->lmc_taint_rx = 0;
-    sc->lmc_taint_tx = 0;
-
-    /*
-     * Setup each one of the receiver buffers
-     * allocate an skbuff for each one, setup the descriptor table
-     * and point each buffer at the next one
-     */
-
-    for (i = 0; i < LMC_RXDESCS; i++)
-    {
-        struct sk_buff *skb;
-
-        if (sc->lmc_rxq[i] == NULL)
-        {
-            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
-            if(skb == NULL){
-                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
-                sc->failed_ring = 1;
-                break;
-            }
-            else{
-                sc->lmc_rxq[i] = skb;
-            }
-        }
-        else
-        {
-            skb = sc->lmc_rxq[i];
-        }
-
-        skb->dev = sc->lmc_device;
-
-        /* owned by 21140 */
-        sc->lmc_rxring[i].status = 0x80000000;
-
-        /* used to be PKT_BUF_SZ; now uses the skb since we lose some to headroom */
-        sc->lmc_rxring[i].length = skb_tailroom(skb);
-
-        /* used to be tail, which seems odd since you'd wonder why write
-         * to the end of the packet, but since there's nothing there tail == data
-         */
-        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
-
-        /* This is fair since the structure is static and we have the next address */
-        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
-
-    }
-
-    /*
-     * Sets end of ring
-     */
-    if (i != 0) {
-        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
-        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
-    }
-    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
-
-    /* Initialize the transmit rings and buffers */
-    for (i = 0; i < LMC_TXDESCS; i++)
-    {
-        if (sc->lmc_txq[i] != NULL){		/* have buffer */
-            dev_kfree_skb(sc->lmc_txq[i]);	/* free it */
-	    sc->lmc_device->stats.tx_dropped++;	/* We just dropped a packet */
-        }
-        sc->lmc_txq[i] = NULL;
-        sc->lmc_txring[i].status = 0x00000000;
-        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
-    }
-    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
-    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
-}
-
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
-    sc->lmc_gpio_io &= ~bits;
-    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
-
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
-    sc->lmc_gpio_io |= bits;
-    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
-
-void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
-    if ((~sc->lmc_miireg16) & led) /* Already on! */
-        return;
-
-    sc->lmc_miireg16 &= ~led;
-    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
-
-void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
-    if (sc->lmc_miireg16 & led) /* Already set don't do anything */
-        return;
-
-    sc->lmc_miireg16 |= led;
-    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
-{
-    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
-    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
-    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
-    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
-    /*
-     * make some of the GPIO pins be outputs
-     */
-    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
-    /*
-     * RESET low to force state reset.  This also forces
-     * the transmitter clock to be internal, but we expect to reset
-     * that later anyway.
-     */
-    sc->lmc_gpio &= ~(LMC_GEP_RESET);
-    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-    /*
-     * hold for more than 10 microseconds
-     */
-    udelay(50);
-
-    /*
-     * stop driving Xilinx-related signals
-     */
-    lmc_gpio_mkinput(sc, LMC_GEP_RESET);
-
-    /*
-     * Call media specific init routine
-     */
-    sc->lmc_media->init(sc);
-
-    sc->extra_stats.resetCount++;
-}
-
-static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
-{
-    u32 val;
-
-    /*
-     * disable all interrupts
-     */
-    sc->lmc_intrmask = 0;
-    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
-
-    /*
-     * Reset the chip with a software reset command.
-     * Wait 10 microseconds (actually 50 PCI cycles but at
-     * 33MHz that comes to two microseconds but wait a
-     * bit longer anyway)
-     */
-    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
-    udelay(25);
-#ifdef __sparc__
-    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
-    sc->lmc_busmode = 0x00100000;
-    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
-    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
-#endif
-    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
-
-    /*
-     * We want:
-     *   no ethernet address in frames we write
-     *   disable padding (txdesc, padding disable)
-     *   ignore runt frames (rdes0 bit 15)
-     *   no receiver watchdog or transmitter jabber timer
-     *       (csr15 bit 0,14 == 1)
-     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
-     */
-
-    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
-                         | TULIP_CMD_FULLDUPLEX
-                         | TULIP_CMD_PASSBADPKT
-                         | TULIP_CMD_NOHEARTBEAT
-                         | TULIP_CMD_PORTSELECT
-                         | TULIP_CMD_RECEIVEALL
-                         | TULIP_CMD_MUSTBEONE
-                       );
-    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
-                          | TULIP_CMD_THRESHOLDCTL
-                          | TULIP_CMD_STOREFWD
-                          | TULIP_CMD_TXTHRSHLDCTL
-                        );
-
-    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
-
-    /*
-     * disable receiver watchdog and transmit jabber
-     */
-    val = LMC_CSR_READ(sc, csr_sia_general);
-    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
-    LMC_CSR_WRITE(sc, csr_sia_general, val);
-}
-
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
-                         size_t csr_size)
-{
-    sc->lmc_csrs.csr_busmode	        = csr_base +  0 * csr_size;
-    sc->lmc_csrs.csr_txpoll		= csr_base +  1 * csr_size;
-    sc->lmc_csrs.csr_rxpoll		= csr_base +  2 * csr_size;
-    sc->lmc_csrs.csr_rxlist		= csr_base +  3 * csr_size;
-    sc->lmc_csrs.csr_txlist		= csr_base +  4 * csr_size;
-    sc->lmc_csrs.csr_status		= csr_base +  5 * csr_size;
-    sc->lmc_csrs.csr_command	        = csr_base +  6 * csr_size;
-    sc->lmc_csrs.csr_intr		= csr_base +  7 * csr_size;
-    sc->lmc_csrs.csr_missed_frames	= csr_base +  8 * csr_size;
-    sc->lmc_csrs.csr_9		        = csr_base +  9 * csr_size;
-    sc->lmc_csrs.csr_10		        = csr_base + 10 * csr_size;
-    sc->lmc_csrs.csr_11		        = csr_base + 11 * csr_size;
-    sc->lmc_csrs.csr_12		        = csr_base + 12 * csr_size;
-    sc->lmc_csrs.csr_13		        = csr_base + 13 * csr_size;
-    sc->lmc_csrs.csr_14		        = csr_base + 14 * csr_size;
-    sc->lmc_csrs.csr_15		        = csr_base + 15 * csr_size;
-}
-
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
-{
-    lmc_softc_t *sc = dev_to_sc(dev);
-    u32 csr6;
-    unsigned long flags;
-
-    spin_lock_irqsave(&sc->lmc_lock, flags);
-
-    printk("%s: Xmitter busy|\n", dev->name);
-
-    sc->extra_stats.tx_tbusy_calls++;
-    if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT))
-	    goto bug_out;
-
-    /*
-     * Chip seems to have locked up
-     * Reset it
-     * This wipes out our whole descriptor
-     * table and starts from scratch
-     */
-
-    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
-                  LMC_CSR_READ (sc, csr_status),
-		  sc->extra_stats.tx_ProcTimeout);
-
-    lmc_running_reset (dev);
-
-    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
-    LMC_EVENT_LOG(LMC_EVENT_RESET2,
-                  lmc_mii_readreg (sc, 0, 16),
-                  lmc_mii_readreg (sc, 0, 17));
-
-    /* restart the tx processes */
-    csr6 = LMC_CSR_READ (sc, csr_command);
-    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
-    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
-
-    /* immediate transmit */
-    LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
-    sc->lmc_device->stats.tx_errors++;
-    sc->extra_stats.tx_ProcTimeout++; /* -baz */
-
-    netif_trans_update(dev); /* prevent tx timeout */
-
-bug_out:
-
-    spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
deleted file mode 100644
index ec1ac7b..0000000
--- a/drivers/net/wan/lmc/lmc_media.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h>             /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/uaccess.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-
-#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1
-
- /*
-  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
-  * All rights reserved.  www.lanmedia.com
-  *
-  * This code is written by:
-  * Andrew Stanley-Jones (asj@cban.com)
-  * Rob Braun (bbraun@vix.com),
-  * Michael Graff (explorer@vix.com) and
-  * Matt Thomas (matt@3am-software.com).
-  */
-
-/*
- * protocol independent method.
- */
-static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-static void lmc_ds3_init (lmc_softc_t * const);
-static void lmc_ds3_default (lmc_softc_t * const);
-static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ds3_set_100ft (lmc_softc_t * const, int);
-static int lmc_ds3_get_link_status (lmc_softc_t * const);
-static void lmc_ds3_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ds3_set_scram (lmc_softc_t * const, int);
-static void lmc_ds3_watchdog (lmc_softc_t * const);
-
-static void lmc_hssi_init (lmc_softc_t * const);
-static void lmc_hssi_default (lmc_softc_t * const);
-static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_hssi_set_clock (lmc_softc_t * const, int);
-static int lmc_hssi_get_link_status (lmc_softc_t * const);
-static void lmc_hssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_hssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_hssi_watchdog (lmc_softc_t * const);
-
-static void lmc_ssi_init (lmc_softc_t * const);
-static void lmc_ssi_default (lmc_softc_t * const);
-static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ssi_set_clock (lmc_softc_t * const, int);
-static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_ssi_get_link_status (lmc_softc_t * const);
-static void lmc_ssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_ssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ssi_watchdog (lmc_softc_t * const);
-
-static void lmc_t1_init (lmc_softc_t * const);
-static void lmc_t1_default (lmc_softc_t * const);
-static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_t1_get_link_status (lmc_softc_t * const);
-static void lmc_t1_set_circuit_type (lmc_softc_t * const, int);
-static void lmc_t1_set_crc_length (lmc_softc_t * const, int);
-static void lmc_t1_set_clock (lmc_softc_t * const, int);
-static void lmc_t1_watchdog (lmc_softc_t * const);
-
-static void lmc_dummy_set_1 (lmc_softc_t * const, int);
-static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
-
-static inline void write_av9110_bit (lmc_softc_t *, int);
-static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
-
-lmc_media_t lmc_ds3_media = {
-  .init = lmc_ds3_init,				/* special media init stuff */
-  .defaults = lmc_ds3_default,			/* reset to default state */
-  .set_status = lmc_ds3_set_status,		/* reset status to state provided */
-  .set_clock_source = lmc_dummy_set_1,		/* set clock source */
-  .set_speed = lmc_dummy_set2_1,		/* set line speed */
-  .set_cable_length = lmc_ds3_set_100ft,	/* set cable length */
-  .set_scrambler = lmc_ds3_set_scram,		/* set scrambler */
-  .get_link_status = lmc_ds3_get_link_status,	/* get link status */
-  .set_link_status = lmc_dummy_set_1,		/* set link status */
-  .set_crc_length = lmc_ds3_set_crc_length,	/* set CRC length */
-  .set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
-  .watchdog = lmc_ds3_watchdog
-};
-
-lmc_media_t lmc_hssi_media = {
-  .init = lmc_hssi_init,			/* special media init stuff */
-  .defaults = lmc_hssi_default,			/* reset to default state */
-  .set_status = lmc_hssi_set_status,		/* reset status to state provided */
-  .set_clock_source = lmc_hssi_set_clock,	/* set clock source */
-  .set_speed = lmc_dummy_set2_1,		/* set line speed */
-  .set_cable_length = lmc_dummy_set_1,		/* set cable length */
-  .set_scrambler = lmc_dummy_set_1,		/* set scrambler */
-  .get_link_status = lmc_hssi_get_link_status,	/* get link status */
-  .set_link_status = lmc_hssi_set_link_status,	/* set link status */
-  .set_crc_length = lmc_hssi_set_crc_length,	/* set CRC length */
-  .set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
-  .watchdog = lmc_hssi_watchdog
-};
-
-lmc_media_t lmc_ssi_media = {
-  .init = lmc_ssi_init,				/* special media init stuff */
-  .defaults = lmc_ssi_default,			/* reset to default state */
-  .set_status = lmc_ssi_set_status,		/* reset status to state provided */
-  .set_clock_source = lmc_ssi_set_clock,	/* set clock source */
-  .set_speed = lmc_ssi_set_speed,		/* set line speed */
-  .set_cable_length = lmc_dummy_set_1,		/* set cable length */
-  .set_scrambler = lmc_dummy_set_1,		/* set scrambler */
-  .get_link_status = lmc_ssi_get_link_status,	/* get link status */
-  .set_link_status = lmc_ssi_set_link_status,	/* set link status */
-  .set_crc_length = lmc_ssi_set_crc_length,	/* set CRC length */
-  .set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
-  .watchdog = lmc_ssi_watchdog
-};
-
-lmc_media_t lmc_t1_media = {
-  .init = lmc_t1_init,				/* special media init stuff */
-  .defaults = lmc_t1_default,			/* reset to default state */
-  .set_status = lmc_t1_set_status,		/* reset status to state provided */
-  .set_clock_source = lmc_t1_set_clock,		/* set clock source */
-  .set_speed = lmc_dummy_set2_1,		/* set line speed */
-  .set_cable_length = lmc_dummy_set_1,		/* set cable length */
-  .set_scrambler = lmc_dummy_set_1,		/* set scrambler */
-  .get_link_status = lmc_t1_get_link_status,	/* get link status */
-  .set_link_status = lmc_dummy_set_1,		/* set link status */
-  .set_crc_length = lmc_t1_set_crc_length,	/* set CRC length */
-  .set_circuit_type = lmc_t1_set_circuit_type,	/* set T1 or E1 circuit type */
-  .watchdog = lmc_t1_watchdog
-};
-
-static void
-lmc_dummy_set_1 (lmc_softc_t * const sc, int a)
-{
-}
-
-static void
-lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a)
-{
-}
-
-/*
- *  HSSI methods
- */
-
-static void
-lmc_hssi_init (lmc_softc_t * const sc)
-{
-  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200;
-
-  lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK);
-}
-
-static void
-lmc_hssi_default (lmc_softc_t * const sc)
-{
-  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
-  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
-  sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
-  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it.  This will
- * always reset the card if needed.
- */
-static void
-lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
-  if (ctl == NULL)
-    {
-      sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
-      lmc_set_protocol (sc, NULL);
-
-      return;
-    }
-
-  /*
-   * check for change in clock source
-   */
-  if (ctl->clock_source && !sc->ictl.clock_source)
-    {
-      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
-      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
-    }
-  else if (!ctl->clock_source && sc->ictl.clock_source)
-    {
-      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
-      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
-    }
-
-  lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
-{
-  int old;
-  old = sc->ictl.clock_source;
-  if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
-    {
-      sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
-      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
-      if(old != ie)
-        printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
-    }
-  else
-    {
-      sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
-      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
-      if(old != ie)
-        printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
-    }
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_hssi_get_link_status (lmc_softc_t * const sc)
-{
-    /*
-     * We're using the same code as SSI since
-     * they're practically the same
-     */
-    return lmc_ssi_get_link_status(sc);
-}
-
-static void
-lmc_hssi_set_link_status (lmc_softc_t * const sc, int state)
-{
-  if (state == LMC_LINK_UP)
-    sc->lmc_miireg16 |= LMC_MII16_HSSI_TA;
-  else
-    sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA;
-
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
-  if (state == LMC_CTL_CRC_LENGTH_32)
-    {
-      /* 32 bit */
-      sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
-    }
-  else
-    {
-      /* 16 bit */
-      sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
-    }
-
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_hssi_watchdog (lmc_softc_t * const sc)
-{
-  /* HSSI is blank */
-}
-
-/*
- *  DS3 methods
- */
-
-/*
- * Set cable length
- */
-static void
-lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie)
-{
-  if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT)
-    {
-      sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO;
-      sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT;
-    }
-  else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT)
-    {
-      sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO;
-      sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT;
-    }
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_default (lmc_softc_t * const sc)
-{
-  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
-  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
-  sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
-  sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF);
-  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it.  This will
- * always reset the card if needed.
- */
-static void
-lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
-  if (ctl == NULL)
-    {
-      sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
-      sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
-      lmc_set_protocol (sc, NULL);
-
-      return;
-    }
-
-  /*
-   * check for change in cable length setting
-   */
-  if (ctl->cable_length && !sc->ictl.cable_length)
-    lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
-  else if (!ctl->cable_length && sc->ictl.cable_length)
-    lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
-
-  /*
-   * Check for change in scrambler setting (requires reset)
-   */
-  if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
-    lmc_ds3_set_scram (sc, LMC_CTL_ON);
-  else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
-    lmc_ds3_set_scram (sc, LMC_CTL_OFF);
-
-  lmc_set_protocol (sc, ctl);
-}
-
-static void
-lmc_ds3_init (lmc_softc_t * const sc)
-{
-  int i;
-
-  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;
-
-  /* writes zeros everywhere */
-  for (i = 0; i < 21; i++)
-    {
-      lmc_mii_writereg (sc, 0, 17, i);
-      lmc_mii_writereg (sc, 0, 18, 0);
-    }
-
-  /* set some essential bits */
-  lmc_mii_writereg (sc, 0, 17, 1);
-  lmc_mii_writereg (sc, 0, 18, 0x25);	/* ser, xtx */
-
-  lmc_mii_writereg (sc, 0, 17, 5);
-  lmc_mii_writereg (sc, 0, 18, 0x80);	/* emode */
-
-  lmc_mii_writereg (sc, 0, 17, 14);
-  lmc_mii_writereg (sc, 0, 18, 0x30);	/* rcgen, tcgen */
-
-  /* clear counters and latched bits */
-  for (i = 0; i < 21; i++)
-    {
-      lmc_mii_writereg (sc, 0, 17, i);
-      lmc_mii_readreg (sc, 0, 18);
-    }
-}
-
-/*
- * 1 == DS3 payload scrambled, 0 == not scrambled
- */
-static void
-lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
-{
-  if (ie == LMC_CTL_ON)
-    {
-      sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
-      sc->ictl.scrambler_onoff = LMC_CTL_ON;
-    }
-  else
-    {
-      sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
-      sc->ictl.scrambler_onoff = LMC_CTL_OFF;
-    }
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ds3_get_link_status (lmc_softc_t * const sc)
-{
-    u16 link_status, link_status_11;
-    int ret = 1;
-
-    lmc_mii_writereg (sc, 0, 17, 7);
-    link_status = lmc_mii_readreg (sc, 0, 18);
-
-    /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
-     * led0 yellow = far-end adapter is in Red alarm condition
-     * led1 blue   = received an Alarm Indication signal
-     *               (upstream failure)
-     * led2 Green  = power to adapter, Gate Array loaded & driver
-     *               attached
-     * led3 red    = Loss of Signal (LOS) or out of frame (OOF)
-     *               conditions detected on T3 receive signal
-     */
-
-    lmc_led_on(sc, LMC_DS3_LED2);
-
-    if ((link_status & LMC_FRAMER_REG0_DLOS) ||
-        (link_status & LMC_FRAMER_REG0_OOFS)){
-        ret = 0;
-        if(sc->last_led_err[3] != 1){
-	    u16 r1;
-            lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
-            r1 = lmc_mii_readreg (sc, 0, 18);
-            r1 &= 0xfe;
-            lmc_mii_writereg(sc, 0, 18, r1);
-            printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
-        }
-        lmc_led_on(sc, LMC_DS3_LED3);	/* turn on red LED */
-        sc->last_led_err[3] = 1;
-    }
-    else {
-        lmc_led_off(sc, LMC_DS3_LED3);	/* turn off red LED */
-        if(sc->last_led_err[3] == 1){
-	    u16 r1;
-            lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
-            r1 = lmc_mii_readreg (sc, 0, 18);
-            r1 |= 0x01;
-            lmc_mii_writereg(sc, 0, 18, r1);
-        }
-        sc->last_led_err[3] = 0;
-    }
-
-    lmc_mii_writereg(sc, 0, 17, 0x10);
-    link_status_11 = lmc_mii_readreg(sc, 0, 18);
-    if((link_status & LMC_FRAMER_REG0_AIS) ||
-       (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
-        ret = 0;
-        if(sc->last_led_err[0] != 1){
-            printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
-            printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
-        }
-        lmc_led_on(sc, LMC_DS3_LED0);
-        sc->last_led_err[0] = 1;
-    }
-    else {
-        lmc_led_off(sc, LMC_DS3_LED0);
-        sc->last_led_err[0] = 0;
-    }
-
-    lmc_mii_writereg (sc, 0, 17, 9);
-    link_status = lmc_mii_readreg (sc, 0, 18);
-    
-    if(link_status & LMC_FRAMER_REG9_RBLUE){
-        ret = 0;
-        if(sc->last_led_err[1] != 1){
-            printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
-        }
-        lmc_led_on(sc, LMC_DS3_LED1);
-        sc->last_led_err[1] = 1;
-    }
-    else {
-        lmc_led_off(sc, LMC_DS3_LED1);
-        sc->last_led_err[1] = 0;
-    }
-
-    return ret;
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
-{
-  if (state == LMC_CTL_CRC_LENGTH_32)
-    {
-      /* 32 bit */
-      sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
-    }
-  else
-    {
-      /* 16 bit */
-      sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
-    }
-
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_watchdog (lmc_softc_t * const sc)
-{
-    
-}
-
-
-/*
- *  SSI methods
- */
-
-static void lmc_ssi_init(lmc_softc_t * const sc)
-{
-	u16 mii17;
-	int cable;
-
-	sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
-
-	mii17 = lmc_mii_readreg(sc, 0, 17);
-
-	cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
-	sc->ictl.cable_type = cable;
-
-	lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
-}
-
-static void
-lmc_ssi_default (lmc_softc_t * const sc)
-{
-  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
-  /*
-   * make TXCLOCK always be an output
-   */
-  lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
-
-  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
-  sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
-  sc->lmc_media->set_speed (sc, NULL);
-  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it.  This will
- * always reset the card if needed.
- */
-static void
-lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
-  if (ctl == NULL)
-    {
-      sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
-      sc->lmc_media->set_speed (sc, &sc->ictl);
-      lmc_set_protocol (sc, NULL);
-
-      return;
-    }
-
-  /*
-   * check for change in clock source
-   */
-  if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT
-      && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT)
-    {
-      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
-      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
-    }
-  else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT
-	   && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT)
-    {
-      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
-      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
-    }
-
-  if (ctl->clock_rate != sc->ictl.clock_rate)
-    sc->lmc_media->set_speed (sc, ctl);
-
-  lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_ssi_set_clock (lmc_softc_t * const sc, int ie)
-{
-  int old;
-  old = sc->ictl.clock_source;
-  if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
-    {
-      sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
-      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
-      if(ie != old)
-        printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
-    }
-  else
-    {
-      sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
-      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
-      if(ie != old)
-        printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
-    }
-}
-
-static void
-lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
-  lmc_ctl_t *ictl = &sc->ictl;
-  lmc_av9110_t *av;
-
-  /* The original settings for a clock rate of
-   *  100 kHz (8,25,0,0,2) were incorrect;
-   *  they should have been 80,125,1,3,3.
-   *  There are 17 param combinations that produce this freq.
-   *  For 1.5 MHz use 120,100,1,1,2 (226 param combinations)
-   */
-  if (ctl == NULL)
-    {
-      av = &ictl->cardspec.ssi;
-      ictl->clock_rate = 1500000;
-      av->f = ictl->clock_rate;
-      av->n = 120;
-      av->m = 100;
-      av->v = 1;
-      av->x = 1;
-      av->r = 2;
-
-      write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
-      return;
-    }
-
-  av = &ctl->cardspec.ssi;
-
-  if (av->f == 0)
-    return;
-
-  ictl->clock_rate = av->f;	/* really, this is the rate we are */
-  ictl->cardspec.ssi = *av;
-
-  write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ssi_get_link_status (lmc_softc_t * const sc)
-{
-  u16 link_status;
-  u32 ticks;
-  int ret = 1;
-  int hw_hdsk = 1;
-
-  /*
-   * missing CTS?  Hmm.  If we require CTS on, we may never get the
-   * link to come up, so omit it in this test.
-   *
-   * Also, it seems that with a loopback cable, DCD isn't asserted,
-   * so just check for things like this:
-   *      DSR _must_ be asserted.
-   *      One of DCD or CTS must be asserted.
-   */
-
-  /* LMC 1000 (SSI) LED definitions
-   * led0 Green = power to adapter, Gate Array loaded &
-   *              driver attached
-   * led1 Green = DSR and DTR and RTS and CTS are set
-   * led2 Green = Cable detected
-   * led3 red   = No timing is available from the
-   *              cable or the on-board frequency
-   *              generator.
-   */
-
-  link_status = lmc_mii_readreg (sc, 0, 16);
-
-  /* Is the transmit clock still available */
-  ticks = LMC_CSR_READ (sc, csr_gp_timer);
-  ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
-  lmc_led_on (sc, LMC_MII16_LED0);
-
-  /* ====== transmit clock determination ===== */
-  if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) {
-      lmc_led_off(sc, LMC_MII16_LED3);
-  }
-  else if (ticks == 0 ) {				/* no clock found ? */
-      ret = 0;
-      if (sc->last_led_err[3] != 1) {
-	      sc->extra_stats.tx_lossOfClockCnt++;
-	      printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
-      }
-      sc->last_led_err[3] = 1;
-      lmc_led_on (sc, LMC_MII16_LED3);	/* turn ON red LED */
-  }
-  else {
-      if(sc->last_led_err[3] == 1)
-          printk(KERN_WARNING "%s: Clock Returned\n", sc->name);
-      sc->last_led_err[3] = 0;
-      lmc_led_off (sc, LMC_MII16_LED3);		/* turn OFF red LED */
-  }
-
-  if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */
-      ret = 0;
-      hw_hdsk = 0;
-  }
-
-#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE
-  if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){
-      ret = 0;
-      hw_hdsk = 0;
-  }
-#endif
-
-  if(hw_hdsk == 0){
-      if(sc->last_led_err[1] != 1)
-          printk(KERN_WARNING "%s: DSR not asserted\n", sc->name);
-      sc->last_led_err[1] = 1;
-      lmc_led_off(sc, LMC_MII16_LED1);
-  }
-  else {
-      if(sc->last_led_err[1] != 0)
-          printk(KERN_WARNING "%s: DSR now asserted\n", sc->name);
-      sc->last_led_err[1] = 0;
-      lmc_led_on(sc, LMC_MII16_LED1);
-  }
-
-  if(ret == 1) {
-      lmc_led_on(sc, LMC_MII16_LED2); /* Overall good status? */
-  }
-  
-  return ret;
-}
-
-static void
-lmc_ssi_set_link_status (lmc_softc_t * const sc, int state)
-{
-  if (state == LMC_LINK_UP)
-    {
-      sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
-      printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS);
-    }
-  else
-    {
-      sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
-      printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS);
-    }
-
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
-  if (state == LMC_CTL_CRC_LENGTH_32)
-    {
-      /* 32 bit */
-      sc->lmc_miireg16 |= LMC_MII16_SSI_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
-      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
-    }
-  else
-    {
-      /* 16 bit */
-      sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
-      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
-    }
-
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * These are bits to program the ssi frequency generator
- */
-static inline void
-write_av9110_bit (lmc_softc_t * sc, int c)
-{
-  /*
-   * set the data bit as we need it.
-   */
-  sc->lmc_gpio &= ~(LMC_GEP_CLK);
-  if (c & 0x01)
-    sc->lmc_gpio |= LMC_GEP_DATA;
-  else
-    sc->lmc_gpio &= ~(LMC_GEP_DATA);
-  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
-  /*
-   * set the clock to high
-   */
-  sc->lmc_gpio |= LMC_GEP_CLK;
-  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
-  /*
-   * set the clock to low again.
-   */
-  sc->lmc_gpio &= ~(LMC_GEP_CLK);
-  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-}
-
-static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
-{
-  int i;
-
-#if 0
-  printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n",
-	  LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r);
-#endif
-
-  sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR;
-  sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK);
-  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
-  /*
-   * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK
-   * as outputs.
-   */
-  lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK
-			  | LMC_GEP_SSI_GENERATOR));
-
-  sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR);
-  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
-  /*
-   * a shifting we will go...
-   */
-  for (i = 0; i < 7; i++)
-    write_av9110_bit (sc, n >> i);
-  for (i = 0; i < 7; i++)
-    write_av9110_bit (sc, m >> i);
-  for (i = 0; i < 1; i++)
-    write_av9110_bit (sc, v >> i);
-  for (i = 0; i < 2; i++)
-    write_av9110_bit (sc, x >> i);
-  for (i = 0; i < 2; i++)
-    write_av9110_bit (sc, r >> i);
-  for (i = 0; i < 5; i++)
-    write_av9110_bit (sc, 0x17 >> i);
-
-  /*
-   * stop driving serial-related signals
-   */
-  lmc_gpio_mkinput (sc,
-		    (LMC_GEP_DATA | LMC_GEP_CLK
-		     | LMC_GEP_SSI_GENERATOR));
-}
-
-static void lmc_ssi_watchdog(lmc_softc_t * const sc)
-{
-	u16 mii17 = lmc_mii_readreg(sc, 0, 17);
-	if (((mii17 >> 3) & 7) == 7)
-		lmc_led_off(sc, LMC_MII16_LED2);
-	else
-		lmc_led_on(sc, LMC_MII16_LED2);
-}
-
-/*
- *  T1 methods
- */
-
-/*
- * The framer regs are multiplexed through MII regs 17 & 18
- *  write the register address to MII reg 17 and the
- *  data to MII reg 18.
- */
-static void
-lmc_t1_write (lmc_softc_t * const sc, int a, int d)
-{
-  lmc_mii_writereg (sc, 0, 17, a);
-  lmc_mii_writereg (sc, 0, 18, d);
-}
-
-/* Save a warning
-static int
-lmc_t1_read (lmc_softc_t * const sc, int a)
-{
-  lmc_mii_writereg (sc, 0, 17, a);
-  return lmc_mii_readreg (sc, 0, 18);
-}
-*/
-
-
-static void
-lmc_t1_init (lmc_softc_t * const sc)
-{
-  u16 mii16;
-  int i;
-
-  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
-  mii16 = lmc_mii_readreg (sc, 0, 16);
-
-  /* reset 8370 */
-  mii16 &= ~LMC_MII16_T1_RST;
-  lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST);
-  lmc_mii_writereg (sc, 0, 16, mii16);
-
-  /* set T1 or E1 line.  Uses sc->lmc_miireg16 in the function, so update it first */
-  sc->lmc_miireg16 = mii16;
-  lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1);
-  mii16 = sc->lmc_miireg16;
-
-  lmc_t1_write (sc, 0x01, 0x1B);	/* CR0     - primary control             */
-  lmc_t1_write (sc, 0x02, 0x42);	/* JAT_CR  - jitter atten config         */
-  lmc_t1_write (sc, 0x14, 0x00);	/* LOOP    - loopback config             */
-  lmc_t1_write (sc, 0x15, 0x00);	/* DL3_TS  - external data link timeslot */
-  lmc_t1_write (sc, 0x18, 0xFF);	/* PIO     - programmable I/O            */
-  lmc_t1_write (sc, 0x19, 0x30);	/* POE     - programmable OE             */
-  lmc_t1_write (sc, 0x1A, 0x0F);	/* CMUX    - clock input mux             */
-  lmc_t1_write (sc, 0x20, 0x41);	/* LIU_CR  - RX LIU config               */
-  lmc_t1_write (sc, 0x22, 0x76);	/* RLIU_CR - RX LIU config               */
-  lmc_t1_write (sc, 0x40, 0x03);	/* RCR0    - RX config                   */
-  lmc_t1_write (sc, 0x45, 0x00);	/* RALM    - RX alarm config             */
-  lmc_t1_write (sc, 0x46, 0x05);	/* LATCH   - RX alarm/err/cntr latch     */
-  lmc_t1_write (sc, 0x68, 0x40);	/* TLIU_CR - TX LIU config               */
-  lmc_t1_write (sc, 0x70, 0x0D);	/* TCR0    - TX framer config            */
-  lmc_t1_write (sc, 0x71, 0x05);	/* TCR1    - TX config                   */
-  lmc_t1_write (sc, 0x72, 0x0B);	/* TFRM    - TX frame format             */
-  lmc_t1_write (sc, 0x73, 0x00);	/* TERROR  - TX error insert             */
-  lmc_t1_write (sc, 0x74, 0x00);	/* TMAN    - TX manual Sa/FEBE config    */
-  lmc_t1_write (sc, 0x75, 0x00);	/* TALM    - TX alarm signal config      */
-  lmc_t1_write (sc, 0x76, 0x00);	/* TPATT   - TX test pattern config      */
-  lmc_t1_write (sc, 0x77, 0x00);	/* TLB     - TX inband loopback config   */
-  lmc_t1_write (sc, 0x90, 0x05);	/* CLAD_CR - clock rate adapter config   */
-  lmc_t1_write (sc, 0x91, 0x05);	/* CSEL    - clad freq sel               */
-  lmc_t1_write (sc, 0xA6, 0x00);	/* DL1_CTL - DL1 control                 */
-  lmc_t1_write (sc, 0xB1, 0x00);	/* DL2_CTL - DL2 control                 */
-  lmc_t1_write (sc, 0xD0, 0x47);	/* SBI_CR  - sys bus iface config        */
-  lmc_t1_write (sc, 0xD1, 0x70);	/* RSB_CR  - RX sys bus config           */
-  lmc_t1_write (sc, 0xD4, 0x30);	/* TSB_CR  - TX sys bus config           */
-  for (i = 0; i < 32; i++)
-    {
-      lmc_t1_write (sc, 0x0E0 + i, 0x00);	/* SBCn - sys bus per-channel ctl    */
-      lmc_t1_write (sc, 0x100 + i, 0x00);	/* TPCn - TX per-channel ctl         */
-      lmc_t1_write (sc, 0x180 + i, 0x00);	/* RPCn - RX per-channel ctl         */
-    }
-  for (i = 1; i < 25; i++)
-    {
-      lmc_t1_write (sc, 0x0E0 + i, 0x0D);	/* SBCn - sys bus per-channel ctl    */
-    }
-
-  mii16 |= LMC_MII16_T1_XOE;
-  lmc_mii_writereg (sc, 0, 16, mii16);
-  sc->lmc_miireg16 = mii16;
-}
-
-static void
-lmc_t1_default (lmc_softc_t * const sc)
-{
-  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
-  sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
-  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-  /* Right now we can only clock from our internal source */
-  sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
-}
-
-/*
- * Given a user provided state, set ourselves up to match it.  This will
- * always reset the card if needed.
- */
-static void
-lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
-  if (ctl == NULL)
-    {
-      sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type);
-      lmc_set_protocol (sc, NULL);
-
-      return;
-    }
-  /*
-   * check for change in circuit type
-   */
-  if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1
-      && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1)
-    sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1);
-  else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1
-	   && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1)
-    sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
-  lmc_set_protocol (sc, ctl);
-}
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_t1_get_link_status (lmc_softc_t * const sc)
-{
-    u16 link_status;
-    int ret = 1;
-
-  /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
-   * led0 yellow = far-end adapter is in Red alarm condition
-   * led1 blue   = received an Alarm Indication signal
-   *               (upstream failure)
-   * led2 Green  = power to adapter, Gate Array loaded & driver
-   *               attached
-   * led3 red    = Loss of Signal (LOS) or out of frame (OOF)
-   *               conditions detected on T3 receive signal
-   */
-    lmc_led_on(sc, LMC_DS3_LED2);
-
-    lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
-    link_status = lmc_mii_readreg (sc, 0, 18);
-
-
-    if (link_status & T1F_RAIS) {			/* turn on blue LED */
-        ret = 0;
-        if(sc->last_led_err[1] != 1){
-            printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name);
-        }
-        lmc_led_on(sc, LMC_DS3_LED1);
-        sc->last_led_err[1] = 1;
-    }
-    else {
-        if(sc->last_led_err[1] != 0){
-            printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name);
-        }
-        lmc_led_off (sc, LMC_DS3_LED1);
-        sc->last_led_err[1] = 0;
-    }
-
-    /*
-     * Yellow Alarm is nasty evil stuff: it looks at data patterns
-     * inside the channel and confuses them with HDLC framing, so
-     * ignore all yellow alarms.
-     *
-     * Do listen to the MultiFrame Yellow alarm, which, while
-     * implemented in different ways, isn't in the channel and hence
-     * is somewhat more reliable.
-     */
-
-    if (link_status & T1F_RMYEL) {
-        ret = 0;
-        if(sc->last_led_err[0] != 1){
-            printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name);
-        }
-        lmc_led_on(sc, LMC_DS3_LED0);
-        sc->last_led_err[0] = 1;
-    }
-    else {
-        if(sc->last_led_err[0] != 0){
-            printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name);
-        }
-        lmc_led_off(sc, LMC_DS3_LED0);
-        sc->last_led_err[0] = 0;
-    }
-
-    /*
-     * Loss of signal and loss of frame
-     * Use the green bit to identify which one lit the LED
-     */
-    if(link_status & T1F_RLOF){
-        ret = 0;
-        if(sc->last_led_err[3] != 1){
-            printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name);
-        }
-        lmc_led_on(sc, LMC_DS3_LED3);
-        sc->last_led_err[3] = 1;
-
-    }
-    else {
-        if(sc->last_led_err[3] != 0){
-            printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name);
-        }
-        if( ! (link_status & T1F_RLOS))
-            lmc_led_off(sc, LMC_DS3_LED3);
-        sc->last_led_err[3] = 0;
-    }
-    
-    if(link_status & T1F_RLOS){
-        ret = 0;
-        if(sc->last_led_err[2] != 1){
-            printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name);
-        }
-        lmc_led_on(sc, LMC_DS3_LED3);
-        sc->last_led_err[2] = 1;
-
-    }
-    else {
-        if(sc->last_led_err[2] != 0){
-            printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name);
-        }
-        if( ! (link_status & T1F_RLOF))
-            lmc_led_off(sc, LMC_DS3_LED3);
-        sc->last_led_err[2] = 0;
-    }
-
-    sc->lmc_xinfo.t1_alarm1_status = link_status;
-
-    lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
-    sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
-
-    return ret;
-}
-
-/*
- * 1 == T1 Circuit Type , 0 == E1 Circuit Type
- */
-static void
-lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie)
-{
-  if (ie == LMC_CTL_CIRCUIT_TYPE_T1) {
-      sc->lmc_miireg16 |= LMC_MII16_T1_Z;
-      sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1;
-      printk(KERN_INFO "%s: In T1 Mode\n", sc->name);
-  }
-  else {
-      sc->lmc_miireg16 &= ~LMC_MII16_T1_Z;
-      sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1;
-      printk(KERN_INFO "%s: In E1 Mode\n", sc->name);
-  }
-
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-  
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_t1_set_crc_length (lmc_softc_t * const sc, int state)
-{
-  if (state == LMC_CTL_CRC_LENGTH_32)
-    {
-      /* 32 bit */
-      sc->lmc_miireg16 |= LMC_MII16_T1_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
-      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
-    }
-  else
-    {
-      /* 16 bit */
-      sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC;
-      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
-      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
-
-    }
-
-  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_t1_set_clock (lmc_softc_t * const sc, int ie)
-{
-  int old;
-  old = sc->ictl.clock_source;
-  if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
-    {
-      sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
-      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
-      if(old != ie)
-        printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
-    }
-  else
-    {
-      sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
-      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
-      if(old != ie)
-        printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
-    }
-}
-
-static void
-lmc_t1_watchdog (lmc_softc_t * const sc)
-{
-}
-
-static void
-lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
-	if (!ctl)
-		sc->ictl.keepalive_onoff = LMC_CTL_ON;
-}
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
deleted file mode 100644
index e5487616..0000000
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
-  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
-  * All rights reserved.  www.lanmedia.com
-  *
-  * This code is written by:
-  * Andrew Stanley-Jones (asj@cban.com)
-  * Rob Braun (bbraun@vix.com),
-  * Michael Graff (explorer@vix.com) and
-  * Matt Thomas (matt@3am-software.com).
-  *
-  * With Help By:
-  * David Boggs
-  * Ron Crane
-  * Allan Cox
-  *
-  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
-  */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/workqueue.h>
-#include <linux/proc_fs.h>
-#include <linux/bitops.h>
-#include <asm/processor.h>             /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/smp.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_debug.h"
-#include "lmc_ioctl.h"
-#include "lmc_proto.h"
-
-// attach
-void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
-{
-    if (sc->if_type == LMC_NET) {
-            struct net_device *dev = sc->lmc_device;
-            /*
-	     * They set a few basics because they don't use HDLC
-             */
-            dev->flags |= IFF_POINTOPOINT;
-            dev->hard_header_len = 0;
-            dev->addr_len = 0;
-        }
-}
-
-int lmc_proto_open(lmc_softc_t *sc)
-{
-	int ret = 0;
-
-	if (sc->if_type == LMC_PPP) {
-		ret = hdlc_open(sc->lmc_device);
-		if (ret < 0)
-			printk(KERN_WARNING "%s: HDLC open failed: %d\n",
-			       sc->name, ret);
-	}
-	return ret;
-}
-
-void lmc_proto_close(lmc_softc_t *sc)
-{
-	if (sc->if_type == LMC_PPP)
-		hdlc_close(sc->lmc_device);
-}
-
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
-    switch(sc->if_type){
-    case LMC_PPP:
-	    return hdlc_type_trans(skb, sc->lmc_device);
-    case LMC_NET:
-        return htons(ETH_P_802_2);
-    case LMC_RAW: /* Packet type for skbuff kind of useless */
-        return htons(ETH_P_802_2);
-    default:
-        printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
-        return htons(ETH_P_802_2);
-    }
-}
-
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
-    switch(sc->if_type){
-    case LMC_PPP:
-    case LMC_NET:
-    default:
-        netif_rx(skb);
-        break;
-    case LMC_RAW:
-        break;
-    }
-}
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
deleted file mode 100644
index e56e707..0000000
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_PROTO_H_
-#define _LMC_PROTO_H_
-
-#include <linux/hdlc.h>
-
-void lmc_proto_attach(lmc_softc_t *sc);
-int lmc_proto_open(lmc_softc_t *sc);
-void lmc_proto_close(lmc_softc_t *sc);
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
-
-static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
-{
-	return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
-}
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
deleted file mode 100644
index 99f0aa7..0000000
--- a/drivers/net/wan/lmc/lmc_var.h
+++ /dev/null
@@ -1,468 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_VAR_H_
-#define _LMC_VAR_H_
-
- /*
-  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
-  * All rights reserved.  www.lanmedia.com
-  *
-  * This code is written by:
-  * Andrew Stanley-Jones (asj@cban.com)
-  * Rob Braun (bbraun@vix.com),
-  * Michael Graff (explorer@vix.com) and
-  * Matt Thomas (matt@3am-software.com).
-  */
-
-#include <linux/timer.h>
-
-/*
- * basic definitions used in lmc include files
- */
-
-typedef struct lmc___softc lmc_softc_t;
-typedef struct lmc___media lmc_media_t;
-typedef struct lmc___ctl lmc_ctl_t;
-
-#define lmc_csrptr_t    unsigned long
-
-#define LMC_REG_RANGE 0x80
-
-#define LMC_PRINTF_FMT  "%s"
-#define LMC_PRINTF_ARGS	(sc->lmc_device->name)
-
-#define TX_TIMEOUT (2*HZ)
-
-#define LMC_TXDESCS            32
-#define LMC_RXDESCS            32
-
-#define LMC_LINK_UP            1
-#define LMC_LINK_DOWN          0
-
-/* These macros for generic read and write to and from the dec chip */
-#define LMC_CSR_READ(sc, csr) \
-	inl((sc)->lmc_csrs.csr)
-#define LMC_CSR_WRITE(sc, reg, val) \
-	outl((val), (sc)->lmc_csrs.reg)
-
-//#ifdef _LINUX_DELAY_H
-//	#define SLOW_DOWN_IO udelay(2);
-//	#undef __SLOW_DOWN_IO
-//	#define __SLOW_DOWN_IO udelay(2);
-//#endif
-
-#define DELAY(n) SLOW_DOWN_IO
-
-#define lmc_delay() inl(sc->lmc_csrs.csr_9)
-
-/* This macro sync's up with the mii so that reads and writes can take place */
-#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \
-                LMC_CSR_WRITE((sc), csr_9, 0x20000); \
-		lmc_delay(); \
-		LMC_CSR_WRITE((sc), csr_9, 0x30000); \
-                lmc_delay(); \
-		n--; }} while(0)
-
-struct lmc_regfile_t {
-    lmc_csrptr_t csr_busmode;                  /* CSR0 */
-    lmc_csrptr_t csr_txpoll;                   /* CSR1 */
-    lmc_csrptr_t csr_rxpoll;                   /* CSR2 */
-    lmc_csrptr_t csr_rxlist;                   /* CSR3 */
-    lmc_csrptr_t csr_txlist;                   /* CSR4 */
-    lmc_csrptr_t csr_status;                   /* CSR5 */
-    lmc_csrptr_t csr_command;                  /* CSR6 */
-    lmc_csrptr_t csr_intr;                     /* CSR7 */
-    lmc_csrptr_t csr_missed_frames;            /* CSR8 */
-    lmc_csrptr_t csr_9;                        /* CSR9 */
-    lmc_csrptr_t csr_10;                       /* CSR10 */
-    lmc_csrptr_t csr_11;                       /* CSR11 */
-    lmc_csrptr_t csr_12;                       /* CSR12 */
-    lmc_csrptr_t csr_13;                       /* CSR13 */
-    lmc_csrptr_t csr_14;                       /* CSR14 */
-    lmc_csrptr_t csr_15;                       /* CSR15 */
-};
-
-#define csr_enetrom             csr_9   /* 21040 */
-#define csr_reserved            csr_10  /* 21040 */
-#define csr_full_duplex         csr_11  /* 21040 */
-#define csr_bootrom             csr_10  /* 21041/21140A/?? */
-#define csr_gp                  csr_12  /* 21140* */
-#define csr_watchdog            csr_15  /* 21140* */
-#define csr_gp_timer            csr_11  /* 21041/21140* */
-#define csr_srom_mii            csr_9   /* 21041/21140* */
-#define csr_sia_status          csr_12  /* 2104x */
-#define csr_sia_connectivity    csr_13  /* 2104x */
-#define csr_sia_tx_rx           csr_14  /* 2104x */
-#define csr_sia_general         csr_15  /* 2104x */
-
-/* tulip length/control transmit descriptor definitions
- *  used to define bits in the second tulip_desc_t field (length)
- *  for the transmit descriptor -baz */
-
-#define LMC_TDES_FIRST_BUFFER_SIZE       ((u32)(0x000007FF))
-#define LMC_TDES_SECOND_BUFFER_SIZE      ((u32)(0x003FF800))
-#define LMC_TDES_HASH_FILTERING          ((u32)(0x00400000))
-#define LMC_TDES_DISABLE_PADDING         ((u32)(0x00800000))
-#define LMC_TDES_SECOND_ADDR_CHAINED     ((u32)(0x01000000))
-#define LMC_TDES_END_OF_RING             ((u32)(0x02000000))
-#define LMC_TDES_ADD_CRC_DISABLE         ((u32)(0x04000000))
-#define LMC_TDES_SETUP_PACKET            ((u32)(0x08000000))
-#define LMC_TDES_INVERSE_FILTERING       ((u32)(0x10000000))
-#define LMC_TDES_FIRST_SEGMENT           ((u32)(0x20000000))
-#define LMC_TDES_LAST_SEGMENT            ((u32)(0x40000000))
-#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
-
-#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER  11
-#define TDES_COLLISION_COUNT_BIT_NUMBER     3
-
-/* Constants for the RCV descriptor RDES */
-
-#define LMC_RDES_OVERFLOW             ((u32)(0x00000001))
-#define LMC_RDES_CRC_ERROR            ((u32)(0x00000002))
-#define LMC_RDES_DRIBBLING_BIT        ((u32)(0x00000004))
-#define LMC_RDES_REPORT_ON_MII_ERR    ((u32)(0x00000008))
-#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
-#define LMC_RDES_FRAME_TYPE           ((u32)(0x00000020))
-#define LMC_RDES_COLLISION_SEEN       ((u32)(0x00000040))
-#define LMC_RDES_FRAME_TOO_LONG       ((u32)(0x00000080))
-#define LMC_RDES_LAST_DESCRIPTOR      ((u32)(0x00000100))
-#define LMC_RDES_FIRST_DESCRIPTOR     ((u32)(0x00000200))
-#define LMC_RDES_MULTICAST_FRAME      ((u32)(0x00000400))
-#define LMC_RDES_RUNT_FRAME           ((u32)(0x00000800))
-#define LMC_RDES_DATA_TYPE            ((u32)(0x00003000))
-#define LMC_RDES_LENGTH_ERROR         ((u32)(0x00004000))
-#define LMC_RDES_ERROR_SUMMARY        ((u32)(0x00008000))
-#define LMC_RDES_FRAME_LENGTH         ((u32)(0x3FFF0000))
-#define LMC_RDES_OWN_BIT              ((u32)(0x80000000))
-
-#define RDES_FRAME_LENGTH_BIT_NUMBER       16
-
-#define LMC_RDES_ERROR_MASK ( (u32)( \
-	  LMC_RDES_OVERFLOW \
-	| LMC_RDES_DRIBBLING_BIT \
-	| LMC_RDES_REPORT_ON_MII_ERR \
-        | LMC_RDES_COLLISION_SEEN ) )
-
-
-/*
- * Ioctl info
- */
-
-typedef struct {
-	u32	n;
-	u32	m;
-	u32	v;
-	u32	x;
-	u32	r;
-	u32	f;
-	u32	exact;
-} lmc_av9110_t;
-
-/*
- * Common structure passed to the ioctl code.
- */
-struct lmc___ctl {
-	u32	cardtype;
-	u32	clock_source;		/* HSSI, T1 */
-	u32	clock_rate;		/* T1 */
-	u32	crc_length;
-	u32	cable_length;		/* DS3 */
-	u32	scrambler_onoff;	/* DS3 */
-	u32	cable_type;		/* T1 */
-	u32	keepalive_onoff;	/* protocol */
-	u32	ticks;			/* ticks/sec */
-	union {
-		lmc_av9110_t	ssi;
-	} cardspec;
-	u32       circuit_type;   /* T1 or E1 */
-};
-
-
-/*
- * Careful, look at the data sheet, there's more to this
- * structure than meets the eye.  It should probably be:
- *
- * struct tulip_desc_t {
- *         u8  own:1;
- *         u32 status:31;
- *         u32 control:10;
- *         u32 buffer1;
- *         u32 buffer2;
- * };
- * You could also expand status control to provide more bit information
- */
-
-struct tulip_desc_t {
-	s32 status;
-	s32 length;
-	u32 buffer1;
-	u32 buffer2;
-};
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-struct lmc___media {
-	void	(* init)(lmc_softc_t * const);
-	void	(* defaults)(lmc_softc_t * const);
-	void	(* set_status)(lmc_softc_t * const, lmc_ctl_t *);
-	void	(* set_clock_source)(lmc_softc_t * const, int);
-	void	(* set_speed)(lmc_softc_t * const, lmc_ctl_t *);
-	void	(* set_cable_length)(lmc_softc_t * const, int);
-	void	(* set_scrambler)(lmc_softc_t * const, int);
-	int	(* get_link_status)(lmc_softc_t * const);
-	void	(* set_link_status)(lmc_softc_t * const, int);
-	void	(* set_crc_length)(lmc_softc_t * const, int);
-        void    (* set_circuit_type)(lmc_softc_t * const, int);
-        void	(* watchdog)(lmc_softc_t * const);
-};
-
-
-#define STATCHECK     0xBEEFCAFE
-
-struct lmc_extra_statistics
-{
-	u32       version_size;
-	u32       lmc_cardtype;
-
-	u32       tx_ProcTimeout;
-	u32       tx_IntTimeout;
-	u32       tx_NoCompleteCnt;
-	u32       tx_MaxXmtsB4Int;
-	u32       tx_TimeoutCnt;
-	u32       tx_OutOfSyncPtr;
-	u32       tx_tbusy0;
-	u32       tx_tbusy1;
-	u32       tx_tbusy_calls;
-	u32       resetCount;
-	u32       lmc_txfull;
-	u32       tbusy;
-	u32       dirtyTx;
-	u32       lmc_next_tx;
-	u32       otherTypeCnt;
-	u32       lastType;
-	u32       lastTypeOK;
-	u32       txLoopCnt;
-	u32       usedXmtDescripCnt;
-	u32       txIndexCnt;
-	u32       rxIntLoopCnt;
-
-	u32       rx_SmallPktCnt;
-	u32       rx_BadPktSurgeCnt;
-	u32       rx_BuffAllocErr;
-	u32       tx_lossOfClockCnt;
-
-	/* T1 error counters */
-	u32       framingBitErrorCount;
-	u32       lineCodeViolationCount;
-
-	u32       lossOfFrameCount;
-	u32       changeOfFrameAlignmentCount;
-	u32       severelyErroredFrameCount;
-
-	u32       check;
-};
-
-typedef struct lmc_xinfo {
-	u32       Magic0;                         /* BEEFCAFE */
-
-	u32       PciCardType;
-	u32       PciSlotNumber;          /* PCI slot number       */
-
-	u16	       DriverMajorVersion;
-	u16	       DriverMinorVersion;
-	u16	       DriverSubVersion;
-
-	u16	       XilinxRevisionNumber;
-	u16	       MaxFrameSize;
-
-	u16     	  t1_alarm1_status;
-	u16       	t1_alarm2_status;
-
-	int             link_status;
-	u32       mii_reg16;
-
-	u32       Magic1;                         /* DEADBEEF */
-} LMC_XINFO;
-
-
-/*
- * forward decl
- */
-struct lmc___softc {
-	char                   *name;
-	u8			board_idx;
-	struct lmc_extra_statistics extra_stats;
-	struct net_device      *lmc_device;
-
-	int                     hang, rxdesc, bad_packet, some_counter;
-	u32  	         	txgo;
-	struct lmc_regfile_t	lmc_csrs;
-	volatile u32		lmc_txtick;
-	volatile u32		lmc_rxtick;
-	u32			lmc_flags;
-	u32			lmc_intrmask;	/* our copy of csr_intr */
-	u32			lmc_cmdmode;	/* our copy of csr_cmdmode */
-	u32			lmc_busmode;	/* our copy of csr_busmode */
-	u32			lmc_gpio_io;	/* state of in/out settings */
-	u32			lmc_gpio;	/* state of outputs */
-	struct sk_buff*		lmc_txq[LMC_TXDESCS];
-	struct sk_buff*		lmc_rxq[LMC_RXDESCS];
-	volatile
-	struct tulip_desc_t	lmc_rxring[LMC_RXDESCS];
-	volatile
-	struct tulip_desc_t	lmc_txring[LMC_TXDESCS];
-	unsigned int		lmc_next_rx, lmc_next_tx;
-	volatile
-	unsigned int		lmc_taint_tx, lmc_taint_rx;
-	int			lmc_tx_start, lmc_txfull;
-	int			lmc_txbusy;
-	u16			lmc_miireg16;
-	int			lmc_ok;
-	int			last_link_status;
-	int			lmc_cardtype;
-	u32               	last_frameerr;
-	lmc_media_t	       *lmc_media;
-	struct timer_list	timer;
-	lmc_ctl_t		ictl;
-	u32			TxDescriptControlInit;
-
-	int                     tx_TimeoutInd; /* additional driver state */
-	int                     tx_TimeoutDisplay;
-	unsigned int		lastlmc_taint_tx;
-	int                     lasttx_packets;
-	u32			tx_clockState;
-	u32			lmc_crcSize;
-	LMC_XINFO		lmc_xinfo;
-	char                    lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
-	char                    lmc_timing; /* for HSSI and SSI */
-	int                     got_irq;
-
-	char                    last_led_err[4];
-
-	u32                     last_int;
-	u32                     num_int;
-
-	spinlock_t              lmc_lock;
-	u16			if_type;       /* HDLC/PPP or NET */
-
-	/* Failure cases */
-	u8			failed_ring;
-	u8			failed_recv_alloc;
-
-	/* Structure check */
-	u32                     check;
-};
-
-#define LMC_PCI_TIME 1
-#define LMC_EXT_TIME 0
-
-#define PKT_BUF_SZ              1542  /* was 1536 */
-
-/* CSR5 settings */
-#define TIMER_INT     0x00000800
-#define TP_LINK_FAIL  0x00001000
-#define TP_LINK_PASS  0x00000010
-#define NORMAL_INT    0x00010000
-#define ABNORMAL_INT  0x00008000
-#define RX_JABBER_INT 0x00000200
-#define RX_DIED       0x00000100
-#define RX_NOBUFF     0x00000080
-#define RX_INT        0x00000040
-#define TX_FIFO_UNDER 0x00000020
-#define TX_JABBER     0x00000008
-#define TX_NOBUFF     0x00000004
-#define TX_DIED       0x00000002
-#define TX_INT        0x00000001
-
-/* CSR6 settings */
-#define OPERATION_MODE  0x00000200 /* Full Duplex      */
-#define PROMISC_MODE    0x00000040 /* Promiscuous Mode */
-#define RECEIVE_ALL     0x40000000 /* Receive All      */
-#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames  */
-
-/* Dec control registers  CSR6 as well */
-#define LMC_DEC_ST 0x00002000
-#define LMC_DEC_SR 0x00000002
-
-/* CSR15 settings */
-#define RECV_WATCHDOG_DISABLE 0x00000010
-#define JABBER_DISABLE        0x00000001
-
-/* More settings */
-/*
- * aSR6 -- Command (Operation Mode) Register
- */
-#define TULIP_CMD_RECEIVEALL    0x40000000L /* (RW)  Receivel all frames? */
-#define TULIP_CMD_MUSTBEONE     0x02000000L /* (RW)  Must Be One (21140) */
-#define TULIP_CMD_TXTHRSHLDCTL  0x00400000L /* (RW)  Transmit Threshold Mode (21140) */
-#define TULIP_CMD_STOREFWD      0x00200000L /* (RW)  Store and Forward (21140) */
-#define TULIP_CMD_NOHEARTBEAT   0x00080000L /* (RW)  No Heartbeat (21140) */
-#define TULIP_CMD_PORTSELECT    0x00040000L /* (RW)  Post Select (100Mb) (21140) */
-#define TULIP_CMD_FULLDUPLEX    0x00000200L /* (RW)  Full Duplex Mode */
-#define TULIP_CMD_OPERMODE      0x00000C00L /* (RW)  Operating Mode */
-#define TULIP_CMD_PROMISCUOUS   0x00000041L /* (RW)  Promiscuous Mode */
-#define TULIP_CMD_PASSBADPKT    0x00000008L /* (RW)  Pass Bad Frames */
-#define TULIP_CMD_THRESHOLDCTL  0x0000C000L /* (RW)  Threshold Control */
-
-#define TULIP_GP_PINSET         0x00000100L
-#define TULIP_BUSMODE_SWRESET   0x00000001L
-#define TULIP_WATCHDOG_TXDISABLE 0x00000001L
-#define TULIP_WATCHDOG_RXDISABLE 0x00000010L
-
-#define TULIP_STS_NORMALINTR    0x00010000L /* (RW)  Normal Interrupt */
-#define TULIP_STS_ABNRMLINTR    0x00008000L /* (RW)  Abnormal Interrupt */
-#define TULIP_STS_ERI           0x00004000L /* (RW)  Early Receive Interrupt */
-#define TULIP_STS_SYSERROR      0x00002000L /* (RW)  System Error */
-#define TULIP_STS_GTE           0x00000800L /* (RW)  General Pupose Timer Exp */
-#define TULIP_STS_ETI           0x00000400L /* (RW)  Early Transmit Interrupt */
-#define TULIP_STS_RXWT          0x00000200L /* (RW)  Receiver Watchdog Timeout */
-#define TULIP_STS_RXSTOPPED     0x00000100L /* (RW)  Receiver Process Stopped */
-#define TULIP_STS_RXNOBUF       0x00000080L /* (RW)  Receive Buf Unavail */
-#define TULIP_STS_RXINTR        0x00000040L /* (RW)  Receive Interrupt */
-#define TULIP_STS_TXUNDERFLOW   0x00000020L /* (RW)  Transmit Underflow */
-#define TULIP_STS_TXJABER       0x00000008L /* (RW)  Jabber timeout */
-#define TULIP_STS_TXNOBUF       0x00000004L
-#define TULIP_STS_TXSTOPPED     0x00000002L /* (RW)  Transmit Process Stopped */
-#define TULIP_STS_TXINTR        0x00000001L /* (RW)  Transmit Interrupt */
-
-#define TULIP_STS_RXS_STOPPED   0x00000000L /*        000 - Stopped */
-
-#define TULIP_STS_RXSTOPPED     0x00000100L             /* (RW)  Receive Process Stopped */
-#define TULIP_STS_RXNOBUF       0x00000080L
-
-#define TULIP_CMD_TXRUN         0x00002000L /* (RW)  Start/Stop Transmitter */
-#define TULIP_CMD_RXRUN         0x00000002L /* (RW)  Start/Stop Receive Filtering */
-#define TULIP_DSTS_TxDEFERRED   0x00000001      /* Initially Deferred */
-#define TULIP_DSTS_OWNER        0x80000000      /* Owner (1 = 21040) */
-#define TULIP_DSTS_RxMIIERR     0x00000008
-#define LMC_DSTS_ERRSUM         (TULIP_DSTS_RxMIIERR)
-
-#define TULIP_DEFAULT_INTR_MASK  (TULIP_STS_NORMALINTR \
-  | TULIP_STS_RXINTR       \
-  | TULIP_STS_TXINTR     \
-  | TULIP_STS_ABNRMLINTR \
-  | TULIP_STS_SYSERROR   \
-  | TULIP_STS_TXSTOPPED  \
-  | TULIP_STS_TXUNDERFLOW\
-  | TULIP_STS_RXSTOPPED )
-
-#define DESC_OWNED_BY_SYSTEM   ((u32)(0x00000000))
-#define DESC_OWNED_BY_DC21X4   ((u32)(0x80000000))
-
-#ifndef TULIP_CMD_RECEIVEALL
-#define TULIP_CMD_RECEIVEALL 0x40000000L
-#endif
-
-/* Adapter module number */
-#define LMC_ADAP_HSSI           2
-#define LMC_ADAP_DS3            3
-#define LMC_ADAP_SSI            4
-#define LMC_ADAP_T1             5
-
-#define LMC_MTU 1500
-
-#define LMC_CRC_LEN_16 2  /* 16-bit CRC */
-#define LMC_CRC_LEN_32 4
-
-#endif /* _LMC_VAR_H_ */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 59e27a2..28ea4f8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -59,7 +59,8 @@ struct dsa_port;
 struct ip_tunnel_parm;
 struct macsec_context;
 struct macsec_ops;
-
+struct netdev_name_node;
+struct sd_flow_limit;
 struct sfp_bus;
 /* 802.11 specific */
 struct wireless_dev;
@@ -202,6 +203,7 @@ struct net_device_core_stats {
 	local_t		rx_dropped;
 	local_t		tx_dropped;
 	local_t		rx_nohandler;
+	local_t		rx_otherhost_dropped;
 } __aligned(4 * sizeof(local_t));
 
 #include <linux/cache.h>
@@ -862,6 +864,7 @@ enum net_device_path_type {
 	DEV_PATH_BRIDGE,
 	DEV_PATH_PPPOE,
 	DEV_PATH_DSA,
+	DEV_PATH_MTK_WDMA,
 };
 
 struct net_device_path {
@@ -887,6 +890,12 @@ struct net_device_path {
 			int port;
 			u16 proto;
 		} dsa;
+		struct {
+			u8 wdma_idx;
+			u8 queue;
+			u16 wcid;
+			u8 bss;
+		} mtk_wdma;
 	};
 };
 
@@ -1013,16 +1022,6 @@ struct dev_ifalias {
 struct devlink;
 struct tlsdev_ops;
 
-struct netdev_name_node {
-	struct hlist_node hlist;
-	struct list_head list;
-	struct net_device *dev;
-	const char *name;
-};
-
-int netdev_name_node_alt_create(struct net_device *dev, const char *name);
-int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
-
 struct netdev_net_notifier {
 	struct list_head list;
 	struct notifier_block *nb;
@@ -2968,7 +2967,6 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
 
 
@@ -3027,19 +3025,6 @@ static inline bool dev_has_header(const struct net_device *dev)
 	return dev->header_ops && dev->header_ops->create;
 }
 
-#ifdef CONFIG_NET_FLOW_LIMIT
-#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
-struct sd_flow_limit {
-	u64			count;
-	unsigned int		num_buckets;
-	unsigned int		history_head;
-	u16			history[FLOW_LIMIT_HISTORY];
-	u8			buckets[];
-};
-
-extern int netdev_flow_limit_table_len;
-#endif /* CONFIG_NET_FLOW_LIMIT */
-
 /*
  * Incoming packets are placed on per-CPU queues
  */
@@ -3763,7 +3748,6 @@ int dev_change_flags(struct net_device *dev, unsigned int flags,
 		     struct netlink_ext_ack *extack);
 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
 			unsigned int gchanges);
-int dev_change_name(struct net_device *, const char *);
 int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
@@ -3775,13 +3759,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net,
 	return __dev_change_net_namespace(dev, net, pat, 0);
 }
 int __dev_set_mtu(struct net_device *, int);
-int dev_validate_mtu(struct net_device *dev, int mtu,
-		     struct netlink_ext_ack *extack);
-int dev_set_mtu_ext(struct net_device *dev, int mtu,
-		    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
-int dev_change_tx_queue_len(struct net_device *, unsigned long);
-void dev_set_group(struct net_device *, int);
 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
 			      struct netlink_ext_ack *extack);
 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
@@ -3789,24 +3767,13 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
 			     struct netlink_ext_ack *extack);
 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
-int dev_change_carrier(struct net_device *, bool new_carrier);
-int dev_get_phys_port_id(struct net_device *dev,
-			 struct netdev_phys_item_id *ppid);
-int dev_get_phys_port_name(struct net_device *dev,
-			   char *name, size_t len);
 int dev_get_port_parent_id(struct net_device *dev,
 			   struct netdev_phys_item_id *ppid, bool recurse);
 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-int dev_change_proto_down(struct net_device *dev, bool proto_down);
-void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
-				  u32 value);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
 
-typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
-int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
-		      int fd, int expected_fd, u32 flags);
 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 u8 dev_xdp_prog_count(struct net_device *dev);
 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
@@ -3871,6 +3838,7 @@ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)		\
 DEV_CORE_STATS_INC(rx_dropped)
 DEV_CORE_STATS_INC(tx_dropped)
 DEV_CORE_STATS_INC(rx_nohandler)
+DEV_CORE_STATS_INC(rx_otherhost_dropped)
 
 static __always_inline int ____dev_forward_skb(struct net_device *dev,
 					       struct sk_buff *skb,
@@ -3891,12 +3859,6 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
 bool dev_nit_active(struct net_device *dev);
 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 
-extern int		netdev_budget;
-extern unsigned int	netdev_budget_usecs;
-
-/* Called by rtnetlink.c:rtnl_unlock() */
-void netdev_run_todo(void);
-
 static inline void __dev_put(struct net_device *dev)
 {
 	if (dev) {
@@ -4013,10 +3975,7 @@ static inline void dev_replace_track(struct net_device *odev,
  * called netif_lowerlayer_*() because they represent the state of any
  * kind of lower layer not just hardware media.
  */
-
-void linkwatch_init_dev(struct net_device *dev);
 void linkwatch_fire_event(struct net_device *dev);
-void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  *	netif_carrier_ok - test if carrier present
@@ -4462,9 +4421,6 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
 		 unsigned char addr_type);
 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
 		 unsigned char addr_type);
-void dev_addr_flush(struct net_device *dev);
-int dev_addr_init(struct net_device *dev);
-void dev_addr_check(struct net_device *dev);
 
 /* Functions used for unicast addresses handling */
 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
@@ -4554,7 +4510,6 @@ static inline void __dev_mc_unsync(struct net_device *dev,
 
 /* Functions used for secondary unicast and multicast support */
 void dev_set_rx_mode(struct net_device *dev);
-void __dev_set_rx_mode(struct net_device *dev);
 int dev_set_promiscuity(struct net_device *dev, int inc);
 int dev_set_allmulti(struct net_device *dev, int inc);
 void netdev_state_change(struct net_device *dev);
@@ -4572,11 +4527,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
 
 extern int		netdev_max_backlog;
-extern int		netdev_tstamp_prequeue;
-extern int		netdev_unregister_timeout_secs;
-extern int		weight_p;
-extern int		dev_weight_rx_bias;
-extern int		dev_weight_tx_bias;
 extern int		dev_rx_weight;
 extern int		dev_tx_weight;
 extern int		gro_normal_batch;
@@ -4764,12 +4714,6 @@ static inline void netdev_rx_csum_fault(struct net_device *dev,
 void net_enable_timestamp(void);
 void net_disable_timestamp(void);
 
-#ifdef CONFIG_PROC_FS
-int __init dev_proc_init(void);
-#else
-#define dev_proc_init() 0
-#endif
-
 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
 					      struct sk_buff *skb, struct net_device *dev,
 					      bool more)
@@ -4805,8 +4749,6 @@ extern const struct kobj_ns_type_operations net_ns_type_operations;
 
 const char *netdev_drivername(const struct net_device *dev);
 
-void linkwatch_run_queue(void);
-
 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
 							  netdev_features_t f2)
 {
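
The hunks above add a per-CPU rx_otherhost_dropped counter to struct net_device_core_stats and generate an increment helper via DEV_CORE_STATS_INC(); dev_get_stats() (converted later in this patch) folds the per-CPU values into rtnl_link_stats64. A minimal sketch of how an RX path uses the generated helper; example_rx_filter() is a hypothetical function, only the helper name comes from the header change above:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/if_packet.h>

    static bool example_rx_filter(struct sk_buff *skb)
    {
            if (skb->pkt_type == PACKET_OTHERHOST) {
                    /* per-CPU increment; dev_get_stats() later folds it into
                     * rtnl_link_stats64::rx_otherhost_dropped
                     */
                    dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
                    return false;   /* drop the packet */
            }
            return true;            /* keep processing */
    }
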
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3a30cae..2394441 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3836,8 +3836,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
 struct sk_buff *__skb_recv_datagram(struct sock *sk,
 				    struct sk_buff_head *sk_queue,
 				    unsigned int flags, int *off, int *err);
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
-				  int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
 __poll_t datagram_poll(struct file *file, struct socket *sock,
 			   struct poll_table_struct *wait);
 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
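
skb_recv_datagram() loses its separate noblock argument here; callers now request non-blocking receives purely through MSG_DONTWAIT in flags, as the appletalk, atm, ax25, bluetooth and can conversions further down show. A minimal sketch of the new calling convention; example_recvmsg() is a hypothetical recvmsg handler:

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/skbuff.h>

    static int example_recvmsg(struct socket *sock, struct msghdr *msg,
                               size_t len, int flags)
    {
            struct sock *sk = sock->sk;
            struct sk_buff *skb;
            int err;

            /* old: skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
             *                        flags & MSG_DONTWAIT, &err);
             * new: MSG_DONTWAIT simply stays in flags
             */
            skb = skb_recv_datagram(sk, flags, &err);
            if (!skb)
                    return err;

            /* ... copy data into msg here ... */
            skb_free_datagram(sk, skb);
            return 0;
    }
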
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
new file mode 100644
index 0000000..7e00cca
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -0,0 +1,131 @@
+#ifndef __MTK_WED_H
+#define __MTK_WED_H
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/regmap.h>
+#include <linux/pci.h>
+
+#define MTK_WED_TX_QUEUES		2
+
+struct mtk_wed_hw;
+struct mtk_wdma_desc;
+
+struct mtk_wed_ring {
+	struct mtk_wdma_desc *desc;
+	dma_addr_t desc_phys;
+	int size;
+
+	u32 reg_base;
+	void __iomem *wpdma;
+};
+
+struct mtk_wed_device {
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	const struct mtk_wed_ops *ops;
+	struct device *dev;
+	struct mtk_wed_hw *hw;
+	bool init_done, running;
+	int wdma_idx;
+	int irq;
+
+	struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
+	struct mtk_wed_ring txfree_ring;
+	struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+
+	struct {
+		int size;
+		void **pages;
+		struct mtk_wdma_desc *desc;
+		dma_addr_t desc_phys;
+	} buf_ring;
+
+	/* filled by driver: */
+	struct {
+		struct pci_dev *pci_dev;
+
+		u32 wpdma_phys;
+
+		u16 token_start;
+		unsigned int nbuf;
+
+		u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+		int (*offload_enable)(struct mtk_wed_device *wed);
+		void (*offload_disable)(struct mtk_wed_device *wed);
+	} wlan;
+#endif
+};
+
+struct mtk_wed_ops {
+	int (*attach)(struct mtk_wed_device *dev);
+	int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
+			     void __iomem *regs);
+	int (*txfree_ring_setup)(struct mtk_wed_device *dev,
+				 void __iomem *regs);
+	void (*detach)(struct mtk_wed_device *dev);
+
+	void (*stop)(struct mtk_wed_device *dev);
+	void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
+	void (*reset_dma)(struct mtk_wed_device *dev);
+
+	u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
+	void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
+
+	u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
+	void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+};
+
+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+
+static inline int
+mtk_wed_device_attach(struct mtk_wed_device *dev)
+{
+	int ret = -ENODEV;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	rcu_read_lock();
+	dev->ops = rcu_dereference(mtk_soc_wed_ops);
+	if (dev->ops)
+		ret = dev->ops->attach(dev);
+	else
+		rcu_read_unlock();
+
+	if (ret)
+		dev->ops = NULL;
+#endif
+
+	return ret;
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+#define mtk_wed_device_active(_dev) !!(_dev)->ops
+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
+	(_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
+	(_dev)->ops->txfree_ring_setup(_dev, _regs)
+#define mtk_wed_device_reg_read(_dev, _reg) \
+	(_dev)->ops->reg_read(_dev, _reg)
+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
+	(_dev)->ops->reg_write(_dev, _reg, _val)
+#define mtk_wed_device_irq_get(_dev, _mask) \
+	(_dev)->ops->irq_get(_dev, _mask)
+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
+	(_dev)->ops->irq_set_mask(_dev, _mask)
+#else
+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+{
+	return false;
+}
+#define mtk_wed_device_detach(_dev) do {} while (0)
+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
+#define mtk_wed_device_reg_read(_dev, _reg) 0
+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
+#define mtk_wed_device_irq_get(_dev, _mask) 0
+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#endif
+
+#endif
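
For illustration, a sketch of how a WLAN driver might hook into the ops exposed by this header: fill in the wlan callbacks, attach, and hand a TX ring over to WED. All example_* names and the token/nbuf values are hypothetical placeholders; only the struct fields and mtk_wed_device_*() helpers come from the header above.

    #include <linux/soc/mediatek/mtk_wed.h>

    struct example_wlan_dev {
            struct mtk_wed_device wed;
    };

    static u32 example_init_buf(void *ptr, dma_addr_t phys, int token_id)
    {
            /* driver-specific: set up one TX buffer for WED */
            return 0;
    }

    static int example_offload_enable(struct mtk_wed_device *wed)
    {
            /* driver-specific: start routing traffic through WED */
            return 0;
    }

    static void example_offload_disable(struct mtk_wed_device *wed)
    {
    }

    static int example_wed_init(struct example_wlan_dev *dev,
                                struct pci_dev *pdev, u32 wpdma_phys,
                                void __iomem *ring_regs)
    {
            dev->wed.wlan.pci_dev = pdev;
            dev->wed.wlan.wpdma_phys = wpdma_phys;
            dev->wed.wlan.token_start = 0;          /* placeholder */
            dev->wed.wlan.nbuf = 16384;             /* placeholder */
            dev->wed.wlan.init_buf = example_init_buf;
            dev->wed.wlan.offload_enable = example_offload_enable;
            dev->wed.wlan.offload_disable = example_offload_disable;

            if (mtk_wed_device_attach(&dev->wed))
                    return 0;       /* no WED: fall back to normal DMA */

            /* hand TX ring 0's register block over to WED */
            return mtk_wed_device_tx_ring_setup(&dev->wed, 0, ring_regs);
    }
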
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 4cfdef6..c849072 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -64,6 +64,14 @@ struct inet6_ifaddr {
 
 	struct hlist_node	addr_lst;
 	struct list_head	if_list;
+	/*
+	 * Used to safely traverse idev->addr_list in process context
+	 * if the idev->lock needed to protect idev->addr_list cannot be held.
+	 * In that case, add the items to this list temporarily and iterate
+	 * without holding idev->lock.
+	 * See addrconf_ifdown and dev_forward_change.
+	 */
+	struct list_head	if_list_aux;
 
 	struct list_head	tmp_list;
 	struct inet6_ifaddr	*ifpub;
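
The new if_list_aux member supports the pattern the comment describes: collect the addresses while idev->lock is held, then walk them after the lock is dropped. A sketch of that pattern; example_walk() is hypothetical, the in-tree users are addrconf_ifdown() and dev_forward_change():

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <net/if_inet6.h>

    static void example_walk(struct inet6_dev *idev)
    {
            struct inet6_ifaddr *ifa;
            LIST_HEAD(tmp);

            /* collect entries while idev->lock protects addr_list */
            read_lock_bh(&idev->lock);
            list_for_each_entry(ifa, &idev->addr_list, if_list)
                    list_add_tail(&ifa->if_list_aux, &tmp);
            read_unlock_bh(&idev->lock);

            /* iterate without idev->lock held */
            list_for_each_entry(ifa, &tmp, if_list_aux) {
                    /* ... per-address work that may need to sleep ... */
            }
    }
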
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70ca4a5..6d50a66 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1139,15 +1139,6 @@ static inline bool tcp_ca_needs_ecn(const struct sock *sk)
 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
 }
 
-static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	if (icsk->icsk_ca_ops->set_state)
-		icsk->icsk_ca_ops->set_state(sk, ca_state);
-	icsk->icsk_ca_state = ca_state;
-}
-
 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1156,6 +1147,9 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 		icsk->icsk_ca_ops->cwnd_event(sk, event);
 }
 
+/* From tcp_cong.c */
+void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
+
 /* From tcp_rate.c */
 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
@@ -1207,9 +1201,20 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 
 #define TCP_INFINITE_SSTHRESH	0x7fffffff
 
+static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
+{
+	return tp->snd_cwnd;
+}
+
+static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
+{
+	WARN_ON_ONCE((int)val <= 0);
+	tp->snd_cwnd = val;
+}
+
 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
 {
-	return tp->snd_cwnd < tp->snd_ssthresh;
+	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
 }
 
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
@@ -1235,8 +1240,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 		return tp->snd_ssthresh;
 	else
 		return max(tp->snd_ssthresh,
-			   ((tp->snd_cwnd >> 1) +
-			    (tp->snd_cwnd >> 2)));
+			   ((tcp_snd_cwnd(tp) >> 1) +
+			    (tcp_snd_cwnd(tp) >> 2)));
 }
 
 /* Use define here intentionally to get WARN_ON location shown at the caller */
@@ -1278,7 +1283,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 
 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
 	if (tcp_in_slow_start(tp))
-		return tp->snd_cwnd < 2 * tp->max_packets_out;
+		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
 
 	return tp->is_cwnd_limited;
 }
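
snd_cwnd is now read through tcp_snd_cwnd() and written through tcp_snd_cwnd_set(), so a zero or negative value trips the WARN_ON_ONCE() at the single write site. A sketch of the resulting style in a congestion-control module; example_cong_avoid() is hypothetical, the tcp_cong.c/tcp_bbr.c/tcp_bic.c conversions below follow the same shape:

    #include <net/tcp.h>

    static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (!tcp_is_cwnd_limited(sk))
                    return;

            if (tcp_in_slow_start(tp)) {
                    acked = tcp_slow_start(tp, acked);
                    if (!acked)
                            return;
            }

            /* additive increase: old code would have done tp->snd_cwnd++ */
            tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp) + 1, tp->snd_cwnd_clamp));
    }
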
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 521059d..901b440 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -279,7 +279,7 @@ TRACE_EVENT(tcp_probe,
 		__entry->data_len = skb->len - __tcp_hdrlen(th);
 		__entry->snd_nxt = tp->snd_nxt;
 		__entry->snd_una = tp->snd_una;
-		__entry->snd_cwnd = tp->snd_cwnd;
+		__entry->snd_cwnd = tcp_snd_cwnd(tp);
 		__entry->snd_wnd = tp->snd_wnd;
 		__entry->rcv_wnd = tp->rcv_wnd;
 		__entry->ssthresh = tcp_current_ssthresh(sk);
@@ -371,6 +371,51 @@ DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
 	TP_ARGS(skb)
 );
 
+TRACE_EVENT(tcp_cong_state_set,
+
+	TP_PROTO(struct sock *sk, const u8 ca_state),
+
+	TP_ARGS(sk, ca_state),
+
+	TP_STRUCT__entry(
+		__field(const void *, skaddr)
+		__field(__u16, sport)
+		__field(__u16, dport)
+		__array(__u8, saddr, 4)
+		__array(__u8, daddr, 4)
+		__array(__u8, saddr_v6, 16)
+		__array(__u8, daddr_v6, 16)
+		__field(__u8, cong_state)
+	),
+
+	TP_fast_assign(
+		struct inet_sock *inet = inet_sk(sk);
+		__be32 *p32;
+
+		__entry->skaddr = sk;
+
+		__entry->sport = ntohs(inet->inet_sport);
+		__entry->dport = ntohs(inet->inet_dport);
+
+		p32 = (__be32 *) __entry->saddr;
+		*p32 = inet->inet_saddr;
+
+		p32 = (__be32 *) __entry->daddr;
+		*p32 =  inet->inet_daddr;
+
+		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+			   sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+		__entry->cong_state = ca_state;
+	),
+
+	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+		  __entry->sport, __entry->dport,
+		  __entry->saddr, __entry->daddr,
+		  __entry->saddr_v6, __entry->daddr_v6,
+		  __entry->cong_state)
+);
+
 #endif /* _TRACE_TCP_H */
 
 /* This part must be outside protection */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index cc284c0..d1e6008 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -211,6 +211,9 @@ struct rtnl_link_stats {
  * @rx_nohandler: Number of packets received on the interface
  *   but dropped by the networking stack because the device is
  *   not designated to receive packets (e.g. backup link in a bond).
+ *
+ * @rx_otherhost_dropped: Number of packets dropped due to mismatch
+ *   in destination MAC address.
  */
 struct rtnl_link_stats64 {
 	__u64	rx_packets;
@@ -243,6 +246,8 @@ struct rtnl_link_stats64 {
 	__u64	rx_compressed;
 	__u64	tx_compressed;
 	__u64	rx_nohandler;
+
+	__u64	rx_otherhost_dropped;
 };
 
 /* Subset of link stats useful for in-HW collection. Meaning of the fields is as
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 4dfc056..c00adf2 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -43,10 +43,6 @@
 #include <linux/tipc.h>
 #include <asm/byteorder.h>
 
-#ifndef __KERNEL__
-#include <arpa/inet.h> /* for ntohs etc. */
-#endif
-
 /*
  * Configuration
  *
@@ -269,33 +265,33 @@ static inline int TLV_OK(const void *tlv, __u16 space)
 	 */
 
 	return (space >= TLV_SPACE(0)) &&
-		(ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+		(__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_len) <= space);
 }
 
 static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
 {
 	return TLV_OK(tlv, space) &&
-		(ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+		(__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
 }
 
 static inline int TLV_GET_LEN(struct tlv_desc *tlv)
 {
-	return ntohs(tlv->tlv_len);
+	return __be16_to_cpu(tlv->tlv_len);
 }
 
 static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len)
 {
-	tlv->tlv_len = htons(len);
+	tlv->tlv_len = __cpu_to_be16(len);
 }
 
 static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv,  __u16 type)
 {
-	return (ntohs(tlv->tlv_type) == type);
+	return (__be16_to_cpu(tlv->tlv_type) == type);
 }
 
 static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type)
 {
-	tlv->tlv_type = htons(type);
+	tlv->tlv_type = __cpu_to_be16(type);
 }
 
 static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
@@ -305,8 +301,8 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
 
 	tlv_len = TLV_LENGTH(len);
 	tlv_ptr = (struct tlv_desc *)tlv;
-	tlv_ptr->tlv_type = htons(type);
-	tlv_ptr->tlv_len  = htons(tlv_len);
+	tlv_ptr->tlv_type = __cpu_to_be16(type);
+	tlv_ptr->tlv_len  = __cpu_to_be16(tlv_len);
 	if (len && data) {
 		memcpy(TLV_DATA(tlv_ptr), data, len);
 		memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
@@ -348,7 +344,7 @@ static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
 
 static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
 {
-	__u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+	__u16 tlv_space = TLV_ALIGN(__be16_to_cpu(list->tlv_ptr->tlv_len));
 
 	list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
 	list->tlv_space -= tlv_space;
@@ -404,9 +400,9 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
 
 	msg_len = TCM_LENGTH(data_len);
 	tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
-	tcm_hdr->tcm_len   = htonl(msg_len);
-	tcm_hdr->tcm_type  = htons(cmd);
-	tcm_hdr->tcm_flags = htons(flags);
+	tcm_hdr->tcm_len   = __cpu_to_be32(msg_len);
+	tcm_hdr->tcm_type  = __cpu_to_be16(cmd);
+	tcm_hdr->tcm_flags = __cpu_to_be16(flags);
 	if (data_len && data) {
 		memcpy(TCM_DATA(msg), data, data_len);
 		memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
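
Replacing ntohs()/htons() with the __be16_to_cpu()/__cpu_to_be16() family means the header is usable from userspace without pulling in <arpa/inet.h>. A minimal userspace sketch under that assumption; the TLV type value and payload are placeholders:

    #include <string.h>
    #include <linux/tipc_config.h>

    int main(void)
    {
            char payload[] = "example";
            /* keep the TLV header naturally aligned */
            char buf[TLV_SPACE(sizeof(payload))] __attribute__((aligned(4)));
            int space;

            space = TLV_SET(buf, 0 /* placeholder TLV type */, payload,
                            sizeof(payload));
            return space > 0 ? 0 : 1;
    }
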
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bf5736c..a06f4d4 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1753,8 +1753,7 @@ static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	int err = 0;
 	struct sk_buff *skb;
 
-	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
-						flags & MSG_DONTWAIT, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	lock_sock(sk);
 
 	if (!skb)
diff --git a/net/atm/common.c b/net/atm/common.c
index 1cfa9bf..d0c8ab7 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -540,7 +540,7 @@ int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	    !test_bit(ATM_VF_READY, &vcc->flags))
 		return 0;
 
-	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
+	skb = skb_recv_datagram(sk, flags, &error);
 	if (!skb)
 		return error;
 
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 363d47f..116481e 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1669,8 +1669,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	}
 
 	/* Now we can treat all alike */
-	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
-				flags & MSG_DONTWAIT, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (skb == NULL)
 		goto out;
 
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index a0cb2e3..6270573 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -251,7 +251,6 @@ EXPORT_SYMBOL(bt_accept_dequeue);
 int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 		    int flags)
 {
-	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	size_t copied;
@@ -263,7 +262,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb) {
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
 			return 0;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 33b3c0f..189e311 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1453,7 +1453,6 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 			    size_t len, int flags)
 {
-	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	int copied, err;
@@ -1470,7 +1469,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 	if (sk->sk_state == BT_CLOSED)
 		return 0;
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		return err;
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2b8892d5..251e666 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -282,7 +282,7 @@ static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m,
 	if (flags & MSG_OOB)
 		goto read_error;
 
-	skb = skb_recv_datagram(sk, flags, 0 , &ret);
+	skb = skb_recv_datagram(sk, flags, &ret);
 	if (!skb)
 		goto read_error;
 	copylen = skb->len;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 95d209b..64c07e6 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1632,12 +1632,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	int error = 0;
-	int noblock;
 	int err;
 
-	noblock =  flags & MSG_DONTWAIT;
-	flags   &= ~MSG_DONTWAIT;
-	skb = skb_recv_datagram(sk, flags, noblock, &error);
+	skb = skb_recv_datagram(sk, flags, &error);
 	if (!skb)
 		return error;
 
diff --git a/net/can/isotp.c b/net/can/isotp.c
index bafb0fb..02d81ef 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -1047,7 +1047,6 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	struct isotp_sock *so = isotp_sk(sk);
-	int noblock = flags & MSG_DONTWAIT;
 	int ret = 0;
 
 	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
@@ -1056,8 +1055,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	if (!so->bound)
 		return -EADDRNOTAVAIL;
 
-	flags &= ~MSG_DONTWAIT;
-	skb = skb_recv_datagram(sk, flags, noblock, &ret);
+	skb = skb_recv_datagram(sk, flags, &ret);
 	if (!skb)
 		return ret;
 
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 6dff451..0bb4fd3 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -802,7 +802,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
 		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
 					  SCM_J1939_ERRQUEUE);
 
-	skb = skb_recv_datagram(sk, flags, 0, &ret);
+	skb = skb_recv_datagram(sk, flags, &ret);
 	if (!skb)
 		return ret;
 
diff --git a/net/can/raw.c b/net/can/raw.c
index 7105fa4..0cf728d 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -846,16 +846,12 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	int err = 0;
-	int noblock;
-
-	noblock = flags & MSG_DONTWAIT;
-	flags &= ~MSG_DONTWAIT;
 
 	if (flags & MSG_ERRQUEUE)
 		return sock_recv_errqueue(sk, msg, size,
 					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		return err;
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ee29077..70126d1 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -310,12 +310,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
 EXPORT_SYMBOL(__skb_recv_datagram);
 
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
-				  int noblock, int *err)
+				  int *err)
 {
 	int off = 0;
 
-	return __skb_recv_datagram(sk, &sk->sk_receive_queue,
-				   flags | (noblock ? MSG_DONTWAIT : 0),
+	return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
 				   &off, err);
 }
 EXPORT_SYMBOL(skb_recv_datagram);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8c6c084..e027410 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -151,6 +151,7 @@
 #include <linux/prandom.h>
 #include <linux/once_lite.h>
 
+#include "dev.h"
 #include "net-sysfs.h"
 
 
@@ -701,6 +702,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
 		if (WARN_ON_ONCE(last_dev == ctx.dev))
 			return -1;
 	}
+
+	if (!ctx.dev)
+		return ret;
+
 	path = dev_fwd_path(stack);
 	if (!path)
 		return -1;
@@ -8641,7 +8646,6 @@ void dev_set_group(struct net_device *dev, int new_group)
 {
 	dev->group = new_group;
 }
-EXPORT_SYMBOL(dev_set_group);
 
 /**
  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
@@ -8756,7 +8760,6 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
 		return -ENODEV;
 	return ops->ndo_change_carrier(dev, new_carrier);
 }
-EXPORT_SYMBOL(dev_change_carrier);
 
 /**
  *	dev_get_phys_port_id - Get device physical port ID
@@ -8774,7 +8777,6 @@ int dev_get_phys_port_id(struct net_device *dev,
 		return -EOPNOTSUPP;
 	return ops->ndo_get_phys_port_id(dev, ppid);
 }
-EXPORT_SYMBOL(dev_get_phys_port_id);
 
 /**
  *	dev_get_phys_port_name - Get device physical port name
@@ -8797,7 +8799,6 @@ int dev_get_phys_port_name(struct net_device *dev,
 	}
 	return devlink_compat_phys_port_name_get(dev, name, len);
 }
-EXPORT_SYMBOL(dev_get_phys_port_name);
 
 /**
  *	dev_get_port_parent_id - Get the device's port parent identifier
@@ -8879,7 +8880,6 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
 	dev->proto_down = proto_down;
 	return 0;
 }
-EXPORT_SYMBOL(dev_change_proto_down);
 
 /**
  *	dev_change_proto_down_reason - proto down reason
@@ -8904,7 +8904,6 @@ void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
 		}
 	}
 }
-EXPORT_SYMBOL(dev_change_proto_down_reason);
 
 struct bpf_xdp_link {
 	struct bpf_link link;
@@ -9431,7 +9430,7 @@ static int dev_new_index(struct net *net)
 }
 
 /* Delayed registration/unregisteration */
-static LIST_HEAD(net_todo_list);
+LIST_HEAD(net_todo_list);
 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
@@ -10359,6 +10358,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 			storage->rx_dropped += local_read(&core_stats->rx_dropped);
 			storage->tx_dropped += local_read(&core_stats->tx_dropped);
 			storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
+			storage->rx_otherhost_dropped += local_read(&core_stats->rx_otherhost_dropped);
 		}
 	}
 	return storage;
diff --git a/net/core/dev.h b/net/core/dev.h
new file mode 100644
index 0000000..27923df
--- /dev/null
+++ b/net/core/dev.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_CORE_DEV_H
+#define _NET_CORE_DEV_H
+
+#include <linux/types.h>
+
+struct net;
+struct net_device;
+struct netdev_bpf;
+struct netdev_phys_item_id;
+struct netlink_ext_ack;
+
+/* Random bits of netdevice that don't need to be exposed */
+#define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be ^2 and !overflow buckets */
+struct sd_flow_limit {
+	u64			count;
+	unsigned int		num_buckets;
+	unsigned int		history_head;
+	u16			history[FLOW_LIMIT_HISTORY];
+	u8			buckets[];
+};
+
+extern int netdev_flow_limit_table_len;
+
+#ifdef CONFIG_PROC_FS
+int __init dev_proc_init(void);
+#else
+#define dev_proc_init() 0
+#endif
+
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_run_queue(void);
+
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
+void dev_addr_check(struct net_device *dev);
+
+/* sysctls not referred to from outside net/core/ */
+extern int		netdev_budget;
+extern unsigned int	netdev_budget_usecs;
+
+extern int		netdev_tstamp_prequeue;
+extern int		netdev_unregister_timeout_secs;
+extern int		weight_p;
+extern int		dev_weight_rx_bias;
+extern int		dev_weight_tx_bias;
+
+/* rtnl helpers */
+extern struct list_head net_todo_list;
+void netdev_run_todo(void);
+
+/* netdev management, shared between various uAPI entry points */
+struct netdev_name_node {
+	struct hlist_node hlist;
+	struct list_head list;
+	struct net_device *dev;
+	const char *name;
+};
+
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_change_name(struct net_device *dev, const char *newname);
+
+int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+
+int dev_validate_mtu(struct net_device *dev, int mtu,
+		     struct netlink_ext_ack *extack);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+		    struct netlink_ext_ack *extack);
+
+int dev_get_phys_port_id(struct net_device *dev,
+			 struct netdev_phys_item_id *ppid);
+int dev_get_phys_port_name(struct net_device *dev,
+			   char *name, size_t len);
+
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
+void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
+				  u32 value);
+
+typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
+int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
+		      int fd, int expected_fd, u32 flags);
+
+int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
+void dev_set_group(struct net_device *dev, int new_group);
+int dev_change_carrier(struct net_device *dev, bool new_carrier);
+
+void __dev_set_rx_mode(struct net_device *dev);
+
+#endif
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index bead38c..baa63de 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -12,6 +12,8 @@
 #include <linux/export.h>
 #include <linux/list.h>
 
+#include "dev.h"
+
 /*
  * General list handling functions
  */
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 1b807d1..4f6be44 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -10,6 +10,8 @@
 #include <net/dsa.h>
 #include <net/wext.h>
 
+#include "dev.h"
+
 /*
  *	Map an interface index to its name (SIOCGIFNAME)
  */
diff --git a/net/core/filter.c b/net/core/filter.c
index 64470a7..143f442 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5173,7 +5173,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
 				if (val <= 0 || tp->data_segs_out > tp->syn_data)
 					ret = -EINVAL;
 				else
-					tp->snd_cwnd = val;
+					tcp_snd_cwnd_set(tp, val);
 				break;
 			case TCP_BPF_SNDCWND_CLAMP:
 				if (val <= 0) {
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 95098d1..a244d3b 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -18,6 +18,7 @@
 #include <linux/bitops.h>
 #include <linux/types.h>
 
+#include "dev.h"
 
 enum lw_bits {
 	LW_URGENT = 0,
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 88cc0ad..1ec23bf 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -4,6 +4,8 @@
 #include <linux/seq_file.h>
 #include <net/wext.h>
 
+#include "dev.h"
+
 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
 
 #define get_bucket(x) ((x) >> BUCKET_SPACE)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9cbc1c8..4980c3a 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
 #include <linux/of_net.h>
 #include <linux/cpu.h>
 
+#include "dev.h"
 #include "net-sysfs.h"
 
 #ifdef CONFIG_SYSFS
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 159c9c6..4041b3e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -54,6 +54,8 @@
 #include <net/rtnetlink.h>
 #include <net/net_namespace.h>
 
+#include "dev.h"
+
 #define RTNL_MAX_TYPE		50
 #define RTNL_SLAVE_MAX_TYPE	40
 
@@ -95,6 +97,39 @@ void __rtnl_unlock(void)
 
 	defer_kfree_skb_list = NULL;
 
+	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
+	 * is used. In some places, e.g. in cfg80211, we have code that will do
+	 * something like
+	 *   rtnl_lock()
+	 *   wiphy_lock()
+	 *   ...
+	 *   rtnl_unlock()
+	 *
+	 * and because netdev_run_todo() acquires the RTNL for items on the list
+	 * we could cause a situation such as this:
+	 * Thread 1			Thread 2
+	 *				  rtnl_lock()
+	 *				  unregister_netdevice()
+	 *				  __rtnl_unlock()
+	 * rtnl_lock()
+	 * wiphy_lock()
+	 * rtnl_unlock()
+	 *   netdev_run_todo()
+	 *     __rtnl_unlock()
+	 *
+	 *     // list not empty now
+	 *     // because of thread 2
+	 *				  rtnl_lock()
+	 *     while (!list_empty(...))
+	 *       rtnl_lock()
+	 *				  wiphy_lock()
+	 * **** DEADLOCK ****
+	 *
+	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
+	 * it's not used in cases where something is added to the todo list.
+	 */
+	WARN_ON(!list_empty(&net_todo_list));
+
 	mutex_unlock(&rtnl_mutex);
 
 	while (head) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 1180a0c..7000403 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -141,6 +141,8 @@
 
 #include <linux/ethtool.h>
 
+#include "dev.h"
+
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7123fe7..8295e58 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -23,6 +23,8 @@
 #include <net/busy_poll.h>
 #include <net/pkt_sched.h>
 
+#include "dev.h"
+
 static int two = 2;
 static int three = 3;
 static int int_3600 = 3600;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index ebcc812..62b89d6 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -391,7 +391,7 @@ EXPORT_SYMBOL(ether_setup);
 struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 				      unsigned int rxqs)
 {
-	return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
+	return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_ENUM,
 				ether_setup, txqs, rxqs);
 }
 EXPORT_SYMBOL(alloc_etherdev_mqs);
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 3b2366a..a725dd9 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -314,7 +314,8 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	int err = -EOPNOTSUPP;
 	struct sk_buff *skb;
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto out;
 
@@ -703,7 +704,8 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	struct dgram_sock *ro = dgram_sk(sk);
 	DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto out;
 
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 95f7bb05..b1165f7 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -451,6 +451,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
 	 * that it receives, do not try to analyse it.
 	 */
 	if (skb->pkt_type == PACKET_OTHERHOST) {
+		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
 		drop_reason = SKB_DROP_REASON_OTHERHOST;
 		goto drop;
 	}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 3ee9475..550dc5c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -861,7 +861,8 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
 	if (flags & MSG_ERRQUEUE)
 		return inet_recv_error(sk, msg, len, addr_len);
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto out;
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 9f97b9c..c9dd960 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -769,7 +769,8 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		goto out;
 	}
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto out;
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf18fbc..e31cf13 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -429,7 +429,7 @@ void tcp_init_sock(struct sock *sk)
 	 * algorithms that we must have the following bandaid to talk
 	 * efficiently to them.  -DaveM
 	 */
-	tp->snd_cwnd = TCP_INIT_CWND;
+	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
 
 	/* There's a bubble in the pipe until at least the first ACK. */
 	tp->app_limited = ~0U;
@@ -3033,7 +3033,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	icsk->icsk_rto_min = TCP_RTO_MIN;
 	icsk->icsk_delack_max = TCP_DELACK_MAX;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	tp->snd_cwnd = TCP_INIT_CWND;
+	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
 	tp->snd_cwnd_cnt = 0;
 	tp->window_clamp = 0;
 	tp->delivered = 0;
@@ -3744,7 +3744,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_max_pacing_rate = rate64;
 
 	info->tcpi_reordering = tp->reordering;
-	info->tcpi_snd_cwnd = tp->snd_cwnd;
+	info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
 
 	if (info->tcpi_state == TCP_LISTEN) {
 		/* listeners aliased fields :
@@ -3915,7 +3915,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
 	rate64 = tcp_compute_delivery_rate(tp);
 	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
 
-	nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
+	nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
 	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
 	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
 
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 02e8626..c7d30a3 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -276,7 +276,7 @@ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
 	} else {			 /* no RTT sample yet */
 		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
 	}
-	bw = (u64)tp->snd_cwnd * BW_UNIT;
+	bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
 	do_div(bw, rtt_us);
 	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
 }
@@ -323,9 +323,9 @@ static void bbr_save_cwnd(struct sock *sk)
 	struct bbr *bbr = inet_csk_ca(sk);
 
 	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
-		bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
+		bbr->prior_cwnd = tcp_snd_cwnd(tp);  /* this cwnd is good enough */
 	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
-		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
+		bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
 }
 
 static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
@@ -482,7 +482,7 @@ static bool bbr_set_cwnd_to_recover_or_restore(
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
-	u32 cwnd = tp->snd_cwnd;
+	u32 cwnd = tcp_snd_cwnd(tp);
 
 	/* An ACK for P pkts should release at most 2*P packets. We do this
 	 * in two steps. First, here we deduct the number of lost packets.
@@ -520,7 +520,7 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
+	u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;
 
 	if (!acked)
 		goto done;  /* no packet fully ACKed; just apply caps */
@@ -544,9 +544,9 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 	cwnd = max(cwnd, bbr_cwnd_min_target);
 
 done:
-	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
+	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));	/* apply global cap */
 	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
-		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
+		tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target));
 }
 
 /* End cycle phase if it's time and/or we hit the phase's in-flight target. */
@@ -856,7 +856,7 @@ static void bbr_update_ack_aggregation(struct sock *sk,
 	bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
 				     bbr->ack_epoch_acked + rs->acked_sacked);
 	extra_acked = bbr->ack_epoch_acked - expected_acked;
-	extra_acked = min(extra_acked, tp->snd_cwnd);
+	extra_acked = min(extra_acked, tcp_snd_cwnd(tp));
 	if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
 		bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
 }
@@ -914,7 +914,7 @@ static void bbr_check_probe_rtt_done(struct sock *sk)
 		return;
 
 	bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
-	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+	tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));
 	bbr_reset_mode(sk);
 }
 
@@ -1093,7 +1093,7 @@ static u32 bbr_undo_cwnd(struct sock *sk)
 	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
 	bbr->full_bw_cnt = 0;
 	bbr_reset_lt_bw_sampling(sk);
-	return tcp_sk(sk)->snd_cwnd;
+	return tcp_snd_cwnd(tcp_sk(sk));
 }
 
 /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index f5f588b..58358bf 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		if (!acked)
 			return;
 	}
-	bictcp_update(ca, tp->snd_cwnd);
+	bictcp_update(ca, tcp_snd_cwnd(tp));
 	tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
@@ -166,16 +166,16 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
 	ca->epoch_start = 0;	/* end of epoch */
 
 	/* Wmax and fast convergence */
-	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
-		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
+	if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
+		ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
 			/ (2 * BICTCP_BETA_SCALE);
 	else
-		ca->last_max_cwnd = tp->snd_cwnd;
+		ca->last_max_cwnd = tcp_snd_cwnd(tp);
 
-	if (tp->snd_cwnd <= low_window)
-		return max(tp->snd_cwnd >> 1U, 2U);
+	if (tcp_snd_cwnd(tp) <= low_window)
+		return max(tcp_snd_cwnd(tp) >> 1U, 2U);
 	else
-		return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
+		return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 709d238..ddc7ba0 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -161,8 +161,8 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
 				NET_ADD_STATS(sock_net(sk),
 					      LINUX_MIB_TCPHYSTARTTRAINCWND,
-					      tp->snd_cwnd);
-				tp->snd_ssthresh = tp->snd_cwnd;
+					      tcp_snd_cwnd(tp));
+				tp->snd_ssthresh = tcp_snd_cwnd(tp);
 				return;
 			}
 		}
@@ -180,8 +180,8 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
 				NET_ADD_STATS(sock_net(sk),
 					      LINUX_MIB_TCPHYSTARTDELAYCWND,
-					      tp->snd_cwnd);
-				tp->snd_ssthresh = tp->snd_cwnd;
+					      tcp_snd_cwnd(tp));
+				tp->snd_ssthresh = tcp_snd_cwnd(tp);
 			}
 		}
 	}
@@ -252,7 +252,7 @@ static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
 			return false;
 	}
 
-	ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
+	ca->shadow_wnd = max(ca->shadow_wnd, tcp_snd_cwnd(tp));
 	ca->state = CDG_BACKOFF;
 	tcp_enter_cwr(sk);
 	return true;
@@ -285,14 +285,14 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	}
 
 	if (!tcp_is_cwnd_limited(sk)) {
-		ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
+		ca->shadow_wnd = min(ca->shadow_wnd, tcp_snd_cwnd(tp));
 		return;
 	}
 
-	prior_snd_cwnd = tp->snd_cwnd;
+	prior_snd_cwnd = tcp_snd_cwnd(tp);
 	tcp_reno_cong_avoid(sk, ack, acked);
 
-	incr = tp->snd_cwnd - prior_snd_cwnd;
+	incr = tcp_snd_cwnd(tp) - prior_snd_cwnd;
 	ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
 }
 
@@ -331,15 +331,15 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (ca->state == CDG_BACKOFF)
-		return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);
+		return max(2U, (tcp_snd_cwnd(tp) * min(1024U, backoff_beta)) >> 10);
 
 	if (ca->state == CDG_NONFULL && use_tolerance)
-		return tp->snd_cwnd;
+		return tcp_snd_cwnd(tp);
 
-	ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd);
+	ca->shadow_wnd = min(ca->shadow_wnd >> 1, tcp_snd_cwnd(tp));
 	if (use_shadow)
-		return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
-	return max(2U, tp->snd_cwnd >> 1);
+		return max3(2U, ca->shadow_wnd, tcp_snd_cwnd(tp) >> 1);
+	return max(2U, tcp_snd_cwnd(tp) >> 1);
 }
 
 static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
@@ -357,7 +357,7 @@ static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
 
 		ca->gradients = gradients;
 		ca->rtt_seq = tp->snd_nxt;
-		ca->shadow_wnd = tp->snd_cwnd;
+		ca->shadow_wnd = tcp_snd_cwnd(tp);
 		break;
 	case CA_EVENT_COMPLETE_CWR:
 		ca->state = CDG_UNKNOWN;
@@ -380,7 +380,7 @@ static void tcp_cdg_init(struct sock *sk)
 		ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
 					GFP_NOWAIT | __GFP_NOWARN);
 	ca->rtt_seq = tp->snd_nxt;
-	ca->shadow_wnd = tp->snd_cwnd;
+	ca->shadow_wnd = tcp_snd_cwnd(tp);
 }
 
 static void tcp_cdg_release(struct sock *sk)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index dc95572..d3cae40 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -16,6 +16,7 @@
 #include <linux/gfp.h>
 #include <linux/jhash.h>
 #include <net/tcp.h>
+#include <trace/events/tcp.h>
 
 static DEFINE_SPINLOCK(tcp_cong_list_lock);
 static LIST_HEAD(tcp_cong_list);
@@ -33,6 +34,17 @@ struct tcp_congestion_ops *tcp_ca_find(const char *name)
 	return NULL;
 }
 
+void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	trace_tcp_cong_state_set(sk, ca_state);
+
+	if (icsk->icsk_ca_ops->set_state)
+		icsk->icsk_ca_ops->set_state(sk, ca_state);
+	icsk->icsk_ca_state = ca_state;
+}
+
 /* Must be called with rcu lock held */
 static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
 						       const char *name)
@@ -393,10 +405,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
  */
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
+	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);
 
-	acked -= cwnd - tp->snd_cwnd;
-	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+	acked -= cwnd - tcp_snd_cwnd(tp);
+	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));
 
 	return acked;
 }
@@ -410,7 +422,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 	/* If credits accumulated at a higher w, apply them gently now. */
 	if (tp->snd_cwnd_cnt >= w) {
 		tp->snd_cwnd_cnt = 0;
-		tp->snd_cwnd++;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 	}
 
 	tp->snd_cwnd_cnt += acked;
@@ -418,9 +430,9 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 		u32 delta = tp->snd_cwnd_cnt / w;
 
 		tp->snd_cwnd_cnt -= delta * w;
-		tp->snd_cwnd += delta;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
 	}
-	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
+	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
 }
 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 
@@ -445,7 +457,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			return;
 	}
 	/* In dangerous area, increase slowly. */
-	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
@@ -454,7 +466,7 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tp->snd_cwnd >> 1U, 2U);
+	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
@@ -462,7 +474,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tp->snd_cwnd, tp->prior_cwnd);
+	return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
 
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 24d562d..b091883 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -334,7 +334,7 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		if (!acked)
 			return;
 	}
-	bictcp_update(ca, tp->snd_cwnd, acked);
+	bictcp_update(ca, tcp_snd_cwnd(tp), acked);
 	tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
@@ -346,13 +346,13 @@ static u32 cubictcp_recalc_ssthresh(struct sock *sk)
 	ca->epoch_start = 0;	/* end of epoch */
 
 	/* Wmax and fast convergence */
-	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
-		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
+	if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
+		ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
 			/ (2 * BICTCP_BETA_SCALE);
 	else
-		ca->last_max_cwnd = tp->snd_cwnd;
+		ca->last_max_cwnd = tcp_snd_cwnd(tp);
 
-	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
+	return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
 static void cubictcp_state(struct sock *sk, u8 new_state)
@@ -413,13 +413,13 @@ static void hystart_update(struct sock *sk, u32 delay)
 				ca->found = 1;
 				pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
 					 now - ca->round_start, threshold,
-					 ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
+					 ca->delay_min, hystart_ack_delay(sk), tcp_snd_cwnd(tp));
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
 				NET_ADD_STATS(sock_net(sk),
 					      LINUX_MIB_TCPHYSTARTTRAINCWND,
-					      tp->snd_cwnd);
-				tp->snd_ssthresh = tp->snd_cwnd;
+					      tcp_snd_cwnd(tp));
+				tp->snd_ssthresh = tcp_snd_cwnd(tp);
 			}
 		}
 	}
@@ -438,8 +438,8 @@ static void hystart_update(struct sock *sk, u32 delay)
 					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
 				NET_ADD_STATS(sock_net(sk),
 					      LINUX_MIB_TCPHYSTARTDELAYCWND,
-					      tp->snd_cwnd);
-				tp->snd_ssthresh = tp->snd_cwnd;
+					      tcp_snd_cwnd(tp));
+				tp->snd_ssthresh = tcp_snd_cwnd(tp);
 			}
 		}
 	}
@@ -469,7 +469,7 @@ static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
 
 	/* hystart triggers when cwnd is larger than some threshold */
 	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
-	    tp->snd_cwnd >= hystart_low_window)
+	    tcp_snd_cwnd(tp) >= hystart_low_window)
 		hystart_update(sk, delay);
 }
 
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 1943a66..ab034a4 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -106,8 +106,8 @@ static u32 dctcp_ssthresh(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	ca->loss_cwnd = tp->snd_cwnd;
-	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
+	ca->loss_cwnd = tcp_snd_cwnd(tp);
+	return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U);
 }
 
 static void dctcp_update_alpha(struct sock *sk, u32 flags)
@@ -148,8 +148,8 @@ static void dctcp_react_to_loss(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	ca->loss_cwnd = tp->snd_cwnd;
-	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+	ca->loss_cwnd = tcp_snd_cwnd(tp);
+	tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U);
 }
 
 static void dctcp_state(struct sock *sk, u8 new_state)
@@ -211,8 +211,9 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 static u32 dctcp_cwnd_undo(struct sock *sk)
 {
 	const struct dctcp *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+	return max(tcp_snd_cwnd(tp), ca->loss_cwnd);
 }
 
 static struct tcp_congestion_ops dctcp __read_mostly = {
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 349069d..c6de5ce 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -127,22 +127,22 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 *     snd_cwnd <=
 		 *     hstcp_aimd_vals[ca->ai].cwnd
 		 */
-		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
-			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
+		if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) {
+			while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd &&
 			       ca->ai < HSTCP_AIMD_MAX - 1)
 				ca->ai++;
-		} else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
-			while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
+		} else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) {
+			while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd)
 				ca->ai--;
 		}
 
 		/* Do additive increase */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
+		if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
 			/* cwnd = cwnd + a(w) / cwnd */
 			tp->snd_cwnd_cnt += ca->ai + 1;
-			if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-				tp->snd_cwnd_cnt -= tp->snd_cwnd;
-				tp->snd_cwnd++;
+			if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
+				tp->snd_cwnd_cnt -= tcp_snd_cwnd(tp);
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 			}
 		}
 	}
@@ -154,7 +154,7 @@ static u32 hstcp_ssthresh(struct sock *sk)
 	struct hstcp *ca = inet_csk_ca(sk);
 
 	/* Do multiplicative decrease */
-	return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
+	return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
 }
 
 static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 55adcfc..52b1f26 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -124,7 +124,7 @@ static void measure_achieved_throughput(struct sock *sk,
 
 	ca->packetcount += sample->pkts_acked;
 
-	if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
+	if (ca->packetcount >= tcp_snd_cwnd(tp) - (ca->alpha >> 7 ? : 1) &&
 	    now - ca->lasttime >= ca->minRTT &&
 	    ca->minRTT > 0) {
 		__u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
 	const struct htcp *ca = inet_csk_ca(sk);
 
 	htcp_param_update(sk);
-	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
+	return max((tcp_snd_cwnd(tp) * ca->beta) >> 7, 2U);
 }
 
 static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
@@ -242,9 +242,9 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
 		 */
-		if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
+		if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tcp_snd_cwnd(tp)) {
+			if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp)
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 			tp->snd_cwnd_cnt = 0;
 			htcp_alpha_update(ca);
 		} else
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index be39327..abd7d91 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -54,7 +54,7 @@ static void hybla_init(struct sock *sk)
 	ca->rho2_7ls = 0;
 	ca->snd_cwnd_cents = 0;
 	ca->hybla_en = true;
-	tp->snd_cwnd = 2;
+	tcp_snd_cwnd_set(tp, 2);
 	tp->snd_cwnd_clamp = 65535;
 
 	/* 1st Rho measurement based on initial srtt */
@@ -62,7 +62,7 @@ static void hybla_init(struct sock *sk)
 
 	/* set minimum rtt as this is the 1st ever seen */
 	ca->minrtt_us = tp->srtt_us;
-	tp->snd_cwnd = ca->rho;
+	tcp_snd_cwnd_set(tp, ca->rho);
 }
 
 static void hybla_state(struct sock *sk, u8 ca_state)
@@ -137,31 +137,31 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 * as long as increment is estimated as (rho<<7)/window
 		 * it already is <<7 and we can easily count its fractions.
 		 */
-		increment = ca->rho2_7ls / tp->snd_cwnd;
+		increment = ca->rho2_7ls / tcp_snd_cwnd(tp);
 		if (increment < 128)
 			tp->snd_cwnd_cnt++;
 	}
 
 	odd = increment % 128;
-	tp->snd_cwnd += increment >> 7;
+	tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7));
 	ca->snd_cwnd_cents += odd;
 
 	/* check when fractions goes >=128 and increase cwnd by 1. */
 	while (ca->snd_cwnd_cents >= 128) {
-		tp->snd_cwnd++;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 		ca->snd_cwnd_cents -= 128;
 		tp->snd_cwnd_cnt = 0;
 	}
 	/* check when cwnd has not been incremented for a while */
-	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-		tp->snd_cwnd++;
+	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 		tp->snd_cwnd_cnt = 0;
 	}
 	/* clamp down slowstart cwnd to ssthresh value. */
 	if (is_slowstart)
-		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+		tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh));
 
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
+	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
 }
 
 static struct tcp_congestion_ops tcp_hybla __read_mostly = {
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 00e5487..c0c81a2 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -224,7 +224,7 @@ static void update_params(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
 
-	if (tp->snd_cwnd < win_thresh) {
+	if (tcp_snd_cwnd(tp) < win_thresh) {
 		ca->alpha = ALPHA_BASE;
 		ca->beta = BETA_BASE;
 	} else if (ca->cnt_rtt > 0) {
@@ -284,9 +284,9 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 * tp->snd_cwnd += alpha/tp->snd_cwnd
 		*/
 		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
-		if (delta >= tp->snd_cwnd) {
-			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
-					   (u32)tp->snd_cwnd_clamp);
+		if (delta >= tcp_snd_cwnd(tp)) {
+			tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp) + delta / tcp_snd_cwnd(tp),
+						 (u32)tp->snd_cwnd_clamp));
 			tp->snd_cwnd_cnt = 0;
 		}
 	}
@@ -296,9 +296,11 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
+	u32 decr;
 
 	/* Multiplicative decrease */
-	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
+	decr = (tcp_snd_cwnd(tp) * ca->beta) >> BETA_SHIFT;
+	return max(tcp_snd_cwnd(tp) - decr, 2U);
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2088f93..1595b76 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -414,7 +414,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 	per_mss = roundup_pow_of_two(per_mss) +
 		  SKB_DATA_ALIGN(sizeof(struct sk_buff));
 
-	nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+	nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
 	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
 
 	/* Fast Recovery (RFC 5681 3.2) :
@@ -909,12 +909,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
 	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
 	 *	 end of slow start and should slow down.
 	 */
-	if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+	if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
 		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
 	else
 		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
 
-	rate *= max(tp->snd_cwnd, tp->packets_out);
+	rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
 
 	if (likely(tp->srtt_us))
 		do_div(rate, tp->srtt_us);
@@ -2147,12 +2147,12 @@ void tcp_enter_loss(struct sock *sk)
 	    !after(tp->high_seq, tp->snd_una) ||
 	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
-		tp->prior_cwnd = tp->snd_cwnd;
+		tp->prior_cwnd = tcp_snd_cwnd(tp);
 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tcp_ca_event(sk, CA_EVENT_LOSS);
 		tcp_init_undo(tp);
 	}
-	tp->snd_cwnd	   = tcp_packets_in_flight(tp) + 1;
+	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
 	tp->snd_cwnd_cnt   = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
@@ -2458,7 +2458,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 		pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
 			 msg,
 			 &inet->inet_daddr, ntohs(inet->inet_dport),
-			 tp->snd_cwnd, tcp_left_out(tp),
+			 tcp_snd_cwnd(tp), tcp_left_out(tp),
 			 tp->snd_ssthresh, tp->prior_ssthresh,
 			 tp->packets_out);
 	}
@@ -2467,7 +2467,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
 			 msg,
 			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
-			 tp->snd_cwnd, tcp_left_out(tp),
+			 tcp_snd_cwnd(tp), tcp_left_out(tp),
 			 tp->snd_ssthresh, tp->prior_ssthresh,
 			 tp->packets_out);
 	}
@@ -2492,7 +2492,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 	if (tp->prior_ssthresh) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 
-		tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
+		tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
 
 		if (tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
@@ -2599,7 +2599,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 	tp->high_seq = tp->snd_nxt;
 	tp->tlp_high_seq = 0;
 	tp->snd_cwnd_cnt = 0;
-	tp->prior_cwnd = tp->snd_cwnd;
+	tp->prior_cwnd = tcp_snd_cwnd(tp);
 	tp->prr_delivered = 0;
 	tp->prr_out = 0;
 	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
@@ -2629,7 +2629,7 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost,
 	}
 	/* Force a fast retransmit upon entering fast recovery */
 	sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
-	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
 }
 
 static inline void tcp_end_cwnd_reduction(struct sock *sk)
@@ -2642,7 +2642,7 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
 	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
 	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
-		tp->snd_cwnd = tp->snd_ssthresh;
+		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
 		tp->snd_cwnd_stamp = tcp_jiffies32;
 	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
@@ -2709,9 +2709,9 @@ static void tcp_mtup_probe_success(struct sock *sk)
 
 	/* FIXME: breaks with very large cwnd */
 	tp->prior_ssthresh = tcp_current_ssthresh(sk);
-	tp->snd_cwnd = tp->snd_cwnd *
-		       tcp_mss_to_mtu(sk, tp->mss_cache) /
-		       icsk->icsk_mtup.probe_size;
+	tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) *
+			     tcp_mss_to_mtu(sk, tp->mss_cache) /
+			     icsk->icsk_mtup.probe_size);
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
@@ -3034,7 +3034,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
 			tcp_mtup_probe_failed(sk);
 			/* Restores the reduction we did in tcp_mtup_probe() */
-			tp->snd_cwnd++;
+			tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 			tcp_simple_retransmit(sk);
 			return;
 		}
@@ -5436,7 +5436,7 @@ static bool tcp_should_expand_sndbuf(struct sock *sk)
 		return false;
 
 	/* If we filled the congestion window, do not expand.  */
-	if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+	if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp))
 		return false;
 
 	return true;
@@ -5998,9 +5998,9 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
 	 * retransmission has occurred.
 	 */
 	if (tp->total_retrans > 1 && tp->undo_marker)
-		tp->snd_cwnd = 1;
+		tcp_snd_cwnd_set(tp, 1);
 	else
-		tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
+		tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk)));
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
 	bpf_skops_established(sk, bpf_op, skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f9cec62..157265ae 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2621,7 +2621,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		jiffies_to_clock_t(icsk->icsk_rto),
 		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
-		tp->snd_cwnd,
+		tcp_snd_cwnd(tp),
 		state == TCP_LISTEN ?
 		    fastopenq->max_qlen :
 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 82b36ec..ae36780 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -297,7 +297,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 		lp->flag &= ~LP_WITHIN_THR;
 
 	pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag,
-		 tp->snd_cwnd, lp->remote_hz, lp->owd_min, lp->owd_max,
+		 tcp_snd_cwnd(tp), lp->remote_hz, lp->owd_min, lp->owd_max,
 		 lp->sowd >> 3);
 
 	if (lp->flag & LP_WITHIN_THR)
@@ -313,12 +313,12 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 	/* happened within inference
 	 * drop snd_cwnd into 1 */
 	if (lp->flag & LP_WITHIN_INF)
-		tp->snd_cwnd = 1U;
+		tcp_snd_cwnd_set(tp, 1U);
 
 	/* happened after inference
 	 * cut snd_cwnd into half */
 	else
-		tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
+		tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp) >> 1U, 1U));
 
 	/* record this drop time */
 	lp->last_drop = now;
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 0588b00..7029b0e 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -388,15 +388,15 @@ void tcp_update_metrics(struct sock *sk)
 		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
 		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
-			if (val && (tp->snd_cwnd >> 1) > val)
+			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
 				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
-					       tp->snd_cwnd >> 1);
+					       tcp_snd_cwnd(tp) >> 1);
 		}
 		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 			val = tcp_metric_get(tm, TCP_METRIC_CWND);
-			if (tp->snd_cwnd > val)
+			if (tcp_snd_cwnd(tp) > val)
 				tcp_metric_set(tm, TCP_METRIC_CWND,
-					       tp->snd_cwnd);
+					       tcp_snd_cwnd(tp));
 		}
 	} else if (!tcp_in_slow_start(tp) &&
 		   icsk->icsk_ca_state == TCP_CA_Open) {
@@ -404,10 +404,10 @@ void tcp_update_metrics(struct sock *sk)
 		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
 		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
-				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
+				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
 		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 			val = tcp_metric_get(tm, TCP_METRIC_CWND);
-			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
+			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
 		}
 	} else {
 		/* Else slow start did not finish, cwnd is non-sense,
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index ab55235..a60662f 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -197,10 +197,10 @@ static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	}
 
 	if (ca->cwnd_growth_factor < 0) {
-		cnt = tp->snd_cwnd << -ca->cwnd_growth_factor;
+		cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor;
 		tcp_cong_avoid_ai(tp, cnt, acked);
 	} else {
-		cnt = max(4U, tp->snd_cwnd >> ca->cwnd_growth_factor);
+		cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor);
 		tcp_cong_avoid_ai(tp, cnt, acked);
 	}
 }
@@ -209,7 +209,7 @@ static u32 tcpnv_recalc_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U);
+	return max((tcp_snd_cwnd(tp) * nv_loss_dec_factor) >> 10, 2U);
 }
 
 static void tcpnv_state(struct sock *sk, u8 new_state)
@@ -257,7 +257,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		return;
 
 	/* Stop cwnd growth if we were in catch up mode */
-	if (ca->nv_catchup && tp->snd_cwnd >= nv_min_cwnd) {
+	if (ca->nv_catchup && tcp_snd_cwnd(tp) >= nv_min_cwnd) {
 		ca->nv_catchup = 0;
 		ca->nv_allow_cwnd_growth = 0;
 	}
@@ -371,7 +371,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		 * if cwnd < max_win, grow cwnd
 		 * else leave the same
 		 */
-		if (tp->snd_cwnd > max_win) {
+		if (tcp_snd_cwnd(tp) > max_win) {
 			/* there is congestion, check that it is ok
 			 * to make a CA decision
 			 * 1. We should have at least nv_dec_eval_min_calls
@@ -398,20 +398,20 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 			ca->nv_allow_cwnd_growth = 0;
 			tp->snd_ssthresh =
 				(nv_ssthresh_factor * max_win) >> 3;
-			if (tp->snd_cwnd - max_win > 2) {
+			if (tcp_snd_cwnd(tp) - max_win > 2) {
 				/* gap > 2, we do exponential cwnd decrease */
 				int dec;
 
-				dec = max(2U, ((tp->snd_cwnd - max_win) *
+				dec = max(2U, ((tcp_snd_cwnd(tp) - max_win) *
 					       nv_cong_dec_mult) >> 7);
-				tp->snd_cwnd -= dec;
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - dec);
 			} else if (nv_cong_dec_mult > 0) {
-				tp->snd_cwnd = max_win;
+				tcp_snd_cwnd_set(tp, max_win);
 			}
 			if (ca->cwnd_growth_factor > 0)
 				ca->cwnd_growth_factor = 0;
 			ca->nv_no_cong_cnt = 0;
-		} else if (tp->snd_cwnd <= max_win - nv_pad_buffer) {
+		} else if (tcp_snd_cwnd(tp) <= max_win - nv_pad_buffer) {
 			/* There is no congestion, grow cwnd if allowed*/
 			if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls)
 				return;
@@ -444,8 +444,8 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		 * (it wasn't before, if it is now is because nv
 		 *  decreased it).
 		 */
-		if (tp->snd_cwnd < nv_min_cwnd)
-			tp->snd_cwnd = nv_min_cwnd;
+		if (tcp_snd_cwnd(tp) < nv_min_cwnd)
+			tcp_snd_cwnd_set(tp, nv_min_cwnd);
 	}
 }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9ede847..c221f3b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -142,7 +142,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
-	u32 cwnd = tp->snd_cwnd;
+	u32 cwnd = tcp_snd_cwnd(tp);
 
 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 
@@ -151,7 +151,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
 
 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 		cwnd >>= 1;
-	tp->snd_cwnd = max(cwnd, restart_cwnd);
+	tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 	tp->snd_cwnd_used = 0;
 }
@@ -1013,7 +1013,7 @@ static void tcp_tsq_write(struct sock *sk)
 		struct tcp_sock *tp = tcp_sk(sk);
 
 		if (tp->lost_out > tp->retrans_out &&
-		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+		    tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
 			tcp_mstamp_refresh(tp);
 			tcp_xmit_retransmit_queue(sk);
 		}
@@ -1860,9 +1860,9 @@ static void tcp_cwnd_application_limited(struct sock *sk)
 		/* Limited by application or receiver window. */
 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
 		u32 win_used = max(tp->snd_cwnd_used, init_win);
-		if (win_used < tp->snd_cwnd) {
+		if (win_used < tcp_snd_cwnd(tp)) {
 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
-			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
+			tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
 		}
 		tp->snd_cwnd_used = 0;
 	}
@@ -2043,7 +2043,7 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
 		return 1;
 
 	in_flight = tcp_packets_in_flight(tp);
-	cwnd = tp->snd_cwnd;
+	cwnd = tcp_snd_cwnd(tp);
 	if (in_flight >= cwnd)
 		return 0;
 
@@ -2196,12 +2196,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	in_flight = tcp_packets_in_flight(tp);
 
 	BUG_ON(tcp_skb_pcount(skb) <= 1);
-	BUG_ON(tp->snd_cwnd <= in_flight);
+	BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
 
 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
 
 	/* From in_flight test above, we know that cwnd > in_flight.  */
-	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
+	cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
 
 	limit = min(send_win, cong_win);
 
@@ -2215,7 +2215,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 
 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
 	if (win_divisor) {
-		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
+		u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
 
 		/* If at least some fraction of a window is available,
 		 * just use it.
@@ -2345,7 +2345,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	if (likely(!icsk->icsk_mtup.enabled ||
 		   icsk->icsk_mtup.probe_size ||
 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
-		   tp->snd_cwnd < 11 ||
+		   tcp_snd_cwnd(tp) < 11 ||
 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
 		return -1;
 
@@ -2381,7 +2381,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		return 0;
 
 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
-	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
+	if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
 		if (!tcp_packets_in_flight(tp))
 			return -1;
 		else
@@ -2450,7 +2450,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
 		/* Decrement cwnd here because we are sending
 		 * effectively two packets. */
-		tp->snd_cwnd--;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
 		tcp_event_new_data_sent(sk, nskb);
 
 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
@@ -2708,7 +2708,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	else
 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
 
-	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
 	if (likely(sent_pkts || is_cwnd_limited))
 		tcp_cwnd_validate(sk, is_cwnd_limited);
 
@@ -2818,7 +2818,7 @@ void tcp_send_loss_probe(struct sock *sk)
 	if (unlikely(!skb)) {
 		WARN_ONCE(tp->packets_out,
 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
-			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+			  tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
 		inet_csk(sk)->icsk_pending = 0;
 		return;
 	}
@@ -3302,7 +3302,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		if (!hole)
 			tp->retransmit_skb_hint = skb;
 
-		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
+		segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
 		if (segs <= 0)
 			break;
 		sacked = TCP_SKB_CB(skb)->sacked;
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index fbab921..617b818 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -195,7 +195,7 @@ void tcp_rate_check_app_limited(struct sock *sk)
 	    /* Nothing in sending host's qdisc queues or NIC tx queue. */
 	    sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
 	    /* We are not limited by CWND. */
-	    tcp_packets_in_flight(tp) < tp->snd_cwnd &&
+	    tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) &&
 	    /* All lost packets have been retransmitted. */
 	    tp->lost_out <= tp->retrans_out)
 		tp->app_limited =
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 5842081..862b962 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -27,7 +27,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		if (!acked)
 			return;
 	}
-	tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+	tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
 			  acked);
 }
 
@@ -35,7 +35,7 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
+	return max(tcp_snd_cwnd(tp) - (tcp_snd_cwnd(tp)>>TCP_SCALABLE_MD_SCALE), 2U);
 }
 
 static struct tcp_congestion_ops tcp_scalable __read_mostly = {
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index c8003c8..786848a 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
 {
-	return  min(tp->snd_ssthresh, tp->snd_cwnd);
+	return  min(tp->snd_ssthresh, tcp_snd_cwnd(tp));
 }
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
@@ -217,14 +217,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			 * This is:
 			 *     (actual rate in segments) * baseRTT
 			 */
-			target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
+			target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT;
 			do_div(target_cwnd, rtt);
 
 			/* Calculate the difference between the window we had,
 			 * and the window we would like to have. This quantity
 			 * is the "Diff" from the Arizona Vegas papers.
 			 */
-			diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
+			diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
 			if (diff > gamma && tcp_in_slow_start(tp)) {
 				/* Going too fast. Time to slow down
@@ -238,7 +238,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 				 * truncation robs us of full link
 				 * utilization.
 				 */
-				tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
+				tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp),
+							 (u32)target_cwnd + 1));
 				tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
 			} else if (tcp_in_slow_start(tp)) {
@@ -254,14 +255,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 					/* The old window was too fast, so
 					 * we slow down.
 					 */
-					tp->snd_cwnd--;
+					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
 					tp->snd_ssthresh
 						= tcp_vegas_ssthresh(tp);
 				} else if (diff < alpha) {
 					/* We don't have enough extra packets
 					 * in the network, so speed up.
 					 */
-					tp->snd_cwnd++;
+					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 				} else {
 					/* Sending just as fast as we
 					 * should be.
@@ -269,10 +270,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 				}
 			}
 
-			if (tp->snd_cwnd < 2)
-				tp->snd_cwnd = 2;
-			else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
-				tp->snd_cwnd = tp->snd_cwnd_clamp;
+			if (tcp_snd_cwnd(tp) < 2)
+				tcp_snd_cwnd_set(tp, 2);
+			else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
+				tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);
 
 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
 		}
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index cd50a61..366ff6f 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -146,11 +146,11 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
 		rtt = veno->minrtt;
 
-		target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
+		target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt;
 		target_cwnd <<= V_PARAM_SHIFT;
 		do_div(target_cwnd, rtt);
 
-		veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
+		veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd;
 
 		if (tcp_in_slow_start(tp)) {
 			/* Slow start. */
@@ -164,15 +164,15 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			/* In the "non-congestive state", increase cwnd
 			 * every rtt.
 			 */
-			tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+			tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
 		} else {
 			/* In the "congestive state", increase cwnd
 			 * every other rtt.
 			 */
-			if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+			if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
 				if (veno->inc &&
-				    tp->snd_cwnd < tp->snd_cwnd_clamp) {
-					tp->snd_cwnd++;
+				    tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
+					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 					veno->inc = 0;
 				} else
 					veno->inc = 1;
@@ -181,10 +181,10 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 				tp->snd_cwnd_cnt += acked;
 		}
 done:
-		if (tp->snd_cwnd < 2)
-			tp->snd_cwnd = 2;
-		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
-			tp->snd_cwnd = tp->snd_cwnd_clamp;
+		if (tcp_snd_cwnd(tp) < 2)
+			tcp_snd_cwnd_set(tp, 2);
+		else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
+			tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);
 	}
 	/* Wipe the slate clean for the next rtt. */
 	/* veno->cntrtt = 0; */
@@ -199,10 +199,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
 
 	if (veno->diff < beta)
 		/* in "non-congestive state", cut cwnd by 1/5 */
-		return max(tp->snd_cwnd * 4 / 5, 2U);
+		return max(tcp_snd_cwnd(tp) * 4 / 5, 2U);
 	else
 		/* in "congestive state", cut cwnd by 1/2 */
-		return max(tp->snd_cwnd >> 1U, 2U);
+		return max(tcp_snd_cwnd(tp) >> 1U, 2U);
 }
 
 static struct tcp_congestion_ops tcp_veno __read_mostly = {
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index b2e05c4..c6e9714 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -244,7 +244,8 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 
 	switch (event) {
 	case CA_EVENT_COMPLETE_CWR:
-		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
 		break;
 	case CA_EVENT_LOSS:
 		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 07c4c93..18b07ff5 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -71,11 +71,11 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
 	if (!yeah->doing_reno_now) {
 		/* Scalable */
-		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+		tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
 				  acked);
 	} else {
 		/* Reno */
-		tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+		tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
 	}
 
 	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
@@ -130,7 +130,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			/* Compute excess number of packets above bandwidth
 			 * Avoid doing full 64 bit divide.
 			 */
-			bw = tp->snd_cwnd;
+			bw = tcp_snd_cwnd(tp);
 			bw *= rtt - yeah->vegas.baseRTT;
 			do_div(bw, rtt);
 			queue = bw;
@@ -138,20 +138,20 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			if (queue > TCP_YEAH_ALPHA ||
 			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
 				if (queue > TCP_YEAH_ALPHA &&
-				    tp->snd_cwnd > yeah->reno_count) {
+				    tcp_snd_cwnd(tp) > yeah->reno_count) {
 					u32 reduction = min(queue / TCP_YEAH_GAMMA ,
-							    tp->snd_cwnd >> TCP_YEAH_EPSILON);
+							    tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON);
 
-					tp->snd_cwnd -= reduction;
+					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - reduction);
 
-					tp->snd_cwnd = max(tp->snd_cwnd,
-							   yeah->reno_count);
+					tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp),
+								 yeah->reno_count));
 
-					tp->snd_ssthresh = tp->snd_cwnd;
+					tp->snd_ssthresh = tcp_snd_cwnd(tp);
 				}
 
 				if (yeah->reno_count <= 2)
-					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
+					yeah->reno_count = max(tcp_snd_cwnd(tp)>>1, 2U);
 				else
 					yeah->reno_count++;
 
@@ -176,7 +176,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 */
 		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
 		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
-		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;
+		yeah->vegas.beg_snd_cwnd = tcp_snd_cwnd(tp);
 
 		/* Wipe the slate clean for the next RTT. */
 		yeah->vegas.cntRTT = 0;
@@ -193,16 +193,16 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
 	if (yeah->doing_reno_now < TCP_YEAH_RHO) {
 		reduction = yeah->lastQ;
 
-		reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));
+		reduction = min(reduction, max(tcp_snd_cwnd(tp)>>1, 2U));
 
-		reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
+		reduction = max(reduction, tcp_snd_cwnd(tp) >> TCP_YEAH_DELTA);
 	} else
-		reduction = max(tp->snd_cwnd>>1, 2U);
+		reduction = max(tcp_snd_cwnd(tp)>>1, 2U);
 
 	yeah->fast_count = 0;
 	yeah->reno_count = max(yeah->reno_count>>1, 2U);
 
-	return max_t(int, tp->snd_cwnd - reduction, 2);
+	return max_t(int, tcp_snd_cwnd(tp) - reduction, 2);
 }
 
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b225041..1afc4c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -797,6 +797,7 @@ static void dev_forward_change(struct inet6_dev *idev)
 {
 	struct net_device *dev;
 	struct inet6_ifaddr *ifa;
+	LIST_HEAD(tmp_addr_list);
 
 	if (!idev)
 		return;
@@ -815,14 +816,24 @@ static void dev_forward_change(struct inet6_dev *idev)
 		}
 	}
 
+	read_lock_bh(&idev->lock);
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
 		if (ifa->flags&IFA_F_TENTATIVE)
 			continue;
+		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
+	}
+	read_unlock_bh(&idev->lock);
+
+	while (!list_empty(&tmp_addr_list)) {
+		ifa = list_first_entry(&tmp_addr_list,
+				       struct inet6_ifaddr, if_list_aux);
+		list_del(&ifa->if_list_aux);
 		if (idev->cnf.forwarding)
 			addrconf_join_anycast(ifa);
 		else
 			addrconf_leave_anycast(ifa);
 	}
+
 	inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
 				     NETCONFA_FORWARDING,
 				     dev->ifindex, &idev->cnf);
@@ -3728,7 +3739,8 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
 	unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
 	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
-	struct inet6_ifaddr *ifa, *tmp;
+	struct inet6_ifaddr *ifa;
+	LIST_HEAD(tmp_addr_list);
 	bool keep_addr = false;
 	bool was_ready;
 	int state, i;
@@ -3820,16 +3832,23 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
 		write_lock_bh(&idev->lock);
 	}
 
-	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+	list_for_each_entry(ifa, &idev->addr_list, if_list)
+		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
+	write_unlock_bh(&idev->lock);
+
+	while (!list_empty(&tmp_addr_list)) {
 		struct fib6_info *rt = NULL;
 		bool keep;
 
+		ifa = list_first_entry(&tmp_addr_list,
+				       struct inet6_ifaddr, if_list_aux);
+		list_del(&ifa->if_list_aux);
+
 		addrconf_del_dad_work(ifa);
 
 		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
 			!addr_is_local(&ifa->addr);
 
-		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
 		if (keep) {
@@ -3860,15 +3879,14 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
 			addrconf_leave_solict(ifa->idev, &ifa->addr);
 		}
 
-		write_lock_bh(&idev->lock);
 		if (!keep) {
+			write_lock_bh(&idev->lock);
 			list_del_rcu(&ifa->if_list);
+			write_unlock_bh(&idev->lock);
 			in6_ifa_put(ifa);
 		}
 	}
 
-	write_unlock_bh(&idev->lock);
-
 	/* Step 5: Discard anycast and multicast list */
 	if (unregister) {
 		ipv6_ac_destroy_dev(idev);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b5ea35..b4880c7 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -150,6 +150,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
 	struct inet6_dev *idev;
 
 	if (skb->pkt_type == PACKET_OTHERHOST) {
+		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
 		kfree_skb(skb);
 		return NULL;
 	}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 53f632a..19325b7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -257,8 +257,6 @@ static int ip6_tnl_create2(struct net_device *dev)
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	int err;
 
-	t = netdev_priv(dev);
-
 	dev->rtnl_link_ops = &ip6_link_ops;
 	err = register_netdevice(dev);
 	if (err < 0)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c51d5ce..8bb41f3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -477,7 +477,8 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto out;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 13678d3..782df52 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2044,7 +2044,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_rto),
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
-		   tp->snd_cwnd,
+		   tcp_snd_cwnd(tp),
 		   state == TCP_LISTEN ?
 			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a1760ad..a0385dd 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1223,7 +1223,6 @@ static void iucv_process_message_q(struct sock *sk)
 static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 			     size_t len, int flags)
 {
-	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	struct iucv_sock *iucv = iucv_sk(sk);
 	unsigned int copied, rlen;
@@ -1242,7 +1241,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 
 	/* receive/dequeue next skb:
 	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb) {
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
 			return 0;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index fd51db3..d09ec26 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3696,7 +3696,7 @@ static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
 		goto out;
 
-	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (skb == NULL)
 		goto out;
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index b3edafa..c6a5cc2 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -526,7 +526,8 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
 	if (flags & MSG_OOB)
 		goto out;
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto out;
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 96f9757..97fde8a 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -671,7 +671,8 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto out;
 
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index bf35710..8be1fdc 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -191,8 +191,7 @@ static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg,
 		goto end;
 
 	err = 0;
-	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
-				flags & MSG_DONTWAIT, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb)
 		goto end;
 
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index e22b0cb..221863a 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -216,7 +216,7 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
 		return -EOPNOTSUPP;
 
-	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &rc);
+	skb = skb_recv_datagram(sk, flags, &rc);
 	if (!skb)
 		return rc;
 
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 61205cf..24df29e 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -352,7 +352,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
 	if (params->deliver) {
 		KUNIT_EXPECT_EQ(test, rc, 0);
 
-		skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+		skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
 		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
 		KUNIT_EXPECT_EQ(test, skb->len, 1);
 
@@ -360,7 +360,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
 
 	} else {
 		KUNIT_EXPECT_NE(test, rc, 0);
-		skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+		skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
 		KUNIT_EXPECT_PTR_EQ(test, skb2, NULL);
 	}
 
@@ -423,7 +423,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
 		rc = mctp_route_input(&rt->rt, skb);
 	}
 
-	skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+	skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
 
 	if (params->rx_len) {
 		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
@@ -582,7 +582,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
 	rc = mctp_route_input(&rt->rt, skb);
 
 	/* (potentially) receive message */
-	skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc);
+	skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
 
 	if (params->deliver)
 		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index d6fdc57..35b5f80 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1527,10 +1527,9 @@ static int mpls_ifdown(struct net_device *dev, int event)
 					rt->rt_nh_size;
 				struct mpls_route *orig = rt;
 
-				rt = kmalloc(size, GFP_KERNEL);
+				rt = kmemdup(orig, size, GFP_KERNEL);
 				if (!rt)
 					return -ENOMEM;
-				memcpy(rt, orig, size);
 			}
 		}
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 47a876c..9fa85bb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1931,7 +1931,6 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	struct scm_cookie scm;
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
-	int noblock = flags & MSG_DONTWAIT;
 	size_t copied;
 	struct sk_buff *skb, *data_skb;
 	int err, ret;
@@ -1941,7 +1940,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 	copied = 0;
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (skb == NULL)
 		goto out;
 
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fa9dc2b..6f7f439 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1159,7 +1159,8 @@ static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	}
 
 	/* Now we can treat all alike */
-	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
+	skb = skb_recv_datagram(sk, flags, &er);
+	if (!skb) {
 		release_sock(sk);
 		return er;
 	}
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 4ca3579..77642d1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -821,7 +821,6 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 			     size_t len, int flags)
 {
-	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	unsigned int copied, rlen;
 	struct sk_buff *skb, *cskb;
@@ -842,7 +841,7 @@ static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 	if (flags & (MSG_OOB))
 		return -EOPNOTSUPP;
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 	if (!skb) {
 		pr_err("Recv datagram failed state %d %d %d",
 		       sk->sk_state, err, sock_error(sk));
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 0ca214a..8dd5697 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -238,7 +238,6 @@ static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 			   int flags)
 {
-	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	int copied;
@@ -246,7 +245,7 @@ static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 	pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
 
-	skb = skb_recv_datagram(sk, flags, noblock, &rc);
+	skb = skb_recv_datagram(sk, flags, &rc);
 	if (!skb)
 		return rc;
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c39c098..d3caaf4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3421,7 +3421,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	 *	but then it will block.
 	 */
 
-	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+	skb = skb_recv_datagram(sk, flags, &err);
 
 	/*
 	 *	An error occurred so return it. Because skb_recv_datagram()
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 393e6aa..3f2e62b 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -123,7 +123,8 @@ static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			MSG_CMSG_COMPAT))
 		goto out_nofree;
 
-	skb = skb_recv_datagram(sk, flags, noblock, &rval);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &rval);
 	if (skb == NULL)
 		goto out_nofree;
 
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 65d463a..441a267 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -772,7 +772,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
 	u8 pipe_handle, enabled, n_sb;
 	u8 aligned = 0;
 
-	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
+	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+				errp);
 	if (!skb)
 		return NULL;
 
@@ -1267,7 +1268,8 @@ static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			return -EINVAL;
 	}
 
-	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	flags |= (noblock ? MSG_DONTWAIT : 0);
+	skb = skb_recv_datagram(sk, flags, &err);
 	lock_sock(sk);
 	if (skb == NULL) {
 		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
index ec23225..5c2fb99 100644
--- a/net/qrtr/af_qrtr.c
+++ b/net/qrtr/af_qrtr.c
@@ -1035,8 +1035,7 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
 		return -EADDRNOTAVAIL;
 	}
 
-	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
-				flags & MSG_DONTWAIT, &rc);
+	skb = skb_recv_datagram(sk, flags, &rc);
 	if (!skb) {
 		release_sock(sk);
 		return rc;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 30a1cf4..bf2d986 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1230,7 +1230,8 @@ static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 		return -ENOTCONN;
 
 	/* Now we can treat all alike */
-	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
+	skb = skb_recv_datagram(sk, flags, &er);
+	if (!skb)
 		return er;
 
 	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e71a312..fecbd95 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1643,7 +1643,8 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
 	 * so that no locks are necessary.
 	 */
 
-	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
+	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+				&err);
 	if (!skb) {
 		/* This means receive shutdown. */
 		if (err == 0)
@@ -2500,7 +2501,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
 		int used, err;
 
 		mutex_lock(&u->iolock);
-		skb = skb_recv_datagram(sk, 0, 1, &err);
+		skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
 		mutex_unlock(&u->iolock);
 		if (!skb)
 			return err;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index b17dc97..b14f0ed 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1732,19 +1732,16 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
 					int flags)
 {
 	int err;
-	int noblock;
 	struct vmci_datagram *dg;
 	size_t payload_len;
 	struct sk_buff *skb;
 
-	noblock = flags & MSG_DONTWAIT;
-
 	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
 		return -EOPNOTSUPP;
 
 	/* Retrieve the head sk_buff from the socket's receive queue. */
 	err = 0;
-	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
+	skb = skb_recv_datagram(&vsk->sk, flags, &err);
 	if (!skb)
 		return err;
 
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 3a17182..6bc2ac8 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1315,8 +1315,7 @@ static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	} else {
 		/* Now we can treat all alike */
 		release_sock(sk);
-		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
-					flags & MSG_DONTWAIT, &rc);
+		skb = skb_recv_datagram(sk, flags, &rc);
 		lock_sock(sk);
 		if (!skb)
 			goto out;