Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pulling to get some TIPC fixes that a net-next series depends
upon.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt
new file mode 100644
index 0000000..9e33177
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/m_can.txt
@@ -0,0 +1,67 @@
+Bosch M_CAN controller Device Tree Bindings
+-------------------------------------------------
+
+Required properties:
+- compatible		: Should be "bosch,m_can" for M_CAN controllers
+- reg			: physical base address and size of the M_CAN
+			  registers map and Message RAM
+- reg-names		: Should be "m_can" and "message_ram"
+- interrupts		: Should be the interrupt numbers of M_CAN interrupt
+			  lines 0 and 1; they may be identical if both lines
+			  share the same interrupt.
+- interrupt-names	: Should contain "int0" and "int1"
+- clocks		: Clocks used by the controller; should be the host
+			  clock and the CAN clock.
+- clock-names		: Should contain "hclk" and "cclk"
+- pinctrl-<n>		: Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
+- pinctrl-names 	: Names corresponding to the numbered pinctrl states
+- bosch,mram-cfg	: Message RAM configuration data.
+			  Multiple M_CAN instances can share the same Message
+			  RAM, and the number of each element type (e.g. Rx
+			  FIFO or Tx Buffer) in the Message RAM is configurable,
+			  so this property tells the driver how the shared or
+			  private Message RAM is used by this M_CAN controller.
+
+			  The format should be as follows:
+			  <offset sidf_elems xidf_elems rxf0_elems rxf1_elems
+			   rxb_elems txe_elems txb_elems>
+			  The 'offset' is the address offset within the Message
+			  RAM at which the following elements start. It is
+			  usually set to 0x0 when a private Message RAM is
+			  used. The remaining cells specify how many elements
+			  are used for each FIFO/Buffer.
+
+			  M_CAN includes the following elements according to
+			  the user manual:
+			  11-bit Filter	0-128 elements / 0-128 words
+			  29-bit Filter	0-64 elements / 0-128 words
+			  Rx FIFO 0	0-64 elements / 0-1152 words
+			  Rx FIFO 1	0-64 elements / 0-1152 words
+			  Rx Buffers	0-64 elements / 0-1152 words
+			  Tx Event FIFO	0-32 elements / 0-64 words
+			  Tx Buffers	0-32 elements / 0-576 words
+
+			  Please refer to 2.4.1 Message RAM Configuration in
+			  Bosch M_CAN user manual for details.
+
+Example:
+SoC dtsi:
+m_can1: can@020e8000 {
+	compatible = "bosch,m_can";
+	reg = <0x020e8000 0x4000>, <0x02298000 0x4000>;
+	reg-names = "m_can", "message_ram";
+	interrupts = <0 114 0x04>,
+		     <0 114 0x04>;
+	interrupt-names = "int0", "int1";
+	clocks = <&clks IMX6SX_CLK_CANFD>,
+		 <&clks IMX6SX_CLK_CANFD>;
+	clock-names = "hclk", "cclk";
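+	/* offset 0x0, no 11/29-bit filter elements, 32 Rx FIFO 0 elements,
+	 * no Rx FIFO 1/Rx Buffer/Tx Event FIFO elements, 1 Tx Buffer
+	 */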
+	bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
+	status = "disabled";
+};
+
+Board dts:
+&m_can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_m_can1>;
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
new file mode 100644
index 0000000..002d844
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -0,0 +1,43 @@
+Renesas R-Car CAN controller Device Tree Bindings
+-------------------------------------------------
+
+Required properties:
+- compatible: "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
+	      "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
+	      "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
+	      "renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC.
+- reg: physical base address and size of the R-Car CAN register map.
+- interrupts: interrupt specifier for the sole interrupt.
+- clocks: phandles and clock specifiers for 3 CAN clock inputs.
+- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk".
+- pinctrl-0: pin control group to be used for this controller.
+- pinctrl-names: must be "default".
+
+Optional properties:
+- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
+			    <0x0> (default) : Peripheral clock (clkp1)
+			    <0x1> : Peripheral clock (clkp2)
+			    <0x3> : Externally input clock
+
+Example
+-------
+
+SoC common .dtsi file:
+
+	can0: can@e6e80000 {
+		compatible = "renesas,can-r8a7791";
+		reg = <0 0xe6e80000 0 0x1000>;
+		interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
+			 <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
+		clock-names = "clkp1", "clkp2", "can_clk";
+		status = "disabled";
+	};
+
+Board specific .dts file:
+
+&can0 {
+	pinctrl-0 = <&can0_pins>;
+	pinctrl-names = "default";
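+	/* optional; shown here only as an illustration: <0x1> selects
+	 * clkp2, <0x3> the externally input clock (default is clkp1)
+	 */
+	renesas,can-clock-select = <0x1>;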
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
index 2a60cd3..3a9d679 100644
--- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
@@ -12,6 +12,10 @@
  - altr,sysmgr-syscon : Should be the phandle to the system manager node that
    encompasses the glue register, the register offset, and the register shift.
 
+Optional properties:
+- altr,emac-splitter: Should be the phandle to the emac splitter soft IP node
+		if the DWMAC controller is connected to the emac splitter.
+
 Example:
 
 gmac0: ethernet@ff700000 {
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index b7ae0a0..aecec6d 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2365,7 +2365,7 @@
 	endpoint = &hostif->endpoint[0].desc;
 	usb_fill_int_urb(ucs->urb_int_in, udev,
 			 usb_rcvintpipe(udev,
-					(endpoint->bEndpointAddress) & 0x0f),
+					usb_endpoint_num(endpoint)),
 			 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
 			 endpoint->bInterval);
 	rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index d0a41cb..00d4077 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -751,7 +751,7 @@
 	/* Fill the interrupt urb and send it to the core */
 	usb_fill_int_urb(ucs->read_urb, udev,
 			 usb_rcvintpipe(udev,
-					endpoint->bEndpointAddress & 0x0f),
+					usb_endpoint_num(endpoint)),
 			 ucs->rcvbuf, buffer_size,
 			 gigaset_read_int_callback,
 			 cs, endpoint->bInterval);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 95dd1f5..73c21e2 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1388,7 +1388,7 @@
 	}
 
 	if (tx_slave && bond_slave_can_tx(tx_slave)) {
-		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+		if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) {
 			ether_addr_copy(eth_data->h_source,
 					tx_slave->dev->dev_addr);
 		}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index dc73463..d8dc17f 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -625,6 +625,8 @@
 out:
 	if (ret)
 		bond_opt_error_interpret(bond, opt, ret, val);
+	else
+		call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
 
 	return ret;
 }
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 4168822..e78d6b3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -143,6 +143,8 @@
 
 source "drivers/net/can/c_can/Kconfig"
 
+source "drivers/net/can/m_can/Kconfig"
+
 source "drivers/net/can/cc770/Kconfig"
 
 source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 1697f22..fc93041 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
 obj-$(CONFIG_CAN_MSCAN)		+= mscan/
 obj-$(CONFIG_CAN_C_CAN)		+= c_can/
+obj-$(CONFIG_CAN_M_CAN)		+= m_can/
 obj-$(CONFIG_CAN_CC770)		+= cc770/
 obj-$(CONFIG_CAN_AT91)		+= at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)	+= ti_hecc.o
@@ -28,4 +29,4 @@
 obj-$(CONFIG_CAN_RCAR)		+= rcar_can.o
 obj-$(CONFIG_CAN_XILINXCAN)	+= xilinx_can.o
 
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
+subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index ad1cc84..9fdc678 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -5,5 +5,3 @@
 obj-$(CONFIG_CAN_C_CAN) += c_can.o
 obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
 obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
index 9fb8321..8657f87 100644
--- a/drivers/net/can/cc770/Makefile
+++ b/drivers/net/can/cc770/Makefile
@@ -5,5 +5,3 @@
 obj-$(CONFIG_CAN_CC770) += cc770.o
 obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
 obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 9f91fcb..02492d2 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -103,11 +103,11 @@
 			      const struct can_bittiming_const *btc)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	long rate, best_rate = 0;
 	long best_error = 1000000000, error = 0;
 	int best_tseg = 0, best_brp = 0, brp = 0;
 	int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0;
 	int spt_error = 1000, spt = 0, sampl_pt;
+	long rate;
 	u64 v64;
 
 	/* Use CIA recommended sample points */
@@ -152,7 +152,6 @@
 		}
 		best_tseg = tseg / 2;
 		best_brp = brp;
-		best_rate = rate;
 		if (error == 0)
 			break;
 	}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 944aa5d..2700865 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -92,6 +92,27 @@
 #define FLEXCAN_CTRL_ERR_ALL \
 	(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
 
+/* FLEXCAN control register 2 (CTRL2) bits */
+#define FLEXCAN_CRL2_ECRWRE		BIT(29)
+#define FLEXCAN_CRL2_WRMFRZ		BIT(28)
+#define FLEXCAN_CRL2_RFFN(x)		(((x) & 0x0f) << 24)
+#define FLEXCAN_CRL2_TASD(x)		(((x) & 0x1f) << 19)
+#define FLEXCAN_CRL2_MRP		BIT(18)
+#define FLEXCAN_CRL2_RRS		BIT(17)
+#define FLEXCAN_CRL2_EACEN		BIT(16)
+
+/* FLEXCAN memory error control register (MECR) bits */
+#define FLEXCAN_MECR_ECRWRDIS		BIT(31)
+#define FLEXCAN_MECR_HANCEI_MSK		BIT(19)
+#define FLEXCAN_MECR_FANCEI_MSK		BIT(18)
+#define FLEXCAN_MECR_CEI_MSK		BIT(16)
+#define FLEXCAN_MECR_HAERRIE		BIT(15)
+#define FLEXCAN_MECR_FAERRIE		BIT(14)
+#define FLEXCAN_MECR_EXTERRIE		BIT(13)
+#define FLEXCAN_MECR_RERRDIS		BIT(9)
+#define FLEXCAN_MECR_ECCDIS		BIT(8)
+#define FLEXCAN_MECR_NCEFAFRZ		BIT(7)
+
 /* FLEXCAN error and status register (ESR) bits */
 #define FLEXCAN_ESR_TWRN_INT		BIT(17)
 #define FLEXCAN_ESR_RWRN_INT		BIT(16)
@@ -150,18 +171,20 @@
  * FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch-  [TR]WRN_INT
- *                                Filter?   connected?
- *   MX25  FlexCAN2  03.00.00.00     no         no
- *   MX28  FlexCAN2  03.00.04.00    yes        yes
- *   MX35  FlexCAN2  03.00.00.00     no         no
- *   MX53  FlexCAN2  03.00.00.00    yes         no
- *   MX6s  FlexCAN3  10.00.12.00    yes        yes
+ *    SOC   Version   IP-Version  Glitch-  [TR]WRN_INT  Memory err
+ *                                Filter?   connected?  detection
+ *   MX25  FlexCAN2  03.00.00.00     no         no         no
+ *   MX28  FlexCAN2  03.00.04.00    yes        yes         no
+ *   MX35  FlexCAN2  03.00.00.00     no         no         no
+ *   MX53  FlexCAN2  03.00.00.00    yes         no         no
+ *   MX6s  FlexCAN3  10.00.12.00    yes        yes         no
+ *   VF610 FlexCAN3  ?               no        yes        yes
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
 #define FLEXCAN_HAS_V10_FEATURES	BIT(1) /* For core version >= 10 */
 #define FLEXCAN_HAS_BROKEN_ERR_STATE	BIT(2) /* [TR]WRN_INT not connected */
+#define FLEXCAN_HAS_MECR_FEATURES	BIT(3) /* Memory error detection */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -192,8 +215,17 @@
 	u32 crcr;		/* 0x44 */
 	u32 rxfgmask;		/* 0x48 */
 	u32 rxfir;		/* 0x4c */
-	u32 _reserved3[12];
-	struct flexcan_mb cantxfg[64];
+	u32 _reserved3[12];	/* 0x50 */
+	struct flexcan_mb cantxfg[64];	/* 0x80 */
+	u32 _reserved4[408];
+	u32 mecr;		/* 0xae0 */
+	u32 erriar;		/* 0xae4 */
+	u32 erridpr;		/* 0xae8 */
+	u32 errippr;		/* 0xaec */
+	u32 rerrar;		/* 0xaf0 */
+	u32 rerrdr;		/* 0xaf4 */
+	u32 rerrsynr;		/* 0xaf8 */
+	u32 errsr;		/* 0xafc */
 };
 
 struct flexcan_devtype_data {
@@ -223,6 +255,9 @@
 static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 	.features = FLEXCAN_HAS_V10_FEATURES,
 };
+static struct flexcan_devtype_data fsl_vf610_devtype_data = {
+	.features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_MECR_FEATURES,
+};
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
 	.name = DRV_NAME,
@@ -378,8 +413,9 @@
 	return 0;
 }
 
-static int flexcan_get_berr_counter(const struct net_device *dev,
-				    struct can_berr_counter *bec)
+
+static int __flexcan_get_berr_counter(const struct net_device *dev,
+				      struct can_berr_counter *bec)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->base;
@@ -391,6 +427,29 @@
 	return 0;
 }
 
+static int flexcan_get_berr_counter(const struct net_device *dev,
+				    struct can_berr_counter *bec)
+{
+	const struct flexcan_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
+
+	err = __flexcan_get_berr_counter(dev, bec);
+
+	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+	clk_disable_unprepare(priv->clk_ipg);
+
+	return err;
+}
+
 static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
@@ -503,7 +562,7 @@
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct can_berr_counter bec;
 
-	flexcan_get_berr_counter(dev, &bec);
+	__flexcan_get_berr_counter(dev, &bec);
 
 	switch (priv->can.state) {
 	case CAN_STATE_ERROR_ACTIVE:
@@ -800,7 +859,7 @@
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->base;
 	int err;
-	u32 reg_mcr, reg_ctrl;
+	u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr;
 
 	/* enable module */
 	err = flexcan_chip_enable(priv);
@@ -879,6 +938,31 @@
 	if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
 		flexcan_write(0x0, &regs->rxfgmask);
 
+	/*
+	 * On Vybrid, disable memory error detection interrupts
+	 * and freeze mode.
+	 * This also works around errata e5295 which generates
+	 * false positive memory errors and puts the device into
+	 * freeze mode.
+	 */
+	if (priv->devtype_data->features & FLEXCAN_HAS_MECR_FEATURES) {
+		/*
+		 * Follow the protocol as described in "Detection
+		 * and Correction of Memory Errors" to write to the
+		 * MECR register.
+		 */
+		reg_crl2 = flexcan_read(&regs->crl2);
+		reg_crl2 |= FLEXCAN_CRL2_ECRWRE;
+		flexcan_write(reg_crl2, &regs->crl2);
+
+		reg_mecr = flexcan_read(&regs->mecr);
+		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
+		flexcan_write(reg_mecr, &regs->mecr);
+		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
+				FLEXCAN_MECR_FANCEI_MSK);
+		flexcan_write(reg_mecr, &regs->mecr);
+	}
+
 	err = flexcan_transceiver_enable(priv);
 	if (err)
 		goto out_chip_disable;
@@ -1089,6 +1173,7 @@
 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
 	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
 	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
new file mode 100644
index 0000000..fca5482
--- /dev/null
+++ b/drivers/net/can/m_can/Kconfig
@@ -0,0 +1,4 @@
+config CAN_M_CAN
+	tristate "Bosch M_CAN devices"
+	---help---
+	  Say Y here if you want support for the Bosch M_CAN controller.
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
new file mode 100644
index 0000000..8bbd7f2
--- /dev/null
+++ b/drivers/net/can/m_can/Makefile
@@ -0,0 +1,5 @@
+#
+#  Makefile for the Bosch M_CAN controller driver.
+#
+
+obj-$(CONFIG_CAN_M_CAN) += m_can.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
new file mode 100644
index 0000000..10d571e
--- /dev/null
+++ b/drivers/net/can/m_can/m_can.c
@@ -0,0 +1,1202 @@
+/*
+ * CAN bus driver for Bosch M_CAN controller
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *	Dong Aisheng <b29396@freescale.com>
+ *
+ * Bosch M_CAN user manual can be obtained from:
+ * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
+ * mcan_users_manual_v302.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/can/dev.h>
+
+/* napi related */
+#define M_CAN_NAPI_WEIGHT	64
+
+/* message ram configuration data length */
+#define MRAM_CFG_LEN	8
+
+/* registers definition */
+enum m_can_reg {
+	M_CAN_CREL	= 0x0,
+	M_CAN_ENDN	= 0x4,
+	M_CAN_CUST	= 0x8,
+	M_CAN_FBTP	= 0xc,
+	M_CAN_TEST	= 0x10,
+	M_CAN_RWD	= 0x14,
+	M_CAN_CCCR	= 0x18,
+	M_CAN_BTP	= 0x1c,
+	M_CAN_TSCC	= 0x20,
+	M_CAN_TSCV	= 0x24,
+	M_CAN_TOCC	= 0x28,
+	M_CAN_TOCV	= 0x2c,
+	M_CAN_ECR	= 0x40,
+	M_CAN_PSR	= 0x44,
+	M_CAN_IR	= 0x50,
+	M_CAN_IE	= 0x54,
+	M_CAN_ILS	= 0x58,
+	M_CAN_ILE	= 0x5c,
+	M_CAN_GFC	= 0x80,
+	M_CAN_SIDFC	= 0x84,
+	M_CAN_XIDFC	= 0x88,
+	M_CAN_XIDAM	= 0x90,
+	M_CAN_HPMS	= 0x94,
+	M_CAN_NDAT1	= 0x98,
+	M_CAN_NDAT2	= 0x9c,
+	M_CAN_RXF0C	= 0xa0,
+	M_CAN_RXF0S	= 0xa4,
+	M_CAN_RXF0A	= 0xa8,
+	M_CAN_RXBC	= 0xac,
+	M_CAN_RXF1C	= 0xb0,
+	M_CAN_RXF1S	= 0xb4,
+	M_CAN_RXF1A	= 0xb8,
+	M_CAN_RXESC	= 0xbc,
+	M_CAN_TXBC	= 0xc0,
+	M_CAN_TXFQS	= 0xc4,
+	M_CAN_TXESC	= 0xc8,
+	M_CAN_TXBRP	= 0xcc,
+	M_CAN_TXBAR	= 0xd0,
+	M_CAN_TXBCR	= 0xd4,
+	M_CAN_TXBTO	= 0xd8,
+	M_CAN_TXBCF	= 0xdc,
+	M_CAN_TXBTIE	= 0xe0,
+	M_CAN_TXBCIE	= 0xe4,
+	M_CAN_TXEFC	= 0xf0,
+	M_CAN_TXEFS	= 0xf4,
+	M_CAN_TXEFA	= 0xf8,
+};
+
+/* m_can lec values */
+enum m_can_lec_type {
+	LEC_NO_ERROR = 0,
+	LEC_STUFF_ERROR,
+	LEC_FORM_ERROR,
+	LEC_ACK_ERROR,
+	LEC_BIT1_ERROR,
+	LEC_BIT0_ERROR,
+	LEC_CRC_ERROR,
+	LEC_UNUSED,
+};
+
+enum m_can_mram_cfg {
+	MRAM_SIDF = 0,
+	MRAM_XIDF,
+	MRAM_RXF0,
+	MRAM_RXF1,
+	MRAM_RXB,
+	MRAM_TXE,
+	MRAM_TXB,
+	MRAM_CFG_NUM,
+};
+
+/* Test Register (TEST) */
+#define TEST_LBCK	BIT(4)
+
+/* CC Control Register(CCCR) */
+#define CCCR_TEST	BIT(7)
+#define CCCR_MON	BIT(5)
+#define CCCR_CCE	BIT(1)
+#define CCCR_INIT	BIT(0)
+
+/* Bit Timing & Prescaler Register (BTP) */
+#define BTR_BRP_MASK		0x3ff
+#define BTR_BRP_SHIFT		16
+#define BTR_TSEG1_SHIFT		8
+#define BTR_TSEG1_MASK		(0x3f << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT		4
+#define BTR_TSEG2_MASK		(0xf << BTR_TSEG2_SHIFT)
+#define BTR_SJW_SHIFT		0
+#define BTR_SJW_MASK		0xf
+
+/* Error Counter Register(ECR) */
+#define ECR_RP			BIT(15)
+#define ECR_REC_SHIFT		8
+#define ECR_REC_MASK		(0x7f << ECR_REC_SHIFT)
+#define ECR_TEC_SHIFT		0
+#define ECR_TEC_MASK		0xff
+
+/* Protocol Status Register(PSR) */
+#define PSR_BO		BIT(7)
+#define PSR_EW		BIT(6)
+#define PSR_EP		BIT(5)
+#define PSR_LEC_MASK	0x7
+
+/* Interrupt Register(IR) */
+#define IR_ALL_INT	0xffffffff
+#define IR_STE		BIT(31)
+#define IR_FOE		BIT(30)
+#define IR_ACKE		BIT(29)
+#define IR_BE		BIT(28)
+#define IR_CRCE		BIT(27)
+#define IR_WDI		BIT(26)
+#define IR_BO		BIT(25)
+#define IR_EW		BIT(24)
+#define IR_EP		BIT(23)
+#define IR_ELO		BIT(22)
+#define IR_BEU		BIT(21)
+#define IR_BEC		BIT(20)
+#define IR_DRX		BIT(19)
+#define IR_TOO		BIT(18)
+#define IR_MRAF		BIT(17)
+#define IR_TSW		BIT(16)
+#define IR_TEFL		BIT(15)
+#define IR_TEFF		BIT(14)
+#define IR_TEFW		BIT(13)
+#define IR_TEFN		BIT(12)
+#define IR_TFE		BIT(11)
+#define IR_TCF		BIT(10)
+#define IR_TC		BIT(9)
+#define IR_HPM		BIT(8)
+#define IR_RF1L		BIT(7)
+#define IR_RF1F		BIT(6)
+#define IR_RF1W		BIT(5)
+#define IR_RF1N		BIT(4)
+#define IR_RF0L		BIT(3)
+#define IR_RF0F		BIT(2)
+#define IR_RF0W		BIT(1)
+#define IR_RF0N		BIT(0)
+#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)
+#define IR_ERR_LEC	(IR_STE	| IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
+#define IR_ERR_BUS	(IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \
+			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+			 IR_RF1L | IR_RF0L)
+#define IR_ERR_ALL	(IR_ERR_STATE | IR_ERR_BUS)
+
+/* Interrupt Line Select (ILS) */
+#define ILS_ALL_INT0	0x0
+#define ILS_ALL_INT1	0xFFFFFFFF
+
+/* Interrupt Line Enable (ILE) */
+#define ILE_EINT0	BIT(0)
+#define ILE_EINT1	BIT(1)
+
+/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
+#define RXFC_FWM_OFF	24
+#define RXFC_FWM_MASK	0x7f
+#define RXFC_FWM_1	(1 << RXFC_FWM_OFF)
+#define RXFC_FS_OFF	16
+#define RXFC_FS_MASK	0x7f
+
+/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
+#define RXFS_RFL	BIT(25)
+#define RXFS_FF		BIT(24)
+#define RXFS_FPI_OFF	16
+#define RXFS_FPI_MASK	0x3f0000
+#define RXFS_FGI_OFF	8
+#define RXFS_FGI_MASK	0x3f00
+#define RXFS_FFL_MASK	0x7f
+
+/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
+#define M_CAN_RXESC_8BYTES	0x0
+
+/* Tx Buffer Configuration(TXBC) */
+#define TXBC_NDTB_OFF		16
+#define TXBC_NDTB_MASK		0x3f
+
+/* Tx Buffer Element Size Configuration(TXESC) */
+#define TXESC_TBDS_8BYTES	0x0
+
+/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFS_OFF		16
+#define TXEFC_EFS_MASK		0x3f
+
+/* Message RAM Configuration (in bytes) */
+#define SIDF_ELEMENT_SIZE	4
+#define XIDF_ELEMENT_SIZE	8
+#define RXF0_ELEMENT_SIZE	16
+#define RXF1_ELEMENT_SIZE	16
+#define RXB_ELEMENT_SIZE	16
+#define TXE_ELEMENT_SIZE	8
+#define TXB_ELEMENT_SIZE	16
+
+/* Message RAM Elements */
+#define M_CAN_FIFO_ID		0x0
+#define M_CAN_FIFO_DLC		0x4
+#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))
+
+/* Rx Buffer Element */
+#define RX_BUF_ESI		BIT(31)
+#define RX_BUF_XTD		BIT(30)
+#define RX_BUF_RTR		BIT(29)
+
+/* Tx Buffer Element */
+#define TX_BUF_XTD		BIT(30)
+#define TX_BUF_RTR		BIT(29)
+
+/* address offset and element number for each FIFO/Buffer in the Message RAM */
+struct mram_cfg {
+	u16 off;
+	u8  num;
+};
+
+/* m_can private data structure */
+struct m_can_priv {
+	struct can_priv can;	/* must be the first member */
+	struct napi_struct napi;
+	struct net_device *dev;
+	struct device *device;
+	struct clk *hclk;
+	struct clk *cclk;
+	void __iomem *base;
+	u32 irqstatus;
+
+	/* message ram configuration */
+	void __iomem *mram_base;
+	struct mram_cfg mcfg[MRAM_CFG_NUM];
+};
+
+static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
+{
+	return readl(priv->base + reg);
+}
+
+static inline void m_can_write(const struct m_can_priv *priv,
+			       enum m_can_reg reg, u32 val)
+{
+	writel(val, priv->base + reg);
+}
+
+static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
+				  u32 fgi, unsigned int offset)
+{
+	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
+		     fgi * RXF0_ELEMENT_SIZE + offset);
+}
+
+static inline void m_can_fifo_write(const struct m_can_priv *priv,
+				    u32 fpi, unsigned int offset, u32 val)
+{
+	return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
+		      fpi * TXB_ELEMENT_SIZE + offset);
+}
+
+static inline void m_can_config_endisable(const struct m_can_priv *priv,
+					  bool enable)
+{
+	u32 cccr = m_can_read(priv, M_CAN_CCCR);
+	u32 timeout = 10;
+	u32 val = 0;
+
+	if (enable) {
+		/* enable m_can configuration */
+		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
+		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
+		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
+	} else {
+		m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
+	}
+
+	/* there's a delay for module initialization */
+	if (enable)
+		val = CCCR_INIT | CCCR_CCE;
+
+	while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
+		if (timeout == 0) {
+			netdev_warn(priv->dev, "Failed to init module\n");
+			return;
+		}
+		timeout--;
+		udelay(1);
+	}
+}
+
+static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
+{
+	m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1);
+}
+
+static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
+{
+	m_can_write(priv, M_CAN_ILE, 0x0);
+}
+
+static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf,
+			    u32 rxfs)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	u32 id, fgi;
+
+	/* calculate the fifo get index for where to read data */
+	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
+	id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
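+	/* extended IDs occupy ID[28:0], standard IDs are stored in ID[28:18] */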
+	if (id & RX_BUF_XTD)
+		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (id >> 18) & CAN_SFF_MASK;
+
+	if (id & RX_BUF_RTR) {
+		cf->can_id |= CAN_RTR_FLAG;
+	} else {
+		id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
+		cf->can_dlc = get_can_dlc((id >> 16) & 0x0F);
+		*(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi,
+							 M_CAN_FIFO_DATA(0));
+		*(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi,
+							 M_CAN_FIFO_DATA(1));
+	}
+
+	/* acknowledge rx fifo 0 */
+	m_can_write(priv, M_CAN_RXF0A, fgi);
+}
+
+static int m_can_do_rx_poll(struct net_device *dev, int quota)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *frame;
+	u32 pkts = 0;
+	u32 rxfs;
+
+	rxfs = m_can_read(priv, M_CAN_RXF0S);
+	if (!(rxfs & RXFS_FFL_MASK)) {
+		netdev_dbg(dev, "no messages in fifo0\n");
+		return 0;
+	}
+
+	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
+		if (rxfs & RXFS_RFL)
+			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
+
+		skb = alloc_can_skb(dev, &frame);
+		if (!skb) {
+			stats->rx_dropped++;
+			return pkts;
+		}
+
+		m_can_read_fifo(dev, frame, rxfs);
+
+		stats->rx_packets++;
+		stats->rx_bytes += frame->can_dlc;
+
+		netif_receive_skb(skb);
+
+		quota--;
+		pkts++;
+		rxfs = m_can_read(priv, M_CAN_RXF0S);
+	}
+
+	if (pkts)
+		can_led_event(dev, CAN_LED_EVENT_RX);
+
+	return pkts;
+}
+
+static int m_can_handle_lost_msg(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *frame;
+
+	netdev_err(dev, "msg lost in rxf0\n");
+
+	stats->rx_errors++;
+	stats->rx_over_errors++;
+
+	skb = alloc_can_err_skb(dev, &frame);
+	if (unlikely(!skb))
+		return 0;
+
+	frame->can_id |= CAN_ERR_CRTL;
+	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_handle_lec_err(struct net_device *dev,
+				enum m_can_lec_type lec_type)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+
+	priv->can.can_stats.bus_error++;
+	stats->rx_errors++;
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	/* check for 'last error code' which tells us the
+	 * type of the last error to occur on the CAN bus
+	 */
+	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+	cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+	switch (lec_type) {
+	case LEC_STUFF_ERROR:
+		netdev_dbg(dev, "stuff error\n");
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+		break;
+	case LEC_FORM_ERROR:
+		netdev_dbg(dev, "form error\n");
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+		break;
+	case LEC_ACK_ERROR:
+		netdev_dbg(dev, "ack error\n");
+		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
+				CAN_ERR_PROT_LOC_ACK_DEL);
+		break;
+	case LEC_BIT1_ERROR:
+		netdev_dbg(dev, "bit1 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+		break;
+	case LEC_BIT0_ERROR:
+		netdev_dbg(dev, "bit0 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+		break;
+	case LEC_CRC_ERROR:
+		netdev_dbg(dev, "CRC error\n");
+		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+				CAN_ERR_PROT_LOC_CRC_DEL);
+		break;
+	default:
+		break;
+	}
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_get_berr_counter(const struct net_device *dev,
+				  struct can_berr_counter *bec)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	unsigned int ecr;
+	int err;
+
+	err = clk_prepare_enable(priv->hclk);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->cclk);
+	if (err) {
+		clk_disable_unprepare(priv->hclk);
+		return err;
+	}
+
+	ecr = m_can_read(priv, M_CAN_ECR);
+	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
+	bec->txerr = ecr & ECR_TEC_MASK;
+
+	clk_disable_unprepare(priv->cclk);
+	clk_disable_unprepare(priv->hclk);
+
+	return 0;
+}
+
+static int m_can_handle_state_change(struct net_device *dev,
+				     enum can_state new_state)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct can_berr_counter bec;
+	unsigned int ecr;
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/* error warning state */
+		priv->can.can_stats.error_warning++;
+		priv->can.state = CAN_STATE_ERROR_WARNING;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		/* error passive state */
+		priv->can.can_stats.error_passive++;
+		priv->can.state = CAN_STATE_ERROR_PASSIVE;
+		break;
+	case CAN_STATE_BUS_OFF:
+		/* bus-off state */
+		priv->can.state = CAN_STATE_BUS_OFF;
+		m_can_disable_all_interrupts(priv);
+		can_bus_off(dev);
+		break;
+	default:
+		break;
+	}
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	m_can_get_berr_counter(dev, &bec);
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/* error warning state */
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (bec.txerr > bec.rxerr) ?
+			CAN_ERR_CRTL_TX_WARNING :
+			CAN_ERR_CRTL_RX_WARNING;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		/* error passive state */
+		cf->can_id |= CAN_ERR_CRTL;
+		ecr = m_can_read(priv, M_CAN_ECR);
+		if (ecr & ECR_RP)
+			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+		if (bec.txerr > 127)
+			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
+	case CAN_STATE_BUS_OFF:
+		/* bus-off state */
+		cf->can_id |= CAN_ERR_BUSOFF;
+		break;
+	default:
+		break;
+	}
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+
+	if ((psr & PSR_EW) &&
+	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
+		netdev_dbg(dev, "entered error warning state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_ERROR_WARNING);
+	}
+
+	if ((psr & PSR_EP) &&
+	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
+		netdev_dbg(dev, "entered error passive state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_ERROR_PASSIVE);
+	}
+
+	if ((psr & PSR_BO) &&
+	    (priv->can.state != CAN_STATE_BUS_OFF)) {
+		netdev_dbg(dev, "entered bus-off state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_BUS_OFF);
+	}
+
+	return work_done;
+}
+
+static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
+{
+	if (irqstatus & IR_WDI)
+		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
+	if (irqstatus & IR_ELO)
+		netdev_err(dev, "Error Logging Overflow\n");
+	if (irqstatus & IR_BEU)
+		netdev_err(dev, "Bit Error Uncorrected\n");
+	if (irqstatus & IR_BEC)
+		netdev_err(dev, "Bit Error Corrected\n");
+	if (irqstatus & IR_TOO)
+		netdev_err(dev, "Timeout reached\n");
+	if (irqstatus & IR_MRAF)
+		netdev_err(dev, "Message RAM access failure occurred\n");
+}
+
+static inline bool is_lec_err(u32 psr)
+{
+	psr &= LEC_UNUSED;
+
+	return psr && (psr != LEC_UNUSED);
+}
+
+static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
+				   u32 psr)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+
+	if (irqstatus & IR_RF0L)
+		work_done += m_can_handle_lost_msg(dev);
+
+	/* handle lec errors on the bus */
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+	    is_lec_err(psr))
+		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+
+	/* other unprocessed error interrupts */
+	m_can_handle_other_err(dev, irqstatus);
+
+	return work_done;
+}
+
+static int m_can_poll(struct napi_struct *napi, int quota)
+{
+	struct net_device *dev = napi->dev;
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+	u32 irqstatus, psr;
+
+	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
+	if (!irqstatus)
+		goto end;
+
+	psr = m_can_read(priv, M_CAN_PSR);
+	if (irqstatus & IR_ERR_STATE)
+		work_done += m_can_handle_state_errors(dev, psr);
+
+	if (irqstatus & IR_ERR_BUS)
+		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
+
+	if (irqstatus & IR_RF0N)
+		work_done += m_can_do_rx_poll(dev, (quota - work_done));
+
+	if (work_done < quota) {
+		napi_complete(napi);
+		m_can_enable_all_interrupts(priv);
+	}
+
+end:
+	return work_done;
+}
+
+static irqreturn_t m_can_isr(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	u32 ir;
+
+	ir = m_can_read(priv, M_CAN_IR);
+	if (!ir)
+		return IRQ_NONE;
+
+	/* ACK all irqs */
+	if (ir & IR_ALL_INT)
+		m_can_write(priv, M_CAN_IR, ir);
+
+	/* schedule NAPI in case of
+	 * - rx IRQ
+	 * - state change IRQ
+	 * - bus error IRQ and bus error reporting
+	 */
+	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) {
+		priv->irqstatus = ir;
+		m_can_disable_all_interrupts(priv);
+		napi_schedule(&priv->napi);
+	}
+
+	/* transmission complete interrupt */
+	if (ir & IR_TC) {
+		stats->tx_bytes += can_get_echo_skb(dev, 0);
+		stats->tx_packets++;
+		can_led_event(dev, CAN_LED_EVENT_TX);
+		netif_wake_queue(dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const struct can_bittiming_const m_can_bittiming_const = {
+	.name = KBUILD_MODNAME,
+	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
+	.tseg1_max = 64,
+	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
+	.tseg2_max = 16,
+	.sjw_max = 16,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
+static int m_can_set_bittiming(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	const struct can_bittiming *bt = &priv->can.bittiming;
+	u16 brp, sjw, tseg1, tseg2;
+	u32 reg_btp;
+
+	brp = bt->brp - 1;
+	sjw = bt->sjw - 1;
+	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+	tseg2 = bt->phase_seg2 - 1;
+	reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
+			(tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
+	m_can_write(priv, M_CAN_BTP, reg_btp);
+	netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp);
+
+	return 0;
+}
+
+/* Configure M_CAN chip:
+ * - set rx buffer/fifo element size
+ * - configure rx fifo
+ * - accept non-matching frame into fifo 0
+ * - configure tx buffer
+ * - configure mode
+ * - setup bittiming
+ */
+static void m_can_chip_config(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	u32 cccr, test;
+
+	m_can_config_endisable(priv, true);
+
+	/* RX Buffer/FIFO Element Size 8 bytes data field */
+	m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES);
+
+	/* Accept Non-matching Frames Into FIFO 0 */
+	m_can_write(priv, M_CAN_GFC, 0x0);
+
+	/* only one Tx Buffer is currently supported */
+	m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
+		    priv->mcfg[MRAM_TXB].off);
+
+	/* only 8 bytes of payload are supported for now */
+	m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES);
+
+	m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
+		    priv->mcfg[MRAM_TXE].off);
+
+	/* rx fifo configuration, blocking mode, fifo size 1 */
+	m_can_write(priv, M_CAN_RXF0C,
+		    (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) |
+		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off);
+
+	m_can_write(priv, M_CAN_RXF1C,
+		    (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) |
+		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);
+
+	cccr = m_can_read(priv, M_CAN_CCCR);
+	cccr &= ~(CCCR_TEST | CCCR_MON);
+	test = m_can_read(priv, M_CAN_TEST);
+	test &= ~TEST_LBCK;
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		cccr |= CCCR_MON;
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+		cccr |= CCCR_TEST;
+		test |= TEST_LBCK;
+	}
+
+	m_can_write(priv, M_CAN_CCCR, cccr);
+	m_can_write(priv, M_CAN_TEST, test);
+
+	/* enable interrupts */
+	m_can_write(priv, M_CAN_IR, IR_ALL_INT);
+	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+		m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC);
+	else
+		m_can_write(priv, M_CAN_IE, IR_ALL_INT);
+
+	/* route all interrupts to INT0 */
+	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);
+
+	/* set bittiming params */
+	m_can_set_bittiming(dev);
+
+	m_can_config_endisable(priv, false);
+}
+
+static void m_can_start(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+
+	/* basic m_can configuration */
+	m_can_chip_config(dev);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	m_can_enable_all_interrupts(priv);
+}
+
+static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	switch (mode) {
+	case CAN_MODE_START:
+		m_can_start(dev);
+		netif_wake_queue(dev);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static void free_m_can_dev(struct net_device *dev)
+{
+	free_candev(dev);
+}
+
+static struct net_device *alloc_m_can_dev(void)
+{
+	struct net_device *dev;
+	struct m_can_priv *priv;
+
+	dev = alloc_candev(sizeof(*priv), 1);
+	if (!dev)
+		return NULL;
+
+	priv = netdev_priv(dev);
+	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
+
+	priv->dev = dev;
+	priv->can.bittiming_const = &m_can_bittiming_const;
+	priv->can.do_set_mode = m_can_set_mode;
+	priv->can.do_get_berr_counter = m_can_get_berr_counter;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+					CAN_CTRLMODE_LISTENONLY |
+					CAN_CTRLMODE_BERR_REPORTING;
+
+	return dev;
+}
+
+static int m_can_open(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = clk_prepare_enable(priv->hclk);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->cclk);
+	if (err)
+		goto exit_disable_hclk;
+
+	/* open the can device */
+	err = open_candev(dev);
+	if (err) {
+		netdev_err(dev, "failed to open can device\n");
+		goto exit_disable_cclk;
+	}
+
+	/* register interrupt handler */
+	err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
+			  dev);
+	if (err < 0) {
+		netdev_err(dev, "failed to request interrupt\n");
+		goto exit_irq_fail;
+	}
+
+	/* start the m_can controller */
+	m_can_start(dev);
+
+	can_led_event(dev, CAN_LED_EVENT_OPEN);
+	napi_enable(&priv->napi);
+	netif_start_queue(dev);
+
+	return 0;
+
+exit_irq_fail:
+	close_candev(dev);
+exit_disable_cclk:
+	clk_disable_unprepare(priv->cclk);
+exit_disable_hclk:
+	clk_disable_unprepare(priv->hclk);
+	return err;
+}
+
+static void m_can_stop(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+
+	/* disable all interrupts */
+	m_can_disable_all_interrupts(priv);
+
+	clk_disable_unprepare(priv->hclk);
+	clk_disable_unprepare(priv->cclk);
+
+	/* set the state as STOPPED */
+	priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int m_can_close(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	m_can_stop(dev);
+	free_irq(dev->irq, dev);
+	close_candev(dev);
+	can_led_event(dev, CAN_LED_EVENT_STOP);
+
+	return 0;
+}
+
+static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
+				    struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	u32 id;
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	netif_stop_queue(dev);
+
+	if (cf->can_id & CAN_EFF_FLAG) {
+		id = cf->can_id & CAN_EFF_MASK;
+		id |= TX_BUF_XTD;
+	} else {
+		id = ((cf->can_id & CAN_SFF_MASK) << 18);
+	}
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		id |= TX_BUF_RTR;
+
+	/* write the frame into the Tx Buffer element in message ram */
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16);
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0));
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4));
+	can_put_echo_skb(skb, dev, 0);
+
+	/* enable first TX buffer to start transfer  */
+	m_can_write(priv, M_CAN_TXBTIE, 0x1);
+	m_can_write(priv, M_CAN_TXBAR, 0x1);
+
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops m_can_netdev_ops = {
+	.ndo_open = m_can_open,
+	.ndo_stop = m_can_close,
+	.ndo_start_xmit = m_can_start_xmit,
+};
+
+static int register_m_can_dev(struct net_device *dev)
+{
+	dev->flags |= IFF_ECHO;	/* we support local echo */
+	dev->netdev_ops = &m_can_netdev_ops;
+
+	return register_candev(dev);
+}
+
+static int m_can_of_parse_mram(struct platform_device *pdev,
+			       struct m_can_priv *priv)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	void __iomem *addr;
+	u32 out_val[MRAM_CFG_LEN];
+	int ret;
+
+	/* message ram could be shared */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
+	if (!res)
+		return -ENODEV;
+
+	addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!addr)
+		return -ENOMEM;
+
+	/* get message ram configuration */
+	ret = of_property_read_u32_array(np, "bosch,mram-cfg",
+					 out_val, sizeof(out_val) / 4);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot get message ram configuration\n");
+		return -ENODEV;
+	}
+
+	priv->mram_base = addr;
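+	/* each element region starts right after the previous one: only the
+	 * first offset comes from the DT cells, the rest are derived from the
+	 * preceding element counts and element sizes
+	 */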
+	priv->mcfg[MRAM_SIDF].off = out_val[0];
+	priv->mcfg[MRAM_SIDF].num = out_val[1];
+	priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
+			priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
+	priv->mcfg[MRAM_XIDF].num = out_val[2];
+	priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
+			priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
+	priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK;
+	priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
+			priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
+	priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK;
+	priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
+			priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
+	priv->mcfg[MRAM_RXB].num = out_val[5];
+	priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
+			priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
+	priv->mcfg[MRAM_TXE].num = out_val[6];
+	priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
+			priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
+	priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK;
+
+	dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+		priv->mram_base,
+		priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
+		priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
+		priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
+		priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
+		priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
+		priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
+		priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
+
+	return 0;
+}
+
+static int m_can_plat_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct m_can_priv *priv;
+	struct resource *res;
+	void __iomem *addr;
+	struct clk *hclk, *cclk;
+	int irq, ret;
+
+	hclk = devm_clk_get(&pdev->dev, "hclk");
+	cclk = devm_clk_get(&pdev->dev, "cclk");
+	if (IS_ERR(hclk) || IS_ERR(cclk)) {
+		dev_err(&pdev->dev, "no clock found\n");
+		return -ENODEV;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
+	addr = devm_ioremap_resource(&pdev->dev, res);
+	irq = platform_get_irq_byname(pdev, "int0");
+	if (IS_ERR(addr) || irq < 0)
+		return -EINVAL;
+
+	/* allocate the m_can device */
+	dev = alloc_m_can_dev();
+	if (!dev)
+		return -ENOMEM;
+
+	priv = netdev_priv(dev);
+	dev->irq = irq;
+	priv->base = addr;
+	priv->device = &pdev->dev;
+	priv->hclk = hclk;
+	priv->cclk = cclk;
+	priv->can.clock.freq = clk_get_rate(cclk);
+
+	ret = m_can_of_parse_mram(pdev, priv);
+	if (ret)
+		goto failed_free_dev;
+
+	platform_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	ret = register_m_can_dev(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+			KBUILD_MODNAME, ret);
+		goto failed_free_dev;
+	}
+
+	devm_can_led_init(dev);
+
+	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+		 KBUILD_MODNAME, priv->base, dev->irq);
+
+	return 0;
+
+failed_free_dev:
+	free_m_can_dev(dev);
+	return ret;
+}
+
+static __maybe_unused int m_can_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct m_can_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netif_stop_queue(ndev);
+		netif_device_detach(ndev);
+	}
+
+	/* TODO: enter low power */
+
+	priv->can.state = CAN_STATE_SLEEPING;
+
+	return 0;
+}
+
+static __maybe_unused int m_can_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct m_can_priv *priv = netdev_priv(ndev);
+
+	/* TODO: exit low power */
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	if (netif_running(ndev)) {
+		netif_device_attach(ndev);
+		netif_start_queue(ndev);
+	}
+
+	return 0;
+}
+
+static void unregister_m_can_dev(struct net_device *dev)
+{
+	unregister_candev(dev);
+}
+
+static int m_can_plat_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	unregister_m_can_dev(dev);
+	platform_set_drvdata(pdev, NULL);
+
+	free_m_can_dev(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops m_can_pmops = {
+	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
+};
+
+static const struct of_device_id m_can_of_table[] = {
+	{ .compatible = "bosch,m_can", .data = NULL },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, m_can_of_table);
+
+static struct platform_driver m_can_plat_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = m_can_of_table,
+		.pm     = &m_can_pmops,
+	},
+	.probe = m_can_plat_probe,
+	.remove = m_can_plat_remove,
+};
+
+module_platform_driver(m_can_plat_driver);
+
+MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
index c9fab17..58903b4 100644
--- a/drivers/net/can/mscan/Makefile
+++ b/drivers/net/can/mscan/Makefile
@@ -1,5 +1,3 @@
 
 obj-$(CONFIG_CAN_MPC5XXX)	+= mscan-mpc5xxx.o
 mscan-mpc5xxx-objs		:= mscan.o mpc5xxx_can.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 5268d21..1abe133 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -20,6 +20,7 @@
 #include <linux/can/dev.h>
 #include <linux/clk.h>
 #include <linux/can/platform/rcar_can.h>
+#include <linux/of.h>
 
 #define RCAR_CAN_DRV_NAME	"rcar_can"
 
@@ -87,6 +88,7 @@
 	struct napi_struct napi;
 	struct rcar_can_regs __iomem *regs;
 	struct clk *clk;
+	struct clk *can_clk;
 	u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
 	u32 tx_head;
 	u32 tx_tail;
@@ -505,14 +507,20 @@
 
 	err = clk_prepare_enable(priv->clk);
 	if (err) {
-		netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
+		netdev_err(ndev, "failed to enable peripheral clock, error %d\n",
 			   err);
 		goto out;
 	}
+	err = clk_prepare_enable(priv->can_clk);
+	if (err) {
+		netdev_err(ndev, "failed to enable CAN clock, error %d\n",
+			   err);
+		goto out_clock;
+	}
 	err = open_candev(ndev);
 	if (err) {
 		netdev_err(ndev, "open_candev() failed, error %d\n", err);
-		goto out_clock;
+		goto out_can_clock;
 	}
 	napi_enable(&priv->napi);
 	err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
@@ -527,6 +535,8 @@
 out_close:
 	napi_disable(&priv->napi);
 	close_candev(ndev);
+out_can_clock:
+	clk_disable_unprepare(priv->can_clk);
 out_clock:
 	clk_disable_unprepare(priv->clk);
 out:
@@ -565,6 +575,7 @@
 	rcar_can_stop(ndev);
 	free_irq(ndev->irq, ndev);
 	napi_disable(&priv->napi);
+	clk_disable_unprepare(priv->can_clk);
 	clk_disable_unprepare(priv->clk);
 	close_candev(ndev);
 	can_led_event(ndev, CAN_LED_EVENT_STOP);
@@ -715,6 +726,12 @@
 	return 0;
 }
 
+static const char * const clock_names[] = {
+	[CLKR_CLKP1]	= "clkp1",
+	[CLKR_CLKP2]	= "clkp2",
+	[CLKR_CLKEXT]	= "can_clk",
+};
+
 static int rcar_can_probe(struct platform_device *pdev)
 {
 	struct rcar_can_platform_data *pdata;
@@ -722,13 +739,20 @@
 	struct net_device *ndev;
 	struct resource *mem;
 	void __iomem *addr;
+	u32 clock_select = CLKR_CLKP1;
 	int err = -ENODEV;
 	int irq;
 
-	pdata = dev_get_platdata(&pdev->dev);
-	if (!pdata) {
-		dev_err(&pdev->dev, "No platform data provided!\n");
-		goto fail;
+	if (pdev->dev.of_node) {
+		of_property_read_u32(pdev->dev.of_node,
+				     "renesas,can-clock-select", &clock_select);
+	} else {
+		pdata = dev_get_platdata(&pdev->dev);
+		if (!pdata) {
+			dev_err(&pdev->dev, "No platform data provided!\n");
+			goto fail;
+		}
+		clock_select = pdata->clock_select;
 	}
 
 	irq = platform_get_irq(pdev, 0);
@@ -753,10 +777,22 @@
 
 	priv = netdev_priv(ndev);
 
-	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	priv->clk = devm_clk_get(&pdev->dev, "clkp1");
 	if (IS_ERR(priv->clk)) {
 		err = PTR_ERR(priv->clk);
-		dev_err(&pdev->dev, "cannot get clock: %d\n", err);
+		dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err);
+		goto fail_clk;
+	}
+
+	if (clock_select >= ARRAY_SIZE(clock_names)) {
+		err = -EINVAL;
+		dev_err(&pdev->dev, "invalid CAN clock selected\n");
+		goto fail_clk;
+	}
+	priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
+	if (IS_ERR(priv->can_clk)) {
+		err = PTR_ERR(priv->can_clk);
+		dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err);
 		goto fail_clk;
 	}
 
@@ -765,8 +801,8 @@
 	ndev->flags |= IFF_ECHO;
 	priv->ndev = ndev;
 	priv->regs = addr;
-	priv->clock_select = pdata->clock_select;
-	priv->can.clock.freq = clk_get_rate(priv->clk);
+	priv->clock_select = clock_select;
+	priv->can.clock.freq = clk_get_rate(priv->can_clk);
 	priv->can.bittiming_const = &rcar_can_bittiming_const;
 	priv->can.do_set_mode = rcar_can_do_set_mode;
 	priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
@@ -858,10 +894,20 @@
 
 static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
 
+static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
+	{ .compatible = "renesas,can-r8a7778" },
+	{ .compatible = "renesas,can-r8a7779" },
+	{ .compatible = "renesas,can-r8a7790" },
+	{ .compatible = "renesas,can-r8a7791" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, rcar_can_of_table);
+
 static struct platform_driver rcar_can_driver = {
 	.driver = {
 		.name = RCAR_CAN_DRV_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(rcar_can_of_table),
 		.pm = &rcar_can_pm_ops,
 	},
 	.probe = rcar_can_probe,
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 531d5fc..be11ddd 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -12,5 +12,3 @@
 obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o
 obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
 obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
index c5e5016..a23da49 100644
--- a/drivers/net/can/softing/Makefile
+++ b/drivers/net/can/softing/Makefile
@@ -2,5 +2,3 @@
 softing-y := softing_main.o softing_fw.o
 obj-$(CONFIG_CAN_SOFTING) += softing.o
 obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index 90bcacf..0e86040 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -4,5 +4,3 @@
 
 
 obj-$(CONFIG_CAN_MCP251X)	+= mcp251x.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 5df239e..c66d699 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1107,10 +1107,10 @@
 		 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
 		 * that much and share it between Tx and Rx DMA buffers.
 		 */
-		priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
-						      PAGE_SIZE,
-						      &priv->spi_tx_dma,
-						      GFP_DMA);
+		priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
+						       PAGE_SIZE,
+						       &priv->spi_tx_dma,
+						       GFP_DMA);
 
 		if (priv->spi_tx_buf) {
 			priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
@@ -1156,9 +1156,6 @@
 	return 0;
 
 error_probe:
-	if (mcp251x_enable_dma)
-		dma_free_coherent(&spi->dev, PAGE_SIZE,
-				  priv->spi_tx_buf, priv->spi_tx_dma);
 	mcp251x_power_enable(priv->power, 0);
 
 out_clk:
@@ -1178,11 +1175,6 @@
 
 	unregister_candev(net);
 
-	if (mcp251x_enable_dma) {
-		dma_free_coherent(&spi->dev, PAGE_SIZE,
-				  priv->spi_tx_buf, priv->spi_tx_dma);
-	}
-
 	mcp251x_power_enable(priv->power, 0);
 
 	if (!IS_ERR(priv->clk))
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 7b9a393..a64cf98 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -8,5 +8,3 @@
 obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
 obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
 obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d777fae..86e9451 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -20,13 +20,17 @@
 #include <linux/types.h>
 #include <linux/pci_regs.h>
 
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>
+
 /* compilation time flags */
 
 /* define this to make the driver freeze on error to allow getting debug info
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.78.19-0"
+#define DRV_MODULE_VERSION      "1.710.51-0"
 #define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
 
@@ -70,6 +74,7 @@
 #define BNX2X_MSG_SP			0x0100000 /* was: NETIF_MSG_INTR */
 #define BNX2X_MSG_FP			0x0200000 /* was: NETIF_MSG_INTR */
 #define BNX2X_MSG_IOV			0x0800000
+#define BNX2X_MSG_PTP			0x1000000
 #define BNX2X_MSG_IDLE			0x2000000 /* used for idle check*/
 #define BNX2X_MSG_ETHTOOL		0x4000000
 #define BNX2X_MSG_DCB			0x8000000
@@ -1587,10 +1592,11 @@
 #define USING_SINGLE_MSIX_FLAG		(1 << 20)
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
 #define IS_VF_FLAG			(1 << 22)
-#define INTERRUPTS_ENABLED_FLAG		(1 << 23)
-#define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
-#define HAS_PHYS_PORT_ID		(1 << 25)
-#define AER_ENABLED			(1 << 26)
+#define BC_SUPPORTS_RMMOD_CMD		(1 << 23)
+#define HAS_PHYS_PORT_ID		(1 << 24)
+#define AER_ENABLED			(1 << 25)
+#define PTP_SUPPORTED			(1 << 26)
+#define TX_TIMESTAMPING_EN		(1 << 27)
 
 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
 
@@ -1684,13 +1690,9 @@
 #define BNX2X_STATE_ERROR		0xf000
 
 #define BNX2X_MAX_PRIORITY		8
-#define BNX2X_MAX_ENTRIES_PER_PRI	16
-#define BNX2X_MAX_COS			3
-#define BNX2X_MAX_TX_COS		2
 	int			num_queues;
 	uint			num_ethernet_queues;
 	uint			num_cnic_queues;
-	int			num_napi_queues;
 	int			disable_tpa;
 
 	u32			rx_mode;
@@ -1933,6 +1935,19 @@
 
 	u8					phys_port_id[ETH_ALEN];
 
+	/* PTP related context */
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_clock_info;
+	struct work_struct ptp_task;
+	struct cyclecounter cyclecounter;
+	struct timecounter timecounter;
+	bool timecounter_init_done;
+	struct sk_buff *ptp_tx_skb;
+	unsigned long ptp_tx_start;
+	bool hwtstamp_ioctl_called;
+	u16 tx_type;
+	u16 rx_filter;
+
 	struct bnx2x_link_report_data		vf_link_vars;
 };
 
@@ -2559,4 +2574,11 @@
 
 #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
 
+void bnx2x_init_ptp(struct bnx2x *bp);
+int bnx2x_configure_ptp_filters(struct bnx2x *bp);
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+
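+/* Largest PHC frequency adjustment advertised to the PTP core (max_adj),
+ * in parts per billion.
+ */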
+#define BNX2X_MAX_PHC_DRIFT 31000000
+#define BNX2X_PTP_TX_TIMEOUT
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4ccc806..2388144 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1063,6 +1063,11 @@
 
 		skb_record_rx_queue(skb, fp->rx_queue);
 
+		/* Check if this packet was timestamped */
+		if (unlikely(le16_to_cpu(cqe->fast_path_cqe.type_error_flags) &
+			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
+			bnx2x_set_rx_ts(bp, skb);
+
 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 		    PARSING_FLAGS_VLAN)
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -2078,6 +2083,10 @@
 			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
 		if (rss_obj->udp_rss_v6)
 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+
+		if (!CHIP_IS_E1x(bp))
+			/* valid only for TUNN_MODE_GRE tunnel mode */
+			__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
 	} else {
 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
 	}
@@ -2800,7 +2809,11 @@
 	/* Initialize Rx filter. */
 	bnx2x_set_rx_mode_inner(bp);
 
-	/* Start the Tx */
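+	/* Bring up the PTP timecounter and HW packet filters before the
+	 * Tx queues are (re-)enabled below.
+	 */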
+	if (bp->flags & PTP_SUPPORTED) {
+		bnx2x_init_ptp(bp);
+		bnx2x_configure_ptp_filters(bp);
+	}
+	/* Start Tx */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only re-enabled */
@@ -3437,26 +3450,6 @@
 }
 #endif
 
-static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
-				 u32 xmit_type)
-{
-	struct ipv6hdr *ipv6;
-
-	*parsing_data |= (skb_shinfo(skb)->gso_size <<
-			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
-			      ETH_TX_PARSE_BD_E2_LSO_MSS;
-
-	if (xmit_type & XMIT_GSO_ENC_V6)
-		ipv6 = inner_ipv6_hdr(skb);
-	else if (xmit_type & XMIT_GSO_V6)
-		ipv6 = ipv6_hdr(skb);
-	else
-		ipv6 = NULL;
-
-	if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
-		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
-}
-
 /**
  * bnx2x_set_pbd_gso - update PBD in GSO case.
  *
@@ -3466,7 +3459,6 @@
  */
 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
 			      struct eth_tx_parse_bd_e1x *pbd,
-			      struct eth_tx_start_bd *tx_start_bd,
 			      u32 xmit_type)
 {
 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
@@ -3479,9 +3471,6 @@
 			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 						   ip_hdr(skb)->daddr,
 						   0, IPPROTO_TCP, 0));
-
-		/* GSO on 57710/57711 needs FW to calculate IP checksum */
-		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
 	} else {
 		pbd->tcp_pseudo_csum =
 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -3653,18 +3642,23 @@
 			   (__force u32)iph->tot_len -
 			   (__force u32)iph->frag_off;
 
+		outerip_len = iph->ihl << 1;
+
 		pbd2->fw_ip_csum_wo_len_flags_frag =
 			bswab16(csum_fold((__force __wsum)csum));
 	} else {
 		pbd2->fw_ip_hdr_to_payload_w =
 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+		pbd_e2->data.tunnel_data.flags |=
+			1 /*IPv6*/ << ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
 	}
 
 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
 
 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
 
-	if (xmit_type & XMIT_GSO_V4) {
+	/* inner IP header info */
+	if (xmit_type & XMIT_CSUM_ENC_V4) {
 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
 
 		pbd_e2->data.tunnel_data.pseudo_csum =
@@ -3672,8 +3666,6 @@
 					inner_ip_hdr(skb)->saddr,
 					inner_ip_hdr(skb)->daddr,
 					0, IPPROTO_TCP, 0));
-
-		outerip_len = ip_hdr(skb)->ihl << 1;
 	} else {
 		pbd_e2->data.tunnel_data.pseudo_csum =
 			bswab16(~csum_ipv6_magic(
@@ -3686,8 +3678,6 @@
 
 	*global_data |=
 		outerip_off |
-		(!!(xmit_type & XMIT_CSUM_V6) <<
-			ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
 		(outerip_len <<
 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
@@ -3699,6 +3689,23 @@
 	}
 }
 
+static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
+					 u32 xmit_type)
+{
+	struct ipv6hdr *ipv6;
+
+	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
+		return;
+
+	if (xmit_type & XMIT_GSO_ENC_V6)
+		ipv6 = inner_ipv6_hdr(skb);
+	else /* XMIT_GSO_V6 */
+		ipv6 = ipv6_hdr(skb);
+
+	if (ipv6->nexthdr == NEXTHDR_IPV6)
+		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
 /* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue()
@@ -3831,6 +3838,20 @@
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
+		} else if (bp->ptp_tx_skb) {
+			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+		} else {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			/* schedule check for Tx timestamp */
+			bp->ptp_tx_skb = skb_get(skb);
+			bp->ptp_tx_start = jiffies;
+			schedule_work(&bp->ptp_task);
+		}
+	}
+
 	/* header nbd: indirectly zero other flags! */
 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
 
@@ -3915,6 +3936,7 @@
 						     xmit_type);
 		}
 
+		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
 		/* Add the macs to the parsing BD if this is a vf or if
 		 * Tx Switching is enabled.
 		 */
@@ -3980,10 +4002,12 @@
 						 bd_prod);
 		}
 		if (!CHIP_IS_E1x(bp))
-			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
-					     xmit_type);
+			pbd_e2_parsing_data |=
+				(skb_shinfo(skb)->gso_size <<
+				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+				 ETH_TX_PARSE_BD_E2_LSO_MSS;
 		else
-			bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
+			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
 	}
 
 	/* Set the PBD's parsing_data field if not zero
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 571427c..ac63e16 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -932,8 +932,9 @@
 	else /* CHIP_IS_E1X */
 		start_params->network_cos_mode = FW_WRR;
 
-	start_params->gre_tunnel_mode = L2GRE_TUNNEL;
-	start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+	start_params->tunnel_mode	= TUNN_MODE_GRE;
+	start_params->gre_tunnel_type	= IPGRE_TUNNEL;
+	start_params->inner_gre_rss_en	= 1;
 
 	return bnx2x_func_state_change(bp, &func_params);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fb26bc4..6e4294e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -2092,7 +2092,6 @@
 static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
 {
 	struct bnx2x *bp = netdev_priv(netdev);
-	int rc = 0;
 
 	DP(BNX2X_MSG_DCB, "SET-ALL\n");
 
@@ -2110,9 +2109,7 @@
 				       1);
 		bnx2x_dcbx_init(bp, true);
 	}
-	DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
-	if (rc)
-		return 1;
+	DP(BNX2X_MSG_DCB, "set_dcbx_params done\n");
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 92fee84..0b173ed2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3481,6 +3481,46 @@
 	return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
 
+static int bnx2x_get_ts_info(struct net_device *dev,
+			     struct ethtool_ts_info *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
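+	/* When the chip provides a PHC, report HW timestamping, the PHC
+	 * index and the accepted PTP Rx filters; otherwise fall back to
+	 * the generic software-only capabilities.
+	 */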
+	if (bp->flags & PTP_SUPPORTED) {
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
+					SOF_TIMESTAMPING_SOFTWARE |
+					SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_RAW_HARDWARE;
+
+		if (bp->ptp_clock)
+			info->phc_index = ptp_clock_index(bp->ptp_clock);
+		else
+			info->phc_index = -1;
+
+		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+		info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
+
+		return 0;
+	}
+
+	return ethtool_op_get_ts_info(dev, info);
+}
+
 static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_settings		= bnx2x_get_settings,
 	.set_settings		= bnx2x_set_settings,
@@ -3522,7 +3562,7 @@
 	.get_module_eeprom	= bnx2x_get_module_eeprom,
 	.get_eee		= bnx2x_get_eee,
 	.set_eee		= bnx2x_set_eee,
-	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_ts_info		= bnx2x_get_ts_info,
 };
 
 static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 95dc365..7636e3c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -10,170 +10,170 @@
 #ifndef BNX2X_FW_DEFS_H
 #define BNX2X_FW_DEFS_H
 
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[148].base)
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[152].base)
 #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[147].base + ((assertListEntry) * IRO[147].m1))
+	(IRO[151].base + ((assertListEntry) * IRO[151].m1))
 #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
-	(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
-	IRO[153].m2))
+	(IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
+	IRO[157].m2))
 #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
-	(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
-	IRO[154].m2))
+	(IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
+	IRO[158].m2))
 #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
-	(IRO[159].base + ((funcId) * IRO[159].m1))
+	(IRO[163].base + ((funcId) * IRO[163].m1))
 #define CSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[149].base + ((funcId) * IRO[149].m1))
+	(IRO[153].base + ((funcId) * IRO[153].m1))
 #define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
-	(IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
+	(IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
 #define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
-	(IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
-	* IRO[138].m2) + ((sbId) * IRO[138].m3))
-#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+	(IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
+	* IRO[142].m2) + ((sbId) * IRO[142].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[317].base + ((pfId) * IRO[317].m1))
+	(IRO[323].base + ((pfId) * IRO[323].m1))
 #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[318].base + ((pfId) * IRO[318].m1))
+	(IRO[324].base + ((pfId) * IRO[324].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+	(IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+	(IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+	(IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+	(IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
-#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
 	(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+	(IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[316].base + ((pfId) * IRO[316].m1))
+	(IRO[322].base + ((pfId) * IRO[322].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1))
+	(IRO[314].base + ((pfId) * IRO[314].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1))
+	(IRO[313].base + ((pfId) * IRO[313].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1))
+	(IRO[312].base + ((pfId) * IRO[312].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
-	(IRO[151].base + ((funcId) * IRO[151].m1))
+	(IRO[155].base + ((funcId) * IRO[155].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
-	(IRO[142].base + ((pfId) * IRO[142].m1))
+	(IRO[146].base + ((pfId) * IRO[146].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
-	(IRO[143].base + ((pfId) * IRO[143].m1))
+	(IRO[147].base + ((pfId) * IRO[147].m1))
 #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
-	(IRO[141].base + ((pfId) * IRO[141].m1))
-#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+	(IRO[145].base + ((pfId) * IRO[145].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
 #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
-	(IRO[144].base + ((pfId) * IRO[144].m1))
-#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+	(IRO[148].base + ((pfId) * IRO[148].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
 #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
-	(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+	(IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
 #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
-	(IRO[133].base + ((sbId) * IRO[133].m1))
-#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
-	(IRO[134].base + ((sbId) * IRO[134].m1))
-#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
-	(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
-#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
-	(IRO[132].base + ((sbId) * IRO[132].m1))
-#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
-#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
 	(IRO[137].base + ((sbId) * IRO[137].m1))
-#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+	(IRO[138].base + ((sbId) * IRO[138].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+	(IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+	(IRO[136].base + ((sbId) * IRO[136].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+	(IRO[141].base + ((sbId) * IRO[141].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
 #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
-	(IRO[155].base + ((vfId) * IRO[155].m1))
+	(IRO[159].base + ((vfId) * IRO[159].m1))
 #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
-	(IRO[156].base + ((vfId) * IRO[156].m1))
+	(IRO[160].base + ((vfId) * IRO[160].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[150].base + ((funcId) * IRO[150].m1))
+	(IRO[154].base + ((funcId) * IRO[154].m1))
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
-	(IRO[203].base + ((pfId) * IRO[203].m1))
+	(IRO[207].base + ((pfId) * IRO[207].m1))
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[102].base)
 #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
 	(IRO[101].base + ((assertListEntry) * IRO[101].m1))
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
-	(IRO[201].base + ((pfId) * IRO[201].m1))
+	(IRO[205].base + ((pfId) * IRO[205].m1))
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[103].base + ((funcId) * IRO[103].m1))
+	(IRO[107].base + ((funcId) * IRO[107].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
-	(IRO[273].base + ((pfId) * IRO[273].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
-	(IRO[274].base + ((pfId) * IRO[274].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
-	(IRO[275].base + ((pfId) * IRO[275].m1))
-#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[270].base + ((pfId) * IRO[270].m1))
-#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[269].base + ((pfId) * IRO[269].m1))
-#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[268].base + ((pfId) * IRO[268].m1))
-#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
 	(IRO[278].base + ((pfId) * IRO[278].m1))
-#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[264].base + ((pfId) * IRO[264].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[265].base + ((pfId) * IRO[265].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[266].base + ((pfId) * IRO[266].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[267].base + ((pfId) * IRO[267].m1))
-#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
-	(IRO[202].base + ((pfId) * IRO[202].m1))
-#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
-	(IRO[105].base + ((funcId) * IRO[105].m1))
-#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-	(IRO[217].base + ((pfId) * IRO[217].m1))
-#define TSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[104].base + ((funcId) * IRO[104].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
-#define USTORM_AGG_DATA_SIZE (IRO[206].size)
-#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[177].base)
-#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
-	(IRO[183].base + ((portId) * IRO[183].m1))
-#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[319].base + ((pfId) * IRO[319].m1))
-#define USTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[178].base + ((funcId) * IRO[178].m1))
-#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[283].base + ((pfId) * IRO[283].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[284].base + ((pfId) * IRO[284].m1))
-#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
-#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-	(IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[281].base + ((pfId) * IRO[281].m1))
-#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[280].base + ((pfId) * IRO[280].m1))
-#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
 	(IRO[279].base + ((pfId) * IRO[279].m1))
-#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
-	(IRO[286].base + ((pfId) * IRO[286].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+	(IRO[280].base + ((pfId) * IRO[280].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[281].base + ((pfId) * IRO[281].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[277].base + ((pfId) * IRO[277].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[276].base + ((pfId) * IRO[276].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[275].base + ((pfId) * IRO[275].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+	(IRO[284].base + ((pfId) * IRO[284].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+	(IRO[270].base + ((pfId) * IRO[270].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[271].base + ((pfId) * IRO[271].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[272].base + ((pfId) * IRO[272].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+	(IRO[206].base + ((pfId) * IRO[206].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[109].base + ((funcId) * IRO[109].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+	(IRO[223].base + ((pfId) * IRO[223].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[108].base + ((funcId) * IRO[108].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
+#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[181].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[180].base + ((assertListEntry) * IRO[180].m1))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+	(IRO[187].base + ((portId) * IRO[187].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+	(IRO[325].base + ((pfId) * IRO[325].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[182].base + ((funcId) * IRO[182].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+	(IRO[289].base + ((pfId) * IRO[289].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[290].base + ((pfId) * IRO[290].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+	(IRO[294].base + ((pfId) * IRO[294].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+	(IRO[291].base + ((pfId) * IRO[291].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
 	(IRO[287].base + ((pfId) * IRO[287].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[288].base + ((pfId) * IRO[288].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+	(IRO[292].base + ((pfId) * IRO[292].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[293].base + ((pfId) * IRO[293].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
-	(IRO[182].base + ((pfId) * IRO[182].m1))
+	(IRO[186].base + ((pfId) * IRO[186].m1))
 #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
-	(IRO[180].base + ((funcId) * IRO[180].m1))
+	(IRO[184].base + ((funcId) * IRO[184].m1))
 #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
-	(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
-	IRO[209].m2))
+	(IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
+	IRO[215].m2))
 #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
-	(IRO[210].base + ((qzoneId) * IRO[210].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
-#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+	(IRO[216].base + ((qzoneId) * IRO[216].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
+#define USTORM_TPA_BTR_SIZE (IRO[213].size)
 #define USTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[179].base + ((funcId) * IRO[179].m1))
+	(IRO[183].base + ((funcId) * IRO[183].m1))
 #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
 #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
 #define XSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[51].base)
@@ -186,39 +186,39 @@
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
-	(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
-	(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
 	(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
-	(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
-	(IRO[304].base + ((pfId) * IRO[304].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
 	(IRO[305].base + ((pfId) * IRO[305].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+	(IRO[306].base + ((pfId) * IRO[306].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+	(IRO[307].base + ((pfId) * IRO[307].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+	(IRO[308].base + ((pfId) * IRO[308].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+	(IRO[309].base + ((pfId) * IRO[309].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+	(IRO[310].base + ((pfId) * IRO[310].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[311].base + ((pfId) * IRO[311].m1))
 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[295].base + ((pfId) * IRO[295].m1))
+	(IRO[301].base + ((pfId) * IRO[301].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
+	(IRO[300].base + ((pfId) * IRO[300].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
+	(IRO[299].base + ((pfId) * IRO[299].m1))
 #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[298].base + ((pfId) * IRO[298].m1))
+	(IRO[304].base + ((pfId) * IRO[304].m1))
 #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
+	(IRO[303].base + ((pfId) * IRO[303].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
+	(IRO[298].base + ((pfId) * IRO[298].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
+	(IRO[297].base + ((pfId) * IRO[297].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
+	(IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
+	(IRO[295].base + ((pfId) * IRO[295].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
 	(IRO[44].base + ((pfId) * IRO[44].m1))
 #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -231,16 +231,19 @@
 #define XSTORM_SPQ_PROD_OFFSET(funcId) \
 	(IRO[31].base + ((funcId) * IRO[31].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
-	(IRO[211].base + ((portId) * IRO[211].m1))
+	(IRO[217].base + ((portId) * IRO[217].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
-	(IRO[212].base + ((portId) * IRO[212].m1))
+	(IRO[218].base + ((portId) * IRO[218].m1))
 #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
-	(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
-	IRO[214].m2))
+	(IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
+	IRO[220].m2))
 #define XSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[48].base + ((funcId) * IRO[48].m1))
 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
 
+/* eth hsi version */
+#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
+
 /* Ethernet Ring parameters */
 #define X_ETH_LOCAL_RING_SIZE 13
 #define FIRST_BD_IN_PKT	0
@@ -356,6 +359,7 @@
 #define XSEMI_CLK1_RESUL_CHIP (1e-3)
 
 #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
 
 /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5ba8af5..7ea0453 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2876,8 +2876,8 @@
 };
 
 #define BCM_5710_FW_MAJOR_VERSION			7
-#define BCM_5710_FW_MINOR_VERSION			8
-#define BCM_5710_FW_REVISION_VERSION		19
+#define BCM_5710_FW_MINOR_VERSION			10
+#define BCM_5710_FW_REVISION_VERSION		51
 #define BCM_5710_FW_ENGINEERING_VERSION		0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
@@ -3446,6 +3446,7 @@
 	CLASSIFY_RULE_OPCODE_MAC,
 	CLASSIFY_RULE_OPCODE_VLAN,
 	CLASSIFY_RULE_OPCODE_PAIR,
+	CLASSIFY_RULE_OPCODE_VXLAN,
 	MAX_CLASSIFY_RULE
 };
 
@@ -3475,7 +3476,8 @@
 	u8 func_id;
 	u8 cos;
 	u8 traffic_type;
-	u32 reserved0;
+	u8 fp_hsi_ver;
+	u8 reserved0[3];
 };
 
 
@@ -3545,7 +3547,9 @@
 	__le16 rx_cos_mask;
 	__le16 silent_vlan_value;
 	__le16 silent_vlan_mask;
-	__le32 reserved6[2];
+	u8 handle_ptp_pkts_flg;
+	u8 reserved6[3];
+	__le32 reserved7;
 };
 
 /*
@@ -3576,7 +3580,7 @@
 	u8 tunnel_lso_inc_ip_id;
 	u8 refuse_outband_vlan_flg;
 	u8 tunnel_non_lso_pcsum_location;
-	u8 reserved1;
+	u8 tunnel_non_lso_outer_ip_csum_location;
 };
 
 /*
@@ -3614,7 +3618,9 @@
 	u8 refuse_outband_vlan_change_flg;
 	u8 tx_switching_flg;
 	u8 tx_switching_change_flg;
-	__le32 reserved1;
+	u8 handle_ptp_pkts_flg;
+	u8 handle_ptp_pkts_change_flg;
+	__le16 reserved1;
 	__le32 echo;
 };
 
@@ -3634,6 +3640,11 @@
 	u32 regpair1_hi;
 };
 
+/* 2nd parse bd type used in ethernet tx BDs */
+enum eth_2nd_parse_bd_type {
+	ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL,
+	MAX_ETH_2ND_PARSE_BD_TYPE
+};
 
 /*
 * Ethernet address types used in ethernet tx BDs
@@ -3719,12 +3730,25 @@
 };
 
 /*
+ * Command for adding/removing a VXLAN classification rule
+ */
+struct eth_classify_vxlan_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 vni;
+	__le16 inner_mac_lsb;
+	__le16 inner_mac_mid;
+	__le16 inner_mac_msb;
+	__le16 reserved1;
+};
+
+/*
  * union for eth classification rule
  */
 union eth_classify_rule_cmd {
 	struct eth_classify_mac_cmd mac;
 	struct eth_classify_vlan_cmd vlan;
 	struct eth_classify_pair_cmd pair;
+	struct eth_classify_vxlan_cmd vxlan;
 };
 
 /*
@@ -3830,8 +3854,10 @@
 #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
 #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
 #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
-#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
-#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7
 	u8 status_flags;
 #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
 #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -3902,6 +3928,13 @@
 	struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
 };
 
+/* Hsi version */
+enum eth_fp_hsi_ver {
+	ETH_FP_HSI_VER_0,
+	ETH_FP_HSI_VER_1,
+	ETH_FP_HSI_VER_2,
+	MAX_ETH_FP_HSI_VER
+};
 
 /*
  * parameters for eth classification configuration ramrod
@@ -3958,20 +3991,28 @@
 	__le16 dst_mid;
 #endif
 #if defined(__BIG_ENDIAN)
-	__le16 reserved0;
+	__le16 fw_ip_hdr_csum;
 	__le16 dst_hi;
 #elif defined(__LITTLE_ENDIAN)
 	__le16 dst_hi;
-	__le16 reserved0;
+	__le16 fw_ip_hdr_csum;
 #endif
 #if defined(__BIG_ENDIAN)
-	u8 reserved1;
+	u8 flags;
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
 	u8 ip_hdr_start_inner_w;
 	__le16 pseudo_csum;
 #elif defined(__LITTLE_ENDIAN)
 	__le16 pseudo_csum;
 	u8 ip_hdr_start_inner_w;
-	u8 reserved1;
+	u8 flags;
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
 #endif
 };
 
@@ -4059,31 +4100,41 @@
  */
 struct eth_rss_update_ramrod_data {
 	u8 rss_engine_id;
-	u8 capabilities;
+	u8 rss_mode;
+	__le16 capabilities;
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
 	u8 rss_result_mask;
-	u8 rss_mode;
-	__le16 udp_4tuple_dst_port_mask;
-	__le16 udp_4tuple_dst_port_value;
+	u8 reserved3;
+	__le16 reserved4;
 	u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
 	__le32 rss_key[T_ETH_RSS_KEY];
 	__le32 echo;
-	__le32 reserved3;
+	__le32 reserved5;
 };
 
 
@@ -4255,10 +4306,10 @@
 /* In case tunnel exist and L4 checksum offload,
  * the pseudo checksum location, on packet or on BD.
  */
-enum eth_tunnel_non_lso_pcsum_location {
-	PCSUM_ON_PKT,
-	PCSUM_ON_BD,
-	MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+enum eth_tunnel_non_lso_csum_location {
+	CSUM_ON_PKT,
+	CSUM_ON_BD,
+	MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
 };
 
 /*
@@ -4305,8 +4356,10 @@
 	__le16 vlan_or_ethertype;
 	struct eth_tx_bd_flags bd_flags;
 	u8 general_data;
-#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
+#define ETH_TX_START_BD_HDR_NBDS (0x7<<0)
 #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3)
+#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3
 #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
 #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
 #define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
@@ -4382,8 +4435,8 @@
 	__le16 global_data;
 #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
 #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
-#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
-#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
 #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
 #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
 #define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
@@ -4392,9 +4445,14 @@
 #define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
 #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
 #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
-#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
-#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
-	__le16 reserved1;
+#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
+	u8 bd_type;
+#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4
+	u8 reserved3;
 	u8 tcp_flags;
 #define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
 #define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
@@ -4412,7 +4470,7 @@
 #define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
 #define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
 #define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
-	u8 reserved2;
+	u8 reserved4;
 	u8 tunnel_udp_hdr_start_w;
 	u8 fw_ip_hdr_to_payload_w;
 	__le16 fw_ip_csum_wo_len_flags_frag;
@@ -5200,10 +5258,18 @@
 	u8 path_id;
 	u8 network_cos_mode;
 	u8 dmae_cmd_id;
-	u8 gre_tunnel_mode;
-	u8 gre_tunnel_rss;
-	u8 nvgre_clss_en;
-	__le16 reserved1[2];
+	u8 tunnel_mode;
+	u8 gre_tunnel_type;
+	u8 tunn_clss_en;
+	u8 inner_gre_rss_en;
+	u8 sd_accept_mf_clss_fail;
+	__le16 vxlan_dst_port;
+	__le16 sd_accept_mf_clss_fail_ethtype;
+	__le16 sd_vlan_eth_type;
+	u8 sd_vlan_force_pri_flg;
+	u8 sd_vlan_force_pri_val;
+	u8 sd_accept_mf_clss_fail_match_ethtype;
+	u8 no_added_tags;
 };
 
 struct function_update_data {
@@ -5220,12 +5286,20 @@
 	u8 tx_switch_suspend_change_flg;
 	u8 tx_switch_suspend;
 	u8 echo;
+	u8 update_tunn_cfg_flg;
+	u8 tunnel_mode;
+	u8 gre_tunnel_type;
+	u8 tunn_clss_en;
+	u8 inner_gre_rss_en;
+	__le16 vxlan_dst_port;
+	u8 sd_vlan_force_pri_change_flg;
+	u8 sd_vlan_force_pri_flg;
+	u8 sd_vlan_force_pri_val;
+	u8 sd_vlan_tag_change_flg;
+	u8 sd_vlan_eth_type_change_flg;
 	u8 reserved1;
-	u8 update_gre_cfg_flg;
-	u8 gre_tunnel_mode;
-	u8 gre_tunnel_rss;
-	u8 nvgre_clss_en;
-	u32 reserved3;
+	__le16 sd_vlan_tag;
+	__le16 sd_vlan_eth_type;
 };
 
 /*
@@ -5254,17 +5328,9 @@
 #define __FW_VERSION_RESERVED_SHIFT 4
 };
 
-/* GRE RSS Mode */
-enum gre_rss_mode {
-	GRE_OUTER_HEADERS_RSS,
-	GRE_INNER_HEADERS_RSS,
-	NVGRE_KEY_ENTROPY_RSS,
-	MAX_GRE_RSS_MODE
-};
 
 /* GRE Tunnel Mode */
 enum gre_tunnel_type {
-	NO_GRE_TUNNEL,
 	NVGRE_TUNNEL,
 	L2GRE_TUNNEL,
 	IPGRE_TUNNEL,
@@ -5437,6 +5503,7 @@
  * Malicious VF error ID
  */
 enum malicious_vf_error_id {
+	MALICIOUS_VF_NO_ERROR,
 	VF_PF_CHANNEL_NOT_READY,
 	ETH_ILLEGAL_BD_LENGTHS,
 	ETH_PACKET_TOO_SHORT,
@@ -5597,6 +5664,16 @@
 	union protocol_common_specific_data data;
 };
 
+/* The data for the Set Timesync Ramrod */
+struct set_timesync_ramrod_data {
+	u8 drift_adjust_cmd;
+	u8 offset_cmd;
+	u8 add_sub_drift_adjust_value;
+	u8 drift_adjust_value;
+	u32 drift_adjust_period;
+	struct regpair offset_delta;
+};
+
 /*
  * The send queue element
  */
@@ -5719,10 +5796,38 @@
 	struct regpair reserved;
 };
 
+/* Add or Subtract Value for Set Timesync Ramrod */
+enum ts_add_sub_value {
+	TS_SUB_VALUE,
+	TS_ADD_VALUE,
+	MAX_TS_ADD_SUB_VALUE
+};
 
-/*
- * zone A per-queue data
- */
+/* Drift-Adjust Commands for Set Timesync Ramrod */
+enum ts_drift_adjust_cmd {
+	TS_DRIFT_ADJUST_KEEP,
+	TS_DRIFT_ADJUST_SET,
+	TS_DRIFT_ADJUST_RESET,
+	MAX_TS_DRIFT_ADJUST_CMD
+};
+
+/* Offset Commands for Set Timesync Ramrod */
+enum ts_offset_cmd {
+	TS_OFFSET_KEEP,
+	TS_OFFSET_INC,
+	TS_OFFSET_DEC,
+	MAX_TS_OFFSET_CMD
+};
+
+/* Tunnel Mode */
+enum tunnel_mode {
+	TUNN_MODE_NONE,
+	TUNN_MODE_VXLAN,
+	TUNN_MODE_GRE,
+	MAX_TUNNEL_MODE
+};
+
+ /* zone A per-queue data */
 struct ustorm_queue_zone_data {
 	struct ustorm_eth_rx_producers eth_rx_producers;
 	struct regpair reserved[3];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 900cab4..a008f48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -63,7 +63,6 @@
 #include "bnx2x_vfpf.h"
 #include "bnx2x_dcb.h"
 #include "bnx2x_sp.h"
-
 #include <linux/firmware.h>
 #include "bnx2x_fw_file_hdr.h"
 /* FW files */
@@ -290,6 +289,8 @@
 * General service functions
 ****************************************************************************/
 
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+
 static void __storm_memset_dma_mapping(struct bnx2x *bp,
 				       u32 addr, dma_addr_t mapping)
 {
@@ -523,6 +524,7 @@
 	 * as long as this code is called both from syscall context and
 	 * from ndo_set_rx_mode() flow that may be called from BH.
 	 */
+
 	spin_lock_bh(&bp->dmae_lock);
 
 	/* reset completion */
@@ -551,7 +553,9 @@
 	}
 
 unlock:
+
 	spin_unlock_bh(&bp->dmae_lock);
+
 	return rc;
 }
 
@@ -646,119 +650,98 @@
 	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
 }
 
+enum storms {
+	   XSTORM,
+	   TSTORM,
+	   CSTORM,
+	   USTORM,
+	   MAX_STORMS
+};
+
+#define STORMS_NUM 4
+#define REGS_IN_ENTRY 4
+
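+/* Map a storm and an assert-list entry index to the matching
+ * *_ASSERT_LIST_OFFSET in that storm's internal memory.
+ */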
+static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
+					      enum storms storm,
+					      int entry)
+{
+	switch (storm) {
+	case XSTORM:
+		return XSTORM_ASSERT_LIST_OFFSET(entry);
+	case TSTORM:
+		return TSTORM_ASSERT_LIST_OFFSET(entry);
+	case CSTORM:
+		return CSTORM_ASSERT_LIST_OFFSET(entry);
+	case USTORM:
+		return USTORM_ASSERT_LIST_OFFSET(entry);
+	case MAX_STORMS:
+	default:
+		BNX2X_ERR("unknown storm\n");
+	}
+	return -EINVAL;
+}
+
 static int bnx2x_mc_assert(struct bnx2x *bp)
 {
 	char last_idx;
-	int i, rc = 0;
-	u32 row0, row1, row2, row3;
+	int i, j, rc = 0;
+	enum storms storm;
+	u32 regs[REGS_IN_ENTRY];
+	u32 bar_storm_intmem[STORMS_NUM] = {
+		BAR_XSTRORM_INTMEM,
+		BAR_TSTRORM_INTMEM,
+		BAR_CSTRORM_INTMEM,
+		BAR_USTRORM_INTMEM
+	};
+	u32 storm_assert_list_index[STORMS_NUM] = {
+		XSTORM_ASSERT_LIST_INDEX_OFFSET,
+		TSTORM_ASSERT_LIST_INDEX_OFFSET,
+		CSTORM_ASSERT_LIST_INDEX_OFFSET,
+		USTORM_ASSERT_LIST_INDEX_OFFSET
+	};
+	char *storms_string[STORMS_NUM] = {
+		"XSTORM",
+		"TSTORM",
+		"CSTORM",
+		"USTORM"
+	};
 
-	/* XSTORM */
-	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
-			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
+		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
+				   storm_assert_list_index[storm]);
+		if (last_idx)
+			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
+				  storms_string[storm], last_idx);
 
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+		/* print the asserts */
+		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+			/* read a single assert entry */
+			for (j = 0; j < REGS_IN_ENTRY; j++)
+				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
+					  bnx2x_get_assert_list_entry(bp,
+								      storm,
+								      i) +
+					  sizeof(u32) * j);
 
-		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
-			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
+			/* log entry if it contains a valid assert */
+			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+					  storms_string[storm], i, regs[3],
+					  regs[2], regs[1], regs[0]);
+				rc++;
+			} else {
+				break;
+			}
 		}
 	}
 
-	/* TSTORM */
-	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
-			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
-		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
-			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
-		}
-	}
-
-	/* CSTORM */
-	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
-			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
-		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
-		}
-	}
-
-	/* USTORM */
-	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
-			   USTORM_ASSERT_LIST_INDEX_OFFSET);
-	if (last_idx)
-		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
-	/* print the asserts */
-	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
-		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i));
-		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
-		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
-		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
-			      USTORM_ASSERT_LIST_OFFSET(i) + 12);
-
-		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
-			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				  i, row3, row2, row1, row0);
-			rc++;
-		} else {
-			break;
-		}
-	}
+	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
+		  CHIP_IS_E1(bp) ? "everest1" :
+		  CHIP_IS_E1H(bp) ? "everest1h" :
+		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
+		  BCM_5710_FW_MAJOR_VERSION,
+		  BCM_5710_FW_MINOR_VERSION,
+		  BCM_5710_FW_REVISION_VERSION);
 
 	return rc;
 }
@@ -983,6 +966,12 @@
 		u32 *sb_data_p;
 		struct bnx2x_fp_txdata txdata;
 
+		if (!bp->fp)
+			break;
+
+		if (!fp->rx_cons_sb)
+			continue;
+
 		/* Rx */
 		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
 			  i, fp->rx_bd_prod, fp->rx_bd_cons,
@@ -995,7 +984,14 @@
 		/* Tx */
 		for_each_cos_in_tx_queue(fp, cos)
 		{
+			if (!fp->txdata_ptr)
+				break;
+
 			txdata = *fp->txdata_ptr[cos];
+
+			if (!txdata.tx_cons_sb)
+				continue;
+
 			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
 				  i, txdata.tx_pkt_prod,
 				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -1097,6 +1093,12 @@
 	for_each_valid_rx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
+		if (!bp->fp)
+			break;
+
+		if (!fp->rx_cons_sb)
+			continue;
+
 		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
 		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
 		for (j = start; j != end; j = RX_BD(j + 1)) {
@@ -1130,9 +1132,19 @@
 	/* Tx */
 	for_each_valid_tx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		if (!bp->fp)
+			break;
+
 		for_each_cos_in_tx_queue(fp, cos) {
 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
 
+			if (!fp->txdata_ptr)
+				break;
+
+			if (!txdata->tx_cons_sb)
+				continue;
+
 			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
 			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
 			for (j = start; j != end; j = TX_BD(j + 1)) {
@@ -2071,8 +2083,6 @@
 	else
 		value = 0;
 
-	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
-
 	return value;
 }
 
@@ -4678,7 +4688,7 @@
 	for (i = 0; sig; i++) {
 		cur_bit = (0x1UL << i);
 		if (sig & cur_bit) {
-			res |= true; /* Each bit is real error! */
+			res = true; /* Each bit is real error! */
 			if (print) {
 				switch (cur_bit) {
 				case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
@@ -4757,21 +4767,21 @@
 					_print_next_block((*par_num)++,
 							  "MCP ROM");
 				*global = true;
-				res |= true;
+				res = true;
 				break;
 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
 				if (print)
 					_print_next_block((*par_num)++,
 							  "MCP UMP RX");
 				*global = true;
-				res |= true;
+				res = true;
 				break;
 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
 				if (print)
 					_print_next_block((*par_num)++,
 							  "MCP UMP TX");
 				*global = true;
-				res |= true;
+				res = true;
 				break;
 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
 				if (print)
@@ -4803,7 +4813,7 @@
 	for (i = 0; sig; i++) {
 		cur_bit = (0x1UL << i);
 		if (sig & cur_bit) {
-			res |= true; /* Each bit is real error! */
+			res = true; /* Each bit is real error! */
 			if (print) {
 				switch (cur_bit) {
 				case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
@@ -5452,6 +5462,14 @@
 				break;
 
 			goto next_spqe;
+
+		case EVENT_RING_OPCODE_SET_TIMESYNC:
+			DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
+			   "got set_timesync ramrod completion\n");
+			if (f_obj->complete_cmd(bp, f_obj,
+						BNX2X_F_CMD_SET_TIMESYNC))
+				break;
+			goto next_spqe;
 		}
 
 		switch (opcode | bp->state) {
@@ -6102,7 +6120,7 @@
 	}
 
 	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
-	if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+	if (rx_mode != BNX2X_RX_MODE_NONE) {
 		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
 		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
 	}
@@ -7647,7 +7665,11 @@
 	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
 	/* Function parameters */
-	switch_update_params->suspend = suspend;
+	__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+		  &switch_update_params->changes);
+	if (suspend)
+		__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+			  &switch_update_params->changes);
 
 	rc = bnx2x_func_state_change(bp, &func_params);
 
@@ -9010,7 +9032,7 @@
 		struct bnx2x_func_state_params func_params = {NULL};
 
 		DP(NETIF_MSG_IFDOWN,
-		   "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+		   "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
 
 		func_params.f_obj = &bp->func_obj;
 		__set_bit(RAMROD_DRV_CLR_ONLY,
@@ -9029,6 +9051,48 @@
 	return 0;
 }
 
+static void bnx2x_disable_ptp(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	/* Disable sending PTP packets to host */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+	/* Reset PTP event detection rules */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+	/* Disable the PTP feature */
+	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+	       NIG_REG_P0_PTP_EN, 0x0);
+}
+
+/* Called during unload, to stop PTP-related stuff */
+void bnx2x_stop_ptp(struct bnx2x *bp)
+{
+	/* Cancel PTP work queue. Should be done after the Tx queues are
+	 * drained to prevent additional scheduling.
+	 */
+	cancel_work_sync(&bp->ptp_task);
+
+	if (bp->ptp_tx_skb) {
+		dev_kfree_skb_any(bp->ptp_tx_skb);
+		bp->ptp_tx_skb = NULL;
+	}
+
+	/* Disable PTP in HW */
+	bnx2x_disable_ptp(bp);
+
+	DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
+}
+
 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 {
 	int port = BP_PORT(bp);
@@ -9147,6 +9211,13 @@
 #endif
 	}
 
+	/* stop_ptp should be after the Tx queues are drained to prevent
+	 * scheduling to the cancelled PTP work queue. It should also be after
+	 * the function stop ramrod is sent, since the FW accesses PTP
+	 * registers as part of that ramrod.
+	 */
+	bnx2x_stop_ptp(bp);
+
 	/* Disable HW interrupts, NAPI */
 	bnx2x_netif_stop(bp, 1);
 	/* Delete all NAPI objects */
@@ -11961,6 +12032,9 @@
 
 	bp->dump_preset_idx = 1;
 
+	if (CHIP_IS_E3B0(bp))
+		bp->flags |= PTP_SUPPORTED;
+
 	return rc;
 }
 
@@ -12293,13 +12367,17 @@
 	struct bnx2x *bp = netdev_priv(dev);
 	struct mii_ioctl_data *mdio = if_mii(ifr);
 
-	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
-	   mdio->phy_id, mdio->reg_num, mdio->val_in);
-
 	if (!netif_running(dev))
 		return -EAGAIN;
 
-	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
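+	/* Hardware timestamping configuration is handled by the PTP code;
+	 * every other ioctl keeps the existing MDIO path.
+	 */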
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return bnx2x_hwtstamp_ioctl(bp, ifr);
+	default:
+		DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+		   mdio->phy_id, mdio->reg_num, mdio->val_in);
+		return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+	}
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -12943,6 +13021,191 @@
 	}
 }
 
+/* nig_tsgen registers relative address */
+#define tsgen_ctrl 0x0
+#define tsgen_freecount 0x10
+#define tsgen_synctime_t0 0x20
+#define tsgen_offset_t0 0x28
+#define tsgen_drift_t0 0x30
+#define tsgen_synctime_t1 0x58
+#define tsgen_offset_t1 0x60
+#define tsgen_drift_t1 0x68
+
+/* FW workaround for setting drift */
+static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
+					  int best_val, int best_period)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_set_timesync_params *set_timesync_params =
+		&func_params.params.set_timesync;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+	/* Function parameters */
+	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
+	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+	set_timesync_params->add_sub_drift_adjust_value =
+		drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
+	set_timesync_params->drift_adjust_value = best_val;
+	set_timesync_params->drift_adjust_period = best_period;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+	int rc;
+	int drift_dir = 1;
+	int val, period, period1, period2, dif, dif1, dif2;
+	int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+
+	DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP adjfreq called while the interface is down\n");
+		return -EFAULT;
+	}
+
+	if (ppb < 0) {
+		ppb = -ppb;
+		drift_dir = 0;
+	}
+
+	if (ppb == 0) {
+		best_val = 1;
+		best_period = 0x1FFFFFF;
+	} else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
+		best_val = 31;
+		best_period = 1;
+	} else {
+		/* Values 8, 16 and 24 for val are deliberately skipped, as
+		 * they are not supported by the workaround.
+		 */
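+		/* Search for the (drift_adjust_value, drift_adjust_period)
+		 * pair whose ratio val * 1000000 / period comes closest to
+		 * the requested ppb; the winner is handed to the FW through
+		 * the set-timesync ramrod below.
+		 */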
+		for (val = 0; val <= 31; val++) {
+			if ((val & 0x7) == 0)
+				continue;
+			period1 = val * 1000000 / ppb;
+			period2 = period1 + 1;
+			if (period1 != 0)
+				dif1 = ppb - (val * 1000000 / period1);
+			else
+				dif1 = BNX2X_MAX_PHC_DRIFT;
+			if (dif1 < 0)
+				dif1 = -dif1;
+			dif2 = ppb - (val * 1000000 / period2);
+			if (dif2 < 0)
+				dif2 = -dif2;
+			dif = (dif1 < dif2) ? dif1 : dif2;
+			period = (dif1 < dif2) ? period1 : period2;
+			if (dif < best_dif) {
+				best_dif = dif;
+				best_val = val;
+				best_period = period;
+			}
+		}
+	}
+
+	rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
+					    best_period);
+	if (rc) {
+		BNX2X_ERR("Failed to set drift\n");
+		return -EFAULT;
+	}
+
+	DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val,
+	   best_period);
+
+	return 0;
+}
+
+static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+	u64 now;
+
+	DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
+
+	now = timecounter_read(&bp->timecounter);
+	now += delta;
+	/* Re-init the timecounter */
+	timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
+
+	return 0;
+}
+
+static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+	u64 ns;
+	u32 remainder;
+
+	ns = timecounter_read(&bp->timecounter);
+
+	DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
+			     const struct timespec *ts)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+	u64 ns;
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
+
+	/* Re-init the timecounter */
+	timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
+
+	return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+
+	BNX2X_ERR("PHC ancillary features are not supported\n");
+	return -ENOTSUPP;
+}
+
+void bnx2x_register_phc(struct bnx2x *bp)
+{
+	/* Fill the ptp_clock_info struct and register PTP clock */
+	bp->ptp_clock_info.owner = THIS_MODULE;
+	snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
+	bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
+	bp->ptp_clock_info.n_alarm = 0;
+	bp->ptp_clock_info.n_ext_ts = 0;
+	bp->ptp_clock_info.n_per_out = 0;
+	bp->ptp_clock_info.pps = 0;
+	bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+	bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
+	bp->ptp_clock_info.gettime = bnx2x_ptp_gettime;
+	bp->ptp_clock_info.settime = bnx2x_ptp_settime;
+	bp->ptp_clock_info.enable = bnx2x_ptp_enable;
+
+	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
+	if (IS_ERR(bp->ptp_clock)) {
+		bp->ptp_clock = NULL;
+		BNX2X_ERR("PTP clock registeration failed\n");
+	}
+}
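
Once ptp_clock_register() succeeds the clock is exposed as a character device (/dev/ptpN) and can be read through the dynamic POSIX clock interface, which lands in the gettime callback above. A minimal user-space sketch, assuming the clock came up as /dev/ptp0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		struct timespec ts;
		int fd = open("/dev/ptp0", O_RDONLY);	/* assumed device node */

		if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
			perror("phc");
			return 1;
		}
		printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		close(fd);
		return 0;
	}
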
+
 static int bnx2x_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
@@ -13114,6 +13377,8 @@
 		       "Unknown",
 		       dev->base_addr, bp->pdev->irq, dev->dev_addr);
 
+	bnx2x_register_phc(bp);
+
 	return 0;
 
 init_one_exit:
@@ -13140,6 +13405,11 @@
 			   struct bnx2x *bp,
 			   bool remove_netdev)
 {
+	if (bp->ptp_clock) {
+		ptp_clock_unregister(bp->ptp_clock);
+		bp->ptp_clock = NULL;
+	}
+
 	/* Delete storage MAC address */
 	if (!NO_FCOE(bp)) {
 		rtnl_lock();
@@ -14115,3 +14385,332 @@
 	REG_RD(bp, pretend_reg);
 	return 0;
 }
+
+static void bnx2x_ptp_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
+	int port = BP_PORT(bp);
+	u32 val_seq;
+	u64 timestamp, ns;
+	struct skb_shared_hwtstamps shhwtstamps;
+
+	/* Read Tx timestamp registers */
+	val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+			 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+	if (val_seq & 0x10000) {
+		/* There is a valid timestamp value */
+		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
+				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
+		timestamp <<= 32;
+		timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
+				    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
+		/* Reset timestamp register to allow new timestamp */
+		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+		       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+		ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+		shhwtstamps.hwtstamp = ns_to_ktime(ns);
+		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
+		dev_kfree_skb_any(bp->ptp_tx_skb);
+		bp->ptp_tx_skb = NULL;
+
+		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+		   timestamp, ns);
+	} else {
+		DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
+		/* Reschedule to keep checking for a valid timestamp value */
+		schedule_work(&bp->ptp_task);
+	}
+}
+
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
+{
+	int port = BP_PORT(bp);
+	u64 timestamp, ns;
+
+	timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
+			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
+	timestamp <<= 32;
+	timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
+			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
+
+	/* Reset timestamp register to allow new timestamp */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+
+	ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+	DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+	   timestamp, ns);
+}
+
+/* Read the PHC */
+static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+{
+	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
+	int port = BP_PORT(bp);
+	u32 wb_data[2];
+	u64 phc_cycles;
+
+	REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
+		    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
+	phc_cycles = wb_data[1];
+	phc_cycles = (phc_cycles << 32) + wb_data[0];
+
+	DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
+
+	return phc_cycles;
+}
+
+static void bnx2x_init_cyclecounter(struct bnx2x *bp)
+{
+	memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
+	bp->cyclecounter.read = bnx2x_cyclecounter_read;
+	bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
+	bp->cyclecounter.shift = 1;
+	bp->cyclecounter.mult = 1;
+}
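
For reference, the timecounter core (cyclecounter_cyc2ns()) converts a raw counter delta to nanoseconds as ns = (delta * mult) >> shift, so the mult = 1, shift = 1 setting above counts every two ticks of the free-running counter as one nanosecond:

	/*
	 *	ns = (delta * cc->mult) >> cc->shift
	 *	   = (delta * 1) >> 1		with the values set above
	 *
	 * e.g. a delta of 2000000000 cycles accounts for 1000000000 ns.
	 */
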
+
+static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_set_timesync_params *set_timesync_params =
+		&func_params.params.set_timesync;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+	/* Function parameters */
+	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
+	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+int bnx2x_enable_ptp_packets(struct bnx2x *bp)
+{
+	struct bnx2x_queue_state_params q_params;
+	int rc, i;
+
+	/* send queue update ramrod to enable PTP packets */
+	memset(&q_params, 0, sizeof(q_params));
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+		  &q_params.params.update.update_flags);
+	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
+		  &q_params.params.update.update_flags);
+
+	/* send the ramrod on all the queues of the PF */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Set the appropriate Queue object */
+		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to enable PTP packets\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int rc;
+
+	if (!bp->hwtstamp_ioctl_called)
+		return 0;
+
+	switch (bp->tx_type) {
+	case HWTSTAMP_TX_ON:
+		bp->flags |= TX_TIMESTAMPING_EN;
+		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
+		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+		       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+		break;
+	case HWTSTAMP_TX_ONESTEP_SYNC:
+		BNX2X_ERR("One-step timestamping is not supported\n");
+		return -ERANGE;
+	}
+
+	switch (bp->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+		bp->rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		/* Initialize PTP detection for UDP/IPv4 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+		/* Initialize PTP detection L2 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+		break;
+	}
+
+	/* Indicate to FW that this PF expects recorded PTP packets */
+	rc = bnx2x_enable_ptp_packets(bp);
+	if (rc)
+		return rc;
+
+	/* Enable sending PTP packets to host */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
+
+	return 0;
+}
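
As a worked example of the mask values programmed above (a cleared bit keeps the corresponding parameter or rule active, per the NIG_REG_P*_LLH_PTP_PARAM_MASK / RULE_MASK descriptions added to bnx2x_reg.h below), the HWTSTAMP_FILTER_PTP_V2_EVENT case decodes as:

	/* PARAM_MASK = 0x6AA = 110 1010 1010b
	 *	cleared bits 0, 2, 4, 6, 8 keep IPv4 DA 224.0.1.129, IPv6 DA
	 *	FF0x::181, UDP port 319, Ethertype 0x88F7 and MAC DA
	 *	01-1B-19-00-00-00 active.
	 *
	 * RULE_MASK = 0x3EEE = 11 1110 1110 1110b
	 *	cleared bits 0, 4, 8, 12 enable the event rules
	 *	{IPv4 DA 0; UDP DP 0}, {IPv6 DA 0; UDP DP 0},
	 *	{MAC DA 0; Ethertype 0} and {MAC DA 2; Ethertype 0}.
	 */
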
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	int rc;
+
+	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
+	   config.tx_type, config.rx_filter);
+
+	if (config.flags) {
+		BNX2X_ERR("config.flags is reserved for future use\n");
+		return -EINVAL;
+	}
+
+	bp->hwtstamp_ioctl_called = 1;
+	bp->tx_type = config.tx_type;
+	bp->rx_filter = config.rx_filter;
+
+	rc = bnx2x_configure_ptp_filters(bp);
+	if (rc)
+		return rc;
+
+	config.rx_filter = bp->rx_filter;
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
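
A minimal user-space sketch of exercising this path through the standard SIOCSHWTSTAMP request; the interface name is a placeholder, and the rx_filter actually granted is copied back into the passed structure:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
		ifr.ifr_data = (void *)&cfg;

		if (fd < 0 || ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
			perror("SIOCSHWTSTAMP");
			return 1;
		}
		printf("granted rx_filter = %d\n", cfg.rx_filter);
		close(fd);
		return 0;
	}
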
+
+/* Configures HW for PTP */
+static int bnx2x_configure_ptp(struct bnx2x *bp)
+{
+	int rc, port = BP_PORT(bp);
+	u32 wb_data[2];
+
+	/* Reset PTP event detection rules - will be configured in the IOCTL */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+	/* Disable PTP packets to host - will be configured in the IOCTL */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+	/* Enable the PTP feature */
+	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+	       NIG_REG_P0_PTP_EN, 0x3F);
+
+	/* Enable the free-running counter */
+	wb_data[0] = 0;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
+
+	/* Reset drift register (offset register is not reset) */
+	rc = bnx2x_send_reset_timesync_ramrod(bp);
+	if (rc) {
+		BNX2X_ERR("Failed to reset PHC drift register\n");
+		return -EFAULT;
+	}
+
+	/* Reset possibly old timestamps */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+	       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+
+	return 0;
+}
+
+/* Called during load, to initialize PTP-related stuff */
+void bnx2x_init_ptp(struct bnx2x *bp)
+{
+	int rc;
+
+	/* Configure PTP in HW */
+	rc = bnx2x_configure_ptp(bp);
+	if (rc) {
+		BNX2X_ERR("Stopping PTP initialization\n");
+		return;
+	}
+
+	/* Init work queue for Tx timestamping */
+	INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
+
+	/* Init cyclecounter and timecounter. This is done only in the first
+	 * load. If done in every load, PTP application will fail when doing
+	 * unload / load (e.g. MTU change) while it is running.
+	 */
+	if (!bp->timecounter_init_done) {
+		bnx2x_init_cyclecounter(bp);
+		timecounter_init(&bp->timecounter, &bp->cyclecounter,
+				 ktime_to_ns(ktime_get_real()));
+		bp->timecounter_init_done = 1;
+	}
+
+	DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 2beb543..b0779d7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2182,6 +2182,45 @@
 #define NIG_REG_P0_HWPFC_ENABLE				 0x18078
 #define NIG_REG_P0_LLH_FUNC_MEM2				 0x18480
 #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE			 0x18440
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID			 0x1875c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB			 0x18754
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB			 0x18758
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_LLH_PTP_PARAM_MASK				 0x187a0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P0_LLH_PTP_RULE_MASK				 0x187a4
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P0_LLH_PTP_TO_HOST				 0x187ac
 /* [RW 1] Input enable for RX MAC interface. */
 #define NIG_REG_P0_MAC_IN_EN					 0x185ac
 /* [RW 1] Output enable for TX MAC interface */
@@ -2194,6 +2233,17 @@
  * priority field is extracted from the outer-most VLAN in receive packet.
  * Only COS 0 and COS 1 are supported in E2. */
 #define NIG_REG_P0_PKT_PRIORITY_TO_COS				 0x18054
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P0_PTP_EN					 0x18788
 /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
  * priority is mapped to COS 0 when the corresponding mask bit is 1. More
  * than one bit may be set; allowing multiple priorities to be mapped to one
@@ -2300,7 +2350,46 @@
  * Ethernet header. */
 #define NIG_REG_P1_HDRS_AFTER_BASIC				 0x1818c
 #define NIG_REG_P1_LLH_FUNC_MEM2				 0x184c0
-#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE			 0x18460
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE			 0x18460
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID			 0x18774
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB			 0x1876c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB			 0x18770
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_LLH_PTP_PARAM_MASK				 0x187c8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P1_LLH_PTP_RULE_MASK				 0x187cc
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P1_LLH_PTP_TO_HOST				 0x187d4
 /* [RW 32] Specify the client number to be assigned to each priority of the
  * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
  * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
@@ -2342,6 +2431,17 @@
  * priority field is extracted from the outer-most VLAN in receive packet.
  * Only COS 0 and COS 1 are supported in E2. */
 #define NIG_REG_P1_PKT_PRIORITY_TO_COS				 0x181a8
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P1_PTP_EN					 0x187b0
 /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
  * priority is mapped to COS 0 when the corresponding mask bit is 1. More
  * than one bit may be set; allowing multiple priorities to be mapped to one
@@ -2361,6 +2461,78 @@
 #define NIG_REG_P1_RX_MACFIFO_EMPTY				 0x1858c
 /* [R 1] TLLH FIFO is empty. */
 #define NIG_REG_P1_TLLH_FIFO_EMPTY				 0x18338
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_SEQID				 0x187e0
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB				 0x187d8
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB				 0x187dc
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_TLLH_PTP_PARAM_MASK				 0x187f0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P0_TLLH_PTP_RULE_MASK				 0x187f4
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_SEQID				 0x187ec
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB				 0x187e4
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB				 0x187e8
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_TLLH_PTP_PARAM_MASK				 0x187f8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P1_TLLH_PTP_RULE_MASK				 0x187fc
 /* [RW 32] Specify which of the credit registers the client is to be mapped
  * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
  * for client 0; bits [35:32] are for client 8. For clients that are not
@@ -2513,6 +2685,10 @@
    swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
    ort swap is equal to ~nig_registers_port_swap.port_swap */
 #define NIG_REG_STRAP_OVERRIDE					 0x10398
+/* [WB 64] Addresses for TimeSync related registers in the timesync
+ * generator sub-module.
+ */
+#define NIG_REG_TIMESYNC_GEN_REG				 0x18800
 /* [RW 1] output enable for RX_XCM0 IF */
 #define NIG_REG_XCM0_OUT_EN					 0x100f0
 /* [RW 1] output enable for RX_XCM1 IF */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index b193604..798e97f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4019,6 +4019,7 @@
 	struct bnx2x_raw_obj *r = &o->raw;
 	struct eth_rss_update_ramrod_data *data =
 		(struct eth_rss_update_ramrod_data *)(r->rdata);
+	u16 caps = 0;
 	u8 rss_mode = 0;
 	int rc;
 
@@ -4042,28 +4043,27 @@
 
 	/* RSS capabilities */
 	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
-		data->capabilities |=
-			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
 
 	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
-		data->capabilities |=
-			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
 
 	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
-		data->capabilities |=
-			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
 
 	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
-		data->capabilities |=
-			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
 
 	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
-		data->capabilities |=
-			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
 
 	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
-		data->capabilities |=
-			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+
+	data->capabilities = cpu_to_le16(caps);
 
 	/* Hashing mask */
 	data->rss_result_mask = p->rss_result_mask;
@@ -4336,6 +4336,8 @@
 		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
 
+	gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
+
 	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
 	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
 }
@@ -4357,12 +4359,13 @@
 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
 	tx_data->force_default_pri_flg =
 		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
-
+	tx_data->refuse_outband_vlan_flg =
+		test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
 	tx_data->tunnel_lso_inc_ip_id =
 		test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
 	tx_data->tunnel_non_lso_pcsum_location =
-		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
-								  PCSUM_ON_BD;
+		test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
+							    CSUM_ON_BD;
 
 	tx_data->tx_status_block_id = params->fw_sb_id;
 	tx_data->tx_sb_index_number = params->sb_cq_index;
@@ -4722,6 +4725,12 @@
 	data->tx_switching_change_flg =
 		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
 			 &params->update_flags);
+
+	/* PTP */
+	data->handle_ptp_pkts_flg =
+		test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
+	data->handle_ptp_pkts_change_flg =
+		test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
 }
 
 static inline int bnx2x_q_send_update(struct bnx2x *bp,
@@ -5376,6 +5385,10 @@
 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
 			next_state = BNX2X_F_STATE_STARTED;
 
+		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
 		else if (cmd == BNX2X_F_CMD_TX_STOP)
 			next_state = BNX2X_F_STATE_TX_STOPPED;
 
@@ -5385,6 +5398,10 @@
 		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
 			next_state = BNX2X_F_STATE_TX_STOPPED;
 
+		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_TX_STOPPED;
+
 		else if (cmd == BNX2X_F_CMD_TX_START)
 			next_state = BNX2X_F_STATE_STARTED;
 
@@ -5652,8 +5669,11 @@
 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
 	rdata->path_id		= BP_PATH(bp);
 	rdata->network_cos_mode	= start_params->network_cos_mode;
-	rdata->gre_tunnel_mode	= start_params->gre_tunnel_mode;
-	rdata->gre_tunnel_rss	= start_params->gre_tunnel_rss;
+	rdata->tunnel_mode	= start_params->tunnel_mode;
+	rdata->gre_tunnel_type	= start_params->gre_tunnel_type;
+	rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
+	rdata->vxlan_dst_port	= cpu_to_le16(4789);
+	rdata->sd_vlan_eth_type = cpu_to_le16(0x8100);
 
 	/* No need for an explicit memory barrier here as long we would
 	 * need to ensure the ordering of writing to the SPQ element
@@ -5680,8 +5700,28 @@
 	memset(rdata, 0, sizeof(*rdata));
 
 	/* Fill the ramrod data with provided parameters */
-	rdata->tx_switch_suspend_change_flg = 1;
-	rdata->tx_switch_suspend = switch_update_params->suspend;
+	if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+		     &switch_update_params->changes)) {
+		rdata->tx_switch_suspend_change_flg = 1;
+		rdata->tx_switch_suspend =
+			test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+				 &switch_update_params->changes);
+	}
+
+	if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+		     &switch_update_params->changes)) {
+		rdata->update_tunn_cfg_flg = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+			     &switch_update_params->changes))
+			rdata->tunn_clss_en = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+			     &switch_update_params->changes))
+			rdata->inner_gre_rss_en = 1;
+		rdata->tunnel_mode = switch_update_params->tunnel_mode;
+		rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
+		rdata->vxlan_dst_port = cpu_to_le16(4789);
+	}
+
 	rdata->echo = SWITCH_UPDATE;
 
 	/* No need for an explicit memory barrier here as long as we
@@ -5817,6 +5857,40 @@
 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
 }
 
+static inline
+int bnx2x_func_send_set_timesync(struct bnx2x *bp,
+				 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct set_timesync_ramrod_data *rdata =
+		(struct set_timesync_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_set_timesync_params *set_timesync_params =
+		&params->params.set_timesync;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
+	rdata->offset_cmd = set_timesync_params->offset_cmd;
+	rdata->add_sub_drift_adjust_value =
+		set_timesync_params->add_sub_drift_adjust_value;
+	rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
+	rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
+	rdata->offset_delta.lo = U64_LO(set_timesync_params->offset_delta);
+	rdata->offset_delta.hi = U64_HI(set_timesync_params->offset_delta);
+
+	DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
+	   rdata->drift_adjust_cmd, rdata->offset_cmd,
+	   rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
+	   rdata->drift_adjust_period, rdata->offset_delta.lo,
+	   rdata->offset_delta.hi);
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
 static int bnx2x_func_send_cmd(struct bnx2x *bp,
 			       struct bnx2x_func_state_params *params)
 {
@@ -5839,6 +5913,8 @@
 		return bnx2x_func_send_tx_start(bp, params);
 	case BNX2X_F_CMD_SWITCH_UPDATE:
 		return bnx2x_func_send_switch_update(bp, params);
+	case BNX2X_F_CMD_SET_TIMESYNC:
+		return bnx2x_func_send_set_timesync(bp, params);
 	default:
 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
 		return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 718ecd2..21c8f6f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -711,6 +711,7 @@
 	BNX2X_RSS_IPV6,
 	BNX2X_RSS_IPV6_TCP,
 	BNX2X_RSS_IPV6_UDP,
+	BNX2X_RSS_GRE_INNER_HDRS,
 };
 
 struct bnx2x_config_rss_params {
@@ -769,7 +770,9 @@
 	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
 	BNX2X_Q_UPDATE_SILENT_VLAN_REM,
 	BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
-	BNX2X_Q_UPDATE_TX_SWITCHING
+	BNX2X_Q_UPDATE_TX_SWITCHING,
+	BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+	BNX2X_Q_UPDATE_PTP_PKTS,
 };
 
 /* Allowed Queue states */
@@ -831,6 +834,7 @@
 	BNX2X_Q_FLG_ANTI_SPOOF,
 	BNX2X_Q_FLG_SILENT_VLAN_REM,
 	BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+	BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
 	BNX2X_Q_FLG_PCSUM_ON_PKT,
 	BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
 };
@@ -851,6 +855,10 @@
 #define BNX2X_MULTI_TX_COS			3 /* Maximum possible */
 
 #define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+/* DMAE channel to be used by FW for timesync workaround. A driver that sends
+ * timesync-related ramrods must not use this DMAE command ID.
+ */
+#define FW_DMAE_CMD_ID 6
 
 struct bnx2x_queue_init_params {
 	struct {
@@ -1085,6 +1093,16 @@
 };
 
 /********************** Function state update *********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+	BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+	BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+	BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+	BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+};
+
 /* Allowed Function states */
 enum bnx2x_func_state {
 	BNX2X_F_STATE_RESET,
@@ -1105,6 +1123,7 @@
 	BNX2X_F_CMD_TX_STOP,
 	BNX2X_F_CMD_TX_START,
 	BNX2X_F_CMD_SWITCH_UPDATE,
+	BNX2X_F_CMD_SET_TIMESYNC,
 	BNX2X_F_CMD_MAX,
 };
 
@@ -1146,18 +1165,25 @@
 	/* Function cos mode */
 	u8 network_cos_mode;
 
-	/* NVGRE classification enablement */
-	u8 nvgre_clss_en;
+	/* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
+	u8 tunnel_mode;
 
-	/* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
-	u8 gre_tunnel_mode;
+	/* tunneling classification enablement */
+	u8 tunn_clss_en;
 
-	/* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
-	u8 gre_tunnel_rss;
+	/* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+	u8 gre_tunnel_type;
+
+	/* Enables Inner GRE RSS on the function, depends on the client RSS
+ * capabilities
+	 */
+	u8 inner_gre_rss_en;
 };
 
 struct bnx2x_func_switch_update_params {
-	u8 suspend;
+	unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
+	u8 tunnel_mode;
+	u8 gre_tunnel_type;
 };
 
 struct bnx2x_func_afex_update_params {
@@ -1172,6 +1198,7 @@
 	u8 afex_vif_list_command;
 	u8 func_to_clear;
 };
+
 struct bnx2x_func_tx_start_params {
 	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
 	u8 dcb_enabled;
@@ -1179,6 +1206,24 @@
 	u8 dont_add_pri_0_en;
 };
 
+struct bnx2x_func_set_timesync_params {
+	/* Reset, set or keep the current drift value */
+	u8 drift_adjust_cmd;
+
+	/* Dec, inc or keep the current offset */
+	u8 offset_cmd;
+
+	/* Drift value direction */
+	u8 add_sub_drift_adjust_value;
+
+	/* Drift, period and offset values to be used according to the commands
+	 * above.
+	 */
+	u8 drift_adjust_value;
+	u32 drift_adjust_period;
+	u64 offset_delta;
+};
+
 struct bnx2x_func_state_params {
 	struct bnx2x_func_sp_obj *f_obj;
 
@@ -1197,6 +1242,7 @@
 		struct bnx2x_func_afex_update_params afex_update;
 		struct bnx2x_func_afex_viflists_params afex_viflists;
 		struct bnx2x_func_tx_start_params tx_start;
+		struct bnx2x_func_set_timesync_params set_timesync;
 	} params;
 };
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 662310c..c88b20a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1125,7 +1125,7 @@
 	return dev->bus->self && dev->bus->self->ari_enabled;
 }
 
-static void
+static int
 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 {
 	int sb_id;
@@ -1150,6 +1150,7 @@
 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
 	}
 	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
+	return BP_VFDB(bp)->vf_sbs_pool;
 }
 
 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1314,15 +1315,17 @@
 	}
 
 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
-	bnx2x_get_vf_igu_cam_info(bp);
+	if (!bnx2x_get_vf_igu_cam_info(bp)) {
+		BNX2X_ERR("No entries in IGU CAM for vfs\n");
+		err = -EINVAL;
+		goto failed;
+	}
 
 	/* allocate the queue arrays for all VFs */
 	bp->vfdb->vfqs = kzalloc(
 		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
 		GFP_KERNEL);
 
-	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
-
 	if (!bp->vfdb->vfqs) {
 		BNX2X_ERR("failed to allocate vf queue array\n");
 		err = -ENOMEM;
@@ -1349,9 +1352,7 @@
 	if (!IS_SRIOV(bp))
 		return;
 
-	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
-	pci_disable_sriov(bp->pdev);
-	DP(BNX2X_MSG_IOV, "sriov disabled\n");
+	bnx2x_disable_sriov(bp);
 
 	/* disable access to all VFs */
 	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
@@ -1985,21 +1986,6 @@
 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
 }
 
-static inline
-struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
-{
-	int i;
-	struct bnx2x_virtf *vf = NULL;
-
-	for_each_vf(bp, i) {
-		vf = BP_VF(bp, i);
-		if (stat_id >= vf->igu_base_id &&
-		    stat_id < vf->igu_base_id + vf_sb_count(vf))
-			break;
-	}
-	return vf;
-}
-
 /* VF API helpers */
 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
 				u8 enable)
@@ -2362,12 +2348,6 @@
 	return rc;
 }
 
-static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
-			      struct bnx2x_virtf *vf, u32 *sbdf)
-{
-	*sbdf = vf->devfn | (vf->bus << 8);
-}
-
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      enum channel_tlvs tlv)
 {
@@ -2416,7 +2396,7 @@
 
 	/* log the unlock */
 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
-	   vf->abs_vfid, vf->op_current);
+	   vf->abs_vfid, current_tlv);
 }
 
 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
@@ -2501,7 +2481,7 @@
 	bp->requested_nr_virtfn = num_vfs_param;
 	if (num_vfs_param == 0) {
 		bnx2x_set_pf_tx_switching(bp, false);
-		pci_disable_sriov(dev);
+		bnx2x_disable_sriov(bp);
 		return 0;
 	} else {
 		return bnx2x_enable_sriov(bp);
@@ -2614,6 +2594,12 @@
 
 void bnx2x_disable_sriov(struct bnx2x *bp)
 {
+	if (pci_vfs_assigned(bp->pdev)) {
+		DP(BNX2X_MSG_IOV,
+		   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+		return;
+	}
+
 	pci_disable_sriov(bp->pdev);
 }
 
@@ -2628,7 +2614,7 @@
 	}
 
 	if (!IS_SRIOV(bp)) {
-		BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n");
+		BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index ca1055f..01bafa4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -299,7 +299,8 @@
 #define BP_VFDB(bp)		((bp)->vfdb)
 	/* vf array */
 	struct bnx2x_virtf	*vfs;
-#define BP_VF(bp, idx)		(&((bp)->vfdb->vfs[idx]))
+#define BP_VF(bp, idx)		((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
+					&((bp)->vfdb->vfs[idx]) : NULL)
 #define bnx2x_vf(bp, idx, var)	((bp)->vfdb->vfs[idx].var)
 
 	/* queue array - for all vfs */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index ca47665..e1c8193 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -137,7 +137,7 @@
 			cpu_to_le16(bp->stats_counter++);
 
 		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
-			bp->fw_stats_req->hdr.drv_stats_counter);
+		   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
 
 		/* adjust the ramrod to include VF queues statistics */
 		bnx2x_iov_adjust_stats_req(bp);
@@ -200,7 +200,7 @@
 	}
 }
 
-static int bnx2x_stats_comp(struct bnx2x *bp)
+static void bnx2x_stats_comp(struct bnx2x *bp)
 {
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 	int cnt = 10;
@@ -214,7 +214,6 @@
 		cnt--;
 		usleep_range(1000, 2000);
 	}
-	return 1;
 }
 
 /*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 54e0427..b1d9c44 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -583,7 +583,6 @@
 	flags |= VFPF_QUEUE_FLG_STATS;
 	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
 	flags |= VFPF_QUEUE_FLG_VLAN;
-	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
 
 	/* Common */
 	req->vf_qid = fp_idx;
@@ -952,14 +951,6 @@
 	REG_WR8(bp, addr, 1);
 }
 
-static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
-{
-	int i;
-
-	for_each_vf(bp, i)
-		storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
-}
-
 /* enable vf_pf mailbox (aka vf-pf-channel) */
 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
 {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 27861a6..eef63a8 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -383,7 +383,7 @@
 			break;
 
 		rcu_read_lock();
-		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
+		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
 			rc = -ENODEV;
 			rcu_read_unlock();
 			break;
@@ -527,7 +527,7 @@
 	list_for_each_entry(dev, &cnic_dev_list, list) {
 		struct cnic_local *cp = dev->cnic_priv;
 
-		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 			pr_err("%s: Type %d still has devices registered\n",
 			       __func__, ulp_type);
 			read_unlock(&cnic_dev_lock);
@@ -575,7 +575,7 @@
 		mutex_unlock(&cnic_lock);
 		return -EAGAIN;
 	}
-	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
 		pr_err("%s: Type %d has already been registered to this device\n",
 		       __func__, ulp_type);
 		mutex_unlock(&cnic_lock);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91652e7..c8375f6 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -52,6 +52,10 @@
 #define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
 #define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
 #define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
+#define FIRMWARE_8168H_1	"rtl_nic/rtl8168h-1.fw"
+#define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
+#define FIRMWARE_8107E_1	"rtl_nic/rtl8107e-1.fw"
+#define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
 
 #ifdef RTL8169_DEBUG
 #define assert(expr) \
@@ -147,6 +151,10 @@
 	RTL_GIGA_MAC_VER_42,
 	RTL_GIGA_MAC_VER_43,
 	RTL_GIGA_MAC_VER_44,
+	RTL_GIGA_MAC_VER_45,
+	RTL_GIGA_MAC_VER_46,
+	RTL_GIGA_MAC_VER_47,
+	RTL_GIGA_MAC_VER_48,
 	RTL_GIGA_MAC_NONE   = 0xff,
 };
 
@@ -282,6 +290,18 @@
 	[RTL_GIGA_MAC_VER_44] =
 		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_2,
 							JUMBO_9K, false),
+	[RTL_GIGA_MAC_VER_45] =
+		_R("RTL8168h/8111h",	RTL_TD_1, FIRMWARE_8168H_1,
+							JUMBO_9K, false),
+	[RTL_GIGA_MAC_VER_46] =
+		_R("RTL8168h/8111h",	RTL_TD_1, FIRMWARE_8168H_2,
+							JUMBO_9K, false),
+	[RTL_GIGA_MAC_VER_47] =
+		_R("RTL8107e",		RTL_TD_1, FIRMWARE_8107E_1,
+							JUMBO_1K, false),
+	[RTL_GIGA_MAC_VER_48] =
+		_R("RTL8107e",		RTL_TD_1, FIRMWARE_8107E_2,
+							JUMBO_1K, false),
 };
 #undef _R
 
@@ -410,6 +430,7 @@
 #define	EPHYAR_DATA_MASK		0xffff
 	DLLPR			= 0xd0,
 #define	PFM_EN				(1 << 6)
+#define	TX_10M_PS_EN			(1 << 7)
 	DBG_REG			= 0xd1,
 #define	FIX_NAK_1			(1 << 4)
 #define	FIX_NAK_2			(1 << 3)
@@ -429,6 +450,8 @@
 #define	EFUSEAR_REG_MASK		0x03ff
 #define	EFUSEAR_REG_SHIFT		8
 #define	EFUSEAR_DATA_MASK		0xff
+	MISC_1			= 0xf2,
+#define	PFM_D3COLD_EN			(1 << 6)
 };
 
 enum rtl8168_registers {
@@ -447,6 +470,7 @@
 #define ERIAR_MASK_SHIFT		12
 #define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
 #define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
 #define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
 #define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
 	EPHY_RXER_NUM		= 0x7c,
@@ -598,6 +622,9 @@
 
 	/* DumpCounterCommand */
 	CounterDump	= 0x8,
+
+	/* magic enable v2 */
+	MagicPacket_v2	= (1 << 16),	/* Wake up when receives a Magic Packet */
 };
 
 enum rtl_desc_bit {
@@ -823,6 +850,8 @@
 MODULE_FIRMWARE(FIRMWARE_8106E_2);
 MODULE_FIRMWARE(FIRMWARE_8168G_2);
 MODULE_FIRMWARE(FIRMWARE_8168G_3);
+MODULE_FIRMWARE(FIRMWARE_8168H_1);
+MODULE_FIRMWARE(FIRMWARE_8168H_2);
 
 static void rtl_lock_work(struct rtl8169_private *tp)
 {
@@ -1514,8 +1543,17 @@
 	options = RTL_R8(Config3);
 	if (options & LinkUp)
 		wolopts |= WAKE_PHY;
-	if (options & MagicPacket)
-		wolopts |= WAKE_MAGIC;
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
+			wolopts |= WAKE_MAGIC;
+		break;
+	default:
+		if (options & MagicPacket)
+			wolopts |= WAKE_MAGIC;
+		break;
+	}
 
 	options = RTL_R8(Config5);
 	if (options & UWF)
@@ -1543,24 +1581,48 @@
 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned int i;
+	unsigned int i, tmp;
 	static const struct {
 		u32 opt;
 		u16 reg;
 		u8  mask;
 	} cfg[] = {
 		{ WAKE_PHY,   Config3, LinkUp },
-		{ WAKE_MAGIC, Config3, MagicPacket },
 		{ WAKE_UCAST, Config5, UWF },
 		{ WAKE_BCAST, Config5, BWF },
 		{ WAKE_MCAST, Config5, MWF },
-		{ WAKE_ANY,   Config5, LanWake }
+		{ WAKE_ANY,   Config5, LanWake },
+		{ WAKE_MAGIC, Config3, MagicPacket }
 	};
 	u8 options;
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
-	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+		tmp = ARRAY_SIZE(cfg) - 1;
+		if (wolopts & WAKE_MAGIC)
+			rtl_w1w0_eri(tp,
+				     0x0dc,
+				     ERIAR_MASK_0100,
+				     MagicPacket_v2,
+				     0x0000,
+				     ERIAR_EXGMAC);
+		else
+			rtl_w1w0_eri(tp,
+				     0x0dc,
+				     ERIAR_MASK_0100,
+				     0x0000,
+				     MagicPacket_v2,
+				     ERIAR_EXGMAC);
+		break;
+	default:
+		tmp = ARRAY_SIZE(cfg);
+		break;
+	}
+
+	for (i = 0; i < tmp; i++) {
 		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
 		if (wolopts & cfg[i].opt)
 			options |= cfg[i].mask;
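
Moving the WAKE_MAGIC entry to the last slot of cfg[] is what lets tmp decide whether the Config3 MagicPacket bit is touched at all:

	/* Entries walked by the loop above:
	 *
	 *	RTL_GIGA_MAC_VER_45/46: tmp = 5 -> cfg[0..4] only; magic-packet
	 *				wake is driven via ERI 0xdc (MagicPacket_v2)
	 *	other chips:		tmp = 6 -> cfg[0..5], including the
	 *				Config3 MagicPacket entry
	 */
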
@@ -2044,6 +2106,10 @@
 		u32 val;
 		int mac_version;
 	} mac_info[] = {
+		/* 8168H family. */
+		{ 0x7cf00000, 0x54100000,	RTL_GIGA_MAC_VER_46 },
+		{ 0x7cf00000, 0x54000000,	RTL_GIGA_MAC_VER_45 },
+
 		/* 8168G family. */
 		{ 0x7cf00000, 0x5c800000,	RTL_GIGA_MAC_VER_44 },
 		{ 0x7cf00000, 0x50900000,	RTL_GIGA_MAC_VER_42 },
@@ -2139,6 +2205,14 @@
 		tp->mac_version = tp->mii.supports_gmii ?
 				  RTL_GIGA_MAC_VER_42 :
 				  RTL_GIGA_MAC_VER_43;
+	} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
+		tp->mac_version = tp->mii.supports_gmii ?
+				  RTL_GIGA_MAC_VER_45 :
+				  RTL_GIGA_MAC_VER_47;
+	} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
+		tp->mac_version = tp->mii.supports_gmii ?
+				  RTL_GIGA_MAC_VER_46 :
+				  RTL_GIGA_MAC_VER_48;
 	}
 }
 
@@ -3464,6 +3538,189 @@
 	rtl_apply_firmware(tp);
 }
 
+static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
+{
+	u16 dout_tapbin;
+	u32 data;
+
+	rtl_apply_firmware(tp);
+
+	/* CHN EST parameters adjust - giga master */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x809b);
+	rtl_w1w0_phy(tp, 0x14, 0x8000, 0xf800);
+	rtl_writephy(tp, 0x13, 0x80a2);
+	rtl_w1w0_phy(tp, 0x14, 0x8000, 0xff00);
+	rtl_writephy(tp, 0x13, 0x80a4);
+	rtl_w1w0_phy(tp, 0x14, 0x8500, 0xff00);
+	rtl_writephy(tp, 0x13, 0x809c);
+	rtl_w1w0_phy(tp, 0x14, 0xbd00, 0xff00);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* CHN EST parameters adjust - giga slave */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x80ad);
+	rtl_w1w0_phy(tp, 0x14, 0x7000, 0xf800);
+	rtl_writephy(tp, 0x13, 0x80b4);
+	rtl_w1w0_phy(tp, 0x14, 0x5000, 0xff00);
+	rtl_writephy(tp, 0x13, 0x80ac);
+	rtl_w1w0_phy(tp, 0x14, 0x4000, 0xff00);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* CHN EST parameters adjust - fnet */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x808e);
+	rtl_w1w0_phy(tp, 0x14, 0x1200, 0xff00);
+	rtl_writephy(tp, 0x13, 0x8090);
+	rtl_w1w0_phy(tp, 0x14, 0xe500, 0xff00);
+	rtl_writephy(tp, 0x13, 0x8092);
+	rtl_w1w0_phy(tp, 0x14, 0x9f00, 0xff00);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* enable R-tune & PGA-retune function */
+	dout_tapbin = 0;
+	rtl_writephy(tp, 0x1f, 0x0a46);
+	data = rtl_readphy(tp, 0x13);
+	data &= 3;
+	data <<= 2;
+	dout_tapbin |= data;
+	data = rtl_readphy(tp, 0x12);
+	data &= 0xc000;
+	data >>= 14;
+	dout_tapbin |= data;
+	dout_tapbin = ~(dout_tapbin^0x08);
+	dout_tapbin <<= 12;
+	dout_tapbin &= 0xf000;
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x827a);
+	rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+	rtl_writephy(tp, 0x13, 0x827b);
+	rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+	rtl_writephy(tp, 0x13, 0x827c);
+	rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+	rtl_writephy(tp, 0x13, 0x827d);
+	rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x0811);
+	rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0a42);
+	rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* enable GPHY 10M */
+	rtl_writephy(tp, 0x1f, 0x0a44);
+	rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* SAR ADC performance */
+	rtl_writephy(tp, 0x1f, 0x0bca);
+	rtl_w1w0_phy(tp, 0x17, 0x4000, 0x3000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x803f);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+	rtl_writephy(tp, 0x13, 0x8047);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+	rtl_writephy(tp, 0x13, 0x804f);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+	rtl_writephy(tp, 0x13, 0x8057);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+	rtl_writephy(tp, 0x13, 0x805f);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+	rtl_writephy(tp, 0x13, 0x8067);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+	rtl_writephy(tp, 0x13, 0x806f);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* disable phy pfm mode */
+	rtl_writephy(tp, 0x1f, 0x0a44);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* Check ALDPS bit, disable it if enabled */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	if (rtl_readphy(tp, 0x10) & 0x0004)
+		rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
+	rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
+{
+	u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
+	u16 rlen;
+	u32 data;
+
+	rtl_apply_firmware(tp);
+
+	/* CHN EST parameter update */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x808a);
+	rtl_w1w0_phy(tp, 0x14, 0x000a, 0x003f);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* enable R-tune & PGA-retune function */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	rtl_writephy(tp, 0x13, 0x0811);
+	rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0a42);
+	rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* enable GPHY 10M */
+	rtl_writephy(tp, 0x1f, 0x0a44);
+	rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
+	data = r8168_mac_ocp_read(tp, 0xdd02);
+	ioffset_p3 = ((data & 0x80)>>7);
+	ioffset_p3 <<= 3;
+
+	data = r8168_mac_ocp_read(tp, 0xdd00);
+	ioffset_p3 |= ((data & (0xe000))>>13);
+	ioffset_p2 = ((data & (0x1e00))>>9);
+	ioffset_p1 = ((data & (0x01e0))>>5);
+	ioffset_p0 = ((data & 0x0010)>>4);
+	ioffset_p0 <<= 3;
+	ioffset_p0 |= (data & (0x07));
+	data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
+
+	if ((ioffset_p3 != 0x0F) || (ioffset_p2 != 0x0F) ||
+	    (ioffset_p1 != 0x0F) || (ioffset_p0 == 0x0F)) {
+		rtl_writephy(tp, 0x1f, 0x0bcf);
+		rtl_writephy(tp, 0x16, data);
+		rtl_writephy(tp, 0x1f, 0x0000);
+	}
+
+	/* Modify rlen (TX LPF corner frequency) level */
+	rtl_writephy(tp, 0x1f, 0x0bcd);
+	data = rtl_readphy(tp, 0x16);
+	data &= 0x000f;
+	rlen = 0;
+	if (data > 3)
+		rlen = data - 3;
+	data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
+	rtl_writephy(tp, 0x17, data);
+	rtl_writephy(tp, 0x1f, 0x0bcd);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* disable phy pfm mode */
+	rtl_writephy(tp, 0x1f, 0x0a44);
+	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* Check ALDPS bit, disable it if enabled */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	if (rtl_readphy(tp, 0x10) & 0x0004)
+		rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
+	rtl_writephy(tp, 0x1f, 0x0000);
+}
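
The ioffset block above stitches a 16-bit PHY value out of four 4-bit fields spread across MAC OCP registers 0xdd02 and 0xdd00; a worked example with hypothetical register reads:

	/* Assume r8168_mac_ocp_read() returns 0x0080 for 0xdd02 and 0xA3C5
	 * for 0xdd00 (hypothetical values):
	 *
	 *	ioffset_p3 = bit7(0xdd02) << 3 | bits[15:13](0xdd00) = 8 | 5 = 0xD
	 *	ioffset_p2 = bits[12:9](0xdd00)                       = 0x1
	 *	ioffset_p1 = bits[8:5](0xdd00)                        = 0xE
	 *	ioffset_p0 = bit4(0xdd00) << 3 | bits[2:0](0xdd00)    = 0x5
	 *
	 *	data = 0xD << 12 | 0x1 << 8 | 0xE << 4 | 0x5 = 0xD1E5
	 *
	 * Because ioffset_p3 != 0xF the condition holds and 0xD1E5 is written
	 * to PHY register 0x16 in page 0x0bcf.
	 */
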
+
 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
 {
 	static const struct phy_reg phy_reg_init[] = {
@@ -3654,6 +3911,14 @@
 	case RTL_GIGA_MAC_VER_44:
 		rtl8168g_2_hw_phy_config(tp);
 		break;
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_47:
+		rtl8168h_1_hw_phy_config(tp);
+		break;
+	case RTL_GIGA_MAC_VER_46:
+	case RTL_GIGA_MAC_VER_48:
+		rtl8168h_2_hw_phy_config(tp);
+		break;
 
 	case RTL_GIGA_MAC_VER_41:
 	default:
@@ -3865,6 +4130,10 @@
 	case RTL_GIGA_MAC_VER_42:
 	case RTL_GIGA_MAC_VER_43:
 	case RTL_GIGA_MAC_VER_44:
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
 		ops->write	= r8168g_mdio_write;
 		ops->read	= r8168g_mdio_read;
 		break;
@@ -3919,6 +4188,10 @@
 	case RTL_GIGA_MAC_VER_42:
 	case RTL_GIGA_MAC_VER_43:
 	case RTL_GIGA_MAC_VER_44:
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
 		RTL_W32(RxConfig, RTL_R32(RxConfig) |
 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
 		break;
@@ -3987,6 +4260,10 @@
 	case RTL_GIGA_MAC_VER_13:
 	case RTL_GIGA_MAC_VER_16:
 		break;
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
+		RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
+		break;
 	default:
 		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
 		break;
@@ -4087,6 +4364,8 @@
 	case RTL_GIGA_MAC_VER_31:
 	case RTL_GIGA_MAC_VER_32:
 	case RTL_GIGA_MAC_VER_33:
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
 		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
 		break;
 	case RTL_GIGA_MAC_VER_40:
@@ -4111,6 +4390,10 @@
 	case RTL_GIGA_MAC_VER_33:
 		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
 		break;
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+		RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
+		break;
 	case RTL_GIGA_MAC_VER_40:
 	case RTL_GIGA_MAC_VER_41:
 		rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
@@ -4153,6 +4436,8 @@
 	case RTL_GIGA_MAC_VER_37:
 	case RTL_GIGA_MAC_VER_39:
 	case RTL_GIGA_MAC_VER_43:
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
 		ops->down	= r810x_pll_power_down;
 		ops->up		= r810x_pll_power_up;
 		break;
@@ -4182,6 +4467,8 @@
 	case RTL_GIGA_MAC_VER_41:
 	case RTL_GIGA_MAC_VER_42:
 	case RTL_GIGA_MAC_VER_44:
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
 		ops->down	= r8168_pll_power_down;
 		ops->up		= r8168_pll_power_up;
 		break;
@@ -4232,6 +4519,10 @@
 	case RTL_GIGA_MAC_VER_42:
 	case RTL_GIGA_MAC_VER_43:
 	case RTL_GIGA_MAC_VER_44:
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
 		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
 		break;
 	default:
@@ -4393,6 +4684,10 @@
 	case RTL_GIGA_MAC_VER_42:
 	case RTL_GIGA_MAC_VER_43:
 	case RTL_GIGA_MAC_VER_44:
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
 	default:
 		ops->disable	= NULL;
 		ops->enable	= NULL;
@@ -4495,15 +4790,19 @@
 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
 		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_35 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_36 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_37 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_40 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_41 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_42 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_43 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_44 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_38) {
+		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_38 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_42 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_43 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_44 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_45 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_47 ||
+		   tp->mac_version == RTL_GIGA_MAC_VER_48) {
 		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
 		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
 	} else {
@@ -5330,6 +5629,105 @@
 	rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
 }
 
+static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+	u16 rg_saw_cnt;
+	u32 data;
+	static const struct ephy_info e_info_8168h_1[] = {
+		{ 0x1e, 0x0800,	0x0001 },
+		{ 0x1d, 0x0000,	0x0800 },
+		{ 0x05, 0xffff,	0x2089 },
+		{ 0x06, 0xffff,	0x5881 },
+		{ 0x04, 0xffff,	0x154a },
+		{ 0x01, 0xffff,	0x068b }
+	};
+
+	/* disable aspm and clock request before accessing ephy */
+	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
+	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+	rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
+
+	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+
+	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
+	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
+	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
+	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+
+	rtl_csi_access_enable_1(tp);
+
+	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+
+	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);
+
+	rtl_w1w0_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);
+
+	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
+
+	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+	RTL_W8(MaxTxPacketSize, EarlySize);
+
+	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+	/* Adjust EEE LED frequency */
+	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+
+	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
+
+	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+	rtl_pcie_state_l2l3_enable(tp, false);
+
+	rtl_writephy(tp, 0x1f, 0x0c42);
+	rg_saw_cnt = rtl_readphy(tp, 0x13);
+	rtl_writephy(tp, 0x1f, 0x0000);
+	if (rg_saw_cnt > 0) {
+		u16 sw_cnt_1ms_ini;
+
+		sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
+		sw_cnt_1ms_ini &= 0x0fff;
+		data = r8168_mac_ocp_read(tp, 0xd412);
+		data &= 0x0fff;
+		data |= sw_cnt_1ms_ini;
+		r8168_mac_ocp_write(tp, 0xd412, data);
+	}
+
+	data = r8168_mac_ocp_read(tp, 0xe056);
+	data &= 0xf0;
+	data |= 0x07;
+	r8168_mac_ocp_write(tp, 0xe056, data);
+
+	data = r8168_mac_ocp_read(tp, 0xe052);
+	data &= 0x8008;
+	data |= 0x6000;
+	r8168_mac_ocp_write(tp, 0xe052, data);
+
+	data = r8168_mac_ocp_read(tp, 0xe0d6);
+	data &= 0x01ff;
+	data |= 0x017f;
+	r8168_mac_ocp_write(tp, 0xe0d6, data);
+
+	data = r8168_mac_ocp_read(tp, 0xd420);
+	data &= 0x0fff;
+	data |= 0x047f;
+	r8168_mac_ocp_write(tp, 0xd420, data);
+
+	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
+	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
+	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
+	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+}
+
 static void rtl_hw_start_8168(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -5440,6 +5838,11 @@
 		rtl_hw_start_8411_2(tp);
 		break;
 
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+		rtl_hw_start_8168h_1(tp);
+		break;
+
 	default:
 		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
 			dev->name, tp->mac_version);
@@ -5655,6 +6058,10 @@
 	case RTL_GIGA_MAC_VER_43:
 		rtl_hw_start_8168g_2(tp);
 		break;
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
+		rtl_hw_start_8168h_1(tp);
+		break;
 	}
 
 	RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -5895,7 +6302,7 @@
 {
 	struct skb_shared_info *info = skb_shinfo(skb);
 	unsigned int cur_frag, entry;
-	struct TxDesc * uninitialized_var(txd);
+	struct TxDesc *uninitialized_var(txd);
 	struct device *d = &tp->pci_dev->dev;
 
 	entry = tp->cur_tx;
@@ -7110,6 +7517,10 @@
 	case RTL_GIGA_MAC_VER_42:
 	case RTL_GIGA_MAC_VER_43:
 	case RTL_GIGA_MAC_VER_44:
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+	case RTL_GIGA_MAC_VER_47:
+	case RTL_GIGA_MAC_VER_48:
 		rtl_hw_init_8168g(tp);
 		break;
 
@@ -7255,8 +7666,19 @@
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
 	RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
-	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
-		tp->features |= RTL_FEATURE_WOL;
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_45:
+	case RTL_GIGA_MAC_VER_46:
+		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
+			tp->features |= RTL_FEATURE_WOL;
+		if ((RTL_R8(Config3) & LinkUp) != 0)
+			tp->features |= RTL_FEATURE_WOL;
+		break;
+	default:
+		if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
+			tp->features |= RTL_FEATURE_WOL;
+		break;
+	}
 	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
 		tp->features |= RTL_FEATURE_WOL;
 	tp->features |= rtl_try_msi(tp, cfg);
@@ -7283,6 +7705,18 @@
 	u64_stats_init(&tp->tx_stats.syncp);
 
 	/* Get MAC address */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_45 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_47 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_48) {
+		u16 mac_addr[3];
+
+		*(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xE0, ERIAR_EXGMAC);
+		*(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xE4, ERIAR_EXGMAC);
+
+		if (is_valid_ether_addr((u8 *)mac_addr))
+			rtl_rar_set(tp, (u8 *)mac_addr);
+	}
 	for (i = 0; i < ETH_ALEN; i++)
 		dev->dev_addr[i] = RTL_R8(MAC0 + i);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index ec632e6..cd613d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -17,6 +17,7 @@
 
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
 #include <linux/regmap.h>
@@ -30,6 +31,12 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 
+#define EMAC_SPLITTER_CTRL_REG			0x0
+#define EMAC_SPLITTER_CTRL_SPEED_MASK		0x3
+#define EMAC_SPLITTER_CTRL_SPEED_10		0x2
+#define EMAC_SPLITTER_CTRL_SPEED_100		0x3
+#define EMAC_SPLITTER_CTRL_SPEED_1000		0x0
+
 struct socfpga_dwmac {
 	int	interface;
 	u32	reg_offset;
@@ -37,14 +44,46 @@
 	struct	device *dev;
 	struct regmap *sys_mgr_base_addr;
 	struct reset_control *stmmac_rst;
+	void __iomem *splitter_base;
 };
 
+static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+	struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+	void __iomem *splitter_base = dwmac->splitter_base;
+	u32 val;
+
+	if (!splitter_base)
+		return;
+
+	val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
+	val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
+
+	switch (speed) {
+	case 1000:
+		val |= EMAC_SPLITTER_CTRL_SPEED_1000;
+		break;
+	case 100:
+		val |= EMAC_SPLITTER_CTRL_SPEED_100;
+		break;
+	case 10:
+		val |= EMAC_SPLITTER_CTRL_SPEED_10;
+		break;
+	default:
+		return;
+	}
+
+	writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
+}
+
 static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
 {
 	struct device_node *np = dev->of_node;
 	struct regmap *sys_mgr_base_addr;
 	u32 reg_offset, reg_shift;
 	int ret;
+	struct device_node *np_splitter;
+	struct resource res_splitter;
 
 	dwmac->stmmac_rst = devm_reset_control_get(dev,
 						  STMMAC_RESOURCE_NAME);
@@ -73,6 +112,21 @@
 		return -EINVAL;
 	}
 
+	np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
+	if (np_splitter) {
+		if (of_address_to_resource(np_splitter, 0, &res_splitter)) {
+			dev_info(dev, "Missing emac splitter address\n");
+			return -EINVAL;
+		}
+
+		dwmac->splitter_base = devm_ioremap_resource(dev,
+			&res_splitter);
+		if (IS_ERR(dwmac->splitter_base)) {
+			dev_info(dev, "Failed to map emac splitter\n");
+			return PTR_ERR(dwmac->splitter_base);
+		}
+	}
+
 	dwmac->reg_offset = reg_offset;
 	dwmac->reg_shift = reg_shift;
 	dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
@@ -91,6 +145,7 @@
 
 	switch (phymode) {
 	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
 		val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
 		break;
 	case PHY_INTERFACE_MODE_MII:
@@ -102,6 +157,13 @@
 		return -EINVAL;
 	}
 
+	/* Overwrite val to GMII if the splitter core is enabled. The phymode
+	 * here is the actual PHY mode on the PHY hardware, but the PHY
+	 * interface seen by the EMAC core is GMII.
+	 */
+	if (dwmac->splitter_base)
+		val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+
 	regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
 	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
 	ctrl |= val << reg_shift;
@@ -196,4 +258,5 @@
 	.setup = socfpga_dwmac_probe,
 	.init = socfpga_dwmac_init,
 	.exit = socfpga_dwmac_exit,
+	.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
 };
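For context, the new callback only takes effect because the stmmac core forwards every negotiated PHY speed change to the platform glue. A hedged sketch of that call path, assuming the core passes plat->bsp_priv (the value returned by the .setup hook) as the opaque priv argument, as the stmmac adjust-link code of this era does:

/* Sketch, not part of this patch: how the core is expected to hand the
 * new speed to the glue layer, which for dwmac-socfpga lands in
 * socfpga_dwmac_fix_mac_speed() above and reprograms the FPGA splitter.
 */
static void sketch_notify_glue_of_speed(struct stmmac_priv *priv,
					unsigned int speed)
{
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
}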
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d5e07de..2f48f79 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -591,7 +591,7 @@
 
 #define NETVSC_RECEIVE_BUFFER_ID		0xcafe
 
-#define NETVSC_PACKET_SIZE                      2048
+#define NETVSC_PACKET_SIZE                      4096
 
 #define VRSS_SEND_TAB_SIZE 16
 
@@ -642,7 +642,7 @@
 	int ring_size;
 
 	/* The primary channel callback buffer */
-	unsigned char cb_buffer[NETVSC_PACKET_SIZE];
+	unsigned char *cb_buffer;
 	/* The sub channel callback buffer */
 	unsigned char *sub_cb_buf;
 };
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 66979cf..5b5644a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,12 @@
 	if (!net_device)
 		return NULL;
 
+	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
+	if (!net_device->cb_buffer) {
+		kfree(net_device);
+		return NULL;
+	}
+
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->start_remove = false;
 	net_device->destroy = false;
@@ -52,6 +58,12 @@
 	return net_device;
 }
 
+static void free_netvsc_device(struct netvsc_device *nvdev)
+{
+	kfree(nvdev->cb_buffer);
+	kfree(nvdev);
+}
+
 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
 	struct netvsc_device *net_device;
@@ -551,7 +563,7 @@
 	if (net_device->sub_cb_buf)
 		vfree(net_device->sub_cb_buf);
 
-	kfree(net_device);
+	free_netvsc_device(net_device);
 	return 0;
 }
 
@@ -1093,7 +1105,7 @@
 	vmbus_close(device->channel);
 
 cleanup:
-	kfree(net_device);
+	free_netvsc_device(net_device);
 
 	return ret;
 }
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index c301e4c..d5991ac 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -721,7 +721,7 @@
 }
 
 static int decode_evnt(struct dp83640_private *dp83640,
-		       void *data, u16 ests)
+		       void *data, int len, u16 ests)
 {
 	struct phy_txts *phy_txts;
 	struct ptp_clock_event event;
@@ -729,6 +729,16 @@
 	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
 	u16 ext_status = 0;
 
+	/* calculate length of the event timestamp status message */
+	if (ests & MULT_EVNT)
+		parsed = (words + 2) * sizeof(u16);
+	else
+		parsed = (words + 1) * sizeof(u16);
+
+	/* check if enough data is available */
+	if (len < parsed)
+		return len;
+
 	if (ests & MULT_EVNT) {
 		ext_status = *(u16 *) data;
 		data += sizeof(ext_status);
@@ -747,10 +757,7 @@
 		dp83640->edata.ns_lo = phy_txts->ns_lo;
 	}
 
-	if (ext_status) {
-		parsed = words + 2;
-	} else {
-		parsed = words + 1;
+	if (!ext_status) {
 		i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
 		ext_status = exts_chan_to_edata(i);
 	}
@@ -768,7 +775,7 @@
 		}
 	}
 
-	return parsed * sizeof(u16);
+	return parsed;
 }
 
 static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
@@ -905,9 +912,9 @@
 			decode_txts(dp83640, phy_txts);
 			size = sizeof(*phy_txts);
 
-		} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
+		} else if (PSF_EVNT == type) {
 
-			size = decode_evnt(dp83640, ptr, ests);
+			size = decode_evnt(dp83640, ptr, len, ests);
 
 		} else {
 			size = 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ca5ec3e..3fc91e8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -230,13 +230,13 @@
 	for (i = 1;
 	     i < num_ids && c45_ids->devices_in_package == 0;
 	     i++) {
-		reg_addr = MII_ADDR_C45 | i << 16 | 6;
+		reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2;
 		phy_reg = mdiobus_read(bus, addr, reg_addr);
 		if (phy_reg < 0)
 			return -EIO;
 		c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
 
-		reg_addr = MII_ADDR_C45 | i << 16 | 5;
+		reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1;
 		phy_reg = mdiobus_read(bus, addr, reg_addr);
 		if (phy_reg < 0)
 			return -EIO;
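The bare register numbers 5 and 6 that this hunk replaces are the clause-45 "devices in package" registers, so MDIO_DEVS1 and MDIO_DEVS2 from <linux/mdio.h> are simply self-documenting names for the same values. As background, a clause-45 bus address packs the MMD (device) number and the register number into one word, roughly as in this sketch (the helper name is made up for illustration):

#include <linux/mdio.h>
#include <linux/phy.h>

/* Illustrative only: MII_ADDR_C45 flags the access as clause 45, the MMD
 * number occupies bits 20:16 and the 16-bit register number the low bits.
 */
static u32 sketch_c45_addr(int devad, int regnum)
{
	return MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
}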
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 38377392..7e2b0b8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1982,6 +1982,7 @@
 #define NETDEV_CHANGEUPPER	0x0015
 #define NETDEV_RESEND_IGMP	0x0016
 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
+#define NETDEV_CHANGEINFODATA	0x0018
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fa5258f..e567f0d 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -276,7 +276,7 @@
 	u32	retrans_stamp;	/* Timestamp of the last retransmit,
 				 * also used in SYN-SENT to remember stamp of
 				 * the first SYN. */
-	u32	undo_marker;	/* tracking retrans started here. */
+	u32	undo_marker;	/* snd_una upon a new recovery episode. */
 	int	undo_retrans;	/* number of undoable retransmissions. */
 	u32	total_retrans;	/* Total retransmits for entire connection */
 
diff --git a/include/net/codel.h b/include/net/codel.h
index fe0eab3..aeee280 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -66,7 +66,7 @@
 
 static inline codel_time_t codel_get_time(void)
 {
-	u64 ns = ktime_to_ns(ktime_get());
+	u64 ns = ktime_get_ns();
 
 	return ns >> CODEL_SHIFT;
 }
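This hunk and the ones that follow in pkt_sched.h, secure_seq.c, the conntrack files, act_police and the qdiscs are behaviour-preserving: ktime_get_ns() and ktime_get_real_ns() read the same monotonic and wall clocks as before but return nanoseconds directly, skipping the intermediate ktime_t. A small sketch of the equivalence, assuming the accessors declared via <linux/ktime.h> / <linux/timekeeping.h>:

#include <linux/ktime.h>

/* Both reads sample the monotonic clock; the second form is just the
 * direct-to-nanoseconds accessor, so the delta is tiny and never negative.
 */
static u64 sketch_ktime_accessor_delta(void)
{
	u64 before = ktime_to_ns(ktime_get());	/* old style, via ktime_t */
	u64 after = ktime_get_ns();		/* new style, ns directly */

	return after - before;
}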
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ec030cd..8bbe626 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -50,7 +50,7 @@
 
 static inline psched_time_t psched_get_time(void)
 {
-	return PSCHED_NS2TICKS(ktime_to_ns(ktime_get()));
+	return PSCHED_NS2TICKS(ktime_get_ns());
 }
 
 static inline psched_tdiff_t
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7751c92..648d79c 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1822,7 +1822,7 @@
 	if (query->startup_sent < br->multicast_startup_query_count)
 		query->startup_sent++;
 
-	rcu_assign_pointer(querier, NULL);
+	RCU_INIT_POINTER(querier, NULL);
 	br_multicast_send_query(br, NULL, query);
 	spin_unlock(&br->multicast_lock);
 }
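Replacing rcu_assign_pointer() with RCU_INIT_POINTER() is safe here because the stored value is NULL: there is no newly initialised structure to publish, so the write barrier implied by rcu_assign_pointer() buys nothing. The igmp and openvswitch hunks further down make the matching read-side change with rcu_access_pointer(), which only examines the pointer value and never dereferences it. A hedged illustration with a made-up structure (not the bridge's actual fields):

#include <linux/rcupdate.h>

struct foo;

struct example {
	struct foo __rcu *ptr;
};

static void example_publish(struct example *e, struct foo *new_foo)
{
	if (!new_foo)
		RCU_INIT_POINTER(e->ptr, NULL);		/* NULL: plain store is enough */
	else
		rcu_assign_pointer(e->ptr, new_foo);	/* real object: needs the barrier */
}

static bool example_is_set(struct example *e)
{
	/* Only the pointer value is tested and nothing is dereferenced, so
	 * rcu_access_pointer() is sufficient even outside rcu_read_lock().
	 */
	return rcu_access_pointer(e->ptr) != NULL;
}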
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index ba71212..51dd319 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -35,7 +35,7 @@
 	 *	overlaps less than one time per MSL (2 minutes).
 	 *	Choosing a clock of 64 ns period is OK. (period of 274 s)
 	 */
-	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
+	return seq + (ktime_get_real_ns() >> 6);
 }
 #endif
 
@@ -135,7 +135,7 @@
 	md5_transform(hash, net_secret);
 
 	seq = hash[0] | (((u64)hash[1]) << 32);
-	seq += ktime_to_ns(ktime_get_real());
+	seq += ktime_get_real_ns();
 	seq &= (1ull << 48) - 1;
 
 	return seq;
@@ -163,7 +163,7 @@
 	md5_transform(hash, secret);
 
 	seq = hash[0] | (((u64)hash[1]) << 32);
-	seq += ktime_to_ns(ktime_get_real());
+	seq += ktime_get_real_ns();
 	seq &= (1ull << 48) - 1;
 
 	return seq;
diff --git a/net/core/sock.c b/net/core/sock.c
index 2714811..f7f2352 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -437,7 +437,6 @@
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
-	int skb_len;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
@@ -459,13 +458,6 @@
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
 
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue.  Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
-	skb_len = skb->len;
-
 	/* we escape from rcu protected region, make sure we dont leak
 	 * a norefcounted dst
 	 */
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index ae011b4..25733d5 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -127,6 +127,7 @@
 #include <linux/stat.h>
 #include <linux/init.h>
 #include <linux/poll.h>
+#include <linux/jiffies.h>
 #include <net/net_namespace.h>
 #include <net/neighbour.h>
 #include <net/dst.h>
@@ -598,7 +599,7 @@
 	if (sk->sk_socket)
 		return 0;
 
-	if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
+	if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) {
 		dn_unhash_sock(sk);
 		sock_put(sk);
 		return 1;
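The decnet, dn_dev, dn_timer and ipconfig hunks convert open-coded jiffies arithmetic to the comparison helpers from <linux/jiffies.h>, which make the wrap-around handling explicit and add type checking instead of leaving the reader to verify the unsigned subtraction by hand. Simplified sketches of the two macros used here (the real definitions additionally typecheck() both arguments as unsigned long):

/* True when a is at or after b, wrap-safe because the difference is
 * interpreted as signed.
 */
#define sketch_time_after_eq(a, b)	((long)((a) - (b)) >= 0)

/* True when a is strictly before b. */
#define sketch_time_before(a, b)	((long)((a) - (b)) < 0)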
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 3b726f3..4400da7 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -41,6 +41,7 @@
 #include <linux/sysctl.h>
 #include <linux/notifier.h>
 #include <linux/slab.h>
+#include <linux/jiffies.h>
 #include <asm/uaccess.h>
 #include <net/net_namespace.h>
 #include <net/neighbour.h>
@@ -875,7 +876,7 @@
 static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
 {
 	/* First check time since device went up */
-	if ((jiffies - dn_db->uptime) < DRDELAY)
+	if (time_before(jiffies, dn_db->uptime + DRDELAY))
 		return 0;
 
 	/* If there is no router, then yes... */
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index d9c150c..1d330fd 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -23,6 +23,7 @@
 #include <linux/spinlock.h>
 #include <net/sock.h>
 #include <linux/atomic.h>
+#include <linux/jiffies.h>
 #include <net/flow.h>
 #include <net/dn.h>
 
@@ -91,7 +92,7 @@
 	 * since the last successful transmission.
 	 */
 	if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
-		if ((jiffies - scp->stamp) >= scp->keepalive)
+		if (time_after_eq(jiffies, scp->stamp + scp->keepalive))
 			scp->keepalive_fxn(sk);
 	}
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 255aa99..23104a3 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -243,7 +243,7 @@
 				 u8 tos, int oif, struct net_device *dev,
 				 int rpf, struct in_device *idev, u32 *itag)
 {
-	int ret, no_addr, accept_local;
+	int ret, no_addr;
 	struct fib_result res;
 	struct flowi4 fl4;
 	struct net *net;
@@ -258,16 +258,17 @@
 
 	no_addr = idev->ifa_list == NULL;
 
-	accept_local = IN_DEV_ACCEPT_LOCAL(idev);
 	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
 
 	net = dev_net(dev);
 	if (fib_lookup(net, &fl4, &res))
 		goto last_resort;
-	if (res.type != RTN_UNICAST) {
-		if (res.type != RTN_LOCAL || !accept_local)
-			goto e_inval;
-	}
+	if (res.type != RTN_UNICAST &&
+	    (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
+		goto e_inval;
+	if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
+	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
+		goto last_resort;
 	fib_combine_itag(itag, &res);
 	dev_match = false;
 
@@ -321,6 +322,7 @@
 	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
 
 	if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
+	    IN_DEV_ACCEPT_LOCAL(idev) &&
 	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
 		*itag = 0;
 		return 0;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index f10eab4..890c425 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2539,7 +2539,7 @@
 		querier = "NONE";
 #endif
 
-		if (rcu_dereference(state->in_dev->mc_list) == im) {
+		if (rcu_access_pointer(state->in_dev->mc_list) == im) {
 			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
 				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
 		}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 5bbef4f..648fa14 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -262,7 +262,8 @@
 	/* wait for a carrier on at least one device */
 	start = jiffies;
 	next_msg = start + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12);
-	while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
+	while (time_before(jiffies, start +
+			   msecs_to_jiffies(CONF_CARRIER_TIMEOUT))) {
 		int wait, elapsed;
 
 		for_each_netdev(&init_net, dev)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a906e02..aba4926 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1888,23 +1888,23 @@
 	tp->sacked_out = 0;
 }
 
-static void tcp_clear_retrans_partial(struct tcp_sock *tp)
+void tcp_clear_retrans(struct tcp_sock *tp)
 {
 	tp->retrans_out = 0;
 	tp->lost_out = 0;
-
 	tp->undo_marker = 0;
 	tp->undo_retrans = -1;
-}
-
-void tcp_clear_retrans(struct tcp_sock *tp)
-{
-	tcp_clear_retrans_partial(tp);
-
 	tp->fackets_out = 0;
 	tp->sacked_out = 0;
 }
 
+static inline void tcp_init_undo(struct tcp_sock *tp)
+{
+	tp->undo_marker = tp->snd_una;
+	/* Retransmission still in flight may cause DSACKs later. */
+	tp->undo_retrans = tp->retrans_out ? : -1;
+}
+
 /* Enter Loss state. If we detect SACK reneging, forget all SACK information
  * and reset tags completely, otherwise preserve SACKs. If receiver
  * dropped its ofo queue, we will know this due to reneging detection.
@@ -1925,18 +1925,18 @@
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tcp_ca_event(sk, CA_EVENT_LOSS);
+		tcp_init_undo(tp);
 	}
 	tp->snd_cwnd	   = 1;
 	tp->snd_cwnd_cnt   = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 
-	tcp_clear_retrans_partial(tp);
+	tp->retrans_out = 0;
+	tp->lost_out = 0;
 
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
-	tp->undo_marker = tp->snd_una;
-
 	skb = tcp_write_queue_head(sk);
 	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
 	if (is_reneg) {
@@ -1950,9 +1950,6 @@
 		if (skb == tcp_send_head(sk))
 			break;
 
-		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
-			tp->undo_marker = 0;
-
 		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
 		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) {
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
@@ -2671,8 +2668,7 @@
 	NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 	tp->prior_ssthresh = 0;
-	tp->undo_marker = tp->snd_una;
-	tp->undo_retrans = tp->retrans_out ? : -1;
+	tcp_init_undo(tp);
 
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		if (!ece_ack)
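tcp_init_undo() centralises bookkeeping that the loss and recovery entry paths previously duplicated: undo_marker records snd_una at the start of the episode (matching the updated comment in linux/tcp.h above), and undo_retrans counts retransmissions that later DSACKs can prove spurious. A simplified sketch of how these fields are consumed when deciding whether the congestion response can be rolled back, approximating (not copying) the in-tree tcp_may_undo() logic:

/* Sketch only: recovery may be undone once an undo point exists and every
 * retransmission recorded at tcp_init_undo() time has been reported as
 * spurious by DSACKs (the real check also accepts timestamp evidence that
 * the original transmission, not the retransmission, was received).
 */
static bool sketch_recovery_may_be_undone(const struct tcp_sock *tp)
{
	return tp->undo_marker && !tp->undo_retrans;
}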
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index de88c4a..0b634e7 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -358,7 +358,7 @@
 
 	tstamp = nf_conn_tstamp_find(ct);
 	if (tstamp && tstamp->stop == 0)
-		tstamp->stop = ktime_to_ns(ktime_get_real());
+		tstamp->stop = ktime_get_real_ns();
 
 	if (nf_ct_is_dying(ct))
 		goto delete;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 355a5c4..1bd9ed9 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1737,7 +1737,7 @@
 	}
 	tstamp = nf_conn_tstamp_find(ct);
 	if (tstamp)
-		tstamp->start = ktime_to_ns(ktime_get_real());
+		tstamp->start = ktime_get_real_ns();
 
 	err = nf_conntrack_hash_check_insert(ct);
 	if (err < 0)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f641751..cf65a1e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -101,7 +101,7 @@
 {
 	struct ct_iter_state *st = seq->private;
 
-	st->time_now = ktime_to_ns(ktime_get_real());
+	st->time_now = ktime_get_real_ns();
 	rcu_read_lock();
 	return ct_get_idx(seq, *pos);
 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index d07ab538..7064da9 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -89,7 +89,7 @@
 			 * allocated stats as we have already locked them.
 			 */
 			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
-			    && likely(!rcu_dereference(flow->stats[node]))) {
+			    && likely(!rcu_access_pointer(flow->stats[node]))) {
 				/* Try to allocate node-specific stats. */
 				struct flow_stats *new_stats;
 
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 63b21e5..481f89f 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -45,7 +45,7 @@
 	struct rxrpc_skb_priv *sp;
 	struct rxrpc_sock *rx = call->socket;
 	struct sock *sk;
-	int skb_len, ret;
+	int ret;
 
 	_enter(",,%d,%d", force, terminal);
 
@@ -101,13 +101,6 @@
 			rx->interceptor(sk, call->user_call_ID, skb);
 			spin_unlock_bh(&sk->sk_receive_queue.lock);
 		} else {
-
-			/* Cache the SKB length before we tack it onto the
-			 * receive queue.  Once it is added it no longer
-			 * belongs to us and may be freed by other threads of
-			 * control pulling packets from the queue */
-			skb_len = skb->len;
-
 			_net("post skb %p", skb);
 			__skb_queue_tail(&sk->sk_receive_queue, skb);
 			spin_unlock_bh(&sk->sk_receive_queue.lock);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 0566e46..f32bcb0 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -231,7 +231,7 @@
 	if (ret != ACT_P_CREATED)
 		return ret;
 
-	police->tcfp_t_c = ktime_to_ns(ktime_get());
+	police->tcfp_t_c = ktime_get_ns();
 	police->tcf_index = parm->index ? parm->index :
 		tcf_hash_new_index(hinfo);
 	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
@@ -279,7 +279,7 @@
 			return police->tcfp_result;
 		}
 
-		now = ktime_to_ns(ktime_get());
+		now = ktime_get_ns();
 		toks = min_t(s64, now - police->tcfp_t_c,
 			     police->tcfp_burst);
 		if (police->peak_present) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index ba32c2b..e12f997 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -416,7 +416,7 @@
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = ktime_get_ns();
 	struct fq_flow_head *head;
 	struct sk_buff *skb;
 	struct fq_flow *f;
@@ -787,7 +787,7 @@
 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = ktime_get_ns();
 	struct tc_fq_qd_stats st = {
 		.gc_flows		= q->stat_gc_flows,
 		.highprio_packets	= q->stat_internal_packets,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9f949ab..aea942c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -895,7 +895,7 @@
 
 	if (!sch->q.qlen)
 		goto fin;
-	q->now = ktime_to_ns(ktime_get());
+	q->now = ktime_get_ns();
 	start_at = jiffies;
 
 	next_event = q->now + 5LLU * NSEC_PER_SEC;
@@ -1225,7 +1225,7 @@
 	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
 	parent->tokens = parent->buffer;
 	parent->ctokens = parent->cbuffer;
-	parent->t_c = ktime_to_ns(ktime_get());
+	parent->t_c = ktime_get_ns();
 	parent->cmode = HTB_CAN_SEND;
 }
 
@@ -1455,7 +1455,7 @@
 		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
 		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
 		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
-		cl->t_c = ktime_to_ns(ktime_get());
+		cl->t_c = ktime_get_ns();
 		cl->cmode = HTB_CAN_SEND;
 
 		/* attach to the hash list and parent's family */
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 18ff634..0c39b75 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -239,7 +239,7 @@
 		s64 ptoks = 0;
 		unsigned int len = qdisc_pkt_len(skb);
 
-		now = ktime_to_ns(ktime_get());
+		now = ktime_get_ns();
 		toks = min_t(s64, now - q->t_c, q->buffer);
 
 		if (tbf_peak_present(q)) {
@@ -292,7 +292,7 @@
 
 	qdisc_reset(q->qdisc);
 	sch->q.qlen = 0;
-	q->t_c = ktime_to_ns(ktime_get());
+	q->t_c = ktime_get_ns();
 	q->tokens = q->buffer;
 	q->ptokens = q->mtu;
 	qdisc_watchdog_cancel(&q->watchdog);
@@ -431,7 +431,7 @@
 	if (opt == NULL)
 		return -EINVAL;
 
-	q->t_c = ktime_to_ns(ktime_get());
+	q->t_c = ktime_get_ns();
 	qdisc_watchdog_init(&q->watchdog, sch);
 	q->qdisc = &noop_qdisc;